lemma filterlim_at_top_at_left: fixes f :: "'a::linorder_topology \<Rightarrow> 'b::linorder" assumes mono: "\<And>x y. Q x \<Longrightarrow> Q y \<Longrightarrow> x \<le> y \<Longrightarrow> f x \<le> f y" and bij: "\<And>x. P x \<Longrightarrow> f (g x) = x" "\<And>x. P x \<Longrightarrow> Q (g x)" and Q: "eventually Q (at_left a)" and bound: "\<And>b. Q b \<Longrightarrow> b < a" and P: "eventually P at_top" shows "filterlim f at_top (at_left a)" |
[STATEMENT]
lemma has_mode\<^sub>A: "var\<^sub>C_of x \<in> mds\<^sub>C m \<Longrightarrow> x \<in> mds\<^sub>A_of mds\<^sub>C m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. var\<^sub>C_of x \<in> mds\<^sub>C m \<Longrightarrow> x \<in> mds\<^sub>A_of mds\<^sub>C m
[PROOF STEP]
using doesnt_have_mode\<^sub>C
[PROOF STATE]
proof (prove)
using this:
?x \<notin> mds\<^sub>A_of ?mds\<^sub>C ?m \<Longrightarrow> var\<^sub>C_of ?x \<notin> ?mds\<^sub>C ?m
goal (1 subgoal):
1. var\<^sub>C_of x \<in> mds\<^sub>C m \<Longrightarrow> x \<in> mds\<^sub>A_of mds\<^sub>C m
[PROOF STEP]
by fastforce |
proposition homotopic_loops_eq: "\<lbrakk>path p; path_image p \<subseteq> s; pathfinish p = pathstart p; \<And>t. t \<in> {0..1} \<Longrightarrow> p(t) = q(t)\<rbrakk> \<Longrightarrow> homotopic_loops s p q" |
{-
This second-order signature was created from the following second-order syntax description:
syntax STLC | Λ
type
N : 0-ary
_↣_ : 2-ary | r30
term
app : α ↣ β α -> β | _$_ l20
lam : α.β -> α ↣ β | ƛ_ r10
theory
(ƛβ) b : α.β a : α |> app (lam(x.b[x]), a) = b[a]
(ƛη) f : α ↣ β |> lam (x. app(f, x)) = f
-}
module STLC.Signature where
open import SOAS.Context
-- Type declaration
data ΛT : Set where
N : ΛT
_↣_ : ΛT → ΛT → ΛT
infixr 30 _↣_
open import SOAS.Syntax.Signature ΛT public
open import SOAS.Syntax.Build ΛT public
-- Operator symbols
data Λₒ : Set where
appₒ lamₒ : {α β : ΛT} → Λₒ
-- Term signature
Λ:Sig : Signature Λₒ
Λ:Sig = sig λ
{ (appₒ {α}{β}) → (⊢₀ α ↣ β) , (⊢₀ α) ⟼₂ β
; (lamₒ {α}{β}) → (α ⊢₁ β) ⟼₁ α ↣ β
}
open Signature Λ:Sig public
|
module nodcap.NF.Show where
open import Data.String using (String)
open import nodcap.Base
open import nodcap.NF.Typing
import nodcap.Show as S
showTerm : {Γ : Environment} → ⊢ⁿᶠ Γ → String
showTerm {Γ} x = S.showTerm (fromNF x)
|
-- In this file we consider the special case of localising at a single
-- element f : R (or rather the set of powers of f). This is also
-- known as inverting f.
{-# OPTIONS --cubical --no-import-sorts --safe --experimental-lossy-unification #-}
module Cubical.Algebra.CommRing.Localisation.InvertingElements where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Powerset
open import Cubical.Foundations.Transport
open import Cubical.Functions.FunExtEquiv
import Cubical.Data.Empty as ⊥
open import Cubical.Data.Bool
open import Cubical.Data.Nat renaming ( _+_ to _+ℕ_ ; _·_ to _·ℕ_
; +-comm to +ℕ-comm ; +-assoc to +ℕ-assoc
; ·-assoc to ·ℕ-assoc ; ·-comm to ·ℕ-comm)
open import Cubical.Data.Vec
open import Cubical.Data.Sigma.Base
open import Cubical.Data.Sigma.Properties
open import Cubical.Data.FinData
open import Cubical.Relation.Nullary
open import Cubical.Relation.Binary
open import Cubical.Algebra.Group
open import Cubical.Algebra.AbGroup
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.Ring
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.CommRing.Localisation.Base
open import Cubical.Algebra.CommRing.Localisation.UniversalProperty
open import Cubical.HITs.SetQuotients as SQ
open import Cubical.HITs.PropositionalTruncation as PT
open Iso
private
variable
ℓ ℓ' : Level
A : Type ℓ
module _(R' : CommRing {ℓ}) where
open isMultClosedSubset
private R = R' .fst
-- open CommRingStr ⦃...⦄
open CommRingStr (R' .snd)
open Exponentiation R'
[_ⁿ|n≥0] : R → ℙ R
[ f ⁿ|n≥0] g = (∃[ n ∈ ℕ ] g ≡ f ^ n) , propTruncIsProp
-- Σ[ n ∈ ℕ ] (s ≡ f ^ n) × (∀ m → s ≡ f ^ m → n ≤ m) might be better, since it isProp:
-- given (n , s≡fⁿ , p) and (m , s≡fᵐ , q), then n≤m by p and m≤n by q => n≡m
powersFormMultClosedSubset : (f : R) → isMultClosedSubset R' [ f ⁿ|n≥0]
powersFormMultClosedSubset f .containsOne = ∣ zero , refl ∣
powersFormMultClosedSubset f .multClosed =
PT.map2 λ (m , p) (n , q) → (m +ℕ n) , (λ i → (p i) · (q i)) ∙ ·-of-^-is-^-of-+ f m n
R[1/_] : R → Type ℓ
R[1/ f ] = Loc.S⁻¹R R' [ f ⁿ|n≥0] (powersFormMultClosedSubset f)
R[1/_]AsCommRing : R → CommRing {ℓ}
R[1/ f ]AsCommRing = Loc.S⁻¹RAsCommRing R' [ f ⁿ|n≥0] (powersFormMultClosedSubset f)
-- A useful lemma: (gⁿ/1)≡(g/1)ⁿ in R[1/f]
^-respects-/1 : {f g : R} (n : ℕ) → [ (g ^ n) , 1r , ∣ 0 , (λ _ → 1r) ∣ ] ≡
Exponentiation._^_ R[1/ f ]AsCommRing [ g , 1r , powersFormMultClosedSubset _ .containsOne ] n
^-respects-/1 zero = refl
^-respects-/1 {f} {g} (suc n) = eq/ _ _ ( (1r , powersFormMultClosedSubset f .containsOne)
, cong (1r · (g · (g ^ n)) ·_) (·-lid 1r))
∙ cong (CommRingStr._·_ (R[1/ f ]AsCommRing .snd)
[ g , 1r , powersFormMultClosedSubset f .containsOne ]) (^-respects-/1 n)
-- A slight improvement for eliminating into propositions
InvElPropElim : {f : R} {P : R[1/ f ] → Type ℓ'}
→ (∀ x → isProp (P x))
→ (∀ (r : R) (n : ℕ) → P [ r , (f ^ n) , ∣ n , refl ∣ ])
----------------------------------------------------------
→ (∀ x → P x)
InvElPropElim {f = f} {P = P} PisProp base = elimProp (λ _ → PisProp _) []-case
where
S[f] = Loc.S R' [ f ⁿ|n≥0] (powersFormMultClosedSubset f)
[]-case : (a : R × S[f]) → P [ a ]
[]-case (r , s , s∈S[f]) = PT.rec (PisProp _) Σhelper s∈S[f]
where
Σhelper : Σ[ n ∈ ℕ ] s ≡ f ^ n → P [ r , s , s∈S[f] ]
Σhelper (n , p) = subst P (cong [_] (≡-× refl (Σ≡Prop (λ _ → propTruncIsProp) (sym p)))) (base r n)
|
open import Level using () renaming (zero to ℓ₀)
open import Relation.Binary using (DecSetoid)
module BFFPlug (A : DecSetoid ℓ₀ ℓ₀) where
open import Data.Nat using (ℕ ; _≟_ ; _+_ ; zero ; suc ; ⌈_/2⌉)
open import Data.Maybe using (Maybe ; just ; nothing)
open import Data.Vec using (Vec)
open import Data.Product using (∃ ; _,_)
open import Relation.Binary using (module DecSetoid)
open import Relation.Binary.PropositionalEquality using (refl ; cong ; subst ; sym ; module ≡-Reasoning) renaming (setoid to PropEq)
open import Relation.Nullary using (yes ; no)
open import Function using (flip ; id ; _∘_)
open import Function.LeftInverse using (_RightInverseOf_)
import Category.Monad
open Category.Monad.RawMonad {ℓ₀} Data.Maybe.monad using (_>>=_)
open import Generic using (sequenceV ; ≡-to-Π)
import BFF
import GetTypes
import Examples
open DecSetoid A using (Carrier)
open GetTypes.PartialVecVec public using (Get)
open BFF.PartialVecBFF A public using (sbff ; bff)
bffsameshape : (G : Get) → {i : Get.I G} → Vec Carrier (Get.gl₁ G i) → Vec Carrier (Get.gl₂ G i) → Maybe (Vec Carrier (Get.gl₁ G i))
bffsameshape G {i} = sbff G i
bffplug : (G : Get) → (Get.I G → ℕ → Maybe (Get.I G)) → {i : Get.I G} → {m : ℕ} → Vec Carrier (Get.gl₁ G i) → Vec Carrier m → Maybe (∃ λ j → Vec (Maybe Carrier) (Get.gl₁ G j))
bffplug G sput {i} {m} s v with sput i m
... | nothing = nothing
... | just j with Get.gl₂ G j ≟ m
... | no gl₂j≢m = nothing
bffplug G sput {i} s v | just j | yes refl with bff G j s v
... | nothing = nothing
... | just s′ = just (j , s′)
_SimpleRightInvOf_ : {A B : Set} → (A → B) → (B → A) → Set
f SimpleRightInvOf g = ≡-to-Π f RightInverseOf ≡-to-Π g
bffinv : (G : Get) → (nelteg : ℕ → Get.I G) → nelteg SimpleRightInvOf Get.gl₂ G → {i : Get.I G} → {m : ℕ} → Vec Carrier (Get.gl₁ G i) → Vec Carrier m → Maybe (Vec (Maybe Carrier) (Get.gl₁ G (nelteg m)))
bffinv G nelteg inv {m = m} s v = bff G (nelteg m) s (subst (Vec Carrier) (sym (inv m)) v)
module InvExamples where
open Examples using (reverse' ; drop' ; sieve' ; tail' ; take')
reverse-put : {n m : ℕ} → Vec Carrier n → Vec Carrier m → Maybe (Vec Carrier m)
reverse-put s v = bffinv reverse' id (λ _ → refl) s v >>= sequenceV
drop-put : (k : ℕ) → {n m : ℕ} → Vec Carrier (k + n) → Vec Carrier m → Maybe (Vec (Maybe Carrier) (k + m))
drop-put k = bffinv (drop' k) id (λ _ → refl)
double : ℕ → ℕ
double zero = zero
double (suc n) = suc (suc (double n))
sieve-inv-len : double SimpleRightInvOf ⌈_/2⌉
sieve-inv-len zero = refl
sieve-inv-len (suc zero) = refl
sieve-inv-len (suc (suc x)) = cong (suc ∘ suc) (sieve-inv-len x)
sieve-put : {n m : ℕ} → Vec Carrier n → Vec Carrier m → Maybe (Vec (Maybe Carrier) (double m))
sieve-put = bffinv sieve' double sieve-inv-len
tail-put : {n m : ℕ} → Vec Carrier (suc n) → Vec Carrier m → Maybe (Vec (Maybe Carrier) (suc m))
tail-put = bffinv tail' id (λ _ → refl)
take-put : (k : ℕ) → {n : ℕ} → Vec Carrier (k + n) → Vec Carrier k → Maybe (Vec Carrier (k + n))
take-put k = bffsameshape (take' k)
|
module Flexidisc.THList
%default total
%access export
||| `THList` stands for "Tagless Heterogeneous List".
||| It's a record demoted to a heterogeneous list,
||| where the labels are only kept at the type level
public export
data THList : (m : Type -> Type) -> (k : Type) -> (List (k, Type)) -> Type where
Nil : THList m k []
(::) : m a -> THList m k xs -> THList m k ((l, a)::xs)
implementation Eq (THList m k []) where
(==) x y = True
(/=) x y = False
implementation (Eq (m t), Eq (THList m k ts)) => Eq (THList m k ((l,t)::ts)) where
(==) (x :: xs) (y :: ys) = x == y && xs == ys
(/=) (x :: xs) (y :: ys) = x /= y || xs /= ys
interface Shows t where
shows : t -> List String
implementation Shows (THList m k []) where
shows _ = []
implementation (Show k, Show (m t), Shows (THList m k ts)) =>
Shows (THList m k ((l,t) :: ts)) where
shows (x::xs) {l} = unwords [show l, ":=", show x] :: shows xs
implementation Shows (THList m k xs) => Show (THList m k xs) where
show xs = unwords ["[", concat (intersperse ", " (shows xs)), "]"]
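-- A minimal usage sketch (illustrative; not part of the original module):
-- a two-field THList wrapping its values in `Maybe`, with `String` labels.
exampleTHList : THList Maybe String [("age", Nat), ("name", String)]
exampleTHList = [Just 42, Just "Ada"]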
|
State Before: l : Type u_2
m : Type u_4
n : Type u_1
o : Type u_5
p : Type ?u.12383
q : Type ?u.12386
m' : o → Type ?u.12391
n' : o → Type ?u.12396
p' : o → Type ?u.12401
R : Type ?u.12404
S : Type ?u.12407
α : Type u_3
β : Type ?u.12413
A : Matrix n l α
B : Matrix n m α
C : Matrix o l α
D : Matrix o m α
⊢ (fromBlocks A B C D)ᵀ = fromBlocks Aᵀ Cᵀ Bᵀ Dᵀ State After: case a.h
l : Type u_2
m : Type u_4
n : Type u_1
o : Type u_5
p : Type ?u.12383
q : Type ?u.12386
m' : o → Type ?u.12391
n' : o → Type ?u.12396
p' : o → Type ?u.12401
R : Type ?u.12404
S : Type ?u.12407
α : Type u_3
β : Type ?u.12413
A : Matrix n l α
B : Matrix n m α
C : Matrix o l α
D : Matrix o m α
i : l ⊕ m
j : n ⊕ o
⊢ (fromBlocks A B C D)ᵀ i j = fromBlocks Aᵀ Cᵀ Bᵀ Dᵀ i j Tactic: ext i j State Before: case a.h
l : Type u_2
m : Type u_4
n : Type u_1
o : Type u_5
p : Type ?u.12383
q : Type ?u.12386
m' : o → Type ?u.12391
n' : o → Type ?u.12396
p' : o → Type ?u.12401
R : Type ?u.12404
S : Type ?u.12407
α : Type u_3
β : Type ?u.12413
A : Matrix n l α
B : Matrix n m α
C : Matrix o l α
D : Matrix o m α
i : l ⊕ m
j : n ⊕ o
⊢ (fromBlocks A B C D)ᵀ i j = fromBlocks Aᵀ Cᵀ Bᵀ Dᵀ i j State After: no goals Tactic: rcases i with ⟨⟩ <;> rcases j with ⟨⟩ <;> simp [fromBlocks] |
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Bhavik Mehta
-/
import category_theory.discrete_category
/-!
# The empty category
Defines a category structure on `pempty`, and the unique functor `pempty ⥤ C` for any category `C`.
-/
universes v u w -- morphism levels before object levels. See note [category_theory universes].
namespace category_theory
namespace functor
variables (C : Type u) [category.{v} C]
/-- The canonical functor out of the empty category. -/
def empty : discrete pempty.{v+1} ⥤ C := discrete.functor pempty.elim
variable {C}
/-- Any two functors out of the empty category are isomorphic. -/
def empty_ext (F G : discrete pempty.{v+1} ⥤ C) : F ≅ G :=
discrete.nat_iso (λ x, pempty.elim x)
/--
Any functor out of the empty category is isomorphic to the canonical functor from the empty
category.
-/
def unique_from_empty (F : discrete pempty.{v+1} ⥤ C) : F ≅ empty C :=
empty_ext _ _
/--
Any two functors out of the empty category are *equal*. You probably want to use
`empty_ext` instead of this.
-/
lemma empty_ext' (F G : discrete pempty.{v+1} ⥤ C) : F = G :=
functor.ext (λ x, x.elim) (λ x _ _, x.elim)
end functor
end category_theory
|
Formal statement is: lemma open_affinity: fixes S :: "'a::real_normed_vector set" assumes "open S" "c \<noteq> 0" shows "open ((\<lambda>x. a + c *\<^sub>R x) ` S)" Informal statement is: If $S$ is an open set and $c \neq 0$, then the set $a + cS = \{a + cx : x \in S\}$ is open. |
lemma Zfun_add: assumes f: "Zfun f F" and g: "Zfun g F" shows "Zfun (\<lambda>x. f x + g x) F" |
\chapter{Minor constructions}\label{cpt:Minor constructions}
\largerpage[2.5]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Comparative constructions}
\label{sec:Comparative constructions}
In \is{comparative construction}comparative constructions two or more items are examined in order to note similarities and differences in degree between them \citep[787]{Dixon2008}. Inequality between two items is expressed by means of one of the \is{spatial case}spatial cases (\refsec{sssec:spr-lative -le/-ja/-a, spr-essive -le-b/-ja-b/-a-b and spr-ablative -le-r/-ja-r/-a-r}). In superlative constructions, \is{degree adverb}degree adverbs occur. Equative constructions and the expression of similarity are realized by means of several \is{particle}particles (\refsec{sec:Equative constructions and the expression of similarity}).
In Sanzhi \is{comparative construction}comparative constructions we find a comparee, the \isi{standard of comparison}, and the parameter of comparison. The \isi{standard of comparison} is marked with the \tsc{loc}-\isi{ablative} case that has the suffixes \tit{-ler(ka)}, \tit{-ar(ka)} or \tit{-jar(ka)} (\refsec{sssec:spr-lative -le/-ja/-a, spr-essive -le-b/-ja-b/-a-b and spr-ablative -le-r/-ja-r/-a-r}). It is cross-linguistically common to mark the \isi{standard of comparison} with an \isi{ablative} (or locative) case \citep[791]{Dixon2008}, and East Caucasian languages including Dargwa varieties nicely confirm this tendency. Neither the comparee nor the parameter of comparison bears any special marking. Consequently, if the \isi{standard of comparison} were to be omitted, the construction would be a \isi{simple clause} and not a \isi{comparative construction}. Most commonly the standard precedes the comparee. The parameter is a gradable adjective or adverb that occurs in its plain form without any additional index (as, e.g., English \tit{more}).
\begin{exe}
\ex \label{ex:Bahmud was smarter than Bahamma}
\gll Baħaˁmma-ja-rka Baˁħmud šːustri=de \\
Bahamma\tsc{-loc-abl} Bahmud smart\tsc{=pst}\\
\glt \sqt{Bahmud was smarter than Bahamma.}
\ex \label{ex:There were those older than father}
\gll atːa-ja-r χːula-te=ra b-irχʷ-i\\
father\tsc{-loc-abl} big\tsc{-dd.pl=add} \tsc{hpl-}be\tsc{.ipfv-hab.pst.3}\\
\glt \sqt{There were those older than father.}
\ex \label{ex:It (bread) is better (when made) of barley than of wheat.}
\gll ij ač'i-lla-ja-rka [\ldots] muqi-lla=ra ʡaˁħ-ce b-irχ-u \\
this wheat\tsc{-gen-loc-abl} {} barley\tsc{-gen=add} good\tsc{-dd.sg} \tsc{n-}become\tsc{.ipfv-prs.3}\\
\glt \sqt{It (bread) is better (when made) of barley than of wheat.}
\ex \label{ex:I (will) make a big barrow (maχ) earlier than you}
\gll u-le-rka sala-r du-l maχ χːula-ce b-arq'-ij\\
\tsc{2sg-loc-abl} front\tsc{-abl} \tsc{1sg-erg} barrow big\tsc{-dd.sg} \tsc{n-}do\tsc{.pfv-inf} \\
\glt \sqt{I (will) make a big barrow (\tit{maχ}) earlier than you.}
\ex \label{ex:The animals had apparently more conscience than our rich (people)}
\gll žaniwar-t-a-lla χʷal-le jaˁħ=ra namus=ra b-už-ib ca-b nišːa-la dawla-či-b-t-a-lla-ja-r \\
animal\tsc{-pl-obl-gen} big\tsc{-advz} conscience\tsc{=add} conscience\tsc{=add} \tsc{n-}stay\tsc{-pret} \tsc{cop-n} \tsc{1pl-gen} wealth\tsc{-adjvz-hpl-pl-obl-gen-loc-abl}\\
\glt \sqt{The animals had apparently more conscience than our rich (people).} (lit. \sqt{their conscience was bigger})
\end{exe}
Superlative constructions contain a comparee, a \isi{standard of comparison}, and a parameter. They basically have the same structure as the constructions described so far in this section. The only differences are the case marking of the standard, which is now the \tsc{in}-ablative, and the additional \isi{degree adverb} modifying the parameter. The \isi{standard of comparison} can be omitted if it is inferable from the context.
\begin{exe}
\ex \label{ex:Among all as the most beautiful (country) seemed to me Latvia}
\gll li<b>il-li-cːe-rka bah qːuʁa-ce dune ka-b-icː-ur-il dam dejstwitelno Latwija=de \\
all\tsc{<n>-obl-in-abl} most beautiful\tsc{-dd.sg} world \tsc{down-n-}stand\tsc{.pfv-pret-ref} \tsc{1sg.dat} really Latvia\tsc{=pst} \\
\glt \sqt{Among all as the most beautiful (country) seemed to me Latvia.}
\ex \label{ex:She was the oldest within her family.}
\gll il kulpat-li-cːe-r bah χːula-ce r-už-ib ca-r\\
that family\tsc{-obl-in-abl} most big\tsc{-dd.sg} \tsc{f-}be\tsc{-pret} \tsc{cop-f}\\
\glt \sqt{She was the oldest within her family.}
\ex \label{ex:The worst (place) was Azerbajan}
\gll bah wahi-ce ʡaˁzirbažan=de\\
most bad\tsc{-dd.sg} Azerbaijan\tsc{=pst}\\
\glt \sqt{The worst (place) was Azerbaijan.}
\end{exe}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Equative constructions and the expression of similarity}
\label{sec:Equative constructions and the expression of similarity}
For \isi{equative constructions} and the expression of similarity Sanzhi has two \is{particle}particles, \tit{ʁuna} and \tit{daˁʡle} \sqt{as, like}, and the adjective \tit{miši} \sqt{similar}.
The \isi{particle} \tit{ʁuna} \sqt{as, like} immediately follows the parameter of comparison that it has scope over, much like, e.g., focus-sensitive \is{focus-sensitive particle}particles do. The parameter can be a pronoun, an adjective \refex{ex:He is like a young man}, an adverbial \refex{ex:There were not such customs as here}, or a noun \refex{ex:This was like a mug}. Very often it is simply a \isi{demonstrative pronoun}, and the combination of demonstrative and equative \isi{particle} means \sqt{like this, such} \refex{ex:They were so good people}. Depending on the parameter, the \isi{particle} thus appears, e.g., within noun phrases \refex{ex:He is like a young man} or in the position of adverbial modifiers.
\begin{exe}
\ex \label{ex:He is like a young man}
\gll žahil ʁuna admi ca-w heχ\\
young \tsc{eq} person \tsc{cop-m} \tsc{dem.down}\\
\glt \sqt{He is like a young man.}
\ex \label{ex:There were not such customs as here}
\gll heštːu-d ʁuna ʡaˁdat-urme akːʷ-i\\
here\tsc{-npl} \tsc{eq} custom\tsc{-pl} \tsc{cop.neg-hab.pst.3}\\
\glt \sqt{There were not such customs as here.}
\ex \label{ex:They were so good people}
\gll hel ʁuna ʡaˁħ χalq' b-irχʷ-iri\\
that \tsc{eq} good people \tsc{hpl-}become\tsc{.ipfv-hab.pst.3}\\
\glt \sqt{They were good people like that.}
\end{exe}
It can also occur as a predicate in a \isi{copula} clause without a head noun and it can be nominalized by suffixing -\textit{b} (unclear origin) and the \isi{cross-categorical suffix} in the plural form -\textit{te} (\tit{ʁunabte}).
\begin{exe}
\ex \label{ex:This was like a mug}
\gll heχ kuruškːa ʁuna b-irχʷ-i\\
\tsc{dem.down} mug \tsc{eq} \tsc{n-}be\tsc{.ipfv-hab.pst.3}\\
\glt \sqt{This was like a mug.}
\end{exe}
The \isi{particle} \tit{daˁʡle} \sqt{as, like}, which diachronically seems to be an adverbial derived with the adverbializing suffix -\textit{le}, has a meaning very similar but not identical to \tit{ʁuna}. It indicates only that some situation or some item resembles another situation or item. Both \is{particle}particles slightly differ in their distribution. The \isi{particle} \tit{daˁʡle} follows the parameter of comparison over which it has scope. As with \tit{ʁuna}, the parameter can be expressed by \isi{nouns} \refex{ex:This looks like kiwi, similar to kiwi or so}, adverbials \refex{ex:At that (time) there were no minibuses like now}, or \isi{adjectives} \refex{ex:This woman looks even like old.}. But in contrast to \tit{ʁuna}, \tit{daˁʡle} is most frequently used in non-finite clauses headed by \is{participle}participles \refex{ex:as I said} or the \isi{infinitive} \refex{ex:He is keeping his jar as if it fell down}.
\begin{exe}
\ex \label{ex:This looks like kiwi, similar to kiwi or so}
\gll kiwi daˁʡle χe-d heχ-tːi, kiwi ʁuna cik'al\\
kiwi as exist.\tsc{down-npl} \tsc{dem.down}\tsc{-pl} kiwi \tsc{eq} something\\
\glt \sqt{This looks like kiwi, something similar to kiwi.}
\ex \label{ex:At that (time) there were no minibuses like now}
\gll it=qːella hana daˁʡle maršrutka-be a-d-irχʷ-i=q'al\\
that=when now as minibus\tsc{-pl} \tsc{neg-npl-}be\tsc{.ipfv-hab.pst.3=mod}\\
\glt \sqt{At that (time) there were no minibuses like now.}
\ex \label{ex:This woman looks even like old.}
\gll heχ xːunul bulan r-uqna-ce daˁʡle či-r-ig-ul ca-r\\
\tsc{dem.down} woman even \tsc{f-}old\tsc{-dd} as \tsc{spr-f-}see\tsc{.ipfv-icvb} \tsc{cop-f}\\
\glt \sqt{This woman even looks like she is old.}
\ex \label{ex:as I said}
\gll du-l haʔ-ib daˁʡle\\
\tsc{1sg-erg} say\tsc{.pfv-pret} as\\
\glt \sqt{as I said}
\ex \label{ex:He is keeping his jar as if it fell down}
\gll qaˁjqaˁj b-uc-ib ca-b a-ka-b-ič-ij daˁʡle\\
jaw \tsc{n-}catch\tsc{.pfv-pret} \tsc{cop-n} \tsc{neg-down}\tsc{-n-}occur\tsc{.pfv-inf} as\\
\glt \sqt{He is keeping his jar as if it fell down.} (lit. \sqt{like not to fall down})
\end{exe}
Finally, the adjective \tit{miši} \sqt{similar} assigns the \isi{dative} case to its complement, which represents the \isi{standard of comparison} \refex{ex:This is not similar to a prison}. In \isi{copula} clauses, in which it is used as the \isi{copula} complement, the adverbializing suffix \tit{-le} is added, as regularly happens with adjectival stems in \isi{copula} constructions.
\begin{exe}
\ex \label{ex:This is not similar to a prison}
\gll tusnaq-li-j miši-l akːu\\
prison\tsc{-obl-dat} similar\tsc{-advz} \tsc{cop.neg}\\
\glt \sqt{This is not similar to a prison.}
\end{exe}
The differences between the three \is{comparative construction}comparative constructions lie mostly in their morphosyntactic behavior, with an additional semantic distinction between \tit{ʁuna} and \tit{daˁʡle} on the one side, and \tit{mišil} on the other \refex{ex:(He) is like a young man. (i.e. He seems to be young, he looks young or behaves as if he were young)}, \refex{ex:(He) is similar to a young man}. The \is{particle}particles \tit{ʁuna} and \tit{daˁʡle} have the distribution of focus-sensitive \is{focus-sensitive particle}particles and can therefore occur within certain types of phrases as, e.g., noun phrases, but do not assign case to the items they scope over, in contrast to the case-assigning adjective \tit{miši}.
\begin{exe}
\ex \label{ex:(He) is like a young man. (i.e. He seems to be young, he looks young or behaves as if he were young)}
\gll žahil admi ʁuna / daˁʡle ca-w\\
young person \tsc{eq} / as \tsc{cop-m}\\
\glt \sqt{(He) is like a young man. (i.e. He seems to be young, he looks young or behaves as if he were young)}
\ex \label{ex:(He) is similar to a young man}
\gll žahil admi-li-j miši-l ca-w\\
young person\tsc{-obl-dat} similar\tsc{-advz} \tsc{cop-m}\\
\glt \sqt{(He) is similar to a young man.}
\end{exe}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Comitative constructions}
\label{sec:Comitative constructions}
Sanzhi has two ways of expressing \isi{comitative} meaning: case marking (in combination with optional postpositions) and a construction involving the use of \is{reflexive pronoun}reflexive pronouns.
The cases used are the \isi{comitative} case (\tit{-cːella}, \refsec{sssec:Comitative}) or, more rarely, the \tsc{in}-\isi{ablative} case (\refsec{sssec:in-lative -cːe, in-essive -cːe-b, and in-ablative -cːe-r}). They can occur together with the postposition \tit{b-alli} (\refsec{ssec:postposition balli}) or the postposition\slash adverb \tit{canille} (\refsec{ssec:postposition canille}). These constructions can be used with animate and inanimate \isi{nouns}. In the latter case they can express instruments \refex{ex:having made a hole with a fork}.
\begin{exe}
\ex \label{ex:having made a hole with a fork}
\gll č'ala-cːella ʡaˁmi ka-b-at-ur-re\\
fork\tsc{-comit} hole \tsc{down-n-}let\tsc{.pfv-pret-cvb}\\
\glt \sqt{having made a hole with a fork}
\ex \label{ex:Well, he was together with him at that time, right}
\gll nu, iž-i-cːella canille=qːel il hel zamana, akː-u=w?\\
well this\tsc{-obl-comit} together=when that that time \tsc{cop.neg-prs.3=q}\\
\glt \sqt{Well, he was together with him at that time, right?}
\end{exe}
There does not seem to be a clear semantic difference between \tit{b-alli} and \tit{canille} \refex{ex:The lid should be together with the pot}. The two items can only be distinguished by means of their morphosyntactic behavior, because \tit{b-alli} agrees in \isi{gender} with the argument in the \isi{absolutive} \refex{ex:The lid should be together with the pot} and it always implies a complement even when the complement is not overtly expressed. For instance, \refex{ex:Madina and I came together (with somebody else / with other people)} entails that there were other people with whom we came, whereas in \refex{ex:Madina and I came together} there is no such implication and \tit{canille} only functions as an adverb that expresses the fact that Madina and the speaker came together:
\begin{exe}
\ex \label{ex:The lid should be together with the pot}
\gll burta ħaˁšak-li-cːella b-alli / canille b-irχʷ-an ca-b\\
lid pot\tsc{-obl-comit} \tsc{n-}together / together \tsc{n-}be\tsc{.ipfv-ptcp} \tsc{cop-n}\\
\glt \sqt{The lid should be together with the pot.} (E)
\ex \label{ex:Madina and I came together (with somebody else / with other people)}
\gll Madina=ra du=ra d-alli ag-ur=da\\
Madina\tsc{=add} \tsc{1sg=add} \tsc{1/2pl-}together go\tsc{.pfv-pret=1}\\
\glt \sqt{Madina and I came together (with somebody else\slash with other people).} (E)
\ex \label{ex:Madina and I came together}
\gll Madina=ra du=ra canille ag-ur=da\\
Madina\tsc{=add} \tsc{1sg=add} together go\tsc{.pfv-pret=1}\\
\glt \sqt{Madina and I came together.} (E)
\end{exe}
The second construction is the use of a \isi{reflexive pronoun} in what looks like a \isi{coordination} of noun phrases. This construction has been described for Standard Dargwa by \citet{vandenBerg2004}. The structure is [Y\tit{=ra} X\tit{=ra}] \sqt{X with Y}. X refers to an animate (usually human) entity and is formally expressed through the \isi{reflexive pronoun}. Y is a nominal that can be animate or inanimate and takes case suffixes. It can be a common noun, a pronoun, a personal name or any other type of \isi{noun phrase}. Both X and Y are marked with the \isi{additive} \tit{=ra} and are often adjacent to each other, which makes them look like a coordinated \isi{noun phrase}. However, the argument referred to by the reflexive can be expressed independently. Furthermore, the coordinated \isi{noun phrase} usually does not take an argument position in the clause. It is rather one of the individual members that functions as argument. For instance, in \refex{ex:With a bucket of water he is standing} the pronoun \tit{heχ} that is following the \isi{comitative} phrase represents the subject of the following verb as the agreement on the verb shows (masculine singular).
\begin{exe}
\ex \label{ex:With a bucket of water he is standing}
\gll [hin-na badra=ra ca-w=ra] heχ ka-jcː-ur ca-w\\
water\tsc{-gen} bucket\tsc{=add} \tsc{refl-m=add} \tsc{dem.down} \tsc{down}-stand\tsc{.m.pfv-pret} \tsc{cop-m}\\
\glt \sqt{With a bucket of water he is standing.}
\ex \label{ex:He ran away with his dog}
\gll sa-r-uq-un ca-w χːʷe=ra ca-w=ra\\
\tsc{hither-abl}-go.\tsc{pfv-pret} \tsc{cop-m} dog\tsc{=add} \tsc{refl-m=add}\\
\glt \sqt{He ran away with his dog.}
\end{exe}
Example \refex{ex:Everybody is with a bottle in their hands. (lit. There is one bottle each in everybody's (hand), and they also.)} shows that the two items bearing the \isi{additive enclitic} \tit{=ra} can be separated by other material. The agreement on the \isi{existential copula} is controlled by the first \isi{noun phrase} \tit{ca ca šuša,} which is semantically plural and functions as the \isi{copula} subject of the existential\slash \isi{locational copula} \textit{χe-d}, and the \isi{reflexive pronoun} appears in a kind of right-dislocated position, such that it is syntactically not part of the subject constituent.
\begin{exe}
\ex \label{ex:Everybody is with a bottle in their hands. (lit. There is one bottle each in everybody's (hand), and they also.)}
\gll harkil-li-cːe-d ca ca šuša=ra χe-d ca-b=ra\\
every-\tsc{obl-in-npl} one one bottle=\tsc{add} exist.\tsc{down-npl} \tsc{refl-hpl=add} \\
\glt \sqt{Everybody is with a bottle in their hands.} (lit. \sqt{There is one bottle each in everybody's (hand), and they also.})
\end{exe}
%
It is also possible to elicit examples in which the semantically coordinated items function as a coordinated \isi{noun phrase}. The coordinated \isi{noun phrase} controls plural agreement on \is{intransitive verb}intransitive verbs if it functions as subject \refex{ex:He and (his) sister were standing there}. However, masculine singular would also be possible in this type of construction as \refex{ex:He ran away with his dog} shows. In example \refex{ex:Musa sang a song together with his sister} the two coordinated items are marked for the \isi{ergative} case. Again the coordinated \isi{noun phrase} rather looks like an adjunct in the clause in which \tit{Musal} is the agentive argument.
\begin{exe}
\ex \label{ex:He and (his) sister were standing there}
\gll rucːi=ra ca-w=ra ka-b-icː-ur ca-b\\
sister\tsc{=add} \tsc{refl-m=add} \tsc{down}\tsc{-hpl-}stand\tsc{.pfv-pret} \tsc{cop-hpl}\\
\glt \sqt{He and (his) sister were standing there.} (E)
\ex \label{ex:Musa sang a song together with his sister}
\gll rucːi-li=ra cin-ni=ra Musa-l dalaj b-uč'-un ca-b\\
sister\tsc{-erg=add} \tsc{refl.sg-erg=add} Musa\tsc{-erg} song \tsc{n-}sing\tsc{.ipfv-pret} \tsc{cop-n}\\
\glt \sqt{Musa sang a song together with his sister.}
\end{exe}
The construction has probably evolved from the emphatic use of \is{reflexive pronoun}reflexive pronouns (\refsec{ssec:Emphatic reflexive use}) in combination with the \isi{additive} meaning of the \isi{enclitic} \tit{=ra}. Thus, in \refex{ex:There one woman grabbed his backpack, took it, and went away with it} the two parts occur in independent clauses that follow each other as arguments of their respective verbs. The \isi{reflexive pronoun} in the second clause doubles an omitted subject argument and conveys the emphatic meaning \sqt{she herself}. The whole construction can be rephrased as \sqt{both the backpack and she herself} and has a \isi{comitative} reading (\sqt{she went away with the backpack}) that has to be inferred from the structure.
\begin{exe}
\ex \label{ex:There one woman grabbed his backpack, took it, and went away with it}
\gll ca xːunul-li χːap b-arq'-ib-le hil-i-la wešimišuk'=ra b-erqː-ib ca-b, ca-r=ra ag-ur ca-r\\
one woman\tsc{-erg} grab \tsc{n-}do\tsc{.pfv-pret-cvb} that\tsc{-obl-gen} backpack\tsc{=add} \tsc{n-}carry\tsc{.pfv-pret} \tsc{cop-n} \tsc{refl-f=add} go\tsc{.pfv-pret} \tsc{cop-f}\\
\glt \sqt{There one woman grabbed his backpack, took it, and went away with it.} (lit. \sqt{She also took [the backpack], and [she herself also] went away.})
\end{exe}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Possession}
\label{sec:Possession}
Possession is either expressed by cases or by means of the \tit{b-ah} construction. In the first case the possessor is marked with the \isi{genitive} case and most commonly precedes the possessed item \refex{ex:I had mother and father}, but other positions are available, too \refex{ex:Is your sack there, Ashura} (see \refsec{sec:Noun phrases} on noun phrases and \refsec{sec:Constituent order at the phrase level} on the \isi{constituent order} of phrases). There is no grammaticalized distinction between alienable and inalienable \isi{possession}. Clauses expressing \isi{possession} are \isi{copula} clauses (\refsec{sec:copulaclauses}) containing locational copulas (\refsec{sec:Locational copulae}).
\begin{exe}
\ex \label{ex:I had mother and father}
\gll di-la atːa aba le-b=de\\
\tsc{1sg-gen} father mother exist\tsc{-hpl=pst}\\
\glt \sqt{I had mother and father.}
\ex \label{ex:Is your sack there, Ashura}
\gll qːap χe-b=uw, wa Ašura, ala?\\
sack exist.\tsc{down-n=q} hey Ashura \tsc{2sg.gen}\\
\glt \sqt{Is your sack there, Ashura?}
\end{exe}
If the respective item is not permanently possessed but only temporarily in the custody of the possessor, the \tsc{in}-essive case is used \refex{ex:He has a bottle with a drink} (\refsec{sssec:in-lative -cːe, in-essive -cːe-b, and in-ablative -cːe-r}).
\begin{exe}
\ex \label{ex:He has a bottle with a drink}
\gll hež-i-cːe-b šuša ca-b deč-la\\
this\tsc{-obl-in-n} bottle \tsc{cop-n} drinking\tsc{-gen}\\
\glt \sqt{He has a bottle with a drink.}
\end{exe}
The second way of expressing \isi{possession} is the \tit{b-ah} construction. The noun \tit{b-ah} means \sqt{owner} (plural \tit{b-ahin-te}, which also translates as \sqt{parents}). It is one of the few \isi{nouns} that have a \isi{gender} prefix expressing the \isi{gender} of the owner. The possessed item appears in the \isi{genitive} with \tit{b-ah} as the head noun of the \isi{genitive} phrase. If the possessor is overt, it occurs after \tit{b-ah}. Both noun phrases together form an appositive phrase (\refsec{sec:Noun phrases}). The possessed items in this construction are normally inanimate objects \xxref{ex:There was one with a tail}{ex:On the upper side you smear water, egg, whatever you have}. Often they refer to clothes \refex{ex:All three had hats} or body parts \refex{ex:There was one with a tail}, \refex{ex:the devil with one eye} that are used to characterize and identify the owner. From this noun the adjective-like item \tit{wahwalla} \sqt{own, everybody's own} with frozen \isi{gender} agreement has been derived.
\begin{exe}
\ex \label{ex:There was one with a tail}
\gll č'imi-la b-ah b-irχʷ-i\\
tail\tsc{-gen} \tsc{n-}owner \tsc{n-}be\tsc{.ipfv-hab.pst.3}\\
\glt \sqt{There was one with a tail.}
\ex \label{ex:the devil with one eye}
\gll ca ul-la b-ah šajt'an\\
one eye\tsc{-gen} \tsc{n-}owner devil\\
\glt \sqt{the devil with one eye}
\ex \label{ex:All three had hats}
\gll šljaˁp'a-la b-ahin-te=de ʡaˁbal-ra\\
hat\tsc{-gen} \tsc{hpl-}owner\tsc{-pl=pst} three\tsc{-num}\\
\glt \sqt{All three had hats.}
\ex \label{ex:On the upper side you smear water, egg, whatever you have}
\gll qar=či-b sa-b-ik-u [\ldots] hin-ni, duqu-l, le-b-il ʁuna w-ah-la cik'al-li\\
up=on\tsc{-n} \tsc{hither-n-}smear\tsc{.ipfv-prs} {} water\tsc{-erg} egg\tsc{-erg} exist\tsc{-n-ref} \tsc{eq} \tsc{m-}owner\tsc{-gen} thing\tsc{-erg}\\
\glt \sqt{On the upper side you smear [\ldots] water, egg, whatever you have.}
\end{exe}
|
`is_element/colour_profiles` := (C::set) -> (n::nonnegint) -> proc(c)
type(c,list) and nops(c) = n and {op(c)} minus C = {};
end:
`is_equal/colour_profiles` := (C::set) -> (n::nonnegint) -> (c,b) -> evalb(c = b);
`list_elements/colour_profiles` := (C::set) -> proc(n::nonnegint)
local L,i,x,c;
L := [[]];
for i from 1 to n do
L := [seq(seq([op(c),x],x in C),c in L)];
od:
return L;
end:
`count_elements/colour_profiles` := (C::set) -> (n::nonnegint) -> nops(C)^n;
`random_element/colour_profiles` := (C::set) -> (n::nonnegint) -> proc()
local i;
[seq(random_element_of(C),i=1..n)];
end:
`o/colour_profiles` := proc() map(op,[args]); end:
`gamma/colour_profiles` := (C::set) -> (L::list(nonnegint)) -> proc(c,d)
map(op,d);
end:
`circ/colour_profiles` := (C::set) -> (i,m,n) -> proc(c,b)
[op(1..i-1,c),op(b),op(i+1..m,c)];
end:
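# A quick usage sketch (illustrative, not part of the original file):
# `list_elements/colour_profiles`({1,2})(2);   # -> [[1,1],[1,2],[2,1],[2,2]]
# `count_elements/colour_profiles`({1,2})(2);  # -> 2^2 = 4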
|
\documentclass[xcolor=table,usenames,dvipsnames]{beamer}
\usepackage{lscape, amsmath, amsfonts, amssymb, setspace, theorem, wrapfig, graphicx, float, multirow, subfig, color, rotating, multicol, datetime, natbib, venndiagram, pstricks, xkeyval, tikz, etoolbox, url, hyperref, nth}
\usepackage[T1]{fontenc}
\usepackage[latin1]{inputenc}
\usepackage[english]{babel}
\usetikzlibrary{arrows,calc,matrix}
\title{GV217 - Conflict Analysis}
\subtitle{University of Essex - Department of Government}
\date{Week 24 -- 13 March, 2020} % or you can specify a date, just write it down instead of "\today"
\author{Lorenzo Crippa}
\usetheme[progressbar=frametitle]{metropolis}
\usecolortheme{seahorse} % try others: wolverine; crane...
\begin{document}
\frame{
\titlepage
}
\section{Online test}
\begin{frame}{Online test}
\begin{itemize}
\item The test will assess your knowledge of class content including concepts
\item It is \textbf{not} designed to cause you trouble
\item Multiple-choice questions
\item It will be available on Moodle
\item \textbf{You will not get a notification from FASER}
\item The test opens next week and remains available for 24 hours
\item Once you open it, you will have 30 minutes to complete it
\end{itemize}
\end{frame}
\section{Conflict management: resolution and maintenance of peace}
\begin{frame}{General framework}
Two factors favor a conflict resolution: \pause
\begin{enumerate}
\item Military victory \pause
\item Negotiated, \textbf{mediated} settlement. Examples of mediators? \pause
\begin{itemize}
\item Third-party states \pause
\item International organisations
\end{itemize}
\end{enumerate} \pause
Two factors keep a conflict from re-emerging: \pause
\begin{enumerate}
\item Power-sharing \pause
\item Peacekeeping (next week) \pause
\end{enumerate}
\textcolor{red}{Yet, problem of right-truncation (we don't have any information about when/if a conflict will re-ignite in the future)}
\end{frame}
\begin{frame}{Mediation}
What is mediation? \pause
``A process of conflict management where disputants seek the assistance of, or accept an offer of help from, an individual, group, state, or organization to settle their conflict or resolve their differences without resorting to physical force or invoking the authority of the law'' (Bercovitch, Anagnoson, and Wille 1991: 8).
\end{frame}
\begin{frame}{Incentives for mediators and for parties}
Third-parties often have an interest in peace in a region or state. Examples of interest: \pause
\begin{itemize}
\item Security (reducing conflicts and militia) \pause
\item Economic (trade and investments) \pause
\item Political (increasing soft-power) \pause
\end{itemize}
Problems for parties in the talks are in common with bargaining in general: \pause
\begin{itemize}
\item Information asymmetry
\item Commitment problems
\end{itemize}
\end{frame}
\begin{frame}{Bargaining Strategies for mediators}
Three strategies to mediate \pause
\begin{enumerate}
\item \textbf{Facilitate talks}: good offices, information, clarification, message transmission \pause
\item \textbf{Formulate talks}: setting up the meeting, controlling pace and formality, structure and agenda, suggest concession and aid in face-saving, highlight interests \pause
\item \textbf{Manipulate talks}: change expectations, show commitments, promise resources or threaten withdrawal, threaten punishments
\end{enumerate}
\end{frame}
\begin{frame}{Mediation Outcomes: Formal Agreement}
\begin{itemize}
\item Most likely when both sides agree the outcome lies within the bargaining range
\item Manipulation is most likely to end in formal agreement because it creates the largest bargaining range and/or provides security guarantees.
\end{itemize} \pause
\textbf{H1: Manipulative > Formulative > Facilitative > No mediation}
\end{frame}
\begin{frame}{Mediation Outcomes: Tension reduction}
\begin{itemize}
\item When outcome is likely to persist tensions are reduced
\item Most likely to occur when outcome is near the centre of the bargaining range
\item Formulation is expected to bring actors closest to this mid-point
\item Manipulation increases bargaining space but does not guarantee outcomes close to the centre; also guarantors may be unreliable \pause
\end{itemize}
\textbf{H2: Formulative > Facilitative > Manipulative > No mediation}
\end{frame}
\begin{frame}{Mediation Outcomes: Crisis Abatement}
\begin{itemize}
\item The threat of violence ends
\item Strategies that increase the costs of conflict will most powerfully increase the chances of conflict abatement \pause
\end{itemize}
\textbf{H3: Manipulative > Formulative > Facilitative}
\end{frame}
\begin{frame}{Mediation strategies and outcomes: General findings}
\begin{itemize}
\item Pretty much in line with theory (except that facilitation seems to be more efficient than formulation at reducing tensions) \pause
\item Mediators should use a mix of strategies \pause
\item Most durable agreements are those achieved with as little help as possible \pause
\item Finding an agreement in line with the true bargaining range minimises risk of conflict recurrence and need for third party intervention
\end{itemize}
\end{frame}
\begin{frame}{Power sharing}
Power can be shared in four ways: \pause
\begin{enumerate}
\item \textbf{Political}: \pause divide appointments in public offices. Examples: \pause
\begin{itemize}
\item Lebanon, Iraq, Bosnia \pause
\end{itemize}
\item \textbf{Territorial}: \pause divide land and territory.
Examples: \pause
\begin{itemize}
\item The Colombian-FARC peace agreement \pause
\end{itemize}
\item \textbf{Military}: \pause share participation to military offices. \pause
\item \textbf{Economic}: \pause share access to natural resources and their revenues.
\end{enumerate} \pause
Durable peace becomes more likely as more elements are included in an agreement
\end{frame}
\begin{frame}{Power sharing -- hypotheses and findings}
\begin{itemize}
\item \textcolor{ForestGreen}{The more extensive the power-sharing arrangements in a civil war settlement, the more likely it is that the peace will endure} \pause
\item \textcolor{ForestGreen}{Settlements of civil wars characterized by high casualty rates are unlikely to yield a durable peace} \pause
\item \textcolor{ForestGreen}{Settlements that call for third-party \textbf{enforcement} are more likely to produce a durable peace than those that make no provision for enforcement by third-party actors} \pause
\item \textcolor{ForestGreen}{Negotiated settlements are more likely to produce an enduring peace when the issue at stake in the conflict is politico-economic rather than identity based} \pause
\item \textcolor{YellowOrange}{The risk of war breaking out again following the negotiated settlement of a civil war should decline with time}
\end{itemize}
\end{frame}
\begin{frame}{Mediation and power sharing in Burundi}
\centering
\includegraphics[scale=0.40]{pictures/week24.png}
\end{frame}
\frame{
\frametitle{Conclusion}
\begin{center}
All clear? More questions? \\
See you next week!
\end{center}
}
\end{document} |
(** Based on Benjamin Pierce's "Software Foundations" *)
Require Import List.
Import ListNotations.
Require Import Omega.
Require Export Arith Arith.EqNat.
Require Export Id.
From hahn Require Import HahnBase.
Section S.
Variable A : Set.
Definition state := list (id * A).
Reserved Notation "st / x => y" (at level 0).
Inductive st_binds : state -> id -> A -> Prop :=
st_binds_hd : forall st id x, ((id, x) :: st) / id => x
| st_binds_tl : forall st id x id' x', id <> id' -> st / id => x -> ((id', x')::st) / id => x
where "st / x => y" := (st_binds st x y).
Definition update (st : state) (id : id) (a : A) : state := (id, a) :: st.
Notation "st [ x '<-' y ]" := (update st x y) (at level 0).
Lemma state_deterministic (st : state) (x : id) (n m : A)
(SN : st / x => n)
(SM : st / x => m) :
n = m.
Proof.
induction st; inversion SN; subst.
all: inversion SM; desf.
apply IHst; desf.
Qed.
Lemma update_eq (st : state) (x : id) (n : A) :
st [x <- n] / x => n.
Proof. unfold update. constructor. Qed.
Lemma update_neq (st : state) (x2 x1 : id) (n m : A)
(NEQ : x2 <> x1) : st / x1 => m <-> st [x2 <- n] / x1 => m.
Proof.
split; intros HH.
{ unfold update. apply st_binds_tl; auto. }
unfold update in HH. inv HH.
Qed.
Lemma update_shadow (st : state) (x1 x2 : id) (n1 n2 m : A) :
st[x2 <- n1][x2 <- n2] / x1 => m <-> st[x2 <- n2] / x1 => m.
Proof.
split; intros HH.
{ unfold update in HH.
inv HH.
{ apply update_eq. }
inv H5. apply update_neq; auto. }
inv HH.
{ apply update_eq. }
repeat (apply update_neq; auto).
Qed.
Lemma update_same (st : state) (x1 x2 : id) (n1 m : A)
(SN : st / x1 => n1)
(SM : st / x2 => m) :
st [x1 <- n1] / x2 => m.
Proof.
destruct (id_eq_dec x1 x2) as [|NEQ]; subst.
2: by apply update_neq.
assert (m = n1); subst.
2: by apply update_eq.
eapply state_deterministic; eauto.
Qed.
Lemma update_permute (st : state) (x1 x2 x3 : id) (n1 n2 m : A)
(NEQ : x2 <> x1)
(SM : st [x2 <- n1][x1 <- n2] / x3 => m) :
st [x1 <- n2][x2 <- n1] / x3 => m.
Proof.
destruct (id_eq_dec x1 x3) as [|NN]; subst.
{ apply update_neq; auto.
inv SM. apply update_eq. }
destruct (id_eq_dec x2 x3) as [|AA]; subst.
{ inv SM. inv H5. apply update_eq. }
repeat (apply update_neq; auto).
inv SM. inv H5.
Qed.
End S.
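(* A small usage sketch (illustrative, not from the original file). After
   [End S.] the section variable [A] becomes an explicit argument and the
   local notations go out of scope. Assuming the [Id] library provides a
   constructor [Id : nat -> id], one could write:
     Check update nat nil (Id 0) 5.
     Check update_eq nat nil (Id 0) 5.
   The latter states the head-binding fact proved by [update_eq] above. *)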
|
\documentclass[9pt,twocolumn,twoside,lineno]{pnas-new}
% Use the lineno option to display guide line numbers if required.
% Note that the use of elements such as single-column equations
% may affect the guide line number alignment.
\templatetype{pnasresearcharticle} % Choose template
% {pnasresearcharticle} = Template for a two-column research article
% {pnasmathematics} = Template for a one-column mathematics article
% {pnasinvited} = Template for a PNAS invited submission
\title{Template for preparing your research report submission to PNAS using Overleaf}
% Use letters for affiliations, numbers to show equal authorship (if applicable) and to indicate the corresponding author
\author[a,c,1]{Author One}
\author[b,1,2]{Author Two}
\author[a]{Author Three}
\affil[a]{Affiliation One}
\affil[b]{Affiliation Two}
\affil[c]{Affiliation Three}
% Please give the surname of the lead author for the running footer
\leadauthor{Lead author last name}
% Please add here a significance statement to explain the relevance of your work
\significancestatement{Authors must submit a 120-word maximum statement about the significance of their research paper written at a level understandable to an undergraduate educated scientist outside their field of speciality. The primary goal of the Significance Statement is to explain the relevance of the work in broad context to a broad readership. The Significance Statement appears in the paper itself and is required for all research papers.}
% Please include corresponding author, author contribution and author declaration information
\authorcontributions{Please provide details of author contributions here.}
\authordeclaration{Please declare any conflict of interest here.}
\equalauthors{\textsuperscript{1}A.O.(Author One) and A.T. (Author Two) contributed equally to this work (remove if not applicable).}
\correspondingauthor{\textsuperscript{2}To whom correspondence should be addressed. E-mail: author.two\@email.com}
% Keywords are not mandatory, but authors are strongly encouraged to provide them. If provided, please include two to five keywords, separated by the pipe symbol, e.g:
\keywords{Keyword 1 $|$ Keyword 2 $|$ Keyword 3 $|$ ...}
\begin{abstract}
Please provide an abstract of no more than 250 words in a single paragraph. Abstracts should explain to the general reader the major contributions of the article. References in the abstract must be cited in full within the abstract itself and cited in the text.
\end{abstract}
\dates{This manuscript was compiled on \today}
\doi{\url{www.pnas.org/cgi/doi/10.1073/pnas.XXXXXXXXXX}}
\begin{document}
% Optional adjustment to line up main text (after abstract) of first page with line numbers, when using both lineno and twocolumn options.
% You should only change this length when you've finalised the article contents.
\verticaladjustment{-2pt}
\maketitle
\thispagestyle{firststyle}
\ifthenelse{\boolean{shortarticle}}{\ifthenelse{\boolean{singlecolumn}}{\abscontentformatted}{\abscontent}}{}
% If your first paragraph (i.e. with the \dropcap) contains a list environment (quote, quotation, theorem, definition, enumerate, itemize...), the line after the list may have some extra indentation. If this is the case, add \parshape=0 to the end of the list environment.
\dropcap{T}his PNAS journal template is provided to help you write your work in the correct journal format. Instructions for use are provided below.
Note: please start your introduction without including the word ``Introduction'' as a section heading (except for math articles in the Physical Sciences section); this heading is implied in the first paragraphs.
\section*{Guide to using this template on Overleaf}
Please note that whilst this template provides a preview of the typeset manuscript for submission, to help in this preparation, it will not necessarily be the final publication layout. For more detailed information please see the \href{http://www.pnas.org/site/authors/format.xhtml}{PNAS Information for Authors}.
If you have a question while using this template on Overleaf, please use the help menu (``?'') on the top bar to search for \href{https://www.overleaf.com/help}{help and tutorials}. You can also \href{https://www.overleaf.com/contact}{contact the Overleaf support team} at any time with specific questions about your manuscript or feedback on the template.
\subsection*{Author Affiliations}
Include department, institution, and complete address, with the ZIP/postal code, for each author. Use lower case letters to match authors with institutions, as shown in the example. Authors with an ORCID ID may supply this information at submission.
\subsection*{Submitting Manuscripts}
All authors must submit their articles at \href{http://www.pnascentral.org/cgi-bin/main.plex}{PNAScentral}. If you are using Overleaf to write your article, you can use the ``Submit to PNAS'' option in the top bar of the editor window.
\subsection*{Format}
Many authors find it useful to organize their manuscripts with the following order of sections; Title, Author Affiliation, Keywords, Abstract, Significance Statement, Results, Discussion, Materials and methods, Acknowledgments, and References. Other orders and headings are permitted.
\subsection*{Manuscript Length}
PNAS generally uses a two-column format averaging 67 characters, including spaces, per line. The maximum length of a Direct Submission research article is six pages and a PNAS PLUS research article is ten pages including all text, spaces, and the number of characters displaced by figures, tables, and equations. When submitting tables, figures, and/or equations in addition to text, keep the text for your manuscript under 39,000 characters (including spaces) for Direct Submissions and 72,000 characters (including spaces) for PNAS PLUS.
\subsection*{References}
References should be cited in numerical order as they appear in text; this will be done automatically via bibtex, e.g. \cite{belkin2002using} and \cite{berard1994embedding,coifman2005geometric}. All references, including for the SI, should be included in the main manuscript file. References appearing in both sections should not be duplicated. SI references included in tables should be included with the main reference section.
\subsection*{Data Archival}
PNAS must be able to archive the data essential to a published article. Where such archiving is not possible, deposition of data in public databases, such as GenBank, ArrayExpress, Protein Data Bank, Unidata, and others outlined in the Information for Authors, is acceptable.
\subsection*{Language-Editing Services}
Prior to submission, authors who believe their manuscripts would benefit from professional editing are encouraged to use a language-editing service (see list at www.pnas.org/site/authors/language-editing.xhtml). PNAS does not take responsibility for or endorse these services, and their use has no bearing on acceptance of a manuscript for publication.
\begin{figure}%[tbhp]
\centering
\includegraphics[width=.8\linewidth]{frog}
\caption{Placeholder image of a frog with a long example caption to show justification setting.}
\label{fig:frog}
\end{figure}
\subsection*{Digital Figures}
\label{sec:figures}
Only TIFF, EPS, and high-resolution PDF for Mac or PC are allowed for figures that will appear in the main text, and images must be final size. Authors may submit U3D or PRC files for 3D images; these must be accompanied by 2D representations in TIFF, EPS, or high-resolution PDF format. Color images must be in RGB (red, green, blue) mode. Include the font files for any text.
Figures and Tables should be labelled and referenced in the standard way using the \verb|\label{}| and \verb|\ref{}| commands.
Figure \ref{fig:frog} shows an example of how to insert a column-wide figure. To insert a figure wider than one column, please use the \verb|\begin{figure*}...\end{figure*}| environment. Figures wider than one column should be sized to 11.4 cm or 17.8 cm wide.
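% A minimal sketch of a full-width figure (kept as a comment; the graphic
% name wide-example is a placeholder):
% \begin{figure*}
%   \centering
%   \includegraphics[width=17.8cm]{wide-example}
%   \caption{Example of a figure spanning both columns.}
%   \label{fig:wide}
% \end{figure*}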
\subsection*{Single column equations}
Authors may use 1- or 2-column equations in their article, according to their preference.
To allow an equation to span both columns, options are to use the \verb|\begin{figure*}...\end{figure*}| environment mentioned above for figures, or to use the \verb|\begin{widetext}...\end{widetext}| environment as shown in equation \ref{eqn:example} below.
Please note that this option may run into problems with floats and footnotes, as mentioned in the \href{http://texdoc.net/pkg/cuted}{cuted package documentation}. In the case of problems with footnotes, it may be possible to correct the situation using commands \verb|\footnotemark| and \verb|\footnotetext|.
%% Do not use widetext if paper is in single column.
\begin{widetext}
\begin{align*}
(x+y)^3&=(x+y)(x+y)^2\\
&=(x+y)(x^2+2xy+y^2) \numberthis \label{eqn:example} \\
&=x^3+3x^2y+3xy^2+y^3.
\end{align*}
\end{widetext}
\begin{table}%[tbhp]
\centering
\caption{Comparison of the fitted potential energy surfaces and ab initio benchmark electronic energy calculations}
\begin{tabular}{lrrr}
Species & CBS & CV & G3 \\
\midrule
1. Acetaldehyde & 0.0 & 0.0 & 0.0 \\
2. Vinyl alcohol & 9.1 & 9.6 & 13.5 \\
3. Hydroxyethylidene & 50.8 & 51.2 & 54.0\\
\bottomrule
\end{tabular}
\addtabletext{nomenclature for the TSs refers to the numbered species in the table.}
\end{table}
\subsection*{Supporting Information (SI)}
The main text of the paper must stand on its own without the SI. Refer to SI in the manuscript at an appropriate point in the text. Number supporting figures and tables starting with S1, S2, etc. Authors are limited to no more than 10 SI files, not including movie files. Authors who place detailed materials and methods in SI must provide sufficient detail in the main text methods to enable a reader to follow the logic of the procedures and results and also must reference the online methods. If a paper is fundamentally a study of a new method or technique, then the methods must be described completely in the main text. Because PNAS edits SI and composes it into a single PDF, authors must provide the following file formats only.
\subsubsection*{SI Text}
Supply Word, RTF, or LaTeX files (LaTeX files must be accompanied by a PDF with the same file name for visual reference).
\subsubsection*{SI Figures}
Provide a brief legend for each supporting figure after the supporting text. Provide figure images in TIFF, EPS, high-resolution PDF, JPEG, or GIF format; figures may not be embedded in manuscript text. When saving TIFF files, use only LZW compression; do not use JPEG compression. Do not save figure numbers, legends, or author names as part of the image. Composite figures must be pre-assembled.
\subsubsection*{3D Figures}
Supply a composable U3D or PRC file so that it may be edited and composed. Authors may submit a PDF file but please note it will be published in raw format and will not be edited or composed.
\subsubsection*{SI Tables}
Supply Word, RTF, or LaTeX files (LaTeX files must be accompanied by a PDF with the same file name for visual reference); include only one table per file. Do not use tabs or spaces to separate columns in Word tables.
\subsubsection*{SI Datasets}
Supply Excel (.xls), RTF, or PDF files. This file type will be published in raw format and will not be edited or composed.
\subsubsection*{SI Movies}
Supply Audio Video Interleave (avi), Quicktime (mov), Windows Media (wmv), animated GIF (gif), or MPEG files and submit a brief legend for each movie in a Word or RTF file. All movies should be submitted at the desired reproduction size and length. Movies should be no more than 10 MB in size.
\subsubsection*{Still images}
Authors must provide a still image from each video file. Supply TIFF, EPS, high-resolution PDF, JPEG, or GIF files.
\subsubsection*{Appendices}
PNAS prefers that authors submit individual source files to ensure readability. If this is not possible, supply a single PDF file that contains all of the SI associated with the paper. This file type will be published in raw format and will not be edited or composed.
\matmethods{Please describe your materials and methods here. This can be more than one paragraph, and may contain subsections and equations as required. Authors should include a statement in the methods section describing how readers will be able to access the data in the paper.
\subsection*{Subsection for Method}
Example text for subsection.
}
\showmatmethods % Display the Materials and Methods section
\acknow{Please include your acknowledgments here, set in a single paragraph. Please do not include any acknowledgments in the Supporting Information, or anywhere else in the manuscript.}
\showacknow % Display the acknowledgments section
% \pnasbreak splits and balances the columns before the references.
% If you see unexpected formatting errors, try commenting out this line
% as it can run into problems with floats and footnotes on the final page.
\pnasbreak
% Bibliography
\bibliography{pnas-sample}
\end{document} |
!@(#) show basic line drawing, text and (if applicable) color.
!
! As none of the projection routines have been called we
! move and draw in the initial coordinate system -1.0 to 1.0.
!
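!
! Build sketch (assumes the M_draw module and library are installed where the
! compiler can find them; the library name and flags may differ on your system):
!
!    gfortran fsimple.f90 -lM_draw -o fsimple
!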
program fsimple
!(LICENSE:PD)
use M_draw
integer,parameter :: BLACK = 0, GREEN = 2
character(len=50) :: device
character(len=80) :: fname
character(len=11) :: p
data p/'Hello world'/
print*,'Enter output device:'
read(*,'(a)') device
print*,'Enter a font name:'
read(*,'(a)') fname
! set up device
call vinit(device)
! change font to the argument
call font(fname)
! set current color
call color(BLACK)
! clear screen to current color
call clear
call color(GREEN)
! 2-D move to where we want drawstr to start
call move2(-0.9, 0.9)
! draw string in current color
call drawstr('A Simple Example')
! the next four lines draw the x
call move2(0.0, 0.0)
call draw2(0.76, 0.76)
call move2(0.0, 0.76)
call draw2(0.76, 0.0)
call move2(0.0, 0.5)
call drawstr('x done')
call drawstr('next sentence')
call move2(0.0, 0.1)
do i = 1, 11
call drawchar(p(i:i))
enddo
! the next five lines draw the square
call move2(0.0, 0.0)
call draw2(0.76, 0.0)
call draw2(0.76, 0.76)
call draw2(0.0, 0.76)
call draw2(0.0, 0.0)
! wait for some input
idum=getkey()
! set the screen back to its original state
call vexit
end program fsimple
|
module MultiCellMaps
using Gridap
export MultiCellMap
import Gridap.CellIntegration: integrate
import Base: +, -, *
struct MultiCellMap{N}
blocks::Vector{<:CellMap}
fieldids::Vector{NTuple{N,Int}}
end
function integrate(
mcm::MultiCellMap,
trian::Triangulation{Z},
quad::CellQuadrature{Z}) where Z
blocks = [ integrate(b,trian,quad) for b in mcm.blocks ]
MultiCellArray(blocks,mcm.fieldids)
end
function (+)(a::MultiCellMap{N},b::MultiCellMap{N}) where N
blocks = CellMap[]
append!(blocks,a.blocks)
append!(blocks,b.blocks)
fieldids = NTuple{N,Int}[]
append!(fieldids,a.fieldids)
append!(fieldids,b.fieldids)
MultiCellMap(blocks,fieldids)
end
function (-)(a::MultiCellMap{N},b::MultiCellMap{N}) where N
blocks = CellMap[]
append!(blocks,a.blocks)
append!(blocks,[ -k for k in b.blocks])
fieldids = NTuple{N,Int}[]
append!(fieldids,a.fieldids)
append!(fieldids,b.fieldids)
MultiCellMap(blocks,fieldids)
end
function (+)(b::MultiCellMap{N}) where N
b
end
function (-)(b::MultiCellMap{N}) where N
blocks = CellMap[]
append!(blocks,[ -k for k in b.blocks])
MultiCellMap(blocks,b.fieldids)
end
function (*)(a::Real,b::MultiCellMap{N}) where N
blocks = CellMap[]
append!(blocks,[ a*k for k in b.blocks])
MultiCellMap(blocks,b.fieldids)
end
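# Usage sketch (illustrative only): `a` and `b` are assumed to be `CellMap`s
# defined on the same triangulation `trian` with quadrature `quad`,
# contributing to fields 1 and 2 respectively.
#
#   mcm = MultiCellMap([a, b], [(1,), (2,)])
#   arr = integrate(2 * mcm - mcm, trian, quad)  # returns a MultiCellArray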
end # module
|
section \<open>Frame Inference\<close>
theory Frame_Infer
imports "Sep_Algebra_Add" Basic_VCG
begin
subsection \<open>Separation Algebra Specific Setup of VCG\<close>
lemmas [vcg_prep_ext_rules] = pure_part_split_conj
subsection \<open>Entails Connective\<close>
definition "entails" :: "('a::sep_algebra \<Rightarrow> bool) \<Rightarrow> _ \<Rightarrow> _" (infix "\<turnstile>" 25) where "P \<turnstile> Q \<equiv> \<forall>s. P s \<longrightarrow> Q s"
lemma entails_refl[intro!,simp]: "P \<turnstile> P" by (simp add: entails_def)
lemma entails_false[simp, intro!]: "sep_false \<turnstile> Q" by (simp add: entails_def)
lemma entails_true[simp, intro!]: "P \<turnstile> sep_true" by (simp add: entails_def)
lemma entails_trans[trans]: "P \<turnstile> Q \<Longrightarrow> Q \<turnstile> R \<Longrightarrow> P \<turnstile> R"
by (simp add: entails_def)
lemma entails_mp: "\<lbrakk>Q \<turnstile> Q'; P \<turnstile> Q \<and>* F\<rbrakk> \<Longrightarrow> P \<turnstile> Q' \<and>* F"
apply (clarsimp simp: entails_def)
using sep_conj_impl1 by blast
lemma conj_entails_mono: "P\<turnstile>P' \<Longrightarrow> Q\<turnstile>Q' \<Longrightarrow> P**Q \<turnstile> P'**Q'"
apply (clarsimp simp: entails_def)
using sep_conj_impl by blast
lemma entails_exI: "P\<turnstile>Q x \<Longrightarrow> P\<turnstile>(EXS x. Q x)"
by (metis (mono_tags, lifting) entails_def)
lemma entails_pureI: "\<lbrakk>pure_part P \<Longrightarrow> P\<turnstile>Q\<rbrakk> \<Longrightarrow> P\<turnstile>Q"
by (auto simp: entails_def intro: pure_partI)
lemma entails_lift_extract_simps:
"(\<up>\<Phi> \<turnstile> Q) \<longleftrightarrow> (\<Phi> \<longrightarrow> \<box> \<turnstile> Q)"
"(\<up>\<Phi>**P \<turnstile> Q) \<longleftrightarrow> (\<Phi> \<longrightarrow> P \<turnstile> Q)"
unfolding entails_def
by (auto simp: sep_algebra_simps pred_lift_extract_simps)
lemma entails_eq_iff: "A=B \<longleftrightarrow> (A\<turnstile>B) \<and> (B\<turnstile>A)"
unfolding entails_def by (auto)
lemma entails_eqI: "\<lbrakk> A\<turnstile>B; B\<turnstile>A \<rbrakk> \<Longrightarrow> A=B" by (simp add: entails_eq_iff)
definition "is_sep_red P' Q' P Q \<longleftrightarrow> (\<forall>Ps Qs. (P'**Ps\<turnstile>Q'**Qs) \<longrightarrow> (P**Ps\<turnstile>Q**Qs))"
lemma is_sep_redI: "\<lbrakk>\<And>Ps Qs. P'**Ps\<turnstile>Q'**Qs \<Longrightarrow> P**Ps\<turnstile>Q**Qs \<rbrakk> \<Longrightarrow> is_sep_red P' Q' P Q"
unfolding is_sep_red_def by blast
lemma is_sep_redD: "\<lbrakk>is_sep_red P' Q' P Q; P'**Ps\<turnstile>Q'**Qs\<rbrakk> \<Longrightarrow> P**Ps\<turnstile>Q**Qs"
unfolding is_sep_red_def by blast
subsection \<open>Tags for Frame Inference\<close>
definition "FRI_END \<equiv> \<box>"
definition "FRAME_INFER P Qs F \<equiv> P \<turnstile> Qs ** F"
lemmas fri_prems_cong = arg_cong[where f="\<lambda>P. FRAME_INFER P _ _"]
lemma fri_prems_cong_meta: "P\<equiv>P' \<Longrightarrow> FRAME_INFER P Q F \<equiv> FRAME_INFER P' Q F" by simp
lemmas fri_concls_cong = arg_cong[where f="\<lambda>P. FRAME_INFER _ P _"]
lemma fri_prepare: "FRAME_INFER Ps (Qs**FRI_END) F \<Longrightarrow> FRAME_INFER Ps Qs F"
by (auto simp: FRI_END_def)
lemma fri_prepare_round: "FRAME_INFER (\<box>**Ps) Qs F \<Longrightarrow> FRAME_INFER Ps Qs F"
by simp
lemma fri_end: (* Potential premises get solved by entails_refl. *)
"Ps \<turnstile> F \<Longrightarrow> FRAME_INFER Ps FRI_END F"
by (auto simp: FRAME_INFER_def FRI_END_def)
lemma fri_step_rl:
assumes "P \<turnstile> Q" (* Gets instantiated with frame_infer_rules *)
assumes "FRAME_INFER Ps Qs F"
shows "FRAME_INFER (P**Ps) (Q**Qs) F"
using assms
unfolding FRAME_INFER_def
by (simp add: conj_entails_mono)
lemma fri_reduce_rl:
assumes "is_sep_red P' Q' P Q"
assumes "FRAME_INFER (P'**Ps) (Q'**Qs) F"
shows "FRAME_INFER (P**Ps) (Q**Qs) F"
using assms unfolding FRAME_INFER_def
by (auto dest: is_sep_redD)
subsection \<open>Configurable Rule Sets\<close>
(*named_theorems fri_prepare_simps*)
named_simpset fri_prepare_simps = HOL_basic_ss_nomatch
named_simpset fri_prepare_precond_simps = HOL_basic_ss_nomatch
named_theorems fri_rules
named_theorems fri_red_rules
lemma fri_empty_concl_simp: "(\<box> ** FRI_END) = FRI_END" by simp
lemmas [named_ss fri_prepare_simps] = sep_conj_assoc sep_conj_empty sep_conj_empty' sep_conj_exists
declare entails_refl[fri_rules]
lemma fri_move_sep_true_forward[named_ss fri_prepare_simps]:
"(sep_true ** sep_true) = sep_true"
"(sep_true ** (sep_true**A)) = (sep_true ** A)"
"NO_MATCH sep_true A \<Longrightarrow> (A ** sep_true) = (sep_true ** A)"
"NO_MATCH sep_true A \<Longrightarrow> (A ** (sep_true ** B)) = (sep_true ** (A**B))"
by (auto simp: sep_algebra_simps sep_conj_ac)
lemma fri_prepare_sep_true_concl[named_ss fri_prepare_simps]:
"FRAME_INFER Ps (sep_true ** Q) \<box> = FRAME_INFER Ps Q sep_true"
by (auto simp: FRAME_INFER_def sep_algebra_simps sep_conj_ac)
lemma fri_exI: "FRAME_INFER Ps (Qs x) F \<Longrightarrow> FRAME_INFER Ps (EXS x. Qs x) F"
by (auto simp: FRAME_INFER_def sep_algebra_simps intro: entails_exI)
lemma fri_trueI: "FRAME_INFER Ps Qs sep_true \<Longrightarrow> FRAME_INFER (sep_true ** Ps) Qs sep_true"
apply (simp add: FRAME_INFER_def sep_algebra_simps)
by (smt entails_mp entails_refl fri_move_sep_true_forward(2) sep.mult_commute)
lemma fri_pureI: "\<lbrakk>P \<Longrightarrow> FRAME_INFER A Q F\<rbrakk> \<Longrightarrow> FRAME_INFER (\<up>P ** A) Q F"
by (cases P) (auto simp: FRAME_INFER_def sep_algebra_simps)
lemmas [named_ss fri_prepare_precond_simps] = pred_lift_extract_simps
lemmas [named_ss fri_prepare_precond_simps cong] = fri_prems_cong
subsection \<open>ML Code\<close>
ML \<open>
structure Frame_Infer = struct
open VCG_Lib
(**** Utilities *)
val simp_ai_tac = simp_only_tac @{thms sep_conj_assoc sep_conj_empty sep_conj_empty'}
val simp_a_tac = simp_only_tac @{thms sep_conj_assoc}
val rewrite_a_conv = rewrite_only_conv @{thms sep_conj_assoc}
val rewrite_ai_conv = rewrite_only_conv @{thms sep_conj_assoc sep_conj_empty sep_conj_empty'}
(**** Rotation Tactic *)
local
fun eq_rotate1_tac ctxt = CONVERSION (Refine_Util.HOL_concl_conv (
fn ctxt => Conv.arg1_conv (
Conv.rewr_conv @{thm sep_conj_commute[THEN eq_reflection]}
then_conv Simplifier.rewrite (put_simpset HOL_basic_ss ctxt addsimps @{thms sep_conj_assoc})
)) ctxt)
fun eq_rotateN_tac ctxt n = WITH_concl (fn
@{mpat "Trueprop (?lhs = _)"} => let
val nc = length (SepConj.break_sep_conj lhs)
val n = n mod nc
fun tc 0 = K all_tac
| tc n = eq_rotate1_tac ctxt THEN' tc (n-1)
in tc n end
| _ => K no_tac
)
(*
fun eq_rotateN_tac _ 0 = K all_tac
| eq_rotateN_tac ctxt n = eq_rotate1_tac ctxt THEN' eq_rotateN_tac ctxt (n-1)
*)
in
(*
Takes a congruence rule of the form a=b \<Longrightarrow> h a = h b,
then expects a subgoal of the form h (a\<^sub>1**...**a\<^sub>n), and produces
a sequence of new subgoals h (...) corresponding to all rotations of the a\<^sub>is.
*)
fun rotations_tac cong_rls ctxt = let
val cong_rls = map_filter (try (fn thm => @{thm iffD2} OF [thm])) cong_rls
in
resolve_tac ctxt cong_rls
THEN'
WITH_concl (
fn @{mpat \<open>Trueprop (?lhs = _)\<close>} => let
val n = length (SepConj.break_sep_conj lhs)
fun tac n = eq_rotateN_tac ctxt n
val tacs = map tac (0 upto n-1)
in
APPEND_LIST' tacs
end
| _ => K no_tac
)
THEN'
resolve_tac ctxt @{thms refl}
end
fun rotate_tac cong_rls ctxt n = let
val cong_rls = map_filter (try (fn thm => @{thm iffD2} OF [thm])) cong_rls
in
resolve_tac ctxt cong_rls
THEN' eq_rotateN_tac ctxt n
THEN' resolve_tac ctxt @{thms refl}
end
end
(**** Frame Inference Tactic *)
fun start_tac ctxt =
asm_simp_named_tac ctxt @{named_simpset fri_prepare_simps}
THEN' asm_simp_named_tac ctxt @{named_simpset fri_prepare_precond_simps}
THEN' REPEAT' (resolve_tac ctxt @{thms fri_exI fri_trueI fri_pureI})
THEN' resolve_tac ctxt @{thms fri_prepare}
THEN' simp_only_tac @{thms sep_conj_assoc fri_empty_concl_simp} ctxt
fun end_tac ctxt =
simp_ai_tac ctxt
THEN' resolve_tac ctxt @{thms fri_end}
THEN' resolve_tac ctxt @{thms entails_refl entails_true}
fun start_round_tac ctxt =
simp_ai_tac ctxt
THEN' resolve_tac ctxt @{thms fri_prepare_round}
THEN' simp_a_tac ctxt
fun solve_round_thms ctxt = let
val thms1 = Named_Theorems.get ctxt @{named_theorems fri_rules}
|> map (fn thm => @{thm fri_step_rl} OF [thm])
val thms2 = Named_Theorems.get ctxt @{named_theorems fri_red_rules}
|> map (fn thm => @{thm fri_reduce_rl} OF [thm])
in thms1@thms2 end
fun solve_round_tac ctxt = let
val thms = solve_round_thms ctxt
in
Basic_VCG.step_precond_tac ctxt (resolve_tac ctxt thms)
end
fun round_tac_aux ctxt =
start_round_tac ctxt
THEN' rotations_tac @{thms fri_prems_cong} ctxt
THEN' solve_round_tac ctxt
fun round_tac ctxt =
round_tac_aux ctxt
ORELSE' (CHANGED o asm_full_simp_tac ctxt ORELSE' round_tac_aux ctxt)
fun infer_tac ctxt = start_tac ctxt THEN' REPEAT' (end_tac ctxt ORELSE' round_tac ctxt)
(**** Debugging Tactics *)
fun dbg_solve_round_tac ctxt = let
val thms = solve_round_thms ctxt
in
Basic_VCG.step_precond_tac ctxt (resolve_tac ctxt thms)
ORELSE' resolve_tac ctxt thms
end
fun dbg_round_tac_aux ctxt =
start_round_tac ctxt
THEN' rotations_tac @{thms fri_prems_cong} ctxt
THEN' dbg_solve_round_tac ctxt
fun dbg_round_tac ctxt =
dbg_round_tac_aux ctxt
ORELSE' (CHANGED o asm_full_simp_tac ctxt ORELSE' dbg_round_tac_aux ctxt)
end
\<close>
subsubsection \<open>Methods\<close>
definition "FRAME P Q F \<equiv> P \<turnstile> Q ** F"
definition "ENTAILS P Q \<equiv> P \<turnstile> Q"
lemma ENTAILSD: "ENTAILS P Q \<Longrightarrow> P \<turnstile> Q" by (simp add: ENTAILS_def)
lemma fri_startI:
"\<lbrakk>pure_part P \<Longrightarrow> FRAME_INFER P Q F\<rbrakk> \<Longrightarrow> FRAME P Q F"
"\<lbrakk>pure_part P \<Longrightarrow> FRAME_INFER P Q \<box>\<rbrakk> \<Longrightarrow> ENTAILS P Q"
unfolding FRAME_INFER_def FRAME_def ENTAILS_def
by (auto intro: entails_pureI)
lemma fri_startI_extended:
"\<lbrakk>pure_part P \<Longrightarrow> FRAME_INFER P Q F\<rbrakk> \<Longrightarrow> FRAME P Q F"
"\<lbrakk>pure_part P \<Longrightarrow> FRAME_INFER P Q \<box>\<rbrakk> \<Longrightarrow> ENTAILS P Q"
"\<lbrakk>pure_part P \<Longrightarrow> FRAME_INFER P Q \<box>\<rbrakk> \<Longrightarrow> P \<turnstile> Q"
unfolding FRAME_INFER_def FRAME_def ENTAILS_def
by (auto intro: entails_pureI)
method_setup fri_rotations =
\<open>(Attrib.thms >> (fn cong_rls => fn ctxt => SIMPLE_METHOD' (Frame_Infer.rotations_tac cong_rls ctxt )))\<close>
\<open>Generate sequence of rotations wrt. specified congruence rule\<close>
method_setup fri_rotate =
\<open>(Attrib.thms -- Scan.lift (Scan.optional (Parse.$$$ ":" |-- Parse.int) 1) >>
(fn (cong_rls,n) => fn ctxt => SIMPLE_METHOD' (Frame_Infer.rotate_tac cong_rls ctxt n)))\<close>
\<open>Rotate left n steps wrt. specified congruence rule\<close>
method_setup fri_keep_aux =
\<open>(Scan.succeed (fn ctxt => SIMPLE_METHOD' (Frame_Infer.infer_tac ctxt)))\<close>
\<open>Frame Inference, solve from left to right, as far as possible\<close>
method fri_keep = (rule fri_startI_extended)?; fri_keep_aux
method fri = fri_keep;fail
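text \<open>Example (sketch, not part of the original development): a goal of the form
  \<open>A ** B \<turnstile> B ** A\<close> is discharged by the \<open>fri\<close> method, which rotates the premises
  and closes the matching pairs with @{thm entails_refl}.\<close>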
method_setup fri_dbg_step =
\<open>(Scan.succeed (fn ctxt => SIMPLE_METHOD' (Frame_Infer.dbg_round_tac ctxt)))\<close>
\<open>Frame Inference, one round, keep unsolved goals\<close>
method_setup fri_dbg_end =
\<open>(Scan.succeed (fn ctxt => SIMPLE_METHOD' (Frame_Infer.end_tac ctxt)))\<close>
\<open>Frame Inference, end inference\<close>
subsubsection \<open>Solver Setup\<close>
declaration \<open>
K (Basic_VCG.add_solver (@{thms fri_startI},@{binding infer_frame},Frame_Infer.infer_tac))
\<close>
method_setup fri_dbg_start =
\<open>(Scan.succeed (fn ctxt => SIMPLE_METHOD' (TRY o resolve_tac ctxt @{thms fri_startI_extended} THEN' Frame_Infer.start_tac ctxt)))\<close>
\<open>Frame Inference, start\<close>
subsection \<open>Solving Pure Assertions\<close>
lemma fri_pure_rl[fri_rules]: "PRECOND (SOLVE_DEFAULT_AUTO \<Phi>) \<Longrightarrow> \<box>\<turnstile>\<up>\<Phi>"
by (auto simp: sep_algebra_simps vcg_tag_defs)
abbreviation pred_lift_ASM ("\<up>\<^sub>a_" [100] 100) where "\<up>\<^sub>a\<Phi> \<equiv> \<up>SOLVE_ASM \<Phi>"
abbreviation pred_lift_AUTO_DEFER ("\<up>\<^sub>d_" [100] 100) where "\<up>\<^sub>d\<Phi> \<equiv> \<up>SOLVE_AUTO_DEFER \<Phi>"
abbreviation pred_lift_AUTO ("\<up>\<^sub>!_" [100] 100) where "\<up>\<^sub>!\<Phi> \<equiv> \<up>SOLVE_AUTO \<Phi>"
subsection \<open>Extraction\<close>
text \<open>A transformer that applies a configurable set of simplification rules
only to certain parts of the subgoal, as specified by a configurable set of
congruence rules. Afterwards, VCG normalization is performed.
The envisaged use of this transformer is to process the
postcondition of a Hoare-triple when it is transformed to the current symbolic state,
extracting all pure content.
\<close>
named_theorems fri_extract_congs \<open>Congruence rules for extraction\<close>
named_theorems fri_extract_simps \<open>Simplification rules for extraction\<close>
lemmas fri_basic_extract_simps = pred_lift_move_merge_simps sep_conj_exists
definition EXTRACT :: "bool \<Rightarrow> bool" where [vcg_tag_defs]: "EXTRACT x \<equiv> x"
lemma
EXTRACTI: "x \<Longrightarrow> EXTRACT x" and
EXTRACTD: "EXTRACT x \<Longrightarrow> x"
by (auto simp: vcg_tag_defs)
ML \<open>
structure Fri_Extract = struct
(* TODO: Move *)
(* Conversion wrt. congruence rule. The rule must have the form a\<equiv>b \<Longrightarrow> c\<equiv>d. *)
fun cong_rl_conv (conv:conv) rule ct = let
val rule = Thm.incr_indexes (Thm.maxidx_of_cterm ct + 1) rule;
val lhs = Thm.cprop_of rule |> Thm.dest_implies |> snd |> Thm.dest_equals_lhs;
val rule = Thm.rename_boundvars (Thm.term_of lhs) (Thm.term_of ct) rule;
val rule =
Thm.instantiate (Thm.match (lhs, ct)) rule
handle Pattern.MATCH => raise CTERM ("cong_rl_conv", [lhs, ct]);
val lhs' = Thm.cprop_of rule |> Thm.dest_implies |> fst |> Thm.dest_equals_lhs;
in rule OF [conv lhs'] end
fun cong_rls_conv conv rules =
Conv.first_conv (map (cong_rl_conv conv) rules)
fun extract_basic_tac ctxt thms = let
val ctxt = Named_Simpsets.put @{named_simpset Main_ss} ctxt addsimps @{thms fri_basic_extract_simps} addsimps thms
val cong_thms = Named_Theorems.get ctxt @{named_theorems fri_extract_congs}
in
CONVERSION (Conv.top_sweep_conv (fn ctxt => cong_rls_conv (Simplifier.rewrite ctxt) cong_thms) ctxt)
end
fun extract_tac ctxt thms =
extract_basic_tac ctxt (
Named_Theorems.get ctxt @{named_theorems fri_extract_simps}
@ Named_Theorems.get ctxt @{named_theorems vcg_tag_defs}
@ thms
)
end
\<close>
declaration \<open>
let
in K (I
#> Basic_VCG.add_xformer (@{thms EXTRACTI},@{binding extract_xformer}, fn ctxt =>
Fri_Extract.extract_tac ctxt []
THEN' Basic_VCG.vcg_normalize_tac ctxt
)
) end
\<close>
method_setup fri_extract_basic = \<open>Scan.lift (Args.mode "no_norm") -- Attrib.thms
>> (fn (no_norm,thms) => fn ctxt => SIMPLE_METHOD' (
Fri_Extract.extract_basic_tac ctxt thms
THEN' (if no_norm then K all_tac else Basic_VCG.vcg_normalize_tac ctxt)
))\<close>
\<open>Extraction of pure content, only basic rules\<close>
method_setup fri_extract = \<open>Scan.lift (Args.mode "no_norm") -- Attrib.thms
>> (fn (no_norm,thms) => fn ctxt => SIMPLE_METHOD' (
Fri_Extract.extract_tac ctxt thms
THEN' (if no_norm then K all_tac else Basic_VCG.vcg_normalize_tac ctxt)
))\<close>
\<open>Extraction of pure content\<close>
subsection \<open>Basic Methods\<close>
lemma entails_pre_cong: "A=B \<Longrightarrow> (A\<turnstile>C) = (B\<turnstile>C)" by simp
lemma entails_post_cong: "B=C \<Longrightarrow> (A\<turnstile>B) = (A\<turnstile>C)" by simp
thm conj_entails_mono
lemma sep_drule:
"A \<turnstile> B \<Longrightarrow> B \<turnstile> Q \<Longrightarrow> A \<turnstile> Q"
"A \<turnstile> B \<Longrightarrow> B**F \<turnstile> Q \<Longrightarrow> A**F \<turnstile> Q"
apply (rule entails_trans; assumption)
using entails_mp entails_trans by blast
lemma sep_rule:
"A \<turnstile> B \<Longrightarrow> P \<turnstile> A \<Longrightarrow> P \<turnstile> B"
"A \<turnstile> B \<Longrightarrow> P \<turnstile> A**F \<Longrightarrow> P \<turnstile> B**F"
apply (rule entails_trans; assumption)
using entails_mp entails_trans by blast
(* TODO/FIXME: Frame inference does not work the right way round for backwards reasoning *)
lemma sep_rule':
assumes "Q\<^sub>1 \<turnstile> Q\<^sub>1'"
assumes "FRAME_INFER Q Q\<^sub>1' F" (* ? *)
assumes "P \<turnstile> Q\<^sub>1 ** F"
shows "P \<turnstile> Q"
oops
lemma sep_drule':
assumes "P\<^sub>1 \<turnstile> P\<^sub>1'"
assumes "FRAME_INFER P P\<^sub>1 F"
assumes "P\<^sub>1' ** F \<turnstile> Q"
shows "P \<turnstile> Q"
using assms
apply (auto simp: FRAME_INFER_def entails_def)
using sep_conj_impl by blast
thm entails_trans
method_setup sep_drule = \<open>Attrib.thms >> (fn thms => fn ctxt => SIMPLE_METHOD' (let
val thms0 = map_filter (try (fn thm => @{thm entails_trans} OF [thm])) thms
val thms = map_product (fn a => try (fn b => a OF [b])) @{thms sep_drule'} thms
|> map_filter I
in
resolve_tac ctxt thms0
ORELSE'
resolve_tac ctxt thms
THEN' SOLVED' (Frame_Infer.infer_tac ctxt)
end))\<close>
method_setup sep_drule_simple = \<open>Attrib.thms >> (fn thms => fn ctxt => SIMPLE_METHOD' (let
val thms = map_product (fn a => try (fn b => a OF [b])) @{thms sep_drule} thms
|> map_filter I
in
Frame_Infer.rotations_tac @{thms entails_pre_cong} ctxt
THEN' resolve_tac ctxt thms
THEN' Frame_Infer.simp_a_tac ctxt
end))\<close>
method_setup sep_rule = \<open>Attrib.thms >> (fn thms => fn ctxt => SIMPLE_METHOD' (let
val thms = map_product (fn a => try (fn b => a OF [b])) @{thms sep_rule} thms
|> map_filter I
in
Frame_Infer.rotations_tac @{thms entails_post_cong} ctxt
THEN' resolve_tac ctxt thms
THEN' Frame_Infer.simp_a_tac ctxt
end))\<close>
subsection \<open>Utilities\<close>
lemma fri_red_img_is: "PRECOND (SOLVE_AUTO (k\<in>I)) \<Longrightarrow> is_sep_red (\<Union>*i\<in>I-{k}. P i) \<box> (\<Union>*i\<in>I. P i) (P k)"
unfolding vcg_tag_defs apply (rule is_sep_redI)
by (auto simp: conj_entails_mono sep_set_img_remove)
lemma fri_red_img_si: "PRECOND (SOLVE_AUTO (k\<in>I)) \<Longrightarrow> is_sep_red \<box> (\<Union>*i\<in>I-{k}. P i) (P k) (\<Union>*i\<in>I. P i)"
unfolding vcg_tag_defs apply (rule is_sep_redI)
by (smt conj_entails_mono entails_refl sep.add.left_neutral sep.mult.left_commute sep.mult_commute sep_set_img_remove)
lemma fri_red_img_ss: "PRECOND (SOLVE_AUTO (I\<inter>I' \<noteq> {})) \<Longrightarrow> is_sep_red (\<Union>*i\<in>I-I'. P i) (\<Union>*i\<in>I'-I. P i) (\<Union>*i\<in>I. P i) (\<Union>*i\<in>I'. P i)"
unfolding vcg_tag_defs apply (rule is_sep_redI)
proof -
fix Ps Qs
assume "I \<inter> I' \<noteq> {}"
and A: "(\<Union>*i\<in>I - I'. P i) \<and>* Ps \<turnstile> (\<Union>*i\<in>I' - I. P i) \<and>* Qs"
have DJ: "(I-I') \<inter> (I\<inter>I') = {}" "(I'-I) \<inter> (I\<inter>I') = {}" by auto
have II: "(I-I') \<union> (I\<inter>I') = I" "(I'-I) \<union> (I\<inter>I') = I'" by auto
show "(\<Union>*i\<in>I. P i) \<and>* Ps \<turnstile> (\<Union>*i\<in>I'. P i) \<and>* Qs"
unfolding sep_set_img_union[OF DJ(1), simplified II]
unfolding sep_set_img_union[OF DJ(2), simplified II]
by (smt A conj_entails_mono entails_def semigroup.assoc sep.mult.semigroup_axioms sep.mult_commute)
qed
lemmas fri_red_img = fri_red_img_is fri_red_img_si fri_red_img_ss
end
|
[STATEMENT]
lemma in_epsclosure_steps:
"[| (p,q) : steps A w; (q,r) : (eps A)\<^sup>* |] ==> (p,r) : steps A w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(p, q) \<in> NAe.steps A w; (q, r) \<in> (eps A)\<^sup>*\<rbrakk> \<Longrightarrow> (p, r) \<in> NAe.steps A w
[PROOF STEP]
apply(rule epsclosure_steps[THEN equalityE])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(p, q) \<in> NAe.steps A w; (q, r) \<in> (eps A)\<^sup>*; NAe.steps ?A1 ?w1 O (eps ?A1)\<^sup>* \<subseteq> NAe.steps ?A1 ?w1; NAe.steps ?A1 ?w1 \<subseteq> NAe.steps ?A1 ?w1 O (eps ?A1)\<^sup>*\<rbrakk> \<Longrightarrow> (p, r) \<in> NAe.steps A w
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
Set Universe Polymorphism.
Section Graph.
Class Graph := {
Vertex : Type;
Edge : Vertex -> Vertex -> Type
}.
Context `{Graph}.
Inductive Path : Vertex -> Vertex -> Type :=
| refl {a} : Path a a
| step {a b c} : Edge a b -> Path b c -> Path a c.
End Graph.
|
[STATEMENT]
lemma action_closed: "s \<in> S \<Longrightarrow> cfg \<in> cfg_on s \<Longrightarrow> t \<in> action cfg \<Longrightarrow> t \<in> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>s \<in> S; cfg \<in> cfg_on s; t \<in> set_pmf (action cfg)\<rbrakk> \<Longrightarrow> t \<in> S
[PROOF STEP]
using cfg_onD_action[of cfg s] invariant[of s]
[PROOF STATE]
proof (prove)
using this:
cfg \<in> cfg_on s \<Longrightarrow> action cfg \<in> K s
\<lbrakk>s \<in> S; ?D \<in> K s\<rbrakk> \<Longrightarrow> \<forall>s'\<in>set_pmf ?D. s' \<in> S
goal (1 subgoal):
1. \<lbrakk>s \<in> S; cfg \<in> cfg_on s; t \<in> set_pmf (action cfg)\<rbrakk> \<Longrightarrow> t \<in> S
[PROOF STEP]
by auto |
-- examples in "Type-Driven Development with Idris"
-- chapter 8
import Data.Vect
-- check that all functions are total
%default total
--
-- section 8.1
--
data Vector : Nat -> Type -> Type where
Nil : Vector Z a
(::) : a -> Vector k a -> Vector (S k) a
-- exactLength : (len : Nat) -> (input : Vector m a) -> Maybe (Vector len a)
-- exactLength {m} len input = case m == len of
-- False => Nothing
-- True => Just ?exactLength_rhs_2
data EqNat : (num1 : Nat) -> (num2 : Nat) -> Type where
Same : (num : Nat) -> EqNat num num
sameS : (k : Nat) -> (j : Nat) -> (eq : EqNat k j) -> EqNat (S k) (S j)
sameS j j (Same j) = Same (S j)
checkEqNat : (num1 : Nat) -> (num2 : Nat) -> Maybe (EqNat num1 num2)
checkEqNat Z Z = Just (Same 0)
checkEqNat Z (S k) = Nothing
checkEqNat (S k) Z = Nothing
checkEqNat (S k) (S j) = checkEqNat k j >>= Just . (sameS _ _)
exactLength : (len : Nat) -> (input : Vector m a) -> Maybe (Vector len a)
exactLength {m} len input = case checkEqNat m len of
Nothing => Nothing
(Just (Same len)) => Just input
--
-- section 8.2
--
myReverse : Vect n elem -> Vect n elem
myReverse [] = []
myReverse {n = S k} (x :: xs)
= let result = myReverse xs ++ [x] in
rewrite plusCommutative 1 k in result
reverse_proof : (x : elem) -> (xs : Vect len elem) ->
Vect (len + 1) elem -> Vect (S len) elem
reverse_proof {len} x xs result = rewrite plusCommutative 1 len in result
myReverse' : Vect n elem -> Vect n elem
myReverse' [] = []
myReverse' (x :: xs) = reverse_proof x xs (myReverse' xs ++ [x])
append_nil : (ys : Vect m elem) ->
Vect (plus m 0) elem
append_nil {m} ys = rewrite plusZeroRightNeutral m in ys
append_xs : Vect (S (m + len)) elem -> Vect (plus m (S len)) elem
append_xs {m} {len} xs = rewrite sym (plusSuccRightSucc m len) in xs
myAppend : Vect n elem -> Vect m elem -> Vect (m + n) elem
myAppend [] ys = append_nil ys
myAppend (x :: xs) ys = append_xs (x :: myAppend xs ys)
--
-- section 8.3
--
twoPlusTwoNotFive : 2 + 2 = 5 -> Void
twoPlusTwoNotFive Refl impossible
valueNotSucc : x = S x -> Void
valueNotSucc Refl impossible
zeroNotSucc : (0 = S k) -> Void
zeroNotSucc Refl impossible
succNotZero : (S k = 0) -> Void
succNotZero Refl impossible
noRec : (contra : (k = j) -> Void) -> (S k = S j) -> Void
noRec contra Refl = contra Refl
checkEqNat2 : (num1 : Nat) -> (num2 : Nat) -> Dec (num1 = num2)
checkEqNat2 Z Z = Yes Refl
checkEqNat2 Z (S k) = No zeroNotSucc
checkEqNat2 (S k) Z = No succNotZero
checkEqNat2 (S k) (S j) = case checkEqNat2 k j of
(Yes prf) => Yes (cong prf)
(No contra) => No (noRec contra)
exactLength2 : (len : Nat) -> (input : Vect m a) -> Maybe (Vect len a)
exactLength2 {m} len input = case decEq m len of
(Yes Refl) => Just input
(No contra) => Nothing
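-- REPL sketch: `exactLength2 3 [1, 2, 3]` yields `Just [1, 2, 3]`, while
-- `exactLength2 2 [1, 2, 3]` yields `Nothing`.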
|
Require Import Coq.ZArith.ZArith.
Require Import Coq.Setoids.Setoid.
Require Import Coq.Relations.Relation_Definitions.
Require Import Poulet4.P4light.Syntax.Typed.
Require Import Poulet4.P4light.Syntax.Syntax.
Require Import Poulet4.P4light.Syntax.Value.
Require Import Poulet4.P4light.Semantics.Semantics.
Require Import Poulet4.Utils.Utils.
Require Import ProD3.core.Coqlib.
Require Import ProD3.core.SvalRefine.
Require Import ProD3.core.AssertionLang.
Require Import ProD3.core.AssertionNotations.
Require Import ProD3.core.Hoare.
Require Import ProD3.core.ExtPred.
Require Import Poulet4.P4light.Syntax.SyntaxUtil.
Require Import Hammer.Plugin.Hammer.
Section Implies.
Context {tags_t: Type} {tags_t_inhabitant : Inhabitant tags_t}.
Notation Val := (@ValueBase bool).
Notation Sval := (@ValueBase (option bool)).
Notation Lval := ValueLvalue.
Notation ident := (String.string).
Notation path := (list ident).
Notation Expression := (@Expression tags_t).
Context {target : @Target tags_t Expression}.
Definition mem_simplify_aux (a : mem_assertion) '((p, sv) : path * Sval) : option (Sval * Sval) :=
match AList.get a p with
| Some sv' => Some (sv, sv')
| None => None
end.
Lemma mem_simplify_aux_sound : forall a psv svp,
mem_simplify_aux a psv = Some svp ->
uncurry sval_refine svp ->
forall m, mem_denote a m -> mem_satisfies_unit m psv.
Proof.
intros.
destruct psv as [p sv].
unfold mem_simplify_aux in *.
destruct (AList.get a p) eqn:H_get; inv H.
eapply mem_denote_get in H_get; eauto.
unfold mem_satisfies_unit in *.
destruct (PathMap.get p m); only 2 : inv H_get.
eapply sval_refine_trans; eauto.
Qed.
Definition mem_implies_simplify (a a' : mem_assertion) : option (list (Sval * Sval)) :=
lift_option (map (mem_simplify_aux a) a').
Lemma mem_implies_simplify_sound : forall a a' svps,
mem_implies_simplify a a' = Some svps ->
Forall (uncurry sval_refine) svps ->
forall m, mem_denote a m -> mem_denote a' m.
Proof.
intros.
unfold mem_implies_simplify in *.
apply lift_option_inv in H.
unfold mem_denote, mem_satisfies.
rewrite fold_right_and_True in *.
list_simplify.
apply mem_simplify_aux_sound with a (Znth i svps).
- list_simplify.
(* list_solve cannot perform this simplification because the implicit types
mem_unit and (path * Sval) are not unified automatically. *)
rewrite Znth_map in H12 by auto. list_solve.
- list_solve.
- auto.
Qed.
Definition ext_implies (a a' : ext_assertion) : Prop :=
forall es, ext_denote a es -> ext_denote a' es.
Global Add Parametric Morphism : ext_implies with
signature ext_assertion_equiv ==> ext_assertion_equiv ==> iff as ext_implies_mor.
Proof.
intros. unfold ext_implies.
rewrite H, H0.
reflexivity.
Qed.
Lemma ext_implies_refl : forall (a : ext_assertion),
ext_implies a a.
Proof. unfold ext_implies; auto. Qed.
Lemma ext_implies_nil : forall a, ext_implies a [].
Proof. repeat intro. red. red. easy. Qed.
Lemma ext_implies_cons : forall (a c : ext_assertion) b,
ext_implies a (b :: c) <-> (ext_implies a [b] /\ ext_implies a c).
Proof.
intros. unfold ext_implies, ext_denote. split; intros.
- split; intros; apply H in H0; red in H0; simpl in H0; red; simpl; destruct H0; auto.
- destruct H. specialize (H _ H0). specialize (H1 _ H0). clear H0.
unfold ext_satisfies in *. simpl in *. destruct H. split; auto.
Qed.
Lemma ext_implies_prop_intro : forall pre (P : Prop),
P ->
ext_implies pre [ExtPred.prop P].
Proof.
intros.
sauto.
Qed.
Lemma ext_cons_implies : forall a (b c : ext_assertion),
(ext_implies [a] c \/ ext_implies b c) -> ext_implies (a :: b) c.
Proof.
intros. destruct H; unfold ext_implies, ext_denote in *; intros; apply H.
- destruct H0. unfold ext_satisfies. simpl. auto.
- destruct H0. auto.
Qed.
Lemma ext_implies_singleton : forall (p : path) (eo1 eo2 : extern_object),
eo1 = eo2 ->
ext_implies [ExtPred.singleton p eo1] [ExtPred.singleton p eo2].
Proof.
intros; subst; apply ext_implies_refl.
Qed.
Lemma implies_simplify : forall pre_mem pre_ext post_mem post_ext svps,
mem_implies_simplify pre_mem post_mem = Some svps ->
Forall (uncurry sval_refine) svps ->
ext_implies pre_ext post_ext ->
implies (MEM pre_mem (EXT pre_ext)) (MEM post_mem (EXT post_ext)).
Proof.
unfold implies; intros.
destruct st as [m es].
destruct H2; split.
- eapply mem_implies_simplify_sound; eauto.
- apply H1. auto.
Qed.
Lemma arg_implies_simplify : forall pre_arg pre_mem pre_ext post_arg post_mem post_ext svps,
Forall2 sval_refine post_arg pre_arg ->
mem_implies_simplify pre_mem post_mem = Some svps ->
Forall (uncurry sval_refine) svps ->
ext_implies pre_ext post_ext ->
arg_implies (ARG pre_arg (MEM pre_mem (EXT pre_ext))) (ARG post_arg (MEM post_mem (EXT post_ext))).
Proof.
unfold arg_implies; intros.
destruct H3.
split. 2 : { eapply implies_simplify; eauto. }
eapply Forall2_trans. 1 : { unfold rel_trans. apply sval_refine_trans. }
all : eauto.
Qed.
Lemma ret_implies_simplify : forall pre_ret pre_mem pre_ext post_ret post_mem post_ext svps,
sval_refine post_ret pre_ret ->
mem_implies_simplify pre_mem post_mem = Some svps ->
Forall (uncurry sval_refine) svps ->
ext_implies pre_ext post_ext ->
ret_implies (RET pre_ret (MEM pre_mem (EXT pre_ext))) (RET post_ret (MEM post_mem (EXT post_ext))).
Proof.
unfold ret_implies; intros.
destruct H3.
split. 2 : { eapply implies_simplify; eauto. }
clear -H H3. unfold ret_denote, ret_satisfies in *.
intros sv' H0. specialize (H3 sv' H0).
eapply sval_refine_trans; eauto.
Qed.
Lemma arg_ret_implies_simplify : forall pre_arg pre_ret pre_mem pre_ext post_arg post_ret post_mem post_ext svps,
Forall2 sval_refine post_arg pre_arg ->
sval_refine post_ret pre_ret ->
mem_implies_simplify pre_mem post_mem = Some svps ->
Forall (uncurry sval_refine) svps ->
ext_implies pre_ext post_ext ->
arg_ret_implies
(ARG_RET pre_arg pre_ret (MEM pre_mem (EXT pre_ext)))
(ARG_RET post_arg post_ret (MEM post_mem (EXT post_ext))).
Proof.
unfold arg_ret_implies; intros.
destruct H4.
split. 2 : { eapply ret_implies_simplify; eauto. }
eapply Forall2_trans. 1 : { unfold rel_trans. apply sval_refine_trans. }
all : eauto.
Qed.
End Implies.
Ltac simpl_single_ext_implies :=
first [
apply (@id (ext_implies [] _));
fail "No remaining assumptions"
| apply (@id (ext_implies (_ :: _) _));
apply ext_cons_implies;
first [
left;
first [
apply ext_implies_refl
| lazymatch goal with
| |- ext_implies [ExtPred.singleton ?p1 ?o1] [ExtPred.singleton ?p2 ?o2] =>
simple apply ext_implies_singleton;
try reflexivity
end
]
| right; simpl_single_ext_implies
]
].
Ltac simpl_ext_implies :=
first [
apply (@id (ext_implies _ []));
apply ext_implies_nil
| apply (@id (ext_implies _ (_ :: _)));
apply ext_implies_cons; split;
[ try simpl_single_ext_implies
| simpl_ext_implies
]
].
(* lazymatch goal with
|
repeat match goal with
| |- ext_implies _ [] => apply ext_implies_nil
| |- ext_implies _ (_ :: _ :: _) => apply ext_implies_cons; split
| |- ext_implies _ [_] =>
try reflexivity_simpl_ext_implies;
try (let es := fresh "es" in
let H := fresh "H" in
unfold ext_implies, ext_denote, ext_satisfies;
intros es H;
simpl in H |- *;
intuition; easy)
end. *)
Section SIMPL_EXT_IMPLIES_TEST.
Context {tags_t: Type} {tags_t_inhabitant : Inhabitant tags_t}.
Notation Expression := (@Expression tags_t).
Context {target : @Target tags_t Expression}.
Variable P Q R S: ext_pred.
Goal ext_implies [P; Q; R; S] [].
Proof. simpl_ext_implies. Qed.
(* Rearrange order doesn't matter *)
Goal ext_implies [P; Q; R; S] [R; S; Q; P].
Proof. simpl_ext_implies. Qed.
(* It will leave unsolved goals *)
Goal ext_implies [P; Q; S] [R; S; Q].
Proof. simpl_ext_implies. Abort.
(* If we have additional rules, the tactic can solve the goal *)
(* Goal (forall es, P es -> R es) -> ext_implies [P; Q; S] [R; S; Q].
Proof. intros. simpl_ext_implies. Qed. *)
End SIMPL_EXT_IMPLIES_TEST.
|
There was a Wiki Gatherings Wiki BBQ on January 17, 2009 at Slide Hill Park.
Who came:
Users/JessicaLuedtke and family are, as usual, bringing cookies.
Users/NickSchmalenberger will bring some pasta salad or cheeseburger pie, as well as WilliamLewis.
Users/JasonAller will bring Wiki Buttons
Users/AlexMandel, Users/MicheleTobias, Users/RyanMikulovsky and Users/StaceyEllis will bring some sort of snack food, to be determined...
Nobody in the larger nonrecentchangeshawk community is gonna come to this! You needed to have made this page, posted it around, etc. for at least a week prior. That might not be a problem, but I'm just saying. Users/PhilipNeustrom
Maybe that's a good thing because there might not be enough wiki buttons to go around. ...wouldn't want to start a button riot, you know.
20090117 14:10:04 I completely agree with Philip, it was random that I saw the word BBQ in chat and started poking around to figure out what was going on and didn't know for sure until about noon. If we really want to do these, put the date down, make a page and advertise, otherwise it's more like a gnome party and we missed quite a few of those too. Users/AlexMandel
20090117 14:40:56 We are here and talking about why you are not. Users/JasonAller
20090117 14:46:27 wiki bbq?
Damn! Users/StevenDaubert
20090117 15:12:30 I told you guys to do it next week. I'll be there next week if anyone wants to do some sort of gathering then. Users/BrentLaabs
20090117 15:46:36 I'd be down for next week! Unfortunately, I had something today. Users/CurlyGirl26
20090117 16:05:55 Oh, hell... Had I but known. ;) Philip is right though, it should have hit the Featured Page awhile back. Somebody go update the Featured Page entry to something either I Love Davis listed here, or some obscure group you know and love and think others might like. Honestly, it's a great visible spot for events and groups, but it's seldom updated. Use it! (And keep in mind it can always be Featured Pages, so long as it doesn't fill up the front page) Users/JabberWokky
20090117 16:58:58 Re: the photo: Why is Nick using the power of his mind to levitate the dog in the foreground? Users/JabberWokky
20090117 18:02:42 This was a bad situation either way. Yes, it should have been publicized more, but the date and time have been posted for a while, and I thought it better to go ahead and try to get some people there and not risk having some random person show up to the announced party and find no one else there.
In any case, the weather was as perfect as we're likely to get for a while, and I don't think anyone who attended regretted it! Users/JessicaLuedtke
20090117 18:52:32 We'll do another one in Feb since I wasn't able to make it to this one either... Users/SunjeetBaadkar
20100318 12:13:57 Will there be another BBQ coming up in 2010?? Users/DagonJones
There's Wiki Gatherings talk of April 4. Users/TomGarberson Edit: but it turns out that's Easter, so I imagine turnout would be low.
|
(***********************************************************************************
* Copyright (c) 2016-2020 The University of Sheffield, UK
* 2019-2020 University of Exeter, UK
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-2-Clause
***********************************************************************************)
(* This file is automatically generated, please do not modify! *)
section\<open>Testing Document\_getElementById\<close>
text\<open>This theory contains the test cases for Document\_getElementById.\<close>
theory Document_getElementById
imports
"Core_DOM_BaseTest"
begin
definition Document_getElementById_heap :: heap\<^sub>f\<^sub>i\<^sub>n\<^sub>a\<^sub>l where
"Document_getElementById_heap = create_heap [(cast (document_ptr.Ref 1), cast (create_document_obj html (Some (cast (element_ptr.Ref 1))) [])),
(cast (element_ptr.Ref 1), cast (create_element_obj ''html'' [cast (element_ptr.Ref 2), cast (element_ptr.Ref 9)] fmempty None)),
(cast (element_ptr.Ref 2), cast (create_element_obj ''head'' [cast (element_ptr.Ref 3), cast (element_ptr.Ref 4), cast (element_ptr.Ref 5), cast (element_ptr.Ref 6), cast (element_ptr.Ref 7), cast (element_ptr.Ref 8)] fmempty None)),
(cast (element_ptr.Ref 3), cast (create_element_obj ''meta'' [] (fmap_of_list [(''charset'', ''utf-8'')]) None)),
(cast (element_ptr.Ref 4), cast (create_element_obj ''title'' [cast (character_data_ptr.Ref 1)] fmempty None)),
(cast (character_data_ptr.Ref 1), cast (create_character_data_obj ''Document.getElementById'')),
(cast (element_ptr.Ref 5), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''author''), (''title'', ''Tetsuharu OHZEKI''), (''href'', ''mailto:[email protected]'')]) None)),
(cast (element_ptr.Ref 6), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''help''), (''href'', ''https://dom.spec.whatwg.org/#dom-document-getelementbyid'')]) None)),
(cast (element_ptr.Ref 7), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharness.js'')]) None)),
(cast (element_ptr.Ref 8), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharnessreport.js'')]) None)),
(cast (element_ptr.Ref 9), cast (create_element_obj ''body'' [cast (element_ptr.Ref 10), cast (element_ptr.Ref 11), cast (element_ptr.Ref 12), cast (element_ptr.Ref 13), cast (element_ptr.Ref 16), cast (element_ptr.Ref 19)] fmempty None)),
(cast (element_ptr.Ref 10), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''log'')]) None)),
(cast (element_ptr.Ref 11), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', '''')]) None)),
(cast (element_ptr.Ref 12), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''test1'')]) None)),
(cast (element_ptr.Ref 13), cast (create_element_obj ''div'' [cast (element_ptr.Ref 14), cast (element_ptr.Ref 15)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''1st'')]) None)),
(cast (element_ptr.Ref 14), cast (create_element_obj ''p'' [cast (character_data_ptr.Ref 2)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''2nd'')]) None)),
(cast (character_data_ptr.Ref 2), cast (create_character_data_obj ''P'')),
(cast (element_ptr.Ref 15), cast (create_element_obj ''input'' [] (fmap_of_list [(''id'', ''test5''), (''type'', ''submit''), (''value'', ''Submit''), (''data-name'', ''3rd'')]) None)),
(cast (element_ptr.Ref 16), cast (create_element_obj ''div'' [cast (element_ptr.Ref 17)] (fmap_of_list [(''id'', ''outer'')]) None)),
(cast (element_ptr.Ref 17), cast (create_element_obj ''div'' [cast (element_ptr.Ref 18)] (fmap_of_list [(''id'', ''middle'')]) None)),
(cast (element_ptr.Ref 18), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''inner'')]) None)),
(cast (element_ptr.Ref 19), cast (create_element_obj ''script'' [cast (character_data_ptr.Ref 3)] fmempty None)),
(cast (character_data_ptr.Ref 3), cast (create_character_data_obj ''%3C%3Cscript%3E%3E''))]"
definition Document_getElementById_document :: "(unit, unit, unit, unit, unit, unit) object_ptr option" where "Document_getElementById_document = Some (cast (document_ptr.Ref 1))"
text \<open>"Document.getElementById with a script-inserted element"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test2'';
test \<leftarrow> Document_getElementById_document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
result \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_not_equals(result, None, ''should not be null.'');
tmp0 \<leftarrow> result . tagName;
assert_equals(tmp0, ''div'', ''should have appended element's tag name'');
gBody . removeChild(test);
removed \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(removed, None, ''should not get removed element.'')
}) Document_getElementById_heap"
by eval
text \<open>"update `id` attribute via setAttribute/removeAttribute"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test3'';
test \<leftarrow> Document_getElementById_document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
UPDATED_ID \<leftarrow> return ''test3-updated'';
test . setAttribute(''id'', UPDATED_ID);
e \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
assert_equals(e, test, ''should get the element with id.'');
old \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(old, None, ''shouldn't get the element by the old id.'');
test . removeAttribute(''id'');
e2 \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
by eval
text \<open>"Ensure that the id attribute only affects elements present in a document"\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test4-should-not-exist'';
e \<leftarrow> Document_getElementById_document . createElement(''div'');
e . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp0, None, ''should be null'');
tmp1 \<leftarrow> Document_getElementById_document . body;
tmp1 . appendChild(e);
tmp2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp2, e, ''should be the appended element'')
}) Document_getElementById_heap"
by eval
text \<open>"in tree order, within the context object's tree"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test5'';
target \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_not_equals(target, None, ''should not be null'');
tmp0 \<leftarrow> target . getAttribute(''data-name'');
assert_equals(tmp0, ''1st'', ''should return the 1st'');
element4 \<leftarrow> Document_getElementById_document . createElement(''div'');
element4 . setAttribute(''id'', TEST_ID);
element4 . setAttribute(''data-name'', ''4th'');
gBody . appendChild(element4);
target2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_not_equals(target2, None, ''should not be null'');
tmp1 \<leftarrow> target2 . getAttribute(''data-name'');
assert_equals(tmp1, ''1st'', ''should be the 1st'');
tmp2 \<leftarrow> target2 . parentNode;
tmp2 . removeChild(target2);
target3 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_not_equals(target3, None, ''should not be null'');
tmp3 \<leftarrow> target3 . getAttribute(''data-name'');
assert_equals(tmp3, ''4th'', ''should be the 4th'')
}) Document_getElementById_heap"
by eval
text \<open>"Modern browsers optimize this method with using internal id cache. This test checks that their optimization should effect only append to `Document`, not append to `Node`."\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test6'';
s \<leftarrow> Document_getElementById_document . createElement(''div'');
s . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> Document_getElementById_document . createElement(''div'');
tmp0 . appendChild(s);
tmp1 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp1, None, ''should be null'')
}) Document_getElementById_heap"
by eval
text \<open>"changing attribute's value via `Attr` gotten from `Element.attribute`."\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test7'';
element \<leftarrow> Document_getElementById_document . createElement(''div'');
element . setAttribute(''id'', TEST_ID);
gBody . appendChild(element);
target \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(target, element, ''should return the element before changing the value'');
element . setAttribute(''id'', (TEST_ID @ ''-updated''));
target2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(target2, None, ''should return null after updated id via Attr.value'');
target3 \<leftarrow> Document_getElementById_document . getElementById((TEST_ID @ ''-updated''));
assert_equals(target3, element, ''should be equal to the updated element.'')
}) Document_getElementById_heap"
by eval
text \<open>"update `id` attribute via element.id"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test12'';
test \<leftarrow> Document_getElementById_document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
UPDATED_ID \<leftarrow> return (TEST_ID @ ''-updated'');
test . setAttribute(''id'', UPDATED_ID);
e \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
assert_equals(e, test, ''should get the element with id.'');
old \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(old, None, ''shouldn't get the element by the old id.'');
test . setAttribute(''id'', '''');
e2 \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
by eval
text \<open>"where insertion order and tree order don't match"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test13'';
container \<leftarrow> Document_getElementById_document . createElement(''div'');
container . setAttribute(''id'', (TEST_ID @ ''-fixture''));
gBody . appendChild(container);
element1 \<leftarrow> Document_getElementById_document . createElement(''div'');
element1 . setAttribute(''id'', TEST_ID);
element2 \<leftarrow> Document_getElementById_document . createElement(''div'');
element2 . setAttribute(''id'', TEST_ID);
element3 \<leftarrow> Document_getElementById_document . createElement(''div'');
element3 . setAttribute(''id'', TEST_ID);
element4 \<leftarrow> Document_getElementById_document . createElement(''div'');
element4 . setAttribute(''id'', TEST_ID);
container . appendChild(element2);
container . appendChild(element4);
container . insertBefore(element3, element4);
container . insertBefore(element1, element2);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element1, ''should return 1st element'');
container . removeChild(element1);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element2, ''should return 2nd element'');
container . removeChild(element2);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element3, ''should return 3rd element'');
container . removeChild(element3);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element4, ''should return 4th element'');
container . removeChild(element4)
}) Document_getElementById_heap"
by eval
text \<open>"Inserting an id by inserting its parent node"\<close>
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test14'';
a \<leftarrow> Document_getElementById_document . createElement(''a'');
b \<leftarrow> Document_getElementById_document . createElement(''b'');
a . appendChild(b);
b . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp0, None);
gBody . appendChild(a);
tmp1 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp1, b)
}) Document_getElementById_heap"
by eval
text \<open>"Document.getElementById must not return nodes not present in document"\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test15'';
outer \<leftarrow> Document_getElementById_document . getElementById(''outer'');
middle \<leftarrow> Document_getElementById_document . getElementById(''middle'');
inner \<leftarrow> Document_getElementById_document . getElementById(''inner'');
tmp0 \<leftarrow> Document_getElementById_document . getElementById(''middle'');
outer . removeChild(tmp0);
new_el \<leftarrow> Document_getElementById_document . createElement(''h1'');
new_el . setAttribute(''id'', ''heading'');
inner . appendChild(new_el);
tmp1 \<leftarrow> Document_getElementById_document . getElementById(''heading'');
assert_equals(tmp1, None)
}) Document_getElementById_heap"
by eval
end
|
//////////////////////////////////////////////////////////////////////////////////////////////
/// \file OptimizationProblem.hpp
///
/// \author Sean Anderson, ASRL
//////////////////////////////////////////////////////////////////////////////////////////////
#ifndef STEAM_OPTIMIZATION_PROBLEM_HPP
#define STEAM_OPTIMIZATION_PROBLEM_HPP
#include <Eigen/Core>
#include <Eigen/Sparse>
#include <steam/state/StateVector.hpp>
#include <steam/problem/ParallelizedCostTermCollection.hpp>
namespace steam {
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Container for active state variables and cost terms associated with the
/// optimization problem to be solved.
//////////////////////////////////////////////////////////////////////////////////////////////
class OptimizationProblem
{
public:
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Default constructor
//////////////////////////////////////////////////////////////////////////////////////////////
OptimizationProblem();
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Add an 'active' state variable
//////////////////////////////////////////////////////////////////////////////////////////////
void addStateVariable(const StateVariableBase::Ptr& statevar);
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Add a cost term (should depend on active states that were added to the problem)
//////////////////////////////////////////////////////////////////////////////////////////////
void addCostTerm(const CostTermBase::ConstPtr& costTerm);
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Compute the cost from the collection of cost terms
//////////////////////////////////////////////////////////////////////////////////////////////
double cost() const;
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Get reference to state variables
//////////////////////////////////////////////////////////////////////////////////////////////
const std::vector<StateVariableBase::Ptr>& getStateVariables() const;
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Get the total number of cost terms
//////////////////////////////////////////////////////////////////////////////////////////////
unsigned int getNumberOfCostTerms() const;
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Fill in the supplied block matrices
//////////////////////////////////////////////////////////////////////////////////////////////
void buildGaussNewtonTerms(const StateVector& stateVector,
Eigen::SparseMatrix<double>* approximateHessian,
Eigen::VectorXd* gradientVector) const;
private:
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Single threaded cost terms
//////////////////////////////////////////////////////////////////////////////////////////////
ParallelizedCostTermCollection singleCostTerms_;
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Collection of cost terms that implement their own parallelization
//////////////////////////////////////////////////////////////////////////////////////////////
std::vector<CostTermBase::ConstPtr> parallelizedCostTerms_;
//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Collection of state variables
//////////////////////////////////////////////////////////////////////////////////////////////
std::vector<StateVariableBase::Ptr> stateVariables_;
};
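//////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Illustrative usage sketch (constructing concrete state variables and cost terms
///        depends on derived types not declared in this header):
///
///   steam::OptimizationProblem problem;
///   problem.addStateVariable(stateVar);  // some StateVariableBase::Ptr
///   problem.addCostTerm(costTerm);       // some CostTermBase::ConstPtr
///   double initialCost = problem.cost(); // evaluates the collected cost terms
//////////////////////////////////////////////////////////////////////////////////////////////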
} // namespace steam
#endif // STEAM_OPTIMIZATION_PROBLEM_HPP
|
export reconstruction
include("RecoParameters.jl")
include("DirectReconstruction.jl")
include("IterativeReconstruction.jl")
"""
reconstruction(acqData::AcquisitionData, recoParams::Dict)
Performs image reconstruction of an AcquisitionData object.
Parameters are specified in a dictionary.
The reconstruction type is specified as a `String` under the key `:reco`.
Valid reconstruction names are:
* "direct" - direct Fourier reconstruction
* "standard" - iterative reconstruction for all contrasts, coils & slices independently
* "multiEcho" - iterative joint reconstruction of all echo images
* "multiCoil" - SENSE-type iterative reconstruction
* "multiCoilMultiEcho" - SENSE-type iterative reconstruction of all echo images
"""
function reconstruction(acqData::AcquisitionData, recoParams::Dict)
# check dimensionality of encoding
encodingDims = dims(trajectory(acqData))
if encodingDims==3 && numSlices(acqData)>1
@error "reconstruction of multiple 3d-encoded volumina is not yet supported"
end
# load reconstruction parameters
recoParams = merge(defaultRecoParams(), recoParams)
# iterative reco
reconSize, weights, sparseTrafo, reg, normalize, encOps, solvername, senseMaps = setupIterativeReco(acqData, recoParams)
if recoParams[:reco] == "standard"
return reconstruction_simple(acqData, reconSize[1:encodingDims], reg, sparseTrafo, weights, solvername, normalize, encOps, recoParams)
elseif recoParams[:reco] == "multiEcho"
return reconstruction_multiEcho(acqData, reconSize[1:encodingDims], reg, sparseTrafo, weights, solvername, normalize, encOps, recoParams)
elseif recoParams[:reco] == "multiCoil"
return reconstruction_multiCoil(acqData, reconSize[1:encodingDims], reg, sparseTrafo, weights, solvername, senseMaps, normalize, encOps, recoParams)
elseif recoParams[:reco] == "multiCoilMultiEcho"
return reconstruction_multiCoilMultiEcho(acqData, reconSize[1:encodingDims], reg, sparseTrafo, weights, solvername, senseMaps, normalize, encOps, recoParams)
end
# direct reco
if recoParams[:reco] != "direct"
@error "reco modell $(recoParams[:reco]) not found"
end
reconSize, weights, cmap = setupDirectReco(acqData, recoParams)
return reconstruction_direct(acqData, reconSize[1:encodingDims], weights, cmap)
end
"""
reconstruction(acqData::AcquisitionData, recoParams::Dict,filename::String; force=false)
Performs the same image reconstruction as `reconstruction(acqData::AcquisitionData, recoParams::Dict)`
and saves the image in a file with name `filename`.
If `force=false`, the reconstructed image is loaded from the file `filename` if the latter is
present.
"""
function reconstruction(acqData::AcquisitionData, recoParams::Dict, filename::String;
force=false)
if !force && isfile(filename)
return recoImage( RecoFileIBI(filename) )
else
I = reconstruction(acqData, recoParams)
saveasRecoFile(filename, I, recoParams)
return I
end
end
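# Illustrative usage sketch (not part of the code above; `acqData` is assumed to be a
# pre-loaded AcquisitionData object and the file name below is hypothetical):
#
#   recoParams = Dict{Symbol,Any}(:reco => "multiCoil")
#   img = reconstruction(acqData, recoParams)
#
#   # with on-disk caching; recomputes only if the file is absent or force=true
#   img = reconstruction(acqData, recoParams, "reco.bin"; force = false)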
|
suppressPackageStartupMessages({
require(RColorBrewer)
require(tidyverse)
require(sf)
})
source('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/src/visualization/utils/plot_default_theme.R')
if(interactive()){
.args <- c('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/infomap/infomap_full_norm_months.csv',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/infomap/leiden_full_norm_months.csv',
'/Users/hamishgibbs/Downloads/scan_results_marjun_nooverlap',
'/Users/hamishgibbs/Downloads/Middle_Layer_Super_Output_Areas__December_2011__Boundaries-shp/Middle_Layer_Super_Output_Areas__December_2011__Boundaries.shp',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/la_reference/a3_tile_reference.csv',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/tile_reference/tiles_zoom_12.shp',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/la_reference/a3_tile_reference.csv')
} else {
.args <- commandArgs(trailingOnly = T)
}
uk <- rnaturalearth::ne_states(country = 'United Kingdom', returnclass = 'sf')
# combine mobility data by month and repeat clustering
im <- read_csv(.args[1]) %>%
mutate(quadkey = str_pad(quadkey, 12, pad = "0")) %>%
group_by(date) %>%
group_split()
im <- im[1:4]
lei <- read_csv(.args[2]) %>%
mutate(quadkey = str_pad(quadkey, 12, pad = "0")) %>%
rename(date = month) %>%
group_by(date) %>%
group_split()
lei <- lei[1:4]
cases_clust <- list.files(.args[3], pattern = '.shp', full.names = T)
cases_clust <- list('March' = cases_clust[3], 'April' = cases_clust[1], 'May' = cases_clust[4], 'June' = cases_clust[2])
msoa <- st_read(.args[4]) %>%
st_simplify(., dTolerance = 300)
a3 <- read_csv(.args[5])
tiles <- st_read(.args[6], quiet = T) %>%
st_set_crs(4326)
#make_comparison plots of 3 shapes - msoa, im, lei X
#compute comparison metrics for comparing deaths with both lei and im (different files) existing functions should work X
#sbm if possible
process_scan <- function(scan_fn){
scan <- st_read(scan_fn)
scan <- st_transform(scan, crs = st_crs(msoa)) %>%
filter(P_VALUE < 0.1)
intersect <- st_intersection(st_centroid(msoa), scan)
msoa_clust <- msoa %>%
full_join(st_drop_geometry(intersect)) %>%
mutate(CLUSTER = as.factor(CLUSTER)) %>%
group_by(CLUSTER) %>%
summarise(REL_RISK = unique(REL_RISK)) %>%
ungroup() %>%
st_simplify(., dTolerance = 300) %>%
mutate(geometry = st_make_valid(geometry)) %>%
drop_na() %>%
st_as_sf()
return(msoa_clust)
}
# Identify which LTLAs from shape fall within cluster circles
plot_comparison_data <- function(msoa_clust, im, tiles = tiles){
im_data <- tiles %>%
left_join(im, by = c('quadkey')) %>%
drop_na(cluster) %>%
mutate(geometry = st_make_valid(geometry))
msoa_clust <- msoa_clust %>%
st_transform(4326) %>%
mutate(geometry = st_make_valid(geometry))
testthat::expect_equal(st_crs(msoa_clust), st_crs(im_data))
im_intersection <- st_intersection(msoa_clust, im_data)
im_clusters <- im %>% filter(cluster %in% unique(im_intersection$cluster))
im_clusters <- tiles %>% left_join(im_clusters, by = c('quadkey')) %>%
drop_na(cluster) %>%
group_by(cluster) %>%
summarise() %>%
mutate(type = 'Infomap') %>%
select(geometry, type)
msoa_clust <- msoa_clust %>%
mutate(type = 'Death Cluster') %>%
select(geometry, type)
p_data <- rbind(im_clusters, msoa_clust)
p_data <- nngeo::st_remove_holes(p_data)
return(p_data)
}
plot_comparison <- function(x, title){
bbox <- st_bbox(x)
p <- x %>%
ggplot() +
geom_sf(data = uk, fill = 'transparent', size = 0.05, color = 'black') +
geom_sf(aes(color = type), fill = 'transparent', size = 0.5) +
scale_color_manual(values = c('Death Cluster' = 'red', 'Infomap' = 'blue', 'Leiden' = 'darkgreen')) +
ylim(bbox$ymin, bbox$ymax) +
xlim(bbox$xmin, bbox$xmax) +
theme_void() +
plot_default_theme +
theme(legend.position = 'none',
legend.title = element_blank()) +
ggtitle(title)
return(p)
}
p_list <- list()
for (i in 1:length(cases_clust)){
scan <- process_scan(cases_clust[[i]])
p_data_im <- plot_comparison_data(scan, im[[i]], tiles)
p_data_lei <- plot_comparison_data(scan, lei[[i]], tiles) %>%
mutate(type = ifelse(type == 'Infomap', 'Leiden', type))
p_im <- plot_comparison(p_data_im, paste0('Infomap ', names(cases_clust)[i]))
p_lei <- plot_comparison(p_data_lei, paste0('Leiden ', names(cases_clust)[i]))
p_list[[paste0('Infomap', names(cases_clust)[i])]] <- p_im
p_list[[paste0('Leiden', names(cases_clust)[i])]] <- p_lei
}
p <- cowplot::plot_grid(plotlist = p_list, nrow = 2)
ggsave('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/reports/figures/death_cluster_compare_map.png', p,
width = 16, height = 10,
units = 'in')
ggsave('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/reports/figures/death_cluster_compare_map.pdf', p,
width = 16, height = 10,
units = 'in',
useDingbats = F)
#cluster comparison metrics
scan <- lapply(cases_clust, process_scan)
compare_cluster_metrics <- function(scan, comms){
scan <- scan %>%
group_by(CLUSTER) %>%
group_split()
comms <- tiles %>% left_join(comms, by = c('quadkey')) %>% drop_na(cluster) %>% st_as_sf() %>% st_transform(27700)
comms <- comms %>%
group_by(cluster) %>%
summarise()
results <- list()
for (i in 1:length(scan)){
s_clust <- scan[[i]]
c_intersection <- st_intersection(s_clust, comms)
c_intersection$tot_area <- as.numeric(st_area(s_clust))
c_intersection <- c_intersection %>%
mutate(int_area = as.numeric(st_area(geometry)),
overlap_perc = int_area / tot_area)
results[[i]] <- list('n_overlap' = length(c_intersection$overlap_perc),
'max_overlap_perc' = max(c_intersection$overlap_perc, na.rm = T),
'mean_overlap_perc' = mean(c_intersection$overlap_perc, na.rm = T))
}
return(results)
}
im_results <- list()
lei_results <- list()
for (i in 1:length(im)){
im_results[[names(cases_clust)[i]]] <- compare_cluster_metrics(scan[[i]], im[[i]])
lei_results[[names(cases_clust)[i]]] <- compare_cluster_metrics(scan[[i]], lei[[i]])
}
plot_comparison_metrics <- function(results, title){
df <- do.call(rbind, lapply(results, as.data.frame)) %>%
mutate(id = row_number())
p_max <- plot_metric(df, 'max_overlap_perc', 'Max % intersection')
p_mean <- plot_metric(df, 'mean_overlap_perc', 'Mean % intersection')
p_overlap <- plot_metric(df, 'n_overlap', 'Number of intersecting modules')
title <- cowplot::ggdraw() +
cowplot::draw_label(title,fontface = 'bold',x = 0,hjust = 0) +
theme(plot.margin = margin(0, 0, 0, 7))
p <- cowplot::plot_grid(p_max, p_mean, p_overlap, nrow = 1)
p <- cowplot::plot_grid(title, p, rel_heights = c(0.1, 0.9), nrow = 2)
return(p)
}
plot_metric <- function(data, attr, y){
data <- data %>% arrange(-!! sym(attr))
data$id <- factor(data$id, levels = data$id)
p <- data %>%
ggplot() +
geom_point(aes(x = id, y = !! sym(attr))) +
theme_bw() +
plot_default_theme +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank()) +
ylab(y) +
xlab('Cluster')
return(p)
}
plist <- list()
plist[[1]] <- plot_comparison_metrics(im_results[['March']], 'March - Infomap')
plist[[2]] <- plot_comparison_metrics(im_results[['April']], 'April - Infomap')
plist[[3]] <- plot_comparison_metrics(im_results[['May']], 'May - Infomap')
plist[[4]] <- plot_comparison_metrics(im_results[['June']], 'June - Infomap')
plist[[5]] <- plot_comparison_metrics(lei_results[['March']], 'March - Leiden')
plist[[6]] <- plot_comparison_metrics(lei_results[['April']], 'April - Leiden')
plist[[7]] <- plot_comparison_metrics(lei_results[['May']], 'May - Leiden')
plist[[8]] <- plot_comparison_metrics(lei_results[['June']], 'June - Leiden')
p <- cowplot::plot_grid(plist[[1]], plist[[5]], plist[[2]], plist[[6]], plist[[3]], plist[[7]], plist[[4]], plist[[8]], nrow = 8)
ggsave('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/reports/figures/death_cluster_metrics.png', p,
width = 18, height = 16,
units = 'in')
ggsave('/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/reports/figures/death_cluster_metrics.pdf', p,
width = 18, height = 16,
units = 'in',
useDingbats = F)
############################################################################
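# NOTE: the code below is interactive scratch work; it references objects
# (e.g. msoa_clust from stepping through process_scan(), and p1 defined
# further down) that are not created by a top-to-bottom run of this script.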
p_data_im <- plot_comparison_data(msoa_clust, im[[1]], tiles)
p_data_lei <- plot_comparison_data(msoa_clust, lei[[1]], tiles) %>%
mutate(type = ifelse(type == 'Infomap', 'Leiden', type))
p_data_im <- nngeo::st_remove_holes(p_data_im)
p_data_lei <- nngeo::st_remove_holes(p_data_lei)
plot_comparison(p_data_im, 'Infomap')
plot_comparison(p_data_lei, 'Leiden')
bbox <- st_bbox(p_data_lei)
p2 <- p_data_lei %>%
ggplot() +
geom_sf(data = uk, fill = 'transparent', size = 0.05, color = 'black') +
geom_sf(aes(color = type), fill = 'transparent', size = 0.5) +
scale_color_manual(values = c('Death Cluster' = 'red', 'Infomap' = 'blue', 'Leiden' = 'darkgreen')) +
ylim(bbox$ymin, bbox$ymax) +
xlim(bbox$xmin, bbox$xmax) +
theme_bw() +
plot_default_theme +
theme(legend.position = c(0.8, 0.8),
legend.title = element_blank())
p <- cowplot::plot_grid(p1, p2)
p
ggplot() +
geom_sf(data = msoa_clust, aes(fill = CLUSTER), size = 0) +
geom_sf(data = uk, fill = 'transparent', size = 0.1, color = 'black') +
theme_void() +
plot_default_theme +
theme(legend.position = 'none')
tile_intersection <- as.data.frame(st_intersection(tiles, msoa_clust %>% drop_na() %>% st_as_sf() %>% st_transform(4326)))
tiles %>% left_join(tile_intersection, by = c('quadkey')) %>%
drop_na(CLUSTER) %>%
ggplot() +
geom_sf(data = uk, fill = 'transparent', size = 0.1, color = 'black') +
geom_sf(aes(fill = CLUSTER), size = 0) +
theme_void() +
plot_default_theme +
theme(legend.position = 'none')
# NMI between death-cluster assignments and Leiden partitions, tracking the
# month-5 partition with the highest NMI (used for the maps below).
nmi <- c()
dates <- c()
best_nmi <- -Inf
for (i in lei){
  c_compare_i <- tile_intersection %>%
    group_by(quadkey) %>%
    summarise(CLUSTER = unique(CLUSTER)[1]) %>%
    left_join(i, by = c('quadkey')) %>%
    drop_na(CLUSTER, cluster)
  nmi_i <- igraph::compare(c_compare_i$CLUSTER, as.integer(factor(c_compare_i$cluster)), method = 'nmi')
  nmi <- append(nmi, nmi_i)
  dates <- append(dates, unique(i$date))
  if (unique(i$date) == 5 & nmi_i > best_nmi){
    best_nmi <- nmi_i
    c_compare <- c_compare_i
  }
}
df = data.frame(x = 1:length(nmi), nmi = nmi, month = dates)
max_nmi = df %>% filter(month == 5) %>% pull(nmi) %>% max()
df %>% ggplot() + geom_path(aes(x = x, y = nmi, color = as.character(month))) + theme_bw() + plot_default_theme
p1 <- tiles %>% left_join(c_compare) %>%
drop_na(CLUSTER) %>%
st_as_sf() %>%
ggplot() +
geom_sf(aes(fill = CLUSTER), size = 0) +
theme(legend.position = 'none')
p2 <- tiles %>% left_join(c_compare) %>%
drop_na(cluster) %>%
st_as_sf() %>%
ggplot() +
geom_sf(aes(fill = as.character(cluster)), size = 0) +
theme(legend.position = 'none')
cowplot::plot_grid(p1, p2)
#repeat for im
|
If $p$ is a non-zero polynomial, then there exists a non-zero constant $a$ and a polynomial $q$ such that $p(z) = z^k a q(z)$ for some $k \geq 0$. |
/-
Copyright (c) 2019 The Flypitch Project. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jesse Han, Floris van Doorn
-/
import .compactness
open set function nat
universe variable u
namespace fol
local notation h :: t := dvector.cons h t
local notation `[` l:(foldr `, ` (h t, dvector.cons h t) dvector.nil `]`:0) := l
namespace Language
def Lconstants (α : Type u) : Language :=
⟨λn, nat.rec α (λn ih, pempty) n, λn, pempty⟩
protected def sum (L L' : Language) : Language :=
⟨λn, L.functions n ⊕ L'.functions n, λ n, L.relations n ⊕ L'.relations n⟩
def symbols (L : Language) := (Σl, L.functions l) ⊕ (Σl, L.relations l)
end Language
section
variable {L : Language}
@[simp] def symbols_in_term : ∀{l}, preterm L l → set L.symbols
| _ &k := ∅
| l (func f) := {sum.inl ⟨l,f⟩}
| _ (app t₁ t₂) := symbols_in_term t₁ ∪ symbols_in_term t₂
@[simp] def symbols_in_formula : ∀{l}, preformula L l → set L.symbols
| _ falsum := ∅
| _ (t₁ ≃ t₂) := symbols_in_term t₁ ∪ symbols_in_term t₂
| l (rel R) := {sum.inr ⟨l, R⟩}
| _ (apprel f t) := symbols_in_formula f ∪ symbols_in_term t
| _ (f₁ ⟹ f₂) := symbols_in_formula f₁ ∪ symbols_in_formula f₂
| _ (∀' f) := symbols_in_formula f
@[simp] lemma symbols_in_term_lift_at (n m) : ∀{l} (t : preterm L l),
symbols_in_term (t ↑' n # m) = symbols_in_term t
| _ &k := by by_cases h : m ≤ k; simp [h]
| l (func f) := rfl
| _ (app t₁ t₂) := by simp*
@[simp] lemma symbols_in_term_lift (n) {l} (t : preterm L l) :
symbols_in_term (t ↑ n) = symbols_in_term t :=
symbols_in_term_lift_at n 0 t
lemma symbols_in_term_subst (s : term L) (n) : ∀{l} (t : preterm L l),
symbols_in_term (t[s // n]) ⊆ symbols_in_term t ∪ symbols_in_term s
| _ &k :=
by apply lt_by_cases n k; intro h;
simp only [set.empty_subset, fol.symbols_in_term.equations._eqn_1,
fol.subst_term_var_gt, set.empty_union, fol.lift_term_def, fol.subst_term_var_eq,
fol.symbols_in_term_lift, h, fol.subst_term_var_lt, set.empty_union]
| _ (func f) := subset_union_left _ _
| _ (app t₁ t₂) :=
by { simp; split; refine subset.trans (symbols_in_term_subst _) _;
simp [subset_union2_left, subset_union2_middle] }
lemma symbols_in_formula_subst : ∀{l} (f : preformula L l) (s : term L) (n),
symbols_in_formula (f[s // n]) ⊆ symbols_in_formula f ∪ symbols_in_term s
| _ falsum s n := empty_subset _
| _ (t₁ ≃ t₂) s n :=
by { simp; split; refine subset.trans (symbols_in_term_subst _ _ _) _;
simp [subset_union2_left, subset_union2_middle] }
| _ (rel R) s n := subset_union_left _ _
| _ (apprel f t) s n :=
by { simp; split; [refine subset.trans (symbols_in_formula_subst _ _ _) _,
refine subset.trans (symbols_in_term_subst _ _ _) _];
simp [subset_union2_left, subset_union2_middle] }
| _ (f₁ ⟹ f₂) s n :=
by { simp; split; refine subset.trans (symbols_in_formula_subst _ _ _) _;
simp [subset_union2_left, subset_union2_middle] }
| _ (∀' f) s n := symbols_in_formula_subst f _ _
end
-- def symbols_in_prf : ∀{Γ : set $ formula L} {f : formula L} (P : Γ ⊢ f), set L.symbols
-- | Γ f (axm h) := symbols_in_formula f
-- | Γ (f₁ ⟹ f₂) (impI P) := symbols_in_prf P ∪ symbols_in_formula f₁
-- | Γ f₂ (impE f₁ P₁ P₂) := symbols_in_prf P₁ ∪ symbols_in_prf P₂
-- | Γ f (falsumE P) := symbols_in_prf P ∪ symbols_in_formula f
-- | Γ (∀' f) (allI P) := symbols_in_prf P
-- | Γ _ (allE₂ f t P) := symbols_in_prf P ∪ symbols_in_term t
-- | Γ (_ ≃ t) (ref _ _) := symbols_in_term t
-- | Γ _ (subst₂ s t f P₁ P₂) := symbols_in_prf P₁ ∪ symbols_in_prf P₂
-- def interpolation : ∀{Γ : set $ formula L} {f : formula L} (P : Γ ⊢ f),
-- Σ' (f' : formula L) (P₁ : Γ ⊢ f') (P₂ : {f'} ⊢ f),
-- symbols_in_prf P₁ ⊆ ⋃₀ (symbols_in_formula '' Γ) ∧
-- symbols_in_prf P₂ ⊆ symbols_in_formula f ∧
-- symbols_in_formula f' ⊆ ⋃₀ (symbols_in_formula '' Γ) ∩ symbols_in_formula f :=
-- sorry -- probably the last property follows automatically
structure Lhom (L L' : Language) :=
(on_function : ∀{n}, L.functions n → L'.functions n)
(on_relation : ∀{n}, L.relations n → L'.relations n)
infix ` →ᴸ `:10 := Lhom -- \^L
namespace Lhom
/- -/
variables {L : Language.{u}} {L' : Language.{u}} (ϕ : L →ᴸ L')
def cast1 {L0 L1 : Language} (heq : L0 = L1) : L0 →ᴸ L1 :=
⟨λ n, cast (by rw heq) , λ n, cast (by rw heq)⟩
protected def id (L : Language) : L →ᴸ L :=
⟨λn, id, λ n, id⟩
protected def sum_inl {L L' : Language} : L →ᴸ L.sum L' :=
⟨λn, sum.inl, λ n, sum.inl⟩
protected def sum_inr {L L' : Language} : L' →ᴸ L.sum L' :=
⟨λn, sum.inr, λ n, sum.inr⟩
@[reducible]def comp {L1} {L2} {L3} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2) : L1 →ᴸ L3 :=
begin
-- rcases g with ⟨g1, g2⟩, rcases f with ⟨f1,f2⟩,
-- exact ⟨λn, g1 ∘ f1, λn, g2 ∘ f2⟩
split,
all_goals{intro n},
let g1 := g.on_function, let f1 := f.on_function,-- Lean's not letting me "@" g.on_function etc
exact (@g1 n) ∘ (@f1 n),
let g2 := g.on_relation, let f2 := f.on_relation,
exact (@g2 n) ∘ (@f2 n)
end
lemma Lhom_funext {L1} {L2} {F G : L1 →ᴸ L2} (h_fun : F.on_function = G.on_function ) (h_rel : F.on_relation = G.on_relation ) : F = G :=
by {cases F with Ff Fr, cases G with Gf Gr, simp only *, exact and.intro h_fun h_rel}
local infix ` ∘ `:60 := Lhom.comp
@[simp]lemma id_is_left_identity {L1 L2} {F : L1 →ᴸ L2} : (Lhom.id L2) ∘ F = F := by {cases F, refl}
@[simp]lemma trivial_cast1_is_left_identity {L1 L2} {F : L1 →ᴸ L2} :
(@Lhom.cast1 L2 L2 rfl) ∘ F = F := by {cases F, refl}
@[simp]lemma id_is_right_identity {L1 L2} {F : L1 →ᴸ L2} : F ∘ (Lhom.id L1) = F := by {cases F, refl}
@[simp]lemma trivial_cast1_is_right_identity {L1 L2} {F : L1 →ᴸ L2} :
F ∘ (@Lhom.cast1 L1 L1 rfl) = F := by {cases F, refl}
structure is_injective : Prop :=
(on_function {n} : injective (on_function ϕ : L.functions n → L'.functions n))
(on_relation {n} : injective (on_relation ϕ : L.relations n → L'.relations n))
class has_decidable_range : Type u :=
(on_function {n} : decidable_pred (range (on_function ϕ : L.functions n → L'.functions n)))
(on_relation {n} : decidable_pred (range (on_relation ϕ : L.relations n → L'.relations n)))
attribute [instance] has_decidable_range.on_function has_decidable_range.on_relation
@[simp] def on_symbol : L.symbols → L'.symbols
| (sum.inl ⟨l, f⟩) := sum.inl ⟨l, ϕ.on_function f⟩
| (sum.inr ⟨l, R⟩) := sum.inr ⟨l, ϕ.on_relation R⟩
@[simp] def on_term : ∀{l}, preterm L l → preterm L' l
| _ &k := &k
| _ (func f) := func $ ϕ.on_function f
| _ (app t₁ t₂) := app (on_term t₁) (on_term t₂)
@[simp] lemma on_term_lift_at : ∀{l} (t : preterm L l) (n m : ℕ),
ϕ.on_term (t ↑' n # m) = ϕ.on_term t ↑' n # m
| _ &k n m := rfl
| _ (func f) n m := rfl
| _ (app t₁ t₂) n m := by simp*
@[simp] lemma on_term_lift {l} (n : ℕ) (t : preterm L l) : ϕ.on_term (t ↑ n) = ϕ.on_term t ↑ n :=
ϕ.on_term_lift_at t n 0
@[simp] lemma on_term_subst : ∀{l} (t : preterm L l) (s : term L) (n : ℕ),
ϕ.on_term (t[s // n]) = ϕ.on_term t[ϕ.on_term s // n]
| _ &k s n := by apply lt_by_cases k n; intro h; simp only [h,
subst_term_var_lt, on_term, subst_term_var_eq, on_term_lift, subst_term_var_gt]
| _ (func f) s n := rfl
| _ (app t₁ t₂) s n := by simp*
@[simp] def on_term_apps : ∀{l} (t : preterm L l) (ts : dvector (term L) l),
ϕ.on_term (apps t ts) = apps (ϕ.on_term t) (ts.map ϕ.on_term)
| _ t [] := rfl
| _ t (t'::ts) := by simp*
lemma not_mem_symbols_in_term_on_term {s : L'.symbols} (h : s ∉ range (ϕ.on_symbol)) :
∀{l} (t : preterm L l), s ∉ symbols_in_term (ϕ.on_term t)
| _ &k h' := not_mem_empty _ h'
| l (func f) h' := h ⟨sum.inl ⟨l, f⟩, (eq_of_mem_singleton h').symm⟩
| _ (app t₁ t₂) h' :=
or.elim h' (not_mem_symbols_in_term_on_term t₁) (not_mem_symbols_in_term_on_term t₂)
@[simp] def on_formula : ∀{l}, preformula L l → preformula L' l
| _ falsum := falsum
| _ (t₁ ≃ t₂) := ϕ.on_term t₁ ≃ ϕ.on_term t₂
| _ (rel R) := rel $ ϕ.on_relation R
| _ (apprel f t) := apprel (on_formula f) $ ϕ.on_term t
| _ (f₁ ⟹ f₂) := on_formula f₁ ⟹ on_formula f₂
| _ (∀' f) := ∀' on_formula f
@[simp] lemma on_formula_lift_at : ∀{l} (n m : ℕ) (f : preformula L l),
ϕ.on_formula (f ↑' n # m) = ϕ.on_formula f ↑' n # m
| _ n m falsum := rfl
| _ n m (t₁ ≃ t₂) := by simp
| _ n m (rel R) := rfl
| _ n m (apprel f t) := by simp*
| _ n m (f₁ ⟹ f₂) := by simp*
| _ n m (∀' f) := by simp*
@[simp] lemma on_formula_lift {l} (n : ℕ) (f : preformula L l) :
ϕ.on_formula (f ↑ n) = ϕ.on_formula f ↑ n :=
ϕ.on_formula_lift_at n 0 f
@[simp] lemma on_formula_subst : ∀{l} (f : preformula L l) (s : term L) (n : ℕ),
ϕ.on_formula (f[s // n]) = (ϕ.on_formula f)[ϕ.on_term s // n]
| _ falsum s n := rfl
| _ (t₁ ≃ t₂) s n := by simp
| _ (rel R) s n := rfl
| _ (apprel f t) s n := by simp*
| _ (f₁ ⟹ f₂) s n := by simp*
| _ (∀' f) s n := by simp*
@[simp] def on_formula_apps_rel : ∀{l} (f : preformula L l) (ts : dvector (term L) l),
ϕ.on_formula (apps_rel f ts) = apps_rel (ϕ.on_formula f) (ts.map ϕ.on_term)
| _ f [] := rfl
| _ f (t'::ts) := by simp*
lemma not_mem_symbols_in_formula_on_formula {s : L'.symbols} (h : s ∉ range (ϕ.on_symbol)) :
∀{l} (f : preformula L l), s ∉ symbols_in_formula (ϕ.on_formula f)
| _ falsum h' := not_mem_empty _ h'
| _ (t₁ ≃ t₂) h' := by cases h'; apply not_mem_symbols_in_term_on_term ϕ h _ h'
| l (rel R) h' := h ⟨sum.inr ⟨l, R⟩, (eq_of_mem_singleton h').symm⟩
| _ (apprel f t) h' :=
by { cases h', apply not_mem_symbols_in_formula_on_formula _ h',
apply not_mem_symbols_in_term_on_term ϕ h _ h' }
| _ (f₁ ⟹ f₂) h' := by cases h'; apply not_mem_symbols_in_formula_on_formula _ h'
| _ (∀' f) h' := not_mem_symbols_in_formula_on_formula f h'
lemma not_mem_function_in_formula_on_formula {l'} {f' : L'.functions l'}
(h : f' ∉ range (@on_function _ _ ϕ l')) {l} (f : preformula L l) :
(sum.inl ⟨l', f'⟩ : L'.symbols) ∉ symbols_in_formula (ϕ.on_formula f) :=
begin
apply not_mem_symbols_in_formula_on_formula,
intro h', apply h,
rcases h' with ⟨⟨n, f⟩ | ⟨n, R⟩, hf₂⟩; dsimp at hf₂; cases hf₂ with hf₂',
apply mem_range_self
end
@[simp] def on_bounded_term {n} : ∀{l} (t : bounded_preterm L n l), bounded_preterm L' n l
| _ &k := &k
| _ (bd_func f) := bd_func $ ϕ.on_function f
| _ (bd_app t s) := bd_app (on_bounded_term t) (on_bounded_term s)
@[simp] def on_bounded_term_fst {n} : ∀{l} (t : bounded_preterm L n l),
(ϕ.on_bounded_term t).fst = ϕ.on_term t.fst
| _ &k := rfl
| _ (bd_func f) := rfl
| _ (bd_app t s) := by dsimp; simp*
lemma on_bounded_term_subst_bounded_term {L0 L1} {F : L0 →ᴸ L1} {c : L0.constants}
{n n' : ℕ} : Π {l : ℕ} {t : bounded_preterm L0 (n + n' + 1) l},
F.on_bounded_term (subst_bounded_term t (bd_const c)) =
subst_bounded_term (F.on_bounded_term t) (bd_const (F.on_function c))
| _ (bd_var k) :=
begin
by_cases hkn : k.val < n,
{ simp only [Lhom.on_bounded_term, subst_bounded_term, dif_pos hkn] },
{ by_cases hnk : n < k.val,
{simp only [Lhom.on_bounded_term, subst_bounded_term, dif_neg hkn,
dif_pos hnk]},
{simpa only [Lhom.on_bounded_term, subst_bounded_term, dif_neg hkn,
dif_neg hnk]},},
end
| _ (bd_func f) := rfl
| _ (bd_app t₁ t₂) :=
begin
simp only [Lhom.on_bounded_term, subst_bounded_term],
split,
exact on_bounded_term_subst_bounded_term,
exact on_bounded_term_subst_bounded_term,
end
@[simp] def on_bounded_formula : ∀{n l} (f : bounded_preformula L n l), bounded_preformula L' n l
| _ _ bd_falsum := ⊥
| _ _ (t₁ ≃ t₂) := ϕ.on_bounded_term t₁ ≃ ϕ.on_bounded_term t₂
| _ _ (bd_rel R) := bd_rel $ ϕ.on_relation R
| _ _ (bd_apprel f t) := bd_apprel (on_bounded_formula f) $ ϕ.on_bounded_term t
| _ _ (f₁ ⟹ f₂) := on_bounded_formula f₁ ⟹ on_bounded_formula f₂
| _ _ (∀' f) := ∀' on_bounded_formula f
@[simp] def on_bounded_formula_fst : ∀{n l} (f : bounded_preformula L n l),
(ϕ.on_bounded_formula f).fst = ϕ.on_formula f.fst
| _ _ bd_falsum := rfl
| _ _ (t₁ ≃ t₂) := by simp
| _ _ (bd_rel R) := rfl
| _ _ (bd_apprel f t) := by simp*
| _ _ (f₁ ⟹ f₂) := by simp*
| _ _ (∀' f) := by simp*
lemma on_bounded_formula_fst_imp : ∀{n} (f₁ f₂ : bounded_preformula L n 0),
(ϕ.on_bounded_formula (f₁ ⟹ f₂)).fst = ϕ.on_formula (f₁ ⟹ f₂).fst
:= by simp*
lemma on_bounded_formula_fst_ex : ∀{n} (f : bounded_preformula L (n+1) 0),
(ϕ.on_bounded_formula (∃' f)).fst = ϕ.on_formula (∃' f).fst
:= by simp*
lemma on_bounded_formula_all {n} (f : bounded_preformula L (n+1) 0) :
∀' ϕ.on_bounded_formula f = ϕ.on_bounded_formula (∀' f)
:= by simp*
lemma on_bounded_formula_not {n} (f : bounded_preformula L n 0) :
bd_not (ϕ.on_bounded_formula f) = ϕ.on_bounded_formula (bd_not f)
:= by simp [bd_not]
lemma on_bounded_formula_ex {n} (f : bounded_preformula L (n+1) 0) :
∃' ϕ.on_bounded_formula f = ϕ.on_bounded_formula (∃' f)
:= by simp only [bd_ex, on_bounded_formula_not, on_bounded_formula_all]
lemma on_bounded_term_cast {n} : Π {m l} {t : bounded_preterm L n l} {h : n ≤ m},
ϕ.on_bounded_term (bounded_preterm.cast h t) =
bounded_preterm.cast h (ϕ.on_bounded_term t)
| _ _ &k _ := rfl
| _ _ (bd_func f) _ := rfl
| _ _ (bd_app t s) _ :=
begin
rw bounded_preterm.cast,
rw on_bounded_term,
rw @on_bounded_term_cast _ _ t,
rw @on_bounded_term_cast _ _ s,
refl,
end
lemma on_bounded_formula_cast {L0 L1} {F : L0 →ᴸ L1} :
Π {n m l} {h : n ≤ m} {ψ : bounded_preformula L0 n l},
F.on_bounded_formula (bounded_preformula.cast h ψ) =
bounded_preformula.cast h (F.on_bounded_formula ψ)
| _ _ _ h bd_falsum := rfl
| _ _ _ h (t₁ ≃ t₂) :=
by simp only [bounded_preformula.cast, Lhom.on_bounded_formula,
Lhom.on_bounded_term_cast, eq_self_iff_true, and_self, cast_eq]
| _ _ _ h (bd_rel R) := rfl
| _ _ _ h (bd_apprel f t) :=
by simp only [bounded_preformula.cast, Lhom.on_bounded_formula,
Lhom.on_bounded_term_cast, @on_bounded_formula_cast _ _ _ _ f,
eq_self_iff_true, and_self, cast_eq]
| _ _ _ h (f₁ ⟹ f₂) :=
by simp only [bounded_preformula.cast, Lhom.on_bounded_formula,
Lhom.on_bounded_term_cast, @on_bounded_formula_cast _ _ _ _ f₁,
@on_bounded_formula_cast _ _ _ _ f₂,
eq_self_iff_true, and_self, cast_eq]
| _ _ _ h (∀' f) :=
by simp only [bounded_preformula.cast, Lhom.on_bounded_formula,
@on_bounded_formula_cast _ _ _ _ f]
lemma on_bounded_formula_cast1 : ∀{n l} (f : bounded_preformula L n l),
ϕ.on_bounded_formula f.cast1
=
(ϕ.on_bounded_formula f).cast1
| _ _ bd_falsum := rfl
| _ _ (t₁ ≃ t₂) :=
by simp only [bounded_preformula.cast1, bounded_preformula.cast,
on_bounded_formula, on_bounded_term_cast, eq_self_iff_true, true_and]
| _ _ (bd_rel R) :=
by simp only [bounded_preformula.cast1, bounded_preformula.cast,
on_bounded_formula]
| _ _ (bd_apprel f t) :=
begin
simp only [bounded_preformula.cast1, bounded_preformula.cast,
on_bounded_formula, on_bounded_term_cast, eq_self_iff_true, and_true],
exact on_bounded_formula_cast1 f,
end
| _ _ (f₁ ⟹ f₂) :=
begin
simp only [bounded_preformula.cast1, bounded_preformula.cast,
on_bounded_formula],
split,
repeat {apply on_bounded_formula_cast1},
end
| _ _ (∀' f) :=
begin
simp only [bounded_preformula.cast1, bounded_preformula.cast,
on_bounded_formula],
apply on_bounded_formula_cast1,
end
lemma on_bounded_formula_subst_bounded_formula
{L0 L1} {F : L0 →ᴸ L1} {c : L0.constants}
: Π {n n' n'' l} {ψ : bounded_preformula L0 n'' l} {h : n + (n' + 1) = n''},
F.on_bounded_formula (subst_bounded_formula ψ (bd_const c) h) =
subst_bounded_formula (F.on_bounded_formula ψ) (bd_const (F.on_function c)) h
| _ _ _ _ bd_falsum rfl := rfl
| _ _ _ _ (t₁ ≃ t₂) rfl :=
by simp only [Lhom.on_bounded_formula,
subst_bounded_formula_term, bounded_preformula.cast_eq,
Lhom.on_bounded_term_cast, on_bounded_term_subst_bounded_term,
eq_self_iff_true, and_self, cast_eq]
| _ _ _ _ (bd_rel R) rfl :=
by simp only [subst_bounded_formula, Lhom.on_bounded_formula]
| _ _ _ _ (bd_apprel f t) rfl :=
by simp only [subst_bounded_formula, Lhom.on_bounded_formula,
@on_bounded_formula_subst_bounded_formula _ _ _ _ f,
on_bounded_term_subst_bounded_term, eq_self_iff_true, and_self]
| _ _ _ _ (f₁ ⟹ f₂) rfl :=
by simp only [subst_bounded_formula, Lhom.on_bounded_formula,
@on_bounded_formula_subst_bounded_formula _ _ _ _ f₁,
@on_bounded_formula_subst_bounded_formula _ _ _ _ f₂,
eq_self_iff_true, and_self]
| _ _ _ _ (∀' f) rfl :=
by simp only [subst_bounded_formula, Lhom.on_bounded_formula,
bounded_preformula.cast_eq, on_bounded_formula_cast];
rw ← @on_bounded_formula_subst_bounded_formula _ _ _ _ f
lemma on_bounded_formula_subst0_bounded_formula
{L0 L1} {F : L0 →ᴸ L1} {c : L0.constants}
{n l} {ψ : bounded_preformula L0 (n+1) l} :
F.on_bounded_formula (ψ[bd_const c /0]) =
(F.on_bounded_formula ψ)[bd_const (F.on_function c) /0] :=
by simp only [subst0_bounded_formula, bounded_preformula.cast_eq,
on_bounded_formula_cast, on_bounded_formula_subst_bounded_formula]
/- Various lemmas of the shape "on_etc is a functor to Type*" -/
@[simp]lemma comp_on_function {L1} {L2} {L3} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2):
(g ∘ f).on_function =
begin intro n, let g1 := g.on_function, let f1 := f.on_function,
exact function.comp (@g1 n) (@f1 n) end
:= rfl
/- comp_on_function with explicit nat parameter -/
@[simp]lemma comp_on_function' {L1} {L2} {L3} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2) (n):
@on_function L1 L3 (g ∘ f) n =
function.comp (@on_function L2 L3 g n) (@on_function L1 L2 f n)
:= rfl
@[simp]lemma comp_on_relation {L1} {L2} {L3} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2) :
(g ∘ f).on_relation =
begin intro n, let g1 := g.on_relation, let f1 := f.on_relation,
exact function.comp (@g1 n) (@f1 n) end
:= rfl
/- comp_on_relation with explicit nat parameter -/
@[simp]lemma comp_on_relation' {L1} {L2} {L3} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2) (n):
@on_relation L1 L3 (g ∘ f) n =
function.comp (@on_relation L2 L3 g n) (@on_relation L1 L2 f n)
:= rfl
@[simp]lemma comp_on_term {L1} {L2} {L3} {l : ℕ} (g : L2 →ᴸ L3) (f : L1 →ᴸ L2) :
@on_term L1 L3 (g ∘ f) l = function.comp (@on_term L2 L3 g l) (@on_term L1 L2 f l) :=
by {fapply funext, intro x, induction x, tidy}
@[simp]lemma comp_on_formula {L1} {L2} {L3} {l : ℕ}(g : L2 →ᴸ L3) (f : L1 →ᴸ L2) :
@on_formula L1 L3 (g ∘ f) l = function.comp (@on_formula L2 L3 g l) (@on_formula L1 L2 f l) :=
by {fapply funext, intro x, induction x, tidy, all_goals{rw[comp_on_term]} }
@[simp]lemma comp_on_bounded_term {L1} {L2} {L3} {n l : ℕ}(g : L2 →ᴸ L3) (f : L1 →ᴸ L2) :
@on_bounded_term L1 L3 (g ∘ f) n l = function.comp (@on_bounded_term L2 L3 g n l) (@on_bounded_term L1 L2 f n l) :=
funext $ λ _, by tidy
@[simp]lemma comp_on_bounded_formula {L1} {L2} {L3} {n l : ℕ}(g : L2 →ᴸ L3) (f : L1 →ᴸ L2) :
@on_bounded_formula L1 L3 (g ∘ f) n l = function.comp (@on_bounded_formula L2 L3 g n l) (@on_bounded_formula L1 L2 f n l) :=
by {apply funext, intro x, ext, induction x; simp}
lemma id_term {L} : Πl, Π f, (@on_term L L (Lhom.id L) l) f = f
| _ &k := rfl
| _ (func f) := rfl
| l (app t₁ t₂) := by simp[id_term (l+1) t₁, id_term 0 t₂]
lemma id_formula {L} : Π l, Π f, (@on_formula L L (Lhom.id L) l) f = f
| _ falsum := rfl
| _ (t₁ ≃ t₂) := by simp[id_term]
| _ (rel R) := rfl
| l (apprel f t) := by {dsimp, rw[id_formula _ f, id_term _ t]}
| _ (f₁ ⟹ f₂) := by {dsimp, rw[id_formula _ f₁, id_formula _ f₂]}
| _ (∀' f) := by {dsimp, rw[id_formula _ f]}
lemma id_bounded_term {L} (n) : Πl, Π f, (@on_bounded_term L L (Lhom.id L) n l) f = f
| _ (bd_var k) := rfl
| _ (bd_func k) := rfl
| l (bd_app t₁ t₂) := by simp[id_bounded_term (l+1) t₁, id_bounded_term 0 t₂]
lemma id_bounded_formula {L} : Π n l, Π f, (@on_bounded_formula L L (Lhom.id L) n l) f = f
| _ _ bd_falsum := rfl
| _ _ (t₁ ≃ t₂) := by simp[id_bounded_term]
| _ _ (bd_rel R) := rfl
| _ l (bd_apprel f t) := by {dsimp, rw[id_bounded_formula _ _ f, id_bounded_term _ _ t]}
| _ _ (f₁ ⟹ f₂) := by {dsimp, rw[id_bounded_formula _ _ f₁, id_bounded_formula _ _ f₂]}
| _ _ (∀' f) := by {dsimp, rw[id_bounded_formula _ _ f]}
@[simp] def on_closed_term (t : closed_term L) : closed_term L' := ϕ.on_bounded_term t
@[simp] def on_sentence (f : sentence L) : sentence L' := ϕ.on_bounded_formula f
def on_sentence_fst (f : sentence L) : (ϕ.on_sentence f).fst = ϕ.on_formula f.fst :=
ϕ.on_bounded_formula_fst f
def on_prf {Γ : set $ formula L} {f : formula L} (h : Γ ⊢ f) : ϕ.on_formula '' Γ ⊢ ϕ.on_formula f :=
begin
induction h,
{ apply axm, exact mem_image_of_mem _ h_h, },
{ apply impI, rw [←image_insert_eq], exact h_ih },
{ exact impE _ h_ih_h₁ h_ih_h₂, },
{ apply falsumE, rw [image_insert_eq] at h_ih, exact h_ih },
{ apply allI, rw [image_image] at h_ih ⊢, simp [image_congr' (on_formula_lift ϕ 1)] at h_ih,
exact h_ih },
{ apply allE _ _ h_ih, symmetry, apply on_formula_subst },
{ apply prf.ref },
{ simp at h_ih_h₂, apply subst _ h_ih_h₁ h_ih_h₂, simp }
end
def on_sprf {Γ : set $ sentence L} {f : sentence L} (h : Γ ⊢ f) :
ϕ.on_sentence '' Γ ⊢ ϕ.on_sentence f :=
by have := ϕ.on_prf h; simp only [sprf, Theory.fst, image_image, function.comp,
on_bounded_formula_fst, on_sentence] at this ⊢; exact this
instance decidable_mem_of_decidable_pred_set {α : Type*} {r : set α}
[h : decidable_pred r] (a : α) : decidable (a ∈ r) :=
decidable.rec_on (h a) (λ h, is_false h) (λ h, is_true h)
/- replace all symbols not in the image of ϕ by a new variable -/
noncomputable def reflect_term [has_decidable_range ϕ] (t : term L') (m : ℕ) : term L :=
term.elim (λk, &k ↑' 1 # m)
(λl f' ts' ts, if hf' : f' ∈ range (@on_function _ _ ϕ l)
then apps (func (classical.some hf')) ts else &m) t
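/- For example, if a function symbol f' of L' is not in the image of ϕ, any term headed by f'
   is collapsed to the fresh variable &m; see `reflect_term_apps_neg` below. -/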
variable {ϕ}
lemma reflect_term_apps_pos [has_decidable_range ϕ] {l} {f : L'.functions l}
(hf : f ∈ range (@on_function _ _ ϕ l)) (ts : dvector (term L') l) (m : ℕ) :
ϕ.reflect_term (apps (func f) ts) m =
apps (func (classical.some hf)) (ts.map (λt, ϕ.reflect_term t m)) :=
(term.elim_apps _ _ f ts).trans $ by rw [dif_pos hf]; refl
lemma reflect_term_apps_neg [has_decidable_range ϕ] {l} {f : L'.functions l}
(hf : f ∉ range (@on_function _ _ ϕ l)) (ts : dvector (term L') l) (m : ℕ) :
ϕ.reflect_term (apps (func f) ts) m = &m :=
(term.elim_apps _ _ f ts).trans $ by rw [dif_neg hf]
lemma reflect_term_const_pos [has_decidable_range ϕ] {c : L'.constants}
(hf : c ∈ range (@on_function _ _ ϕ 0)) (m : ℕ) :
ϕ.reflect_term (func c) m = func (classical.some hf) :=
by apply reflect_term_apps_pos hf ([]) m
lemma reflect_term_const_neg [has_decidable_range ϕ] {c : L'.constants}
(hf : c ∉ range (@on_function _ _ ϕ 0)) (m : ℕ) :
ϕ.reflect_term (func c) m = &m :=
by apply reflect_term_apps_neg hf ([]) m
@[simp] lemma reflect_term_var [has_decidable_range ϕ] (k : ℕ) (m : ℕ) :
ϕ.reflect_term &k m = &k ↑' 1 # m := rfl
@[simp] lemma reflect_term_on_term [has_decidable_range ϕ] (hϕ : is_injective ϕ) (t : term L)
(m : ℕ) : ϕ.reflect_term (ϕ.on_term t) m = t ↑' 1 # m :=
begin
refine term.rec _ _ t; clear t; intros,
{ refl },
{ simp [reflect_term_apps_pos (mem_range_self f)],
rw [classical.some_eq f (λy hy, hϕ.on_function hy), dvector.map_congr_pmem ih_ts] }
end
lemma reflect_term_lift_at [has_decidable_range ϕ] (hϕ : is_injective ϕ)
{n m m' : ℕ} (h : m ≤ m') (t : term L') :
ϕ.reflect_term (t ↑' n # m) (m'+n) = ϕ.reflect_term t m' ↑' n # m :=
begin
refine term.rec _ _ t; clear t; intro l,
{ simp only [reflect_term_var],
rw[lift_term_at2_small _ _ _ h],
simpa only [lift_term_at], },
{ intros f _ ih_ts, by_cases h' : f ∈ range (@on_function _ _ ϕ l);
simp [reflect_term_apps_pos, reflect_term_apps_neg, h', h,
dvector.map_congr_pmem ih_ts, -add_comm]}
end
lemma reflect_term_lift [has_decidable_range ϕ] (hϕ : is_injective ϕ) {n m : ℕ}
(t : term L') : ϕ.reflect_term (t ↑ n) (m+n) = ϕ.reflect_term t m ↑ n :=
reflect_term_lift_at hϕ m.zero_le t
lemma reflect_term_subst [has_decidable_range ϕ] (hϕ : is_injective ϕ) (n m : ℕ)
(s t : term L') :
ϕ.reflect_term (t[s // n]) (m+n) = (ϕ.reflect_term t (m+n+1))[ϕ.reflect_term s m // n] :=
begin
refine term.rec _ _ t; clear t; intros,
{ simp [-lift_term_at, -add_comm, -add_assoc],
apply lt_by_cases k n; intro h,
{ have h₂ : ¬(m + n ≤ k), from λh', not_le_of_gt h (le_trans (nat.le_add_left n m) h'),
have h₃ : ¬(m + n + 1 ≤ k), from λh', h₂ $ le_trans (le_succ _) h',
simp [h, h₂, h₃, -add_comm, -add_assoc] },
{ have h₂ : ¬(m + n + 1 ≤ n), from not_le_of_gt (lt_of_le_of_lt (nat.le_add_left n m) (lt.base _)) ,
simp [h, h₂, reflect_term_lift hϕ, -add_comm, -add_assoc] },
{ have hk := one_le_of_lt h,
have h₄ : n < k + 1, from lt_trans h (lt.base k),
by_cases h₂' : m + n + 1 ≤ k,
{ have h₂ : m + n + 1 ≤ k, from h₂',
have h₃ : m + n ≤ k - 1, from (le_tsub_iff_right hk).mpr h₂,
simp only [h, h₂, h₃, h₄,
subst_term_var_gt, add_zero, if_true, add_succ_sub_one, lift_term_at, reflect_term_var],
rw [tsub_add_eq_max, max_eq_left hk] },
{ have h₂ : ¬(m + n + 1 ≤ k), from h₂',
have h₃ : ¬(m + n ≤ k - 1), from λh', h₂ $ (le_tsub_iff_right hk).mp h',
simp only [h, h₂, h₃, subst_term_var_gt, lift_term_at, if_false, reflect_term_var] }}},
{ have h : n < m + n + 1, from nat.lt_succ_of_le (nat.le_add_left n m),
by_cases h' : f ∈ range (@on_function _ _ ϕ l);
simp only [reflect_term_apps_pos, reflect_term_apps_neg, h, h',
dvector.map_congr_pmem ih_ts, dvector.map_map, subst_term_apps,
subst_term_func, add_zero, subst_term_var_gt, add_succ_sub_one, not_false_iff] }
end
variable (ϕ)
noncomputable def reflect_formula [has_decidable_range ϕ] (f : formula L') :
∀(m : ℕ), formula L :=
formula.rec (λm, ⊥) (λt₁ t₂ m, ϕ.reflect_term t₁ m ≃ ϕ.reflect_term t₂ m)
(λl R' xs' m, if hR' : R' ∈ range (@on_relation _ _ ϕ l)
then apps_rel (rel (classical.some hR')) (xs'.map $ λt, ϕ.reflect_term t m) else ⊥)
(λf₁' f₂' f₁ f₂ m, f₁ m ⟹ f₂ m) (λf' f m, ∀' f (m+1)) f
variable {ϕ}
lemma reflect_formula_apps_rel_pos [has_decidable_range ϕ] {l} {R : L'.relations l}
(hR : R ∈ range (@on_relation _ _ ϕ l)) (ts : dvector (term L') l) (m : ℕ) :
ϕ.reflect_formula (apps_rel (rel R) ts) m =
apps_rel (rel (classical.some hR)) (ts.map (λt, ϕ.reflect_term t m)) :=
by simp [reflect_formula, formula.rec_apps_rel, dif_pos hR]
lemma reflect_formula_apps_rel_neg [has_decidable_range ϕ] {l} {R : L'.relations l}
(hR : R ∉ range (@on_relation _ _ ϕ l)) (ts : dvector (term L') l) (m : ℕ) :
ϕ.reflect_formula (apps_rel (rel R) ts) m = ⊥ :=
by simp [reflect_formula, formula.rec_apps_rel, dif_neg hR]
@[simp] lemma reflect_formula_equal [has_decidable_range ϕ] (t₁ t₂ : term L') (m : ℕ) :
ϕ.reflect_formula (t₁ ≃ t₂) m = ϕ.reflect_term t₁ m ≃ ϕ.reflect_term t₂ m := rfl
@[simp] lemma reflect_formula_imp [has_decidable_range ϕ] (f₁ f₂ : formula L') (m : ℕ) :
ϕ.reflect_formula (f₁ ⟹ f₂) m = ϕ.reflect_formula f₁ m ⟹ ϕ.reflect_formula f₂ m := rfl
@[simp] lemma reflect_formula_all [has_decidable_range ϕ] (f : formula L') (m : ℕ) :
ϕ.reflect_formula (∀' f) m = ∀' (ϕ.reflect_formula f (m+1)) := rfl
@[simp] lemma reflect_formula_on_formula [has_decidable_range ϕ] (hϕ : is_injective ϕ) (m : ℕ)
(f : formula L) : ϕ.reflect_formula (ϕ.on_formula f) m = f ↑' 1 # m :=
begin
revert m, refine formula.rec _ _ _ _ _ f; clear f; intros,
{ refl },
{ simp [hϕ] },
{ simp [reflect_formula_apps_rel_pos (mem_range_self R), hϕ],
rw [classical.some_eq R (λy hy, hϕ.on_relation hy)] },
{ simp* },
{ simp* }
end
lemma reflect_formula_lift_at [has_decidable_range ϕ] (hϕ : is_injective ϕ) {n m m' : ℕ}
(h : m ≤ m') (f : formula L') :
ϕ.reflect_formula (f ↑' n # m) (m'+n) = ϕ.reflect_formula f m' ↑' n # m :=
begin
revert m m', refine formula.rec _ _ _ _ _ f; clear f,
{ intros, refl },
{ intros _ _ _ _ h, simp [reflect_term_lift_at hϕ h, -add_comm] },
{ intros l R ts _ _ h,
by_cases h' : R ∈ range (@on_relation _ _ ϕ l);
simp [reflect_formula_apps_rel_pos, reflect_formula_apps_rel_neg,
h', h, ts.map_congr (reflect_term_lift_at hϕ h), -add_comm] },
{ intros _ _ ih₁ ih₂ _ _ h, simp [ih₁ h, ih₂ h, -add_comm] },
{ intros _ ih m m' h, simp [-add_comm, -add_assoc], rw [←ih],
{
apply congr_arg (ϕ.reflect_formula (f ↑' n # (m + 1))),
repeat {rw add_assoc},
apply congr_arg (has_add.add m'),
exact add_comm _ _,
},
{exact add_le_add_right h 1} },
end
lemma reflect_formula_lift [has_decidable_range ϕ] (hϕ : is_injective ϕ) (n m : ℕ)
(f : formula L') : ϕ.reflect_formula (f ↑ n) (m+n) = ϕ.reflect_formula f m ↑ n :=
reflect_formula_lift_at hϕ m.zero_le f
lemma reflect_formula_lift1 [has_decidable_range ϕ] (hϕ : is_injective ϕ) (m : ℕ)
(f : formula L') : ϕ.reflect_formula (f ↑ 1) (m+1) = ϕ.reflect_formula f m ↑ 1 :=
reflect_formula_lift hϕ 1 m f
lemma reflect_formula_subst [has_decidable_range ϕ] (hϕ : is_injective ϕ) (f : formula L')
(n m : ℕ) (s : term L') :
ϕ.reflect_formula (f[s // n]) (m+n) = (ϕ.reflect_formula f (m+n+1))[ϕ.reflect_term s m // n] :=
begin
revert n, refine formula.rec _ _ _ _ _ f; clear f,
{ intro, refl },
{ intros, simp [reflect_term_subst hϕ] },
  { intros l R ts n,
by_cases h' : R ∈ range (@on_relation _ _ ϕ l);
simp only [reflect_formula_apps_rel_pos,
reflect_formula_apps_rel_neg, h',
ts.map_congr (reflect_term_subst hϕ n m s), not_false_iff,
dvector.map_map, subst_formula, subst_formula_apps_rel]},
{ intros _ _ ih₁ ih₂, simp [ih₁, ih₂] },
{ intros _ ih _, simp only [subst_formula, reflect_formula_all, add_assoc, ih],},
end
@[simp] lemma reflect_formula_subst0 [has_decidable_range ϕ] (hϕ : is_injective ϕ) (m : ℕ)
(f : formula L') (s : term L') :
ϕ.reflect_formula (f[s // 0]) m = (ϕ.reflect_formula f (m+1))[ϕ.reflect_term s m // 0] :=
reflect_formula_subst hϕ f 0 m s
noncomputable def reflect_prf_gen [has_decidable_range ϕ] (hϕ : is_injective ϕ) {Γ}
{f : formula L'} (m) (H : Γ ⊢ f) : (λf, ϕ.reflect_formula f m) '' Γ ⊢ ϕ.reflect_formula f m :=
begin
induction H generalizing m,
{ apply axm, apply mem_image_of_mem _ H_h },
{ apply impI, have h := @H_ih m, rw [image_insert_eq] at h, exact h },
{ apply impE, apply H_ih_h₁, apply H_ih_h₂ },
{ apply falsumE, have h := @H_ih m, rw [image_insert_eq] at h, exact h },
{ apply allI, rw [image_image], have h := @H_ih (m+1), rw [image_image] at h,
apply cast _ h, congr1, apply image_congr' (reflect_formula_lift1 hϕ m) },
{ apply allE, have h := @H_ih m, simp at h, exact h, symmetry,
apply reflect_formula_subst0 hϕ },
{ apply ref },
{ apply subst, have h := @H_ih_h₁ m, simp at h, exact h,
have h := @H_ih_h₂ m, simp [hϕ] at h, exact h, simp [hϕ] },
end
section
/- maybe generalize to filter_symbol? -/
@[reducible] def filter_symbols (p : L.symbols → Prop) : Language :=
⟨λl, subtype (λf, p (sum.inl ⟨l, f⟩)), λl, subtype (λR, p (sum.inr ⟨l, R⟩))⟩
def filter_symbols_Lhom (p : L.symbols → Prop) : filter_symbols p →ᴸ L :=
⟨λl, subtype.val, λl, subtype.val⟩
def is_injective_filter_symbols_Lhom (p : L.symbols → Prop) :
is_injective (filter_symbols_Lhom p) :=
⟨λl, subtype.val_injective, λl, subtype.val_injective⟩
lemma find_term_filter_symbols (p : L.symbols → Prop) :
∀{l} (t : preterm L l) (h : symbols_in_term t ⊆ { s | p s }),
{ t' : preterm (filter_symbols p) l // (filter_symbols_Lhom p).on_term t' = t }
| _ &k h := ⟨&k, rfl⟩
| _ (func f) h := ⟨func ⟨f, h $ mem_singleton _⟩, rfl⟩
| _ (app t₁ t₂) h :=
begin
let ih₁ := find_term_filter_symbols t₁ (subset.trans (subset_union_left _ _) h),
let ih₂ := find_term_filter_symbols t₂ (subset.trans (subset_union_right _ _) h),
refine ⟨app ih₁.1 ih₂.1, _⟩,
simp only [on_term, ih₁.2, ih₂.2, eq_self_iff_true, and_self],
end
lemma find_formula_filter_symbols (p : L.symbols → Prop) :
∀{l} (f : preformula L l) (h : symbols_in_formula f ⊆ { s | p s }),
{ f' : preformula (filter_symbols p) l // (filter_symbols_Lhom p).on_formula f' = f }
| _ falsum h := ⟨⊥, rfl⟩
| _ (t₁ ≃ t₂) h :=
begin
let ih₁ := find_term_filter_symbols p t₁ (subset.trans (subset_union_left _ _) h),
let ih₂ := find_term_filter_symbols p t₂ (subset.trans (subset_union_right _ _) h),
refine ⟨ih₁.1 ≃ ih₂.1, _⟩,
simp only [on_formula, ih₁.2, ih₂.2, eq_self_iff_true, and_self],
end
| _ (rel R) h := ⟨rel ⟨R, h $ mem_singleton _⟩, rfl⟩
| _ (apprel f t) h :=
begin
let ih₁ := find_formula_filter_symbols f (subset.trans (subset_union_left _ _) h),
let ih₂ := find_term_filter_symbols p t (subset.trans (subset_union_right _ _) h),
refine ⟨apprel ih₁.1 ih₂.1, _⟩, simp only [on_formula, subtype.val_eq_coe],
exact ⟨ ih₁.2, ih₂.2 ⟩,
end
| _ (f₁ ⟹ f₂) h :=
begin
let ih₁ := find_formula_filter_symbols f₁ (subset.trans (subset_union_left _ _) h),
let ih₂ := find_formula_filter_symbols f₂ (subset.trans (subset_union_right _ _) h),
refine ⟨ih₁.1 ⟹ ih₂.1, _⟩,
simp only [on_formula, ih₁.2, ih₂.2, eq_self_iff_true, and_self]
end
| _ (∀' f) h :=
begin
let ih := find_formula_filter_symbols f h,
refine ⟨∀' ih.1, _⟩, simp only [on_formula, ih.2],
end
end
noncomputable def generalize_constant {Γ} (c : L.constants)
(hΓ : (sum.inl ⟨0, c⟩ : L.symbols) ∉ ⋃₀ (symbols_in_formula '' Γ))
{f : formula L} (hf : (sum.inl ⟨0, c⟩ : L.symbols) ∉ symbols_in_formula f)
(H : Γ ⊢ f[func c // 0]) : Γ ⊢ ∀' f :=
begin
apply allI,
let p : L.symbols → Prop := (≠ sum.inl ⟨0, c⟩),
let ϕ := filter_symbols_Lhom p,
have hϕ : is_injective ϕ := is_injective_filter_symbols_Lhom p,
have hc : c ∉ range (on_function ϕ),
{ intro hc, rw [mem_range] at hc, rcases hc with ⟨c', hc'⟩,
apply c'.2, rw [←hc'], refl },
have hf' : symbols_in_formula f ⊆ {s : Language.symbols L | p s},
{ intros s hs hps, subst hps, exact hf hs },
rcases find_formula_filter_symbols p f hf' with ⟨f, rfl⟩,
have : {Γ' // Lhom.on_formula ϕ '' Γ' = Γ } ,
{ refine ⟨Lhom.on_formula ϕ ⁻¹' Γ, _⟩,
apply image_preimage_eq_of_subset, intros f' hf',
have : symbols_in_formula f' ⊆ {s : Language.symbols L | p s},
{ intros s hs hps, subst hps, exact hΓ ⟨_, mem_image_of_mem _ hf', hs⟩ },
rcases find_formula_filter_symbols p f' this with ⟨f, rfl⟩,
apply mem_range_self },
rcases this with ⟨Γ, rfl⟩,
rw [image_image, ←image_congr' (ϕ.on_formula_lift 1),
←image_image ϕ.on_formula],
apply ϕ.on_prf,
haveI : has_decidable_range (filter_symbols_Lhom p) :=
⟨λn f, classical.prop_decidable _, λn R, classical.prop_decidable _⟩,
have := reflect_prf_gen hϕ 0 H,
rwa [reflect_formula_subst0 hϕ, reflect_term_const_neg hc, image_image,
image_congr' (reflect_formula_on_formula hϕ 0),
reflect_formula_on_formula hϕ, lift_subst_formula_cancel] at this
end
noncomputable def sgeneralize_constant {T : Theory L} (c : L.constants)
(hΓ : (sum.inl ⟨0, c⟩ : L.symbols) ∉ ⋃₀ (symbols_in_formula '' T.fst))
{f : bounded_formula L 1} (hf : (sum.inl ⟨0, c⟩ : L.symbols) ∉ symbols_in_formula f.fst)
(H : T ⊢ f[bd_func c /0]) : T ⊢ ∀' f :=
by simp only [sprf, subst0_bounded_formula_fst] at H;
exact generalize_constant c hΓ hf H
noncomputable def reflect_prf {Γ : set $ formula L} {f : formula L} (hϕ : ϕ.is_injective)
(h : ϕ.on_formula '' Γ ⊢ ϕ.on_formula f) : Γ ⊢ f :=
begin
haveI : has_decidable_range ϕ :=
⟨λl f, classical.prop_decidable _, λl R, classical.prop_decidable _⟩,
apply reflect_prf_lift1,
have := reflect_prf_gen hϕ 0 h, simp [image_image, hϕ] at this, exact this
end
noncomputable def reflect_sprf {Γ : set $ sentence L} {f : sentence L} (hϕ : ϕ.is_injective)
(h : ϕ.on_sentence '' Γ ⊢ ϕ.on_sentence f) : Γ ⊢ f :=
by { apply reflect_prf hϕ, simp only [sprf, Theory.fst, image_image, function.comp,
on_bounded_formula_fst, on_sentence] at h ⊢, exact h }
lemma on_term_inj (h : ϕ.is_injective) {l} : injective (ϕ.on_term : preterm L l → preterm L' l) :=
begin
intros x y hxy, induction x generalizing y; cases y; try {injection hxy with hxy' hxy''},
{ rw [hxy'] },
{ rw [h.on_function hxy'] },
{ congr1, exact x_ih_t hxy', exact x_ih_s hxy'' }
end
lemma on_formula_inj (h : ϕ.is_injective) {l} :
injective (ϕ.on_formula : preformula L l → preformula L' l) :=
begin
intros x y hxy, induction x generalizing y; cases y; try {injection hxy with hxy' hxy''},
{ refl },
{ rw [on_term_inj h hxy', on_term_inj h hxy''] },
{ rw [h.on_relation hxy'] },
{ rw [x_ih hxy', on_term_inj h hxy''] },
{ rw [x_ih_f₁ hxy', x_ih_f₂ hxy''] },
{ rw [x_ih hxy'] }
end
lemma on_bounded_term_inj (h : ϕ.is_injective) {n} {l} : injective (ϕ.on_bounded_term : bounded_preterm L n l → bounded_preterm L' n l) :=
begin
intros x y hxy, induction x generalizing y; cases y; try {injection hxy with hxy' hxy''},
{ rw [hxy'] },
{ rw [h.on_function hxy'] },
{ congr1, exact x_ih_t hxy', exact x_ih_s hxy'' }
end
lemma on_bounded_formula_inj (h : ϕ.is_injective) {n l}:
injective (ϕ.on_bounded_formula : bounded_preformula L n l → bounded_preformula L' n l) :=
begin
intros x y hxy, induction x generalizing y; cases y; try {injection hxy with hxy' hxy''},
{ refl },
{ rw [on_bounded_term_inj h hxy', on_bounded_term_inj h hxy''] },
{ rw [h.on_relation hxy'] },
{ rw [x_ih hxy', on_bounded_term_inj h hxy''] },
{ rw [x_ih_f₁ hxy', x_ih_f₂ hxy''] },
{ rw [x_ih hxy'] }
end
variable (ϕ)
/-- Given L → L' and an L'-structure S, the reduct of S to L is the L-structure given by
restricting interpretations from L' to L --/
def reduct (S : Structure L') : Structure L :=
⟨ S.carrier, λn f, S.fun_map $ ϕ.on_function f, λn R, S.rel_map $ ϕ.on_relation R⟩
notation S`[[`:95 ϕ`]]`:90 := reduct ϕ S
variable {ϕ}
@[simp] def reduct_coe (S : Structure L') : (reduct ϕ S : Type*) = S :=
rfl
def reduct_id {S : Structure L'} : S → S[[ϕ]] := id
@[simp] lemma reduct_term_eq {S : Structure L'} (hϕ : ϕ.is_injective) {n} :
Π(xs : dvector S n) {l} (t : bounded_preterm L n l) (xs' : dvector S l), realize_bounded_term xs (on_bounded_term ϕ t) xs' = @realize_bounded_term L (reduct ϕ S) n xs l t xs'
| xs _ (bd_var k) xs' := rfl
| xs _ (bd_func f) xs' := rfl
| xs l (bd_app t s) xs' := by simp*
lemma reduct_bounded_formula_iff {S : Structure L'} (hϕ : ϕ.is_injective) : Π{n l} (xs : dvector S n) (xs' : dvector S l) (f : bounded_preformula L n l),
realize_bounded_formula xs (on_bounded_formula ϕ f) xs' ↔ @realize_bounded_formula L (reduct ϕ S) n l xs f xs'
| _ _ xs xs' (bd_falsum) := by refl
| _ _ xs xs' (bd_equal t₁ t₂) := by simp [hϕ]
| _ _ xs xs' (bd_rel R) := by refl
| _ _ xs xs' (bd_apprel f t) := by simp*
| _ _ xs xs' (f₁ ⟹ f₂) := by simp*
| _ _ xs xs' (∀' f) := by apply forall_congr; intro x;simp*
lemma reduct_ssatisfied {S : Structure L'} {f : sentence L} (hϕ : ϕ.is_injective)
(h : S ⊨ ϕ.on_sentence f) : ϕ.reduct S ⊨ f :=
(reduct_bounded_formula_iff hϕ ([]) ([]) f).mp h
lemma reduct_ssatisfied' {S : Structure L'} {f : sentence L} (hϕ : ϕ.is_injective)
(h : S ⊨ ϕ.on_bounded_formula f) : ϕ.reduct S ⊨ f :=
(reduct_bounded_formula_iff hϕ ([]) ([]) f).mp h
def reduct_all_ssatisfied {S : Structure L'} {T : Theory L} (hϕ : ϕ.is_injective)
(h : S ⊨ ϕ.on_sentence '' T) : S[[ϕ]] ⊨ T :=
λf hf, reduct_ssatisfied hϕ $ h $ mem_image_of_mem _ hf
lemma reduct_nonempty_of_nonempty {S : Structure L'} (H : nonempty S) : nonempty (reduct ϕ S) :=
by {apply nonempty.map, repeat{assumption}, exact reduct_id}
variable (ϕ)
@[reducible]def Theory_induced (T : Theory L) : Theory L' := ϕ.on_sentence '' T
variable {ϕ}
lemma is_consistent_Theory_induced (hϕ : ϕ.is_injective) {T : Theory L} (hT : is_consistent T) :
is_consistent (ϕ.Theory_induced T) :=
λH, hT $ H.map $ λh, reflect_sprf hϕ (by apply h)
/- we could generalize this, replacing set.univ by any set s, but then we cannot use set.image
anymore (since the domain of g would be s), and things would be more annoying -/
lemma is_consistent_extend {T : Theory L} (hT : is_consistent T) (hϕ : ϕ.is_injective)
(h : bounded_formula L 1 → bounded_formula L 1)
(hT' : ∀(f : bounded_formula L 1), T ⊢ ∃' (h f))
(g : bounded_formula L 1 → L'.constants) (hg : injective g)
(hg' : ∀x, g x ∉ range (@on_function L L' ϕ 0)) :
is_consistent (ϕ.Theory_induced T ∪
(λf, (ϕ.on_bounded_formula (h f))[bd_const (g f)/0]) '' set.univ) :=
begin
haveI : decidable_eq (bounded_formula L 1) := λx y, classical.prop_decidable _,
haveI : decidable_eq (sentence L') := λx y, classical.prop_decidable _,
have lem : ∀(s₀ : finset (bounded_formula L 1)),
is_consistent (ϕ.Theory_induced T ∪
(λf, (ϕ.on_bounded_formula (h f))[bd_const (g f)/0]) '' ↑s₀),
{ refine finset.induction _ _,
{ simp, exact is_consistent_Theory_induced hϕ hT },
{ intros ψ s hψ ih hs, refine sprovable.elim _ hs, clear hs, intro hs, apply ih, constructor,
simp [image_insert_eq] at hs,
have : _ ⊢ (ϕ.on_bounded_formula $ ∼(h ψ))[bd_const (g ψ)/0] := simpI hs,
have := sgeneralize_constant (g ψ) _ _ this,
{ refine simpE _ _ this, apply sweakening (subset_union_left _ _) (ϕ.on_sprf $ hT' ψ) },
{ intro h', rcases h' with ⟨s', ⟨ψ', ⟨ψ', ⟨ψ', hψ₂, rfl⟩ | ⟨ψ', hψ₂, rfl⟩, rfl⟩, rfl⟩, hψ₃⟩,
{ rw [ϕ.on_sentence_fst] at hψ₃,
exact ϕ.not_mem_function_in_formula_on_formula (hg' _) _ hψ₃ },
{ simp at hψ₃,
-- have h : (ϕ.on_bounded_formula (h ψ'))[bd_const (g ψ') /0].fst =
-- (ϕ.on_bounded_formula (h ψ')).fst[bounded_preterm.fst (bd_const (g ψ')) // 0],
-- apply subst0_bounded_formula_fst,
rw subst0_bounded_formula_fst at hψ₃,
cases symbols_in_formula_subst _ _ _ hψ₃ with hψ₄ hψ₄,
{ rw [on_bounded_formula_fst] at hψ₄,
exact ϕ.not_mem_function_in_formula_on_formula (hg' _) _ hψ₄ },
{ injection eq_of_mem_singleton hψ₄ with hψ₅, injection hψ₅ with x hψ₆,
cases hg (eq_of_heq hψ₆), exact hψ hψ₂ }}},
{ rw [on_bounded_formula_fst],
apply not_mem_function_in_formula_on_formula, apply hg' }}},
intro H, rcases theory_proof_compactness H with ⟨T₀, h₀, hT⟩,
have : decidable_pred (∈ ϕ.Theory_induced T) := λx, classical.prop_decidable _,
rcases finset.subset_union_elim hT with ⟨t₀, s₀, rfl, ht₀, hs₀⟩,
have hs₀' := subset.trans hs₀ (diff_subset _ _),
rcases finset.subset_image_iff.mp hs₀' with ⟨s₀, hs₀x, rfl⟩,
apply lem s₀, refine h₀.map _, apply sweakening,
  simp only [finset.coe_union],
apply union_subset_union ht₀,
rw finset.coe_image,
end
end Lhom
end fol
-- instance nonempty_Language_over : nonempty (Language_over) :=
-- begin fapply nonempty.intro, exact ⟨L, language_id_morphism L⟩ end
--TODO define map induced by a language_morphism on terms/preterms, formulas/preformulas, sets of formulas/theories
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Structures.Group.Morphism where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Structures.Group.Base
private
variable
ℓ ℓ' : Level
-- The following definitions of GroupHom and GroupEquiv are level-wise heterogeneous.
-- This allows one, for example, to deduce that G ≡ F from a chain of isomorphisms
-- G ≃ H ≃ F, even if H does not lie in the same level as G and F.
isGroupHom : (G : Group {ℓ}) (H : Group {ℓ'}) (f : ⟨ G ⟩ → ⟨ H ⟩) → Type _
isGroupHom G H f = (x y : ⟨ G ⟩) → f (x G.+ y) ≡ (f x H.+ f y) where
module G = Group G
module H = Group H
record GroupHom (G : Group {ℓ}) (H : Group {ℓ'}) : Type (ℓ-max ℓ ℓ') where
constructor grouphom
field
fun : ⟨ G ⟩ → ⟨ H ⟩
isHom : isGroupHom G H fun
record GroupEquiv (G : Group {ℓ}) (H : Group {ℓ'}) : Type (ℓ-max ℓ ℓ') where
constructor groupequiv
field
eq : ⟨ G ⟩ ≃ ⟨ H ⟩
isHom : isGroupHom G H (equivFun eq)
hom : GroupHom G H
hom = grouphom (equivFun eq) isHom
|
module Changepoints
using Distributions
using Distributions: Sampleable
using Base.Meta
import Base.rand
export PELT, @PELT, BS, @BS, CROPS, @segment_cost, NormalVarSegment, NormalMeanSegment, NormalMeanVarSegment, PoissonSegment, BetaSegment, ExponentialSegment, ChangepointSampler, GammaShapeSegment, GammaRateSegment, NonparametricSegment, OLSSegment, @changepoint_sampler
include("segment_costs.jl")
include("PELT.jl")
include("CROPS.jl")
include("BS.jl")
include("sim_type.jl")
include("macros.jl")
# This approach to loading supported plotting packages is taken directly from the "KernelDensity" package
macro glue(pkg)
path = joinpath(dirname(@__FILE__),"glue",string(pkg,".jl"))
init = Symbol(string(pkg,"_init"))
quote
$(esc(init))() = Base.include($path)
isdefined(Main,$(QuoteNode(pkg))) && $(esc(init))()
end
end
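# For example, `@glue Winston` expands to (roughly):
#   Winston_init() = Base.include(joinpath(@__DIR__, "glue", "Winston.jl"))
#   isdefined(Main, :Winston) && Winston_init()
# i.e. the glue file is included immediately when the plotting package is
# already loaded, and `Winston_init()` can be invoked manually otherwise.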
@glue Winston
@glue Gadfly
end # module
|
Formal statement is: lemma order_power_n_n: "order a ([:-a,1:]^n)=n" Informal statement is: The multiplicity of $a$ as a root of the polynomial $(x - a)^n$ is $n$.
Under the Greek Ptolemaic Dynasty and then Roman rule, Greeks and Romans introduced their own deities to Egypt. These newcomers equated the Egyptian gods with their own, as part of the Greco-Roman tradition of interpretatio graeca. But the worship of the native gods was not swallowed up by that of foreign ones. Instead, Greek and Roman gods were adopted as manifestations of Egyptian ones. Egyptian cults sometimes incorporated Greek language, philosophy, iconography, and even temple architecture. Meanwhile, the cults of several Egyptian deities — particularly Isis, Osiris, Anubis, the form of Horus named Harpocrates, and the fused Greco-Egyptian god Serapis — were adopted into Roman religion and spread across the Roman Empire. Roman emperors, like Ptolemaic kings before them, invoked Isis and Serapis to endorse their authority, inside and outside Egypt. In the empire's complex mix of religious traditions, Thoth was transmuted into the legendary esoteric teacher Hermes Trismegistus, and Isis, who was venerated from Britain to Mesopotamia, became the focus of a Greek-style mystery cult. Isis and Hermes Trismegistus were both prominent in the Western esoteric tradition that grew from the Roman religious world.
|
= West Hendford Cricket Ground, Yeovil =
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.DStructures.Structures.SplitEpi where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Structure
open import Cubical.Functions.FunExtEquiv
open import Cubical.Homotopy.Base
open import Cubical.Data.Sigma
open import Cubical.Relation.Binary
open import Cubical.Algebra.Group
open import Cubical.Structures.LeftAction
open import Cubical.DStructures.Base
open import Cubical.DStructures.Meta.Properties
open import Cubical.DStructures.Structures.Type
open import Cubical.DStructures.Structures.Constant
open import Cubical.DStructures.Structures.Group
private
variable
ℓ ℓ' : Level
open URGStr
---------------------------------------------
-- URG structures on the type of split epis,
-- and displayed structures over that
--
-- B
-- |
-- isSplit
-- |
-- G²FB
---------------------------------------------
module _ (ℓ ℓ' : Level) where
-- type of Split epimorphisms
SplitEpi = Σ[ ((G , H) , f , b) ∈ G²FB ℓ ℓ' ] isGroupSplitEpi f b
SplitEpi' = Σ[ G ∈ Group {ℓ} ] Σ[ H ∈ Group {ℓ'} ] Σ[ (f , b) ∈ (GroupHom G H) × (GroupHom H G) ] isGroupSplitEpi f b
IsoSplitEpi' : Iso SplitEpi' SplitEpi
IsoSplitEpi' = compIso (invIso Σ-assoc-Iso)
(invIso Σ-assoc-Iso)
-- split epimorphisms + a map back
SplitEpiB = Σ[ (((G , H) , f , b) , isRet) ∈ SplitEpi ] GroupHom H G
-- split epimorphisms displayed over pairs of groups
𝒮ᴰ-SplitEpi : URGStrᴰ (𝒮-G²FB ℓ ℓ')
(λ ((G , H) , (f , b)) → isGroupSplitEpi f b)
ℓ-zero
𝒮ᴰ-SplitEpi =
Subtype→Sub-𝒮ᴰ (λ ((G , H) , (f , b)) → isGroupSplitEpi f b , isPropIsGroupSplitEpi f b)
(𝒮-G²FB ℓ ℓ')
-- URG structure on type of split epimorphisms
𝒮-SplitEpi : URGStr SplitEpi (ℓ-max ℓ ℓ')
𝒮-SplitEpi = ∫⟨ 𝒮-G²FB ℓ ℓ' ⟩ 𝒮ᴰ-SplitEpi
-- morphisms back displayed over split epimorphisms,
-- obtained by lifting the morphisms back over
-- 𝒮-G² twice
𝒮ᴰ-G²FBSplit\B : URGStrᴰ 𝒮-SplitEpi
(λ (((G , H) , _) , _) → GroupHom H G)
(ℓ-max ℓ ℓ')
𝒮ᴰ-G²FBSplit\B =
VerticalLift2-𝒮ᴰ (𝒮-group ℓ ×𝒮 𝒮-group ℓ')
(𝒮ᴰ-G²\B ℓ ℓ')
(𝒮ᴰ-G²\FB ℓ ℓ')
𝒮ᴰ-SplitEpi
-- URG structure on split epis with an extra
-- morphism back
𝒮-SplitEpiB : URGStr SplitEpiB (ℓ-max ℓ ℓ')
𝒮-SplitEpiB = ∫⟨ 𝒮-SplitEpi ⟩ 𝒮ᴰ-G²FBSplit\B
𝒮ᴰ-G\GFBSplitEpi : URGStrᴰ (𝒮-group ℓ)
(λ G → Σ[ H ∈ Group {ℓ'} ] Σ[ (f , b) ∈ (GroupHom G H) × (GroupHom H G) ] isGroupSplitEpi f b )
(ℓ-max ℓ ℓ')
𝒮ᴰ-G\GFBSplitEpi =
splitTotal-𝒮ᴰ (𝒮-group ℓ)
(𝒮ᴰ-const (𝒮-group ℓ) (𝒮-group ℓ'))
(splitTotal-𝒮ᴰ (𝒮-group ℓ ×𝒮 𝒮-group ℓ')
(𝒮ᴰ-G²\FB ℓ ℓ')
𝒮ᴰ-SplitEpi)
--------------------------------------------------
-- This module introduces convenient notation
-- when working with a single split epimorphism
---------------------------------------------------
module SplitEpiNotation {G₀ : Group {ℓ}} {G₁ : Group {ℓ'}}
(ι : GroupHom G₀ G₁) (σ : GroupHom G₁ G₀)
(split : isGroupSplitEpi ι σ) where
open GroupNotation₀ G₀
open GroupNotation₁ G₁
ι∘σ : GroupHom G₁ G₁
ι∘σ = compGroupHom σ ι
s = GroupHom.fun σ
-- i is reserved for an interval variable (i : I) so we use 𝒾 instead
𝒾 = GroupHom.fun ι
-i = λ (x : ⟨ G₀ ⟩) → -₁ (𝒾 x)
s- = λ (x : ⟨ G₁ ⟩) → s (-₁ x)
si = λ (x : ⟨ G₀ ⟩) → s (𝒾 x)
is = λ (x : ⟨ G₁ ⟩) → 𝒾 (s x)
-si = λ (x : ⟨ G₀ ⟩) → -₀ (si x)
-is = λ (x : ⟨ G₁ ⟩) → -₁ (is x)
si- = λ (x : ⟨ G₀ ⟩) → si (-₀ x)
is- = λ (x : ⟨ G₁ ⟩) → is (-₁ x)
s-i = λ (x : ⟨ G₀ ⟩) → s (-₁ (𝒾 x))
isi = λ (x : ⟨ G₀ ⟩) → 𝒾 (s (𝒾 x))
|
#redirect Warren Jones, CMT Integrative Massage
|
import group_theory.perm group_theory.order_of_element data.set.finite
open equiv
example : (⟨1, rfl⟩ : {x : perm bool // x = 1}) = ⟨swap ff tt * swap ff tt, dec_trivial⟩ :=
subtype.eq dec_trivial
@[instance, priority 1000] def foo {α : Type*} [decidable_eq α] {P : α → Prop} :
decidable_eq (subtype P) :=
λ a b, decidable_of_iff (a.1 = b.1) (by cases a; cases b; simp)
example : (⟨1, rfl⟩ : {x : perm bool // x = 1}) = ⟨swap ff tt * swap ff tt, dec_trivial⟩ :=
dec_trivial
universes u v
open finset is_subgroup equiv equiv.perm
class simple_group (G : Type u) [group G] : Prop :=
(simple : ∀ (H : set G) [normal_subgroup H], H = trivial G ∨ H = set.univ)
variables {G : Type u} [group G] [fintype G] [decidable_eq G]
-- lemma simple_group_def : simple_group G ↔
-- ∀ (H : set G) [normal_subgroup H], H = trivial G ∨ H = set.univ :=
-- ⟨@simple_group.simple _ _, simple_group.mk⟩
-- lemma simple_group_iff_finset : simple_group G ↔
-- ∀ (H : finset G) [normal_subgroup H], H = trivial G ∨ H = set.univ
def conjugacy_class (a : G) : finset G :=
(@univ G _).image (λ x, x * a * x⁻¹)
#eval conjugacy_class (-1 : units ℤ)
#print finset.erase
lemma mem_conjugacy_class {a b : G} : b ∈ conjugacy_class a ↔ is_conj a b := sorry
def conjugacy_classes : Π l : list G, finset (finset G)
| [] := ∅
| (a::l) :=
let x := (conjugacy_class a) in
have (l.filter (∉ x)).length < 1 + l.length,
from lt_of_le_of_lt (list.length_le_of_sublist (list.filter_sublist _))
(by rw add_comm; exact nat.lt_succ_self _),
insert x (conjugacy_classes (l.filter (∉ x)))
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf list.length⟩]}
def conjugacy_classes' (G : Type u) [group G] [fintype G] [decidable_eq G] : finset (finset G) :=
quotient.lift_on (@univ G _).1 conjugacy_classes sorry
meta def thing {α : Type*} [has_reflect α] (f : α) : tactic unit :=
tactic.exact `(f)
def is_conjugacy_partition (s : finset (finset G)) : Prop :=
(∀ x, ∃ t ∈ s, x ∈ t) ∧ ∀ t ∈ s, ∃ x ∈ t, ∀ y, y ∈ t ↔ is_conj x y
instance {α β : Type*} [group α] [group β] [decidable_eq β] (f : α → β) [is_group_hom f] :
decidable_pred (is_group_hom.ker f) :=
λ _, decidable_of_iff _ (is_group_hom.mem_ker f).symm
def alternating (α : Type*) [decidable_eq α] [fintype α] : Type* :=
is_group_hom.ker (sign : perm α → units ℤ)
instance (α : Type*) [decidable_eq α] [fintype α] : decidable_eq (alternating α) :=
λ a b, decidable_of_iff (a.1 = b.1) (by cases a; cases b; simp [subtype.mk.inj_eq])
#print alternating.decidable_eq
#print subtype.decidable_eq
instance (α : Type*) [decidable_eq α] [fintype α] : fintype (alternating α) :=
set_fintype _
noncomputable def quotient_ker_equiv_range {α β : Type*} [group α] [group β] (f : α → β) [is_group_hom f] :
(quotient_group.quotient (is_group_hom.ker f)) ≃ set.range f :=
@equiv.of_bijective _ _ (λ x, quotient.lift_on' x (λ a, show set.range f, from ⟨f a, a, rfl⟩)
(λ a b (h : @setoid.r _ (quotient_group.left_rel (is_group_hom.ker f)) a b),
have h : a⁻¹ * b ∈ is_group_hom.ker f, from h,
subtype.eq
(by rw [is_group_hom.mem_ker, is_group_hom.mul f,
is_group_hom.inv f, inv_mul_eq_iff_eq_mul, mul_one] at h;
simp [h])))
⟨λ a b, quotient.induction_on₂' a b
(λ a b h, quotient.sound' (show a⁻¹ * b ∈ is_group_hom.ker f,
by rw [is_group_hom.mem_ker, is_group_hom.mul f, is_group_hom.inv f,
show f a = f b, from subtype.mk.inj h]; simp)),
λ ⟨b, a, hab⟩, ⟨quotient_group.mk a, subtype.eq hab⟩⟩
noncomputable def quotient_ker_equiv_of_surjective {α β : Type*} [group α] [group β]
(f : α → β) [is_group_hom f] (hf : function.surjective f) :
(quotient_group.quotient (is_group_hom.ker f)) ≃ β :=
calc (quotient_group.quotient (is_group_hom.ker f)) ≃ set.range f : quotient_ker_equiv_range _
... ≃ β : ⟨λ a, a.1, λ b, ⟨b, hf b⟩, λ ⟨_, _⟩, rfl, λ _, rfl⟩
section classical
local attribute [instance] classical.prop_decidable
lemma sign_surjective {α : Type*} [decidable_eq α] [fintype α] (hα : 1 < fintype.card α) :
function.surjective (sign : perm α → units ℤ) :=
λ a, (int.units_eq_one_or a).elim
(λ h, ⟨1, by simp [h]⟩)
(λ h, let ⟨x⟩ := fintype.card_pos_iff.1 (lt_trans zero_lt_one hα) in
let ⟨y, hxy⟩ := fintype.exists_ne_of_card_gt_one hα x in
⟨swap y x, by rw [sign_swap hxy, h]⟩ )
lemma card_alternating (α : Type*) [decidable_eq α] [fintype α] (h : 2 ≤ fintype.card α):
fintype.card (alternating α) * 2 = (fintype.card α).fact :=
have (quotient_group.quotient (is_group_hom.ker (sign : perm α → units ℤ))) ≃ units ℤ,
from quotient_ker_equiv_of_surjective _ (sign_surjective h),
calc fintype.card (alternating α) * 2 = fintype.card (units ℤ × alternating α) :
by rw [mul_comm, fintype.card_prod, fintype.card_units_int]
... = fintype.card (perm α) : fintype.card_congr
(calc (units ℤ × alternating α) ≃
(quotient_group.quotient (is_group_hom.ker (sign : perm α → units ℤ)) × alternating α) :
equiv.prod_congr this.symm (by refl)
... ≃ perm α : (group_equiv_quotient_times_subgroup _).symm)
... = (fintype.card α).fact : fintype.card_perm
instance (α : Type*) [decidable_eq α] [fintype α] : group (alternating α) :=
by unfold alternating; apply_instance
end classical
local notation `A5` := alternating (fin 5)
variables {α : Type*} [fintype α] [decidable_eq α]
section
local attribute [semireducible] reflected
-- meta instance (n : ℕ) : has_reflect (fin n) :=
-- nat.cases_on n (λ a, fin.elim0 a) $
-- λ n a, show reflected a, from `(@has_coe.coe ℕ (fin (nat.succ %%`(n))) _
-- (%%(nat.reflect a.1)))
-- meta instance (n : ℕ) (e : expr): has_reflect (fin n) :=
-- λ a, `(@fin.mk %%e %%(nat.reflect a.1) (of_as_true %%`(_root_.trivial)))
meta instance fin_reflect (n : ℕ) : has_reflect (fin n) :=
λ a, `(@fin.mk %%`(n) %%(nat.reflect a.1) (of_as_true %%`(_root_.trivial)))
-- λ a, expr.app (expr.app (expr.const `fin.mk []) `(a.1))
-- `(of_as_true trivial)
--#print fin.has_reflect
-- meta instance (n : ℕ) : has_reflect (perm (fin n)) :=
-- list.rec_on (quot.unquot (@univ (fin n) _).1)
-- (λ f, `(1 : perm (fin n)))
-- (λ x l ih f, let e : expr := ih (swap x (f x) * f) in
-- if x = f x then e
-- else if e = `(1 : perm (fin %%`(n)))
-- then `(@swap (fin %%`(n)) _ (%%`(x)) (%%(@reflect (fin n)
-- (f x) (fin.has_reflect n (f x)))))
-- else `(@swap (fin %%`(n)) _ (%%`(x)) (%%(@reflect (fin n)
-- (f x) (fin.has_reflect n (f x)))) * %%e))
meta instance fin_fun.has_reflect : has_reflect (fin 5 → fin 5) :=
list.rec_on (quot.unquot (@univ (fin 5) _).1)
(λ f, `(λ y : fin 5, y))
(λ x l ih f, let e := ih f in
if f x = x then e
else let ex := fin_reflect 5 x in
let efx := fin_reflect 5 (f x) in
if e = `(λ y : fin 5, y)
then `(λ y : fin 5, ite.{1} (y = %%ex) (%%efx) y)
else `(λ y : fin 5, ite.{1} (y = %%ex) (%%efx) ((%%e : fin 5 → fin 5) y)))
#print equiv.mk
instance {α β : Type*} [fintype α] [decidable_eq α] (f : α → β) (g : β → α) :
decidable (function.right_inverse f g) :=
show decidable (∀ x, g (f x) = x), by apply_instance
instance {α β : Type*} [fintype β] [decidable_eq β] (f : α → β) (g : β → α) :
decidable (function.left_inverse f g) :=
show decidable (∀ x, f (g x) = x), by apply_instance
meta instance : has_reflect (perm (fin 5)) :=
λ f, `(@equiv.mk.{1 1} (fin 5) (fin 5)
%%(fin_fun.has_reflect f.to_fun)
%%(fin_fun.has_reflect f.inv_fun)
(of_as_true %%`(_root_.trivial)) (of_as_true %%`(_root_.trivial)))
#print is_group_hom.mem_ker
meta instance I4 : has_reflect (alternating (fin 5)) :=
λ f, `(@subtype.mk (perm (fin 5)) (is_group_hom.ker (sign : perm (fin 5) → units ℤ))
%%(@reflect (perm (fin 5)) f.1 (equiv.perm.has_reflect f.1))
((is_group_hom.mem_ker sign).2 %%`(@eq.refl (units ℤ) 1)))
meta def afsf : has_reflect (list (list (list ℕ))) := by apply_instance
-- set_option pp.all true
instance f {α : Type*} [decidable_eq α] : decidable_pred (multiset.nodup : multiset α → Prop) :=
by apply_instance
meta instance multiset.has_reflect {α : Type} [reflected α] [has_reflect α] :
has_reflect (multiset α) :=
λ s, let l : list α := quot.unquot s in `(@quotient.mk.{1} (list %%`(α)) _ %%`(l))
meta instance I1 : has_reflect (finset (alternating (fin 5) × alternating (fin 5))) :=
λ s, `(let t : multiset (alternating (fin 5) × alternating (fin 5)) := %%(multiset.has_reflect s.1) in
@finset.mk.{0} (alternating (fin 5) × alternating (fin 5)) t (of_as_true %%`(_root_.trivial)))
--#print prod.has_reflect
meta instance I3 (a : alternating (fin 5)) :
has_reflect {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1} :=
λ b, `(@subtype.mk (alternating (fin 5) × alternating (fin 5))
(λ b, b.2 * %%`(a) * b.2⁻¹ = b.1)
%%(prod.has_reflect _ _ b.1) (of_as_true %%`(_root_.trivial)))
meta instance I5 (a : alternating (fin 5)) (m : reflected a) :
reflected {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1} :=
`({b : alternating (fin 5) × alternating (fin 5) // b.2 * %%m * b.2⁻¹ = b.1})
meta instance test : has_reflect (Σ n : ℕ, {m // m = n}) :=
λ x, `(@sigma.mk ℕ (λ n : ℕ, {m // m = n}) %%(nat.reflect (x.1 : ℕ))
(subtype.mk %%(nat.reflect x.2.1) %%`(@eq.refl ℕ %%(nat.reflect x.1))))
def n : (Σ n : ℕ, {m // m = n}) :=
by thing (show Σ n : ℕ, {m // m = n}, from ⟨5, ⟨5, rfl⟩⟩)
#print n
meta instance I2 : has_reflect
(Σ a : alternating (fin 5), finset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1}) :=
λ s, let ra : reflected s.1 := (I4 s.1) in
`(let a : alternating (fin 5) := %%ra in
let t : multiset {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1} :=
%%(@multiset.has_reflect _ (I5 s.1 ra) (I3 s.1) s.2.1) in
@sigma.mk (alternating (fin 5))
(λ a, finset {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1})
a (finset.mk t (of_as_true %%`(_root_.trivial))))
meta instance I6 : has_reflect (finset
(Σ a : alternating (fin 5), finset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1})) :=
λ s, `(@finset.mk (Σ a : alternating (fin 5), finset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1})
%%(@multiset.has_reflect _ _ I2 s.1)
(of_as_true %%`(_root_.trivial)))
meta instance I7 : has_reflect
(Σ a : alternating (fin 5), multiset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1}) :=
λ s, let ra : reflected s.1 := (I4 s.1) in
`(let a : alternating (fin 5) := %%ra in
@sigma.mk (alternating (fin 5))
(λ a, multiset {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1})
a %%(@multiset.has_reflect _ (I5 s.1 ra) (I3 s.1) s.2))
-- meta instance finset_finset.has_reflect (n : ℕ) : has_reflect (finset (finset (alternating (fin n)))) :=
-- λ s, `(let m : ℕ := %%`(n) in
-- let t : multiset (finset (alternating (fin m))) := %%(multiset.has_reflect s.1) in
-- @finset.mk.{0} (finset (alternating (fin m))) t (of_as_true %%`(_root_.trivial)))
-- instance afhasf (n : ℕ) : decidable_eq (finset (alternating (fin n) × alternating (fin n)) × alternating (fin n)) :=
-- by apply_instance
-- meta instance hklkhfhndihn (n : ℕ) : has_reflect
-- (finset (finset (alternating (fin n) × alternating (fin n)) × alternating (fin n))) :=
-- λ s, `(let m : ℕ := %%`(n) in
-- let t : multiset (finset (alternating (fin m) × alternating (fin m)) × alternating (fin m)) :=
-- %%(multiset.has_reflect s.1) in
-- @finset.mk.{0} (finset (alternating (fin m) × alternating (fin m)) × alternating (fin m)) t
-- (of_as_true %%`(_root_.trivial)))
end
#print expr
meta instance : has_reflect (list (list (perm (fin 5)))) := by apply_instance
meta def whatever {α : Sort*} : α := whatever
-- meta def conjugacy_classes_A5_aux : list (alternating (fin 5)) → list (list (alternating (fin 5) ×
-- alternating (fin 5)) × alternating (fin 5))
-- | [] := ∅
-- | (a :: l) :=
-- let m := (((quot.unquot (@univ (alternating (fin 5)) _).1).map
-- (λ x, (x * a * x⁻¹, x))).pw_filter (λ x y, x.1 ≠ y.1), a)
-- in m :: conjugacy_classes_A5_aux (l.diff (list.map prod.fst m.1))
-- meta def whatever {α : Sort*} : α := whatever
-- meta def conjugacy_classes_A5 : finset (finset (alternating (fin 5) ×
-- alternating (fin 5)) × alternating (fin 5)) :=
-- finset.mk (↑((conjugacy_classes_A5_aux (quot.unquot univ.1)).map
-- (λ l : list (alternating (fin 5) ×
-- alternating (fin 5)) × alternating (fin 5),
-- show finset (alternating (fin 5) ×
-- alternating (fin 5)) × alternating (fin 5),
-- from (finset.mk (↑l.1 : multiset _) whatever, l.2))) : multiset _) whatever
-- set_option profiler true
meta def conjugacy_classes_A5_meta_aux : list (alternating (fin 5)) → list
(Σ a : alternating (fin 5), list
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1})
| [] := []
| (a :: l) := let m : Σ a : alternating (fin 5), list
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1} :=
⟨a, ((quot.unquot (@univ (alternating (fin 5)) _).1).map
(λ x, show {b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1},
from ⟨(x * a * x⁻¹, x), rfl⟩)).pw_filter (λ x y, x.1.1 ≠ y.1.1)⟩ in
m :: conjugacy_classes_A5_meta_aux (l.diff (m.2.map (prod.fst ∘ subtype.val)))
meta def conjugacy_classes_A5_meta : multiset (Σ a : alternating (fin 5), multiset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1}) :=
(quotient.mk ((conjugacy_classes_A5_meta_aux (quot.unquot univ.1)).map
(λ a, ⟨a.1, (quotient.mk a.2)⟩)))
@[irreducible] def conjugacy_classes_A5_aux : multiset (Σ a : alternating (fin 5), multiset
{b : alternating (fin 5) × alternating (fin 5) // b.2 * a * b.2⁻¹ = b.1}) :=
by thing (conjugacy_classes_A5_meta)
#print conjugacy_classes_A5_aux
def conjugacy_classes_A5_aux2 : multiset (multiset (alternating (fin 5))) :=
conjugacy_classes_A5_aux.map (λ s, s.2.map (λ b, b.1.1))
lemma nodup_conjugacy_classes_A5_aux2 : ∀ s : multiset (alternating (fin 5)),
s ∈ conjugacy_classes_A5_aux2 → s.nodup :=
dec_trivial
def conjugacy_classes_A5 : finset (finset (alternating (fin 5))) :=
⟨conjugacy_classes_A5_aux2.pmap finset.mk nodup_conjugacy_classes_A5_aux2, dec_trivial⟩
lemma is_conj_conjugacy_classes_A5 (s : finset A5) (h : s ∈ conjugacy_classes_A5) :
∀ x y ∈ s, is_conj x y :=
assume x y hx hy,
begin
simp only [conjugacy_classes_A5, finset.mem_def, multiset.mem_pmap,
conjugacy_classes_A5_aux2] at h,
rcases h with ⟨t, ht₁, ht₂⟩,
rw [multiset.mem_map] at ht₁,
rcases ht₁ with ⟨u, hu₁, hu₂⟩,
have hx' : x ∈ multiset.map (λ (b : {b : A5 × A5 // b.2 * u.1 * b.2⁻¹ = b.1}), b.1.1) u.2,
{ simpa [ht₂.symm, hu₂] using hx },
have hy' : y ∈ multiset.map (λ (b : {b : A5 × A5 // b.2 * u.1 * b.2⁻¹ = b.1}), b.1.1) u.2,
{ simpa [ht₂.symm, hu₂] using hy },
cases multiset.mem_map.1 hx' with xc hxc,
cases multiset.mem_map.1 hy' with yc hyc,
exact is_conj_trans
(is_conj_symm (show is_conj u.1 x, from hxc.2 ▸ ⟨_, xc.2⟩))
(hyc.2 ▸ ⟨_, yc.2⟩)
end
lemma eq_bind_conjugacy_classes (s : finset (finset G))
(h₁ : ∀ x, ∃ t ∈ s, x ∈ t)
(h₂ : ∀ t ∈ s, ∀ x y ∈ t, is_conj x y) (I : finset G) [nI : normal_subgroup (↑I : set G)] :
∃ u ⊆ s, I = u.bind id :=
⟨(s.powerset.filter (λ u : finset (finset G), u.bind id ⊆ I)).bind id,
(λ x, by simp only [finset.subset_iff, mem_bind, mem_filter, exists_imp_distrib, mem_powerset,
and_imp, id.def] {contextual := tt}; tauto),
le_antisymm
(λ x hxI, let ⟨t, ht₁, ht₂⟩ := h₁ x in
mem_bind.2 ⟨t, mem_bind.2 ⟨(s.powerset.filter (λ u : finset (finset G), u.bind id ⊆ I)).bind id,
mem_filter.2 ⟨mem_powerset.2
(λ u hu, let ⟨v, hv₁, hv₂⟩ := mem_bind.1 hu in
mem_powerset.1 (mem_filter.1 hv₁).1 hv₂),
λ y hy, let ⟨u, hu₁, hu₂⟩ := mem_bind.1 hy in
let ⟨v, hv₁, hv₂⟩ := mem_bind.1 hu₁ in
(mem_filter.1 hv₁).2 (mem_bind.2 ⟨u, hv₂, hu₂⟩)⟩,
mem_bind.2 ⟨{t}, mem_filter.2 ⟨by simp [ht₁, finset.subset_iff],
λ y hy, let ⟨u, hu₁, hu₂⟩ := mem_bind.1 hy in
let ⟨z, hz⟩ := h₂ t ht₁ x y ht₂ (by simp * at *) in
hz ▸ @normal_subgroup.normal G _ I.to_set nI _ hxI _⟩,
by simp⟩⟩,
ht₂⟩)
(λ x, by simp only [finset.subset_iff, mem_bind, exists_imp_distrib, mem_filter, mem_powerset]; tauto)⟩
--local attribute [instance, priority 0] classical.dec
lemma simple_of_card_conjugacy_classes [fintype G] [decidable_eq G] (s : finset (finset G))
(h₁ : ∀ x, ∃ t ∈ s, x ∈ t) (h₂ : ∀ t ∈ s, ∀ x y ∈ t, is_conj x y)
(hs : (s.1.bind finset.val).nodup)
(h₃ : ∀ t ≤ s.1.map finset.card, 1 ∈ t → t.sum ∣ fintype.card G → t.sum = 1 ∨ t.sum = fintype.card G) :
simple_group G :=
by haveI := classical.dec; exact
⟨λ H iH,
let I := (set.to_finset H) in
have Ii : normal_subgroup (↑I : set G), by simpa using iH,
let ⟨u, hu₁, hu₂⟩ :=
@eq_bind_conjugacy_classes G _ _ _ s h₁ h₂ I Ii in
have hInd : ∀ (x : finset G), x ∈ u → ∀ (y : finset G), y ∈ u → x ≠ y → id x ∩ id y = ∅,
from λ x hxu y hyu hxy,
begin
rw multiset.nodup_bind at hs,
rw [← finset.disjoint_iff_inter_eq_empty, finset.disjoint_left],
exact multiset.forall_of_pairwise
(λ (a b : finset G) (h : multiset.disjoint a.1 b.1),
multiset.disjoint.symm h) hs.2 x (hu₁ hxu) y (hu₁ hyu) hxy
end,
have hci : card I = u.sum finset.card,
by rw [hu₂, card_bind hInd]; refl,
have hu1 : (1 : G) ∈ u.bind id, by exactI hu₂ ▸ is_submonoid.one_mem (↑I : set G),
let ⟨v, hv₁, hv₂⟩ := mem_bind.1 hu1 in
have hv : v = finset.singleton (1 : G),
from finset.ext.2 $ λ a, ⟨λ hav, mem_singleton.2 $
is_conj_one_right.1 (h₂ v (hu₁ hv₁) _ _ hv₂ hav),
by simp [show (1 : G) ∈ v, from hv₂] {contextual := tt}⟩,
have hci' : card I = 1 ∨ card I = fintype.card G,
begin
rw [hci],
exact h₃ _ (multiset.map_le_map (show u.1 ≤ s.1,
from (multiset.le_iff_subset u.2).2 hu₁))
(multiset.mem_map.2 ⟨finset.singleton 1, hv ▸ hv₁, rfl⟩)
(calc u.sum finset.card = card I : hci.symm
... = fintype.card (↑I : set G) : (set.card_fintype_of_finset' I (by simp)).symm
... ∣ fintype.card G : by exactI card_subgroup_dvd_card _)
end,
hci'.elim
(λ hci', or.inl (set.ext (λ x,
let ⟨y, hy⟩ := finset.card_eq_one.1 hci' in
by resetI;
simp only [I, finset.ext, set.mem_to_finset, finset.mem_singleton] at hy;
simp [is_subgroup.mem_trivial, hy, (hy 1).1 (is_submonoid.one_mem H)])))
(λ hci', or.inr $
suffices I = finset.univ,
by simpa [I, set.ext_iff, finset.ext] using this,
finset.eq_of_subset_of_card_le (λ _, by simp) (by rw hci'; refl))⟩
lemma card_A5 : fintype.card A5 = 60 :=
(nat.mul_right_inj (show 2 > 0, from dec_trivial)).1 $
have 2 ≤ fintype.card (fin 5), from dec_trivial,
by rw [card_alternating _ this]; simp; refl
lemma nodup_conjugacy_classes_A5_bind :
(conjugacy_classes_A5.1.bind finset.val).nodup := dec_trivial
#eval (conjugacy_classes_A5.1.bind finset.val).card
lemma conjugacy_classes_A5_bind_eq_univ :
conjugacy_classes_A5.bind (λ t, t) = univ :=
eq_of_subset_of_card_le (λ _, by simp)
(calc card univ = 60 : card_A5
... = (conjugacy_classes_A5.bind id).card : dec_trivial
... ≤ _ : le_refl _)
lemma A5_simple : simple_group A5 :=
simple_of_card_conjugacy_classes conjugacy_classes_A5
(λ x, mem_bind.1 $ by rw [conjugacy_classes_A5_bind_eq_univ]; simp)
is_conj_conjugacy_classes_A5
nodup_conjugacy_classes_A5_bind
(by simp only [multiset.mem_powerset.symm, card_A5];
exact dec_trivial)
#print axioms A5_simple
example : conj_classes_A5.1.map (finset.card ∘ prod.fst) = {1, 12, 12, 15, 20} := dec_trivial
--example : (@univ (alternating (fin 5))).nodup := dec_trivial
example : (conj_classes_A5.1.bind (λ s, s.1.1.map prod.fst)).nodup := dec_trivial
#eval (conj_classes_A5.1.bind (λ s, s.1.1.map prod.fst) = finset.univ.1 : bool)
--set_option class.instance_max_depth 100
instance alifha : decidable (∀ s : finset (alternating (fin 5) ×
alternating (fin 5)) × alternating (fin 5), s ∈ conj_classes_A5 →
∀ (x : alternating (fin 5) × alternating (fin 5)), x ∈ s.1 →
x.2 * s.2 * x.2⁻¹ = x.1) :=(@finset.decidable_dforall_finset _ conj_classes_A5
(λ (s : finset (alternating (fin 5) ×
alternating (fin 5)) × alternating (fin 5)) _,
∀ (x : alternating (fin 5) × alternating (fin 5)), x ∈ s.1 →
x.2 * s.2 * x.2⁻¹ = x.1)
(λ s hs, @finset.decidable_dforall_finset _ s.1 _ _))
#eval (∀ s : finset (alternating (fin 5) ×
alternating (fin 5)) × alternating (fin 5), s ∈ conj_classes_A5 →
∀ (x : alternating (fin 5) × alternating (fin 5)), x ∈ s.1 →
x.2 * s.2 * x.2⁻¹ = x.1 : bool)
def conj_classes_A5 : ∀ s : finset (alternating (fin 5) ×
alternating (fin 5)) × alternating (fin 5), s ∈ conj_classes_A5 →
∀ (x : alternating (fin 5) × alternating (fin 5)), x ∈ s.1 →
x.2 * s.2 * x.2⁻¹ = x.1 :=
dec_trivial
-- @of_as_true _ (@finset.decidable_dforall_finset _ conj_classes_A5
-- (λ (s : finset (alternating (fin 5) ×
-- alternating (fin 5)) × alternating (fin 5)) _,
-- ∀ (x : alternating (fin 5) × alternating (fin 5)), x ∈ s.1 →
-- x.2 * s.2 * x.2⁻¹ = x.1)
-- (λ s hs, @finset.decidable_dforall_finset _ s.1 _ _)) _root_.trivial
--example : multiset.nodup (@univ (alternating (fin 5)) _).1 := dec_trivial
--#eval x.map list.length
--#eval x.to_fun 3
#exit
set_option profiler true
#eval (conjugacy_classes' (alternating (fin 5))).1.map finset.card
instance : decidable_pred (is_cycle : perm α → Prop) :=
by dunfold is_cycle decidable_pred; apply_instance
local attribute [instance, priority 100] fintype_perm
local attribute [instance, priority 0] equiv.fintype
#print equiv.fintype
#eval (conjugacy_classes' (alternating (fin 5))).1.map finset.card
--example : (conjugacy_classes' (alternating (fin 5))).card = 5 := rfl
--#eval conjugacy_classes (quot.unquot (univ : finset (perm (fin 5))).1) |
module School
public export
record School where
constructor CreateSchool
id : String
country : Integer
city : Integer
name : String
yearFrom : Integer
yearTo : Integer
yearGraduated : Integer
clazz : Integer
speciality : String
type : Integer
typeStr : Integer
|
module Generic.Lib.Equality.Coerce where
open import Generic.Lib.Intro
open import Generic.Lib.Equality.Propositional
open import Generic.Lib.Decidable
open import Generic.Lib.Data.Product
Coerce′ : ∀ {α β} -> α ≡ β -> Set α -> Set β
Coerce′ refl = id
coerce′ : ∀ {α β} {A : Set α} -> (q : α ≡ β) -> A -> Coerce′ q A
coerce′ refl = id
uncoerce′ : ∀ {α β} {A : Set α} -> (q : α ≡ β) -> Coerce′ q A -> A
uncoerce′ refl = id
inspectUncoerce′ : ∀ {α β} {A : Set α}
-> (q : α ≡ β) -> (p : Coerce′ q A) -> ∃ λ x -> p ≡ coerce′ q x
inspectUncoerce′ refl x = x , refl
split : ∀ {α β γ δ} {A : Set α} {B : A -> Set β} {C : Set γ}
-> (q : α ⊔ β ≡ δ) -> Coerce′ q (Σ A B) -> (∀ x -> B x -> C) -> C
split q p g = uncurry g (uncoerce′ q p)
decCoerce′ : ∀ {α β} {A : Set α} -> (q : α ≡ β) -> IsSet A -> IsSet (Coerce′ q A)
decCoerce′ refl = id
data Coerce {β} : ∀ {α} -> α ≡ β -> Set α -> Set β where
coerce : ∀ {A} -> A -> Coerce refl A
qcoerce : ∀ {α β} {A : Set α} {q : α ≡ β} -> A -> Coerce q A
qcoerce {q = refl} = coerce
|
import Data.String
-- StringOrInt : Bool -> Type
-- StringOrInt False = String
-- StringOrInt True = Int
valToString : (isInt : Bool) -> (case isInt of
True => String
False => Int) -> String
valToString False x = cast x
valToString True x = trim x
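-- Illustrative checks (note: with this case expression, isInt = True
-- selects the String branch and False the Int branch):
--   valToString False 42        -- "42"  (cast : Int -> String)
--   valToString True " hello "  -- "hello" (trim strips surrounding whitespace)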
|
{- Comparison view and associated lemmas -}
module TemporalOps.Common.Compare where
open import CategoryTheory.Categories
open import Relation.Binary.PropositionalEquality as ≡
open import Relation.Binary.HeterogeneousEquality as ≅
open import Data.Nat.Properties using (+-identityʳ ; +-assoc ; +-suc)
open import Data.Nat using (ℕ ; zero ; suc ; _+_) public
-- (Very verbose) comparison view
-- Like 'compare', but only distinguishes ≤ or >.
data LeqOrdering : ℕ -> ℕ -> Set where
snd==[_+_] : ∀ k l → LeqOrdering k (k + l)
fst==suc[_+_] : ∀ k l → LeqOrdering (k + suc l) k
-- Auxiliary function to compareLeq
compareLeq-suc : ∀ n k -> LeqOrdering n k -> LeqOrdering (suc n) (suc k)
compareLeq-suc n .(n + l) snd==[ .n + l ] = snd==[ suc n + l ]
compareLeq-suc .(k + suc l) k fst==suc[ .k + l ] = fst==suc[ suc k + l ]
compareLeq : ∀ n k -> LeqOrdering n k
compareLeq zero k = snd==[ zero + k ]
compareLeq (suc n) zero = fst==suc[ zero + n ]
compareLeq (suc n) (suc k) = compareLeq-suc n k (compareLeq n k)
-- Lemmas for compareLeq
-- Comparing n and (n + k) always gives snd==[ n + k ]
compare-snd : ∀ (n k : ℕ) -> compareLeq n (n + k) ≡ snd==[ n + k ]
compare-snd zero k = refl
compare-snd (suc n) k rewrite compare-snd n k = refl
-- If n ≤ n + k, then l + n ≤ (l + n) + k
compare-snd-+ : ∀ (n k l : ℕ) -> compareLeq n (n + k) ≡ snd==[ n + k ]
-> compareLeq (l + n) ((l + n) + k) ≡ snd==[ (l + n) + k ]
compare-snd-+ n k zero pf = pf
compare-snd-+ zero k (suc l) pf rewrite +-identityʳ l = compare-snd (suc l) k
compare-snd-+ (suc n) k (suc l) pf
rewrite +-suc l n | compare-snd (l + n) k = refl
-- Heterogeneous version of compare-snd-+ with associativity
compare-snd-+-assoc : ∀ (n k l : ℕ) -> compareLeq n (n + k) ≡ snd==[ n + k ]
-> compareLeq (l + n) (l + (n + k)) ≅ snd==[ (l + n) + k ]
compare-snd-+-assoc n k l pf =
begin
compareLeq (l + n) (l + (n + k))
≅⟨ ≅.cong (λ x → compareLeq (l + n) x) (≡-to-≅ (≡.sym (+-assoc l n k))) ⟩
compareLeq (l + n) ((l + n) + k)
≅⟨ ≡-to-≅ (compare-snd-+ n k l pf) ⟩
snd==[ (l + n) + k ]
∎
where
open ≅.≅-Reasoning
-- Comparing (n + suc k) and n always gives fst==suc[ n + k ]
compare-fst : ∀ (n k : ℕ) -> compareLeq (n + suc k) n ≡ fst==suc[ n + k ]
compare-fst zero k = refl
compare-fst (suc n) k rewrite compare-fst n k = refl
-- If n + suc k > n, then (l + n) + suc k > l + n
compare-fst-+ : ∀ (n k l : ℕ) -> compareLeq (n + suc k) n ≡ fst==suc[ n + k ]
-> compareLeq ((l + n) + suc k) (l + n) ≡ fst==suc[ (l + n) + k ]
compare-fst-+ n k zero pf = pf
compare-fst-+ zero k (suc l) pf rewrite +-identityʳ l = compare-fst (suc l) k
compare-fst-+ (suc n) k (suc l) pf
rewrite +-suc l n | compare-fst (l + n) k = refl
-- Heterogeneous version of compare-fst-+ with associativity
compare-fst-+-assoc : ∀ (n k l : ℕ) -> compareLeq (n + suc k) n ≡ fst==suc[ n + k ]
-> compareLeq (l + (n + suc k)) (l + n) ≅ fst==suc[ (l + n) + k ]
compare-fst-+-assoc n k l pf =
begin
compareLeq (l + (n + suc k)) (l + n)
≅⟨ ≅.cong (λ x → compareLeq x (l + n)) (≡-to-≅ (≡.sym (+-assoc l n (suc k)))) ⟩
compareLeq ((l + n) + suc k) (l + n)
≅⟨ ≡-to-≅ (compare-fst-+ n k l pf) ⟩
fst==suc[ (l + n) + k ]
∎
where
open ≅.≅-Reasoning
|
from pathlib import Path
import argparse
import cv2
import numpy as np
from tqdm import tqdm
# the general_* variants guard against empty ground-truth masks: they score 1
# if both masks are empty and 0 if only the prediction is non-empty
def general_dice(y_true, y_pred):
if y_true.sum() == 0:
if y_pred.sum() == 0:
return 1
else:
return 0
return dice(y_true == 1, y_pred == 1)
def general_jaccard(y_true, y_pred):
if y_true.sum() == 0:
if y_pred.sum() == 0:
return 1
else:
return 0
return jaccard(y_true == 1, y_pred == 1)
def jaccard(y_true, y_pred):
intersection = (y_true * y_pred).sum()
union = y_true.sum() + y_pred.sum() - intersection
return (intersection + 1e-15) / (union + 1e-15)
def dice(y_true, y_pred):
return (2 * (y_true * y_pred).sum() + 1e-15) / (y_true.sum() + y_pred.sum() + 1e-15)
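# quick sanity check (illustrative): for y_true = [1, 1, 0, 0] and
# y_pred = [1, 0, 0, 0] we get dice ~= 2*1/(2+1) ~= 0.667 and jaccard ~= 0.5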
if __name__ == '__main__':
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--train_path', type=str, default='data/train/angyodysplasia/masks', help='path where train images with ground truth are located')
arg('--target_path', type=str, default='predictions/UNet', help='path with predictions')
args = parser.parse_args()
result_dice = []
result_jaccard = []
for file_name in tqdm(list(Path(args.train_path).glob('*'))):
y_true = (cv2.imread(str(file_name), 0) > 255 * 0.5).astype(np.uint8)
pred_file_name = Path(args.target_path) / (file_name.stem.replace('_a', '') + '.png')
y_pred = (cv2.imread(str(pred_file_name), 0) > 255 * 0.5).astype(np.uint8)
result_dice += [dice(y_true, y_pred)]
result_jaccard += [jaccard(y_true, y_pred)]
print('Dice = ', np.mean(result_dice), np.std(result_dice))
print('Jaccard = ', np.mean(result_jaccard), np.std(result_jaccard))
|
(* Property from Case-Analysis for Rippling and Inductive Proof,
Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_prop_12
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
fun map :: "('a => 'b) => 'a list => 'b list" where
"map x (nil2) = nil2"
| "map x (cons2 z xs) = cons2 (x z) (map x xs)"
fun drop :: "Nat => 'a list => 'a list" where
"drop (Z) y = y"
| "drop (S z) (nil2) = nil2"
| "drop (S z) (cons2 x2 x3) = drop z x3"
theorem property0 :
"((drop n (map f xs)) = (map f (drop n xs)))"
oops
end
|
# Direct methods for solving linear systems (homework)
**Exercise 1.** Let us consider the linear system $A\mathbf{x} = \mathbf{b}$ where
$$
A =
\begin{bmatrix}
\epsilon & 1 & 2\\
1 & 3 & 1 \\
2 & 1 & 3 \\
\end{bmatrix}.
$$
1. Find the range of values of $\epsilon \in \mathbb{R}$ such that the matrix $A$ is symmetric and positive definite.
**Suggestion**: use *Sylvester's criterion*, which states that a symmetric matrix $A \in \mathbb{R}^{n \times n}$ is positive definite if and only if all the leading principal minors of $A$ are positive. (The leading principal minors of $A \in \mathbb{R}^{n \times n}$ are the determinants of the submatrices $A_p = (a_{i,j})_{1 \leq i, j \leq p}$, $p = 1, \dots, n$.)
2. Which factorization is most suitable for solving the linear system $A\mathbf{x}=\mathbf{b}$ in the case $\epsilon=0$? Motivate your answer.
3. Compute the Cholesky factorization $A = R^T R$ for the case $\epsilon = 2$.
4. Given $\mathbf{b} = (1,1,1)^T$, solve the linear system by using the Cholesky factorization computed at the previous point.
**Exercise 2.** Let us consider the following matrix $A \in \mathbb R^{3 \times 3}$ depending on the parameter $\epsilon \in \mathbb R$:
$$
A =
\begin{bmatrix}
1 & \epsilon & -1 \\
\epsilon & \frac{35}3 & 1 \\
-1 & \epsilon & 2 \\
\end{bmatrix}.
$$
1. Calculate the values of the parameter $\epsilon \in \mathbb R$ for which the matrix $A$ is invertible (non-singular).
2. Calculate the Gauss factorization $LU$ of the matrix $A$ (when non-singular) for a generic value of the parameter $\epsilon \in \mathbb R$.
3. Calculate the values of the parameter $\epsilon \in \mathbb R$ for which the Gauss factorization $LU$ of the matrix $A$ (when non-singular) exists and is unique.
4. Set $\epsilon = \sqrt{\frac{35}3}$ and use the pivoting technique to calculate the Gauss factorization $LU$ of the matrix $A$.
5. For $\epsilon=1$, the matrix $A$ is symmetric and positive definite. Calculate the corresponding Cholesky factorization of the matrix $A$, i.e. the upper triangular matrix with positive elements on the diagonal, say $R$, for which $A = R^T R$.
# EXERCISE 1
```python
from sympy import *
init_printing()

x = symbols("x")  # x stands for the parameter epsilon
A = Matrix([[x, 1, 2], [1, 3, 1], [2, 1, 3]])
print(A)
det(A)  # 8*x - 11
temp = A.berkowitz_minors()  # leading principal minors: (x, 3*x - 1, 8*x - 11)
temp
# Sylvester's criterion: all minors positive  <=>  x > 11/8
```
```python
A[0, 0] = 0  # the case epsilon = 0: the (1,1) pivot vanishes
A.LUdecomposition()  # note the row swap [[0, 1]] in the output below
```
$$\left ( \left[\begin{matrix}1 & 0 & 0\\0 & 1 & 0\\2 & -5 & 1\end{matrix}\right], \quad \left[\begin{matrix}1 & 3 & 1\\0 & 1 & 2\\0 & 0 & 11\end{matrix}\right], \quad \left [ \left [ 0, \quad 1\right ]\right ]\right )$$
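For points 3 and 4, a minimal sympy sketch (assuming sympy's `cholesky`/`cholesky_solve`; note that `cholesky()` returns the lower-triangular factor $L$ with $A = LL^T$, so $R = L^T$):

```python
from sympy import Matrix

# Exercise 1, points 3-4: the case epsilon = 2 (A is SPD since 2 > 11/8)
A = Matrix([[2, 1, 2], [1, 3, 1], [2, 1, 3]])
b = Matrix([1, 1, 1])

L = A.cholesky()         # lower-triangular L with A = L*L.T, hence R = L.T
R = L.T                  # R = [[sqrt(2), sqrt(2)/2, sqrt(2)], [0, sqrt(10)/2, 0], [0, 0, 1]]
x = A.cholesky_solve(b)  # forward solve L*y = b, then back solve R*x = y
# x = Matrix([[2/5], [1/5], [0]])
```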
# EXERCISE 2
```python
x=symbols("x")
A=Matrix([[1,x,-1],[x,35/3,1],[-1,x,2]])
det(A)
solve(det(A))
#linsolve(det(A),0)
#invertible when 3x^2+2x-11.6666666!=0
```
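For points 4 and 5, a sketch along the same lines (same assumptions on the sympy API as above; the generic-$\epsilon$ factorization of points 2-3 is meant to be done by hand):

```python
from sympy import Matrix, Rational, sqrt

A = lambda e: Matrix([[1, e, -1], [e, Rational(35, 3), 1], [-1, e, 2]])

# point 4: epsilon = sqrt(35/3) kills the second pivot (35/3 - eps**2 = 0)
# after the first elimination step, so a row swap is required;
# LUdecomposition returns the row swaps it performed
L, U, perm = A(sqrt(Rational(35, 3))).LUdecomposition()
print(perm)  # the list of row swaps, e.g. [[1, 2]]

# point 5: epsilon = 1 gives a symmetric positive definite matrix,
# so the Cholesky factorization A = R.T * R exists
R = A(1).cholesky().T  # cholesky() returns L = R.T
print(R)
```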
|
-- We show that by allowing the remainder of integer division to be
-- negative, we can get a more precise estimate of the rank of the
-- remainder: rank remainder ≤ (rank divisor) / 2.
{-# OPTIONS --without-K --safe #-}
module Integer.EucDomain2 where
-- ----------------------------------------------------------------------
-- ℕ is an "Euclidean SemiRing".
module NatESR where
-- ℕ satisfies the euc-eq and euc-rank properties with the usual div and
-- mod functions. Still, we need to translate from non-equality
-- to the NonZero predicate.
-- imports from stdlib.
open import Relation.Nullary using (¬_)
open import Relation.Binary.PropositionalEquality using (_≡_ ; refl)
open import Data.Nat as Nat using (ℕ ; zero ; suc)
import Data.Nat.DivMod as NDM
import Data.Nat.Properties as NatP
-- imports from local.
open import Instances
-- We have already made ℕ an instance of Rank. For convenience we restate it
-- here.
rank' : ℕ -> ℕ
rank' x = x
-- Division, with the annoying translation from ¬ d ≡ 0 to the NonZero
-- predicate.
div : ∀ (n d : ℕ) -> ¬ d ≡ 0 -> ℕ
div n zero n0 with n0 refl
... | ()
div n (suc d) n0 = NDM._/_ n (suc d)
-- Remainder.
mod : ∀ (n d : ℕ) -> ¬ d ≡ 0 -> ℕ
mod n zero n0 with n0 refl
... | ()
mod n (suc d) n0 = NDM._%_ n (suc d)
-- Dividend = remainder + quotient * divisor.
euc-eq : ∀ (n d : ℕ) (n0 : ¬ d ≡ 0) ->
let r = mod n d n0 in let q = div n d n0 in
n ≡ r + q * d
euc-eq n zero n0 with n0 refl
... | ()
euc-eq n (suc d) n0 = NDM.m≡m%n+[m/n]*n n (suc d)
-- rank remainder < rank divisor.
euc-rank : ∀ (n d : ℕ) (n0 : ¬ d ≡ 0) ->
let r = mod n d n0 in let q = div n d n0 in
rank r < rank d
euc-rank n zero n0 with n0 refl
... | ()
euc-rank n (suc d) n0 = NDM.m%n<n n (suc d)
-- ----------------------------------------------------------------------
-- Allow integer division to have a negative remainder
-- Let n, d be integers with d nonzero. By the usual division with
-- remainder we have q, r such that n = r + q * d and ∣ r ∣ < ∣ d ∣.
-- What we want is q' and r' such that n = r' + q' * d and ∣ r' ∣ ≤ ∣
-- d ∣ / 2. Assume d > 0; an easy calculation shows that if ∣ r ∣ ≤ ∣ d ∣
-- / 2, we can let q' = q and r' = r, and if ∣ r ∣ > ∣ d ∣ / 2, we let q'
-- = q + 1 and r' = r - d. The case d < 0 is similar.
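-- A concrete instance of the adjustment: for n = 7 and d = 4 the usual
-- division gives 7 = 3 + 1 * 4 with ∣ 3 ∣ > ∣ 4 ∣ / 2, so we take
-- q' = 2 and r' = -1, giving 7 = -1 + 2 * 4 with ∣ -1 ∣ ≤ ∣ 4 ∣ / 2.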
-- imports from stdlib.
open import Relation.Nullary using (yes ; no ; ¬_)
open import Relation.Binary.PropositionalEquality as PE
using (_≡_ ; refl ; sym ; cong ; trans)
open import Data.Empty using (⊥-elim)
open import Data.Sum using (_⊎_ ; inj₁ ; inj₂)
open import Data.Bool using (T ; not)
open import Data.Nat as Nat using (ℕ ; suc ; s≤s ; zero ; z≤n)
import Data.Nat.DivMod as NDM
import Data.Nat.Properties as NatP
open import Data.Integer as Int
using (ℤ ; ∣_∣ ; +_ ; +[1+_] ; -[1+_] ; 1ℤ ; _◃_ ; +<+ ; -<- ; -≤- ; -<+ ; -≤+ ; +≤+ ; 0ℤ)
import Data.Integer.Properties as IntP
open import Data.Integer.DivMod
using (_div_ ; _mod_ ; a≡a%n+[a/n]*n ; n%d<d)
open import Data.Integer.Solver
-- imports from local.
open import Instances
-- Another integer division, allowing a negative remainder.
div' : ∀ (n d : ℤ) -> ¬ d ≡ 0ℤ -> ℤ
div' n (+_ zero) n0 with n0 refl
... | ()
div' n d@(+[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = n div d
... | no np = 1ℤ + n div d
div' n d@(-[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = n div d
... | no np = - 1ℤ + n div d
-- Another integer mod, allowing a negative remainder.
mod' : ∀ (n d : ℤ) -> ¬ d ≡ 0ℤ -> ℤ
mod' n (+_ zero) n0 with n0 refl
... | ()
mod' n d@(+[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = + (n mod d)
... | no np = + (n mod d) - d
mod' n d@(-[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = + (n mod d)
... | no np = + (n mod d) + d
-- Dividend = remainder + quotient * divisor.
euc-eq' : ∀ (n d : ℤ) (n0 : ¬ d ≡ 0ℤ) ->
let r = mod' n d n0 in let q = div' n d n0 in
n ≡ r + q * d
euc-eq' n (+_ zero) n0 with n0 refl
... | ()
euc-eq' n d@(+[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = a≡a%n+[a/n]*n n d
... | no np = claim
where
claim : n ≡ (+ (n mod d) - d) + (1ℤ + n div d) * d
claim = begin
n ≡⟨ a≡a%n+[a/n]*n n d ⟩
+ (n mod d) + (n div d) * d ≡⟨ solve 3 (\ x y z -> x :+ y :* z := x :- z :+ (con 1ℤ :+ y) :* z) refl (+ (n mod d)) (n div d) d ⟩
(+ (n mod d) - d) + (1ℤ + n div d) * d ∎
where
open +-*-Solver
open PE.≡-Reasoning
euc-eq' n d@(-[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = a≡a%n+[a/n]*n n d
... | no np = claim
where
claim : n ≡ (+ (n mod d) + d) + (- 1ℤ + n div d) * d
claim = begin
n ≡⟨ a≡a%n+[a/n]*n n d ⟩
+ (n mod d) + (n div d) * d ≡⟨ solve 3 (\ x y z -> x :+ y :* z := x :+ z :+ (con (- 1ℤ) :+ y) :* z) refl (+ (n mod d)) (n div d) d ⟩
(+ (n mod d) + d) + (- 1ℤ + n div d) * d ∎
where
open +-*-Solver
open PE.≡-Reasoning
-- rank remainder ≤ (rank divisor) / 2.
euc-rank' : ∀ (n d : ℤ) (n0 : ¬ d ≡ 0ℤ) ->
let r = mod' n d n0 in let q = div' n d n0 in
∣ r ∣ ≤ ∣ d ∣ / 2
euc-rank' n (+_ zero) n0 with n0 refl
... | ()
euc-rank' n d@(+[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2
... | yes pr = pr
... | no ¬p = claim
where
r = + (n mod d) - d
d≠0 : ¬ ∣ d ∣ ≡ 0
d≠0 = λ ()
-- We show ∣ r ∣ ≤ ∣ d ∣ / 2 in the current case, i.e. ∣
-- n % d - d ∣ ≤ d / 2, following the steps below:
-- 1) d / 2 < n % d
-- 2) d / 2 - d < n % d - d
-- 3) n % d - d < 0, hence d / 2 - d < n % d - d < 0
-- 4) ∣ d / 2 - d ∣ > ∣ n % d - d ∣
-- 5) d / 2 - d ≡ - d / 2 or d / 2 - d ≡ - (d / 2 + 1)
-- 6) ∣ d / 2 - d ∣ ≡ d / 2 or ∣ d / 2 - d ∣ ≡ d / 2 + 1
-- 7) d / 2 > ∣ n % d - d ∣ or d / 2 + 1 > ∣ n % d - d ∣
-- 8) d / 2 ≥ ∣ n % d - d ∣
-- By negating ¬p, we can show ∣ d ∣ / 2 < n mod d
-- (we are using the decidable order).
open import Relation.Binary using (tri< ; tri≈ ; tri>)
step1 : ∣ d ∣ / 2 < n mod d
step1 with NatP.<-cmp (n mod d) (∣ d ∣ / 2)
... | tri< a ¬b ¬c = ⊥-elim (¬p (NatP.<⇒≤ a))
... | tri≈ ¬a b ¬c = ⊥-elim (¬p (NatP.≤-reflexive b))
... | tri> ¬a ¬b c = c
where
notle : ¬ (n mod d ≤ ∣ d ∣ / 2)
notle = ¬p
-- Step 2. ∣ d ∣ / 2 - ∣ d ∣ < n mod d - ∣ d ∣. Subtracting ∣ d ∣
-- from both sides of step1 preserves the inequality.
step2 : + (∣ d ∣ / 2) - + ∣ d ∣ < + (n mod d) - + ∣ d ∣
step2 = IntP.+-monoʳ-< (- (+ ∣ d ∣)) (+<+ step1)
-- Step 3. n mod d - ∣ d ∣ < 0
-- Some auxillary.
n-n=0 : ∀ {n} -> n - n ≡ 0ℤ
n-n=0 {n} = IntP.+-inverseʳ n
step3 : + (n mod d) - (+ ∣ d ∣) < 0ℤ
step3 rewrite (sym (n-n=0 {+ ∣ d ∣})) = r-d<d-d
where
-- n mod d < ∣ d ∣, by Euclidean property of integers.
r<∣d∣ : + (n mod d) < + ∣ d ∣
r<∣d∣ = +<+ (n%d<d n d)
-- Subtract ∣ d ∣ on both sides of n mod d < ∣ d ∣.
r-d<d-d : + (n mod d) - (+ ∣ d ∣) < (+ ∣ d ∣) - (+ ∣ d ∣)
r-d<d-d = IntP.+-monoʳ-< (- + ∣ d ∣) r<∣d∣
-- Step 4. ∣ d / 2 - d ∣ > ∣ n % d - d ∣. By steps 2 and 3, we have
-- d / 2 - d < n % d - d < 0. By taking absolute values, we show
-- ∣ d / 2 - d ∣ > ∣ n % d - d ∣.
-- Some lemmas about taking the absolute value of negative and nonpositive
-- numbers.
lemma-∣neg∣ : ∀ {a : ℤ} -> a < 0ℤ -> + ∣ a ∣ ≡ - a
lemma-∣neg∣ -<+ = refl
lemma-∣non-pos∣ : ∀ {a : ℤ} -> a ≤ 0ℤ -> + ∣ a ∣ ≡ - a
lemma-∣non-pos∣ {.(-[1+ _ ])} -≤+ = refl
lemma-∣non-pos∣ {.(+ zero)} (+≤+ z≤n) = refl
-- The injection of natural numbers into integers reflect order.
lemma-inj-reflect-ord : ∀ {a b} -> + a < + b -> a < b
lemma-inj-reflect-ord (+<+ m<n) = m<n
lemma-inj-reflect-ord' : ∀ {a b} -> + a ≤ + b -> a ≤ b
lemma-inj-reflect-ord' (+≤+ m<n) = m<n
-- Taking absolute values on both sides of a strict inequality of
-- negative numbers (this can be relaxed to non-positive numbers)
-- reverses the order.
lemma-∣neg<neg∣ : ∀ {a b} -> a < 0ℤ -> b < a -> ∣ a ∣ < ∣ b ∣
lemma-∣neg<neg∣ {a} {b} a<0 b<a = lemma-inj-reflect-ord claim where
0<b : b < 0ℤ
0<b = IntP.<-trans b<a a<0
claim : (+ ∣ a ∣) < (+ ∣ b ∣)
claim = begin-strict
+ ∣ a ∣ ≡⟨ lemma-∣neg∣ {a} a<0 ⟩
- a <⟨ IntP.neg-mono-< b<a ⟩
- b ≡⟨ sym (lemma-∣neg∣ 0<b) ⟩
+ ∣ b ∣ ∎
where open IntP.≤-Reasoning
-- Goal step4 achieved.
step4 : ∣ + (n mod d) - d ∣ < ∣ + (∣ d ∣ / 2) - d ∣
step4 = lemma-∣neg<neg∣ step3 step2
-- Step 5. d / 2 - d ≡ - d / 2 or d / 2 - d ≡ - (d / 2 + 1)
-- Natural number version of Step 5.
step5-nat : ∣ d ∣ / 2 + ∣ d ∣ / 2 ≡ ∣ d ∣ ⊎ suc (∣ d ∣ / 2 + ∣ d ∣ / 2) ≡ ∣ d ∣
step5-nat = lemma-div-by-2 ∣ d ∣
where
-- An easy and useful fact.
*2+ : ∀ {a} -> a * 2 ≡ a + a
*2+ {zero} = refl
*2+ {suc a} rewrite *2+ {a} | NatP.+-comm a (suc a) = refl
-- Either d / 2 + d / 2 ≡ d or d / 2 + d / 2 + 1 ≡ d.
lemma-div-by-2 : ∀ d -> let hd = d / 2 in
hd + hd ≡ d ⊎ suc (hd + hd) ≡ d
lemma-div-by-2 d with d / 2 | d % 2 | NatESR.euc-eq d 2 (λ { ()}) | NatESR.euc-rank d 2 (λ { ()})
... | hd | zero | proj₁₁ | pr rewrite *2+ {hd} = inj₁ (sym proj₁₁)
... | hd | suc zero | proj₁₁ | pr rewrite *2+ {hd} = inj₂ (sym proj₁₁)
... | hd | suc (suc r₁) | proj₁₁ | pr = ⊥-elim ((c1 r₁) pr )
where
c1 : ∀ a -> ¬ (suc (suc (suc a)) ≤ 2)
c1 a = λ { (s≤s (s≤s ()))}
-- Next we need inject naturals to integers. We do the two
-- identities separately.
-- Step 5a. If (d / 2) + (d / 2) ≡ d, we show d / 2 - d ≡ - d / 2.
step5a : ∀ d -> (d / 2) + (d / 2) ≡ d -> let -_ = λ x -> - (+ x) in
+ (d / 2) - (+ d) ≡ - (d / 2)
step5a d hyp = sym claim4
where
open PE.≡-Reasoning
claim : + (d / 2 + d / 2) ≡ + d
claim = cong +_ hyp
claim2 : + (d / 2) + + (d / 2) ≡ + d
claim2 = begin
+ (d / 2) + + (d / 2) ≡⟨ IntP.pos-+-commute (d / 2) (d / 2) ⟩
+ (d / 2 + d / 2) ≡⟨ cong +_ hyp ⟩
+ d ∎
claim3 : + (d / 2) ≡ + d - + (d / 2)
claim3 = begin
+ (d / 2) ≡⟨ (solve 1 (λ a -> a := a :+ a :- a) refl) (+ (d / 2)) ⟩
+ (d / 2) + + (d / 2) - + (d / 2) ≡⟨ cong (_- + (d / 2)) claim2 ⟩
+ d - + (d / 2) ∎
where open +-*-Solver
claim4 : let -_ = λ x -> - (+ x) in
- (d / 2) ≡ + (d / 2) - + d
claim4 = begin
- (+ (d / 2)) ≡⟨ cong -_ claim3 ⟩
- (+ d - + (d / 2)) ≡⟨ (solve 2 (λ a b -> :- (a :- b) := b :- a) refl) (+ d) (+ (d / 2)) ⟩
+ (d / 2) - + d ∎
where open +-*-Solver
-- Step 5b. If (d / 2) + (d / 2) + 1 ≡ d, we show d / 2 - d ≡ - (d
-- / 2 + 1). (Note that we change the sign of the equality)
step5b : ∀ d -> suc ((d / 2) + (d / 2)) ≡ d -> let -_ = λ x -> - (+ x) in
(+ d) - + (d / 2) ≡ + (d / 2) + 1ℤ
step5b d hyp = begin
+ d - + (d / 2) ≡⟨ cong (λ x → + x - + (d / 2)) (sym hyp) ⟩
+ (suc (d / 2 + d / 2)) - + (d / 2) ≡⟨ refl ⟩
+ (1 + (d / 2 + d / 2)) - + (d / 2) ≡⟨ cong (_- + (d / 2)) (IntP.pos-+-commute 1 (d / 2 + d / 2)) ⟩
+ 1 + + (d / 2 + d / 2) - + (d / 2) ≡⟨ cong (λ x → + 1 + x - + (d / 2)) (IntP.pos-+-commute (d / 2) (d / 2)) ⟩
+ 1 + + (d / 2) + + (d / 2) - + (d / 2) ≡⟨ solve 1 (λ x → (con 1ℤ) :+ (x :+ x) :- x := x :+ con 1ℤ) refl (+ (d / 2)) ⟩
+ (d / 2) + 1ℤ ∎
where
open +-*-Solver
open PE.≡-Reasoning
-- Goal 5.
step5 : let -_ = λ x -> - (+ x) in
+ (∣ d ∣ / 2) - d ≡ - (∣ d ∣ / 2) ⊎ d - + (∣ d ∣ / 2) ≡ + (∣ d ∣ / 2) + 1ℤ
step5 with step5-nat
... | inj₁ x = inj₁ (step5a ∣ d ∣ x)
... | inj₂ y = inj₂ (step5b ∣ d ∣ y)
-- Step 6. ∣ d / 2 - d ∣ ≡ d / 2 or ∣ d / 2 - d ∣ ≡ d / 2 + 1
step6 : let -_ = λ x -> - (+ x) in
∣ + (∣ d ∣ / 2) - d ∣ ≡ ∣ d ∣ / 2 ⊎ ∣ d - + (∣ d ∣ / 2) ∣ ≡ ∣ d ∣ / 2 + 1
step6 with step5
... | inj₁ x rewrite x = inj₁ (trans ∣-d/2∣≡∣d/2∣ ∣d/2∣≡d/2)
where
∣-d/2∣≡∣d/2∣ : ∣ - (+ (∣ d ∣ / 2)) ∣ ≡ ∣ + (∣ d ∣ / 2) ∣
∣-d/2∣≡∣d/2∣ = IntP.∣-i∣≡∣i∣ (+ (∣ d ∣ / 2))
∣d/2∣≡d/2 : ∣ + (∣ d ∣ / 2) ∣ ≡ (∣ d ∣ / 2)
∣d/2∣≡d/2 = refl
... | inj₂ y rewrite y = inj₂ claim
where
claim : ∣ + (∣ d ∣ / 2) + 1ℤ ∣ ≡ ∣ d ∣ / 2 + 1
claim = begin
∣ + (∣ d ∣ / 2) + 1ℤ ∣ ≡⟨ refl ⟩
∣ + ((∣ d ∣ / 2) + 1) ∣ ≡⟨ refl ⟩
∣ d ∣ / 2 + 1 ∎
where
open PE.≡-Reasoning
-- Step 7. d / 2 > ∣ n % d - d ∣ or d / 2 + 1 > ∣ n % d - d ∣
step7 : ∣ + (n mod d) - d ∣ < ∣ d ∣ / 2 ⊎ ∣ + (n mod d) - d ∣ < ∣ d ∣ / 2 + 1
step7 with step6
... | inj₁ x = inj₁ claim
where
claim : ∣ + (n mod d) - d ∣ < ∣ d ∣ / 2
claim = begin-strict
∣ + (n mod d) - d ∣ <⟨ step4 ⟩
∣ + (∣ d ∣ / 2) - d ∣ ≡⟨ x ⟩
∣ d ∣ / 2 ∎
where
open NatP.≤-Reasoning
... | inj₂ y = inj₂ claim
where
claim : ∣ + (n mod d) - d ∣ < ∣ d ∣ / 2 + 1
claim = begin-strict
∣ + (n mod d) - d ∣ <⟨ step4 ⟩
∣ + (∣ d ∣ / 2) - d ∣ ≡⟨ IntP.∣i-j∣≡∣j-i∣ (+ (∣ d ∣ / 2)) d ⟩
∣ d - + (∣ d ∣ / 2) ∣ ≡⟨ y ⟩
∣ d ∣ / 2 + 1 ∎
where
open NatP.≤-Reasoning
-- Step 8. d / 2 ≥ ∣ n % d - d ∣
step8 : ∣ + (n mod d) - d ∣ ≤ ∣ d ∣ / 2
step8 with step7
... | inj₁ x = NatP.<⇒≤ x
... | inj₂ y = m<sucn⇒m≤n claim
where
m<sucn⇒m≤n : ∀ {m n : ℕ} → m < suc n → m ≤ n
m<sucn⇒m≤n (s≤s le) = le
lemma-+1 : ∀ {a : ℕ} -> a + 1 ≡ suc a
lemma-+1 {zero} = refl
lemma-+1 {suc a} = cong suc (lemma-+1 {a})
claim : ∣ + (n mod d) - d ∣ < suc (∣ d ∣ / 2)
claim = begin-strict
∣ + (n mod d) - d ∣ <⟨ y ⟩
∣ d ∣ / 2 + 1 ≡⟨ lemma-+1 ⟩
suc (∣ d ∣ / 2) ∎
where
open NatP.≤-Reasoning
claim : ∣ r ∣ ≤ ∣ d ∣ / 2
claim = step8
-- This case is solved by a recursive call with arguments n and - d.
euc-rank' n d@(-[1+ e ]) n0 with n mod d ≤? ∣ d ∣ / 2 | euc-rank' n (+[1+ e ]) (λ {()})
... | yes pr | hyp = pr
... | no ¬p | hyp = hyp
-- We can relax the estimate.
euc-rank'' : ∀ (n d : ℤ) (n0 : ¬ d ≡ 0ℤ) ->
let r = mod' n d n0 in let q = div' n d n0 in
∣ r ∣ < ∣ d ∣
euc-rank'' n d n0 = let r = mod' n d n0 in let q = div' n d n0 in
begin-strict
∣ r ∣ ≤⟨ euc-rank' n d n0 ⟩
∣ d ∣ / 2 <⟨ aux ∣ d ∣ ∣d∣≠0 ⟩
∣ d ∣ ∎
where
open NatP.≤-Reasoning
aux : ∀ n -> ¬ n ≡ 0 -> n / 2 < n
aux zero n0 with n0 refl
... | ()
aux (suc n) n0 = NDM.m/n<m (suc n) 2 (s≤s (s≤s z≤n))
∣d∣≠0 : ¬ ∣ d ∣ ≡ 0
∣d∣≠0 x = n0 (IntP.∣i∣≡0⇒i≡0 x)
-- ----------------------------------------------------------------------
-- Alternative Euclidean Structure
-- The newly defined div' and mod', together with euc-eq' and
-- euc-rank'', make ℤ a Euclidean domain with a more precise estimate on
-- the rank of the remainder.
open import Integer.EucDomain
using (+-*-isEuclideanDomain ; +-*-euclideanDomain)
import EuclideanDomain
open EuclideanDomain.Bundles
-- We update the old Euclidean structure in EucDomain.
+-*-isEuclideanDomain' = record +-*-isEuclideanDomain
{ div = div'
; mod = mod'
; euc-eq = euc-eq'
; euc-rank = euc-rank''
}
-- Bundle.
+-*-euclideanDomain' : EuclideanDomainBundle _ _
+-*-euclideanDomain' = record
{ isEuclideanDomain = +-*-isEuclideanDomain'
}
-- ----------------------------------------------------------------------
-- Make a new instance for DivMod ℤ
-- Translation between two nonzeros.
nz : ∀ (x : ℤ) -> .{{NonZero x}} -> ¬ x ≡ 0#
nz +[1+ n ] ()
-- We use the newly defined div and mod functions.
instance
new-DMℤ : DivMod ℤ
new-DMℤ .NZT = NZTℤ
new-DMℤ ._/_ n d = div' n d (nz d)
new-DMℤ ._%_ n d = mod' n d (nz d)
|
############## #section #################
# set working directory and create folder structure
#########################################
# set your working directory here
setwd('/home/emil/Dropbox/Arbejde/CBS/Virksomhedsstrategy i netværksperspektiv/virkstrat-oevelser')
# check that you have set the correct path
#- it should list the names of files and folders in the project folder
# you created
list.files()
############## #section #################
# install R packages
#########################################
install.packages('data.table')
install.packages('igraph')
install.packages('ggraph')
install.packages('readxl')
install.packages('writexl')
############## #section #################
# load packages
#########################################
library(data.table)
library(igraph)
library(ggraph)
library(readxl)
library(writexl)
############## #section #################
# networks: a simple example
#########################################
# create the network object - contestants from Paradise Hotel Season 3
# disclaimer: their relations here are not based on any kind of real analysis.
g1 <- graph( c("Caroline", "Boris", "Boris", "Tanja", "Tanja", "Caroline", "Tanja", "Mark", "Tanja", "Jonas", "Jonas", "Simone"), directed=FALSE)
# print to screen
g1
# this is how it looks as an adjacency matrix ('sparse' format)
g1[]
# can be converted into a regular matrix
a1 <- as.matrix(g1[])
View(a1)
# visualization, the quick way
# not super informative, but it works
autograph(g1, layout = "dh")
# rebuilt as a ggplot object: here we make the graph
# with a bit more information
ggraph(g1, layout = "dh") +
geom_edge_link0(color='black') +
geom_node_point(color='grey',size=15) +
geom_node_text(aes(label = name), color='black') +
theme_graph()
# data embedded in the graph object
E(g1) # edges / ties
V(g1) # vertices / nodes
V(g1)$name # names
# you can add information as attributes - they must be sorted in the same
# order as the nodes, i.e. the order in V(g1)$name
V(g1)$koen <- c("Kvinde", "Mand", "Kvinde", "Mand", "Mand", "Kvinde")
# see which attributes exist - both name and gender (koen)
vertex_attr(g1)
# we can color by attributes, here gender
ggraph(g1, layout= "dh") +
geom_edge_link0() +
geom_node_point( aes(color=koen), size=15) +
geom_node_text( aes(label=name)) +
theme_graph() +
scale_color_manual(values=c('indianred4', 'darkseagreen3'))
# another version - remember the one with blue
ggraph(g1, layout= "dh") +
geom_edge_link0(width=2, color='grey25') +
geom_node_point( aes(color=koen), fill='ghostwhite', size=15, shape=21, stroke=2) +
geom_node_text( aes(label=name)) +
theme_graph() +
scale_color_manual(values=c('red3','royalblue1'))
############## #section #################
# EliteDB - our dataset
# load the data, take a quick look at it
#########################################
# Christoph & Anton Grau's eliteDB dataset
# download "den17-no-nordic-letters.csv" from Canvas, in the folder for the 1st exercise session. place it in the input folder
# read in the data - only works if the data is placed in the input folder and has the right name
den1 <- fread('input/den17-no-nordic-letters.csv')
# from here:
# https://github.com/antongrau/eliter/tree/master/raw_data
# difference from the web link: all Nordic letters converted, and variable names in lower case.
# jargon: from now on I refer to a concrete data.table as a DT.
# data.table syntax
# DT1[*i*, *j*, *by*]
# | | |
# | | |
# | | -----> by - grouping
# | --------------> j - which columns/variables
# -------------------> i - which rows
# subset/select the first 5 rows, printed to the console
den1[1:5, ]
den1[1:5] # without the comma, as for a vector - only works on data.tables, not data.frames
# view the contents of the DT in a Viewer (we load only the first 1000 rows)
View(den1[1:1000,])
# subset/select two columns, printed to the console
den1[, .(name, tags)] # the easy way
den1[, list(name, tags)] # ".( )" is just a shorthand for list( )
b1 <- c('name', 'tags')
den1[, b1, with=FALSE] # alternative
den1[, ..b1] # alternative
# let's look at the gender distribution using the special symbol .N
# first: .N is a special symbol in data.table and stands for "number of rows in the DT"
# if .N is used in *j*, it prints the number of rows in the DT
den1[, .N]
# same as:
nrow(den1)
# if it is used in *i* it takes the last row, no. 56,536, and prints it to the console.
den1[.N]
# .N combined with *by*: this is where it gets interesting: we can make a new DT
# with counts for the variable we group by (*by*)
den1[, .N, by=.(gender)] # the same
den1[ , .N, .(gender)] # it does not have to be a named argument
den1[,.N, by='gender'] # the same (better once you start looping over DTs)
# can also be several variables
den1[, .N, by=.(gender, affiliation)]
den1[ , .N, .(gender, affiliation)]
den1[,.N, by=c('gender', 'affiliation')]
# who appears most often? as a new DT with summarized / aggregated data
a1 <- den1[ , .N , by=.(name)]
a1
# order with the highest first - this is done by putting a minus in front of the variable you want to sort by.
a1[order(-N)]
a2 <- a1[order(-N), ]
# note that the output is not assigned to an object, e.g. back to a1, so a1 is
# still sorted as it was when it was aggregated from the den object.
a2
a1
############## #section #################
# networks from eliteDB
#########################################
# let's check which types of connections exist, and how many there are
# of each
a1 <- den1[,.N,by=.(affiliation)][order(-N)]
View(a1[1:100])
# or by sector
den1[, .N, by=.(sector)][order(-N)]
# we select some affiliations
den2 <- den1[sector == c('Commissions')]
# look at their names
den2[, .N, by=.(affiliation)][order(affiliation)]
# select a few specific ones of interest, to keep things simple
den3 <- den2[affiliation %in% c(
'Mastergruppen for styrkede laereplaner',
'Moensterbryderkorps',
'Dialoggruppen for Ny Nordisk Skole',
'Inklusionseftersynets referencegruppe'
)]
# inspect the data
View(den3)
nrow(den3)
####### #subsection #######
# create graph object: two-mode network
# create the incidence matrix
incidence_matrice <- xtabs(formula = ~ name + affiliation, data = den3,
sparse = TRUE)
incidence_matrice
# take a look at it
a1 <- as.matrix(incidence_matrice)
View(a1)
# create the graph object
net1 <- graph_from_incidence_matrix(incidence_matrice, directed=FALSE)
# look at the contents
net1
# all information at the vertex level - typically a bit too much
vertex_attr(net1)
# better to get the names of the attributes and look at them one at a time
names(vertex_attr(net1)) # we want to see the names of the two
vertex_attr(net1)$type
# two-mode network
ggraph(net1, layout = 'stress') +
geom_edge_link0(color='black') +
geom_node_point(aes(color=type), size=5) +
geom_node_label(aes(filter=type==TRUE, label = name), color='black', repel=TRUE) + scale_color_manual(values=
c('darkseagreen3', 'indianred4')
, labels=c('person','organisation')) +
theme_graph() + labs(color='')
####### #subsection #######
# create graph object: one-mode network, organizations
adjacency_matrice <- Matrix::t(incidence_matrice) %*% incidence_matrice
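# entry [i, j] of this product counts how many persons organisations i and j
# share; the diagonal holds each organisation's number of members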
c1 <- as.matrix(adjacency_matrice)
View(c1)
# then we can make a one-mode network instead (organization/organization)
net_org <- graph_from_adjacency_matrix(adjacency_matrice)
autograph(net_org)
####### #subsection #######
# create graph object: one-mode network, individuals
# we want to look at the individuals' connections with each other, through the organizations - we do this by multiplying the matrix by its own transpose.
adjacency_matrice <- incidence_matrice %*% Matrix::t(incidence_matrice)
b1 <- as.matrix(adjacency_matrice)
View(b1)
# then we can make a one-mode network instead (individual/individual)
net_pers <- graph_from_adjacency_matrix(adjacency_matrice) # note: this builds a directed graph by default; pass mode = 'undirected' if an undirected one is intended
autograph(net_pers)
|
import states observables
open set topological_space classical
local attribute [instance] prop_decidable
noncomputable theory
-- TODO: rename this file to essence.lean (maybe)
-- THIS FILE IS A WORK IN PROGRESS.
namespace ontology
variable {ω : ontology}
section relations
variable (ω)
-- abbreviation property (α := Prop) [topological_space α] [has_none α] := ω.observable α → ω.observable α
abbreviation property (α : Type := Prop) [topological_space α] := ω.entity → ω.world → α
def property.positive {ω : ontology}{α : Type} [topological_space α] (p : ω.property α) : Prop := ∀ e, continuous (p e)
abbreviation relation (α := Prop) [topological_space α] [has_none α] :=
ω.observable α → ω.observable α → ω.observable α
def {u} nary (α β : Type u) : ℕ → Type u
| 0 := β
| (n+1) := α → (nary n)
def {u} tnary (β : Type u) : (list $ Type u) → Type u
| [] := β
| (hd::tl) := hd → (tnary tl)
abbreviation nrelation (n : ℕ) (α := Prop) [topological_space α] [has_none α] :=
nary (ω.observable α) (ω.observable α) n
example : (ω.nrelation 2) = ω.relation := rfl
end relations
-- Next we must talk about predicates
section predicates
/-- A predicate is a function which outputs an event for an entity,
i.e. the event of the entity having the predicate. -/
abbreviation predicate (ω : ontology) := ω.entity → ω.event
/-- An analogical predicate, or apredicate, outputs an aevent instead of an event.
i.e. the aevent of the entity having the predicate, which it can possess to a greater or lesser extent.
This is analogy in the sense of **intrinsic attribution**.
See thomistic manuals for a deeper informal discussion of intrinsic attribution.
-/
abbreviation apredicate (ω : ontology) := ω.entity → ω.aevent
variable (p : ω.predicate)
-- Apparently Lean already infers correctly the proper notation for boolean algebra operations
-- (which is amazing), even though it does not elaborate the full boolean algebra instance.
-- we may consider defining this instance properly in the future, but for now, we just use the notation.
-- instance boolean_predicate : boolean_algebra ω.predicate := sorry
/-! Now, a predicate which, unlike an apredicate, does not return an aevent is in some sense univocal,
but this univocity might occur in two different ways: in language alone, or both in language and in reality.
The latter we call univocal predicates properly speaking, while the former we call *hidden* or *abstracted*
analogical predicates. The difference between these two is that real univocal predicates preserve hierarchical
distinctions in being, while abstracted, "fake", univocal predicates ignore such ontological distinctions.
So essentially we say that a predicate is univocal in a possible world if and only if the entities which exemplify
it in that world are incomparable with respect to existential entailment.
-/
-- TODO: change this definition to use existential entailment notation when it becomes available.
def predicate.lunivocal : ω.event :=
{ w |
∀ e₁ e₂, w ∈ p e₁ ∩ p e₂ → ¬ e₁.exists ⊂ e₂.exists ∧ ¬ e₁.exists ⊃ e₂.exists
}
@[reducible]
def predicate.lhidden := -p.lunivocal
@[reducible]
def predicate.univocal := p.lunivocal.necessary
@[reducible]
def predicate.hidden := p.lhidden.necessary
/- The whole point of the Thomistic theory of the analogy of being consists
in the realization that existence is a hidden analogy abstracted away from a deeper
apredicate: Being.
-/
def existence (ω : ontology) : ω.predicate := entity.exists
-- In any possible world with at least 2 entities (hence at least one contingent entity)
-- existence is a hidden analogy.
theorem analogical_existence : ∀ w e₁ e₂, e₁ ≠ e₂ → e₁ ∈ w → e₂ ∈ w → w ∈ ω.existence.lhidden := sorry
instance predicate_inhabited : inhabited ω.predicate := ⟨ω.existence⟩
/-- a predicate `p` is **exemplifiable** if there is some `entity` which can possibly be `p`. -/
@[reducible]
def predicate.exemplifiable : Prop := ∃ e, ⋄(p e)
-- an entity is said to possibly exemplify a predicate if it does so in some possible world
@[reducible]
def entity.pexemplifies (e : ω.entity) := ⋄(p e)
-- a predicate is existential if an entity having the predicate implies its existence
@[reducible]
def predicate.existential := ∀ e, p e ⊆ e.exists
-- the common (sensical) predicates
structure predicate.common : Prop :=
(axiom₀ : p.exemplifiable)
(axiom₁ : p.existential)
-- A common predicate is positive if in any possible world in which an entity has the predicate,
-- there exists something in that world (e.g. an accident) to ground the predicate.
structure predicate.positive extends predicate.common p : Prop :=
(axiom₂ : ∀ e, is_open (p e))
/-- A predicate is said to be ***de re* necessary** of an entity if the entity has that predicate in
all and only the possible worlds in which it exists.
Which is also to say that the entity is a *fixed point* of the predicate. -/
@[reducible]
def predicate.dere_of (e : ω.entity) := p e = e.exists
/-- Negation of `predicate.dere_of` -/
@[reducible]
def predicate.ndere_of (e : ω.entity) := p e ≠ e.exists
/-- A predicate is ***de re* necessary** in itself if it is *de re* necessary of all entities which may possibly
exemplify it. -/
@[reducible]
def predicate.dere := ∀ e, ⋄(p e) → p.dere_of e
/-- A predicate is `adere` or **anti-*de re* necessary** if it fails to be *de re* necessary everywhere. -/
@[reducible]
def predicate.adere := ∀ e, ⋄(p e) → p.ndere_of e
/-- A positive predicate is possessed if it "talks about" its subject,
which is to say that it is an accident or essential of a substance,
or essential of an accident. -/
structure predicate.possessed extends predicate.positive p : Prop :=
(axiom₃ : ∀ (e : ω.entity) (h : e.pexemplifies p),
let r := (entity.mk (p e) (axiom₂ e) h) in
e.perfect → r.subsists e
)
(axiom₄ : ∀ (e : ω.entity), e.pexemplifies p → e.imperfect → p.dere_of e)
/-- The *significatum* or *res significata* is the entity signified by a possessed predicate `p`,
i.e. the entity such that its existence causes the truth of the predication
for some entity `e`. -/
def predicate.possessed.sign {p : ω.predicate} (h : p.possessed)
{e : ω.entity} (ne : e.pexemplifies p)
: ω.entity := ⟨p e, h.axiom₂ e, ne⟩
/- Intensionally, the previous definition is intended to be open to the idea
that the *significatum* of a predicate with respect to an entity
might be an intensional entity, and which entity it is might
even vary across possible worlds. Nevertheless, any two intensional
*siginificata* of a *de re* necessary predicate must be existentially
equivalent, so for this case the definition returns the corresponding extensional
entity. Introducing intensional *significata* for extensional predicates
would require an additional primitive beyond the introduction of an intensional
ontology, but it might be useful for e.g. defending an **intensional** distinction between
essence and existence/being (*esse*), if one is of the interpretation that the Thomistic distinction
is an intensional one. We however are not of this latter position, so we prefer
rather to introduce an **extensional** distinction between essence and existence,
that we present further below. -/
-- TODO: consider changing the name of these definitions in the future to potentially
-- avoid the same Kit Finean "essence is not de re necessity" objection we wished to avoid
-- when renaming "essential" to "dere".
-- A possessed predicate is accidental of an entity if it is not essential.
-- From this we can infer (informally) that a predicate is either essential or accidental
-- of an entity or does not "talk about" an entity at all
-- (or at least does not "talk about" all entities to which it is predicated).
@[reducible]
def predicate.accidental_of (e : ω.entity) := p.possessed ∧ p.ndere_of e
@[reducible]
def predicate.accidental := p.possessed ∧ p.adere
-- Notice there can be possessed predicates which are neither essential nor accidental in themselves,
-- but only with respect to a particular entity (it is in this sense that some say heat is
-- essential of fire but accidental of burning coal). But with respect to any particular entity
-- a possessed predicate is either essential or accidental. Notice also that this sort of predicate
-- is (almost) never univocal, because predicating something of both a substance and its accidents is never univocal.
-- a "proper" predicate, or a predicate in a more "proper" sense of the word,
-- is an univocal possessed predicate
structure predicate.proper extends predicate.possessed p : Prop :=
(axiom₅ : p.univocal)
variables (e : ω.entity) (ev : ω.event)
def predicate.bindp : ω.predicate :=
assume e₂, if e = e₂ then ev else p e₂
def predicate.localize : ω.predicate := (⊥ : ω.predicate).bindp e (p e)
variables (e) (p) (ev)
@[reducible, simp]
def entity.bindp : ω.predicate := p.bindp e ev
@[reducible, simp]
def entity.localize (p : ω.predicate) : ω.predicate := p.localize e
/-- The existence of an entity is the "existence" predicate localized to that entity.
It is an essential proper predicate from which all of its essential predicates
follow, or as Aquinas puts it, existence (esse) is the greatest
perfection of a thing, as it actualizes its very essence. -/
def entity.existence : ω.predicate := ω.existence.localize e
-- Note: "esse" used above is often translated as "existence" but this translation
-- has been disputed (see works of Cornelio Fabro CSS for an idea). More properly,
-- we consider "esse" to refer to "being" in Thomistic philosophy, which we shall
-- define further down.
theorem existence_dere : e.existence.dere := sorry
theorem existence_proper : e.existence.proper := sorry
theorem existence_follows_dere : p.dere_of e → e.existence ≤ p := sorry
/-- The haecceity of an entity is its incommunicable individual essence.
It differs from the existence of an entity in that while the existence
is *de re* necessary, the haecceity is absolutely necessary, or alternatively,
haecceity predication does not have existential import, while
existential predication (obviously) does.
However, for the necessary being existence and haecceity coincide. -/
def entity.haecceity : ω.predicate := λ e₂, {w | e = e₂}
theorem existence_haecceity_distinction : e.existence = e.haecceity ↔ e.necessary :=
begin
simp [ entity.existence
, entity.haecceity
, predicate.localize
, predicate.bindp
, existence
, function.funext_iff
, -entity_ext_iff
],
constructor; intro h,
ext w,
simp [nbe, univ, has_mem.mem, set.mem],
specialize h e w,
replace h := h.2,
simp at h,
exact h true.intro,
intros e₂ w,
by_cases c : e = e₂; simp [c],
rw h at c,
simp [nbe, ext_iff] at c,
specialize c w,
exact ⟨λ_,true.intro, λ_,c⟩,
refine ⟨_, false.elim⟩,
intro hyp,
simp [ has_bot.bot
, order_bot.bot
, bounded_lattice.bot
, complete_lattice.bot
] at hyp,
exact hyp,
end
-- a predicate is communicable if it can be possibly exemplified by more than one entity
def predicate.communicable := ∃ e₁ e₂ : ω.entity,
e₁ ≠ e₂ ∧
e₁.pexemplifies p ∧
e₂.pexemplifies p
def predicate.incommunicable := ¬ p.communicable
-- the individual existence of an entity is of course incommunicable
lemma existence_incommunicable : e.existence.incommunicable := sorry
-- and so is its haecceity
lemma haecceity_incommunicable : e.haecceity.incommunicable := sorry
-- A normal, "everyday", predicate like "being red"
-- (when e.g. it is predicated of substances, and not of "red" accidents)
-- is a communicable proper predicate.
structure predicate.normal extends predicate.proper p : Prop :=
(axiom₆ : p.communicable)
-- Don't know if this is true
-- there are counterexamples when S contains contradictory predicates
-- but otherwise maybe an adaptation of this lemma is true
-- lemma inf_normal_normal : ∀ S : set ω.predicate, (∀ p : ω.predicate, p ∈ S → p.normal) → (Inf S).normal :=
-- begin
-- intros S h,
-- have ne : S.nonempty,
-- admit,
-- obtain ⟨p, hp⟩ := ne,
-- have pn := h p hp,
-- constructor,
-- admit,
-- obtain ⟨e₁,e₂,neq, he₁,he₂⟩ := pn.axiom₆,
-- end
-- The specific essence, or species, of an entity is a normal essential predicate from which
-- all its normal essential predicates follow.
def predicate.species_of := e.pexemplifies p ∧
p.normal ∧
p.dere ∧
∀ p', e.pexemplifies p' →
p'.normal →
p'.dere →
p ≤ p'
def predicate.species := ∃ e, p.species_of e
def entity.has_species := ∃ p : ω.predicate, p.species_of e
-- An entity has at most one species
lemma unique_species : ∀ p₁ p₂ : ω.predicate, (∃ e, p₁.species_of e ∧ p₂.species_of e) → p₁ = p₂ := sorry
-- Now, essence is predicated in multiple ways, the foremost
-- of which is in the sense of species. However, among
-- things which have no species, essence only signifies haecceity,
-- leading us to the following definition:
/-- A predicate is the essence of an entity if it is either its specific essence or its haecceity. -/
def entity.is_essence := p.species_of e ∨ p = e.haecceity
-- We can then prove essence × existence distinction as well:
theorem existence_essence_distinction : e.is_essence e.existence ↔ e.necessary := sorry
end predicates
section apredicates
variables (p : ω.apredicate) (e : ω.entity)
def apredicate.sup := Sup (subtype.val '' (range $ p e))
def apredicate.inf := Inf (subtype.val '' (range $ p e))
def apredicate.max := Sup ⋃ e, (subtype.val '' (range $ p e))
-- def apredicate.min := Inf ⋃ e, (subtype.val '' (range $ p e)) -- I think maybe this is always 0
def apredicate.complete := p.max = 1
def apredicate.existential := ∀ e, ↑(p e) ⊆ e.exists
end apredicates
section happiness
variables (e : ω.entity) (p : ω.apredicate)
/-- An `entity` is said to be **happy**, or **naturally perfect**, w.r.t some apredicate `p` in possible world `w`
if it attains the greatest degree of `p` it can achieve, at `w`. -/
def entity.happy : ω.event := {w | e.exists w ∧ ↑(p e w) = p.sup e}
/-- An `entity` is said to be **wholesome** w.r.t some apredicate `p`
if it can possibly be happy w.r.t. `p`. -/
def entity.wholesome := ⋄e.happy p
/-- An `entity` is said to be **miserable** w.r.t some apredicate `p`
if it cannot possibly be happy w.r.t. `p`. -/
def entity.miserable := ¬ e.wholesome p
-- TODO: maybe this one should be generalized to arbitrary observables.
def entity.invariantly := ∀ w₁ w₂, e.exists w₁ → e.exists w₂ → p e w₁ = p e w₂
def entity.invariantly_happy := e.invariantly p ∧ e.wholesome p
def entity.absolutely_happy := □e.happy p
/-- A **maximally perfectible** `entity` w.r.t some apredicate `p` is one which
can possibly be progressively perfected in the direction of attaining
the greatest degree of `p` that is possible for any entity to have. -/
def entity.mperfectible := p.sup e = p.max
/-- A **completely perfectible** `entity` w.r.t some apredicate `p` is one which
can possibly attain the greatest conceivable degree of `p`. -/
def entity.cperfectible := ∃ w, p e w = 1
/-- An `entity` is said to be **exemplary** w.r.t some apredicate `p` in some possible world `w`
if it is maximally perfectible in itself and happy at `w`. -/
def entity.exemplary : ω.event := {w | e.mperfectible p ∧ e.happy p w}
/-- An `entity` is said to be an **exemplary cause of `p`**, or an **exemplar** w.r.t some apredicate `p`,
if it can possibly attain the greatest degree of `p` that is possible for any entity to achieve.
i.e. it is an entity which is possibly `exemplary`. -/
@[reducible, simp]
def entity.ecause := ⋄e.exemplary p
/-- An apredicate is **exemplarily caused** if it admits an `exemplar`. I.e., an
`entity` which can possibly attain the greatest degree of `p` that is
possible for any entity to have. -/
def apredicate.ecaused := ∃ e : ω.entity, e.ecause p
/-- An `entity` is said to be **absolutely exemplary**, or **maximally perfect**, w.r.t some apredicate `p`
if it is exemplary in every possible world. -/
@[reducible, simp]
def entity.absolutely_exemplary := □e.exemplary p
/-- "**Maximally perfect**" is an alias for `absolutely_exemplary`. -/
@[reducible, simp, alias]
def entity.mperfect := e.absolutely_exemplary
/-! # The Intuition behind exemplary causes.
The property of real numbers of being "close" to a given
number, say `5`, is an analogical predicate which admits an exemplary cause,
namely `5`. We can define this predicate as something like `close₅(x) = 1 ÷ (∥x - 5∥ + 1)`,
as we can see that `close₅(5) = 1`, `∥x∥ ⇒ +∞, close₅(x) ⇒ 0`, `∀x, 0 ≤ close₅(x) ≤ 1`.
This predicate defines a so called "fuzzy set" of real numbers:
the real numbers which are "close" to `5`.
The number `5` is called an **exemplary cause** of the predicate `close₅`
because, given the fact that it attains the greatest conceivable degree of `close₅`,
it also serves as an ultimate criterion of comparison for determining the extent to which a real number is
close to `5`, i.e. a number will attain higher degrees of `close₅` precisely to the extent to which
it is close to `5`. `close₅` can then be deemed a *measure of similarity to a point*, namely the point
`5`, the **exemplar**.
What other kinds of predicates are measures of similarity to a point in a similar way? It appears
that natural and moral perfections are good candidates for being exemplarily caused.
We have an intuitive grasp, for instance, of what a good, or healthy, dog is, insofar as we can imagine
a "perfect" dog. The dog of our dreams is one which, perhaps,
is very strong, playful, healthy and active, lives by a healthy diet,
is cheerful, gets a lot of sunlight, exercises regularly,
is a very effective apologetics minister by
putting the fear of God into the hearts of burglars and trespassers,
is docile, etc...
It is the kind of dog you would likely see portrayed in a dog food commercial, or
something of the sort. He is the exemplar of what the natural powers of dogs
can achieve when they operate in the most perfect way possible. It is an ideal dog,
and just like with any ideal we can't help but measure other dogs with respect to it.
There are then clearly different degrees to which a dog can fully achieve the potential
of its nature, so that we can even say, that the exemplar dog is a dog
*in the proper sense of the word*, or absolutely, *simpliciter*, without qualification,
while dogs not measuring up to its standards are only dogs in a more limited sense of the word,
with qualification, relatively, or *secundum quid*. All dogs can in a sense be said to
*participate* of the exemplar dog to the extent that they are similar to it, in a way reminiscent
to how things were supposed by Plato to participate in their Platonic forms.
-/
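/- A concrete formalization sketch of the `close₅` example above, assuming
mathlib's `data.real.basic` (not a dependency of this file), so it is kept
in a comment rather than adding imports:
  noncomputable def close₅ (x : ℝ) : ℝ := 1 / (abs (x - 5) + 1)
  example : close₅ 5 = 1 := by norm_num [close₅]
-/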
theorem inner_life_of_the_absolutely_exemplary : e.absolutely_exemplary p ↔ e.absolutely_happy p := sorry
lemma necessary_of_abs_exemplary : e.absolutely_exemplary p → e.necessary := sorry
end happiness
section being
/-! # The Analogy of Being
We have seen that existence is not truly a univocal property, but a hidden analogy.
This is obvious from the consideration that substances possess a higher degree
of existence than their accidents, insofar as the accidents do not subsist of themselves,
but must inhere in a substance. The analogical nature of existence formally follows
from the fact that accidents entail the existence of their substances.
As such we should expect there to be an analogical predicate from which existence is abstracted,
and with respect to which "existence" will, in a sense, come by degrees. However,
because we typically think of existence as an univocal property, having no degrees,
we shall name this "existence" which comes by degrees **being**, rather than existence.
Another way to see that this "being" has degrees is to consider that being is identical
in reality to two other so-called *transcendental properties* which quite clearly have degrees, namely
unity and actuality, which are called so because anything that exists is one individual unified thing
and also actual. And this we can show not only by comparing accidents to substances, but also from
comparing substances to substances.
We observe in reality that things are more or less unified and more or less actual;
the first we notice from the fact that all natural entities have an intrinsic unification principle which
unifies their parts more or less, allowing the entity to be more or less complex. At the bottom of the hierarchy
of unity we have gases, for which the unification principle is the weakest insofar as whatever
unifies gases in a whole is not strong enough to give them a definite shape, which is rather imposed from
without, by an external container. Next we have liquids which possess greater solidity, and hence unification,
than gases, but are still not unified enough to possess their own shape; though they already exhibit some
resistance against compression. We then have solids which are unified enough to have their own shapes
and are often much more resistant to compression and various pressures than the previous.
Plasmas are however harder to classify since the ancients would equate them with fire, which was considered
the most perfect of the 4 elements, though fire has a less stable shape than a solid, so it is unclear whether
they should be considered more perfect than solids or not. After the minerals, however, we have living organisms
which are much more unified and complex than any minerals, and among them we have animals which exhibit
even greater complexity and unification, and finally we have humans, which unify physical and metaphysical
substances into the same whole.
On the other hand, we have among these levels also greater degrees of actuality, since a thing is able
to act more perfectly insofar as it is more actual, and yet anything is actual insofar as it exists. A natural
entity is more actual to the extent that it has more energy and is habitually capable of using this energy
to perform more complex vital operations. It is in this respect that fire/plasma is the most perfect state of
inanimate matter, but any living organism is more perfect than it insofar as its energy does not come in the
form of useless heat, but of direct and complex vital operations.
It is furthermore the case that natural substances vary in perfection with respect to time,
becoming more or less perfect depending on the circumstances. A good man can be said to be more
perfect than a bad one, in a moral sense of the word "perfect", while a healthy man can be said
to be more perfect than an unhealthy one, in a more natural or biological sense of the word.
These considerations suffice for showing that natural entities exhibit greater or lesser degrees of perfection,
but we must also extend this consideration to metaphysical entities. Metaphysical entities which have no parts,
spatial extension and, specially, accidents, are more perfect than natural entities, not on account of a greater
amount of complexity or energy, but on account of their greater unification and actuality. Unification
in natural entities which have parts is exhibited in complexity insofar as a greater unifying principle is required
to amalgamate a multiplicity of disparate entities into a single unified whole; however the more unified
a substance is the less will its parts behave like disparate or independent entities to begin with.
As the unity of a substance increases, its parts become so intertwined with it that
they begin to exhibit fundamental ontological dependencies to the whole, and vice-versa.
This is already observable in the case of living animals,
for which the removal of an organ causes the death of the same organ,
and often the death of the animal, in a very short period of time;
unless artificial means are used to keep them alive.
In the limit, these ontological dependencies would grow to become full existential dependencies,
so that it would be impossible to distinguish a substance from its parts by extensional means,
and we could simply say that the substance has no parts, because it has "absorbed" all of its "parts"
within itself. A substance of this sort would exhibit greater unification than any natural
entity, even though it would be absolutely simple.
With respect to extension, metaphysical substances are more perfect, insofar as to have extension
is but to be limited to a particular (compact, connected) region of space, outside of which
the substance has no existence, while a substance without extension can be said to exist in all points
of space without limits, as we shall later formalize (in geometry.lean). Furthermore it is obvious
that simple substances, which have no accidents, are more perfect than composite substances:
the multiplicity of states is the origin of a multiplicity of different ways of existing and
of passive potentiality, and the multiplicity of possible accidents is the origin of the
multiplicity of states, so it is clear that a substance without accidents has no passive
potentiality (outside perhaps of a potentiality for existence or non-existence) and no
multiplicity of ways of existing, hence being much more unified and actual than any substance with accidents.
Finally, metaphysical entities which are necessary are clearly more perfect than contingent ones,
just as any substance which depends on another should be less perfect than that other.
These considerations suffice to show then that even among metaphysical substances,
or when comparing metaphysical substances to natural ones, there are differing degrees of
unification, actuality and, hence, also perfection and being. -/
/-- The **Being**, **analogy of being**, or *actus essendi* of an ontology is an `apredicate` **is**
which gives to every possible entity in every possible world the degree of being,
or degree of perfection, that the entity has in that world.
This perfection, or degree of being, is also a measurement of the degree of actuality of an entity,
as it varies across possible worlds. Substances which are invariant with respect to
being, are less fleeting, and as such more actual, while substances which vary greatly in being
are more fleeting, more potential and, hence, less perfect in being. They are also less unified,
less truthful, less good, less beautiful, etc... for all the so called *transcendentals of being*. -/
structure being (ω : ontology) :=
(is : ω.apredicate)
-- Being is synonymous with existence,
-- in the sense that an entity can only have
-- being in any capacity or amount whatsoever
-- if it exists, and to exist is nothing other
-- than to participate in being to some extent
-- or to some capacity. Existence is abstracted
-- from being.
(axiom₁ : ∀ e, ↑(is e) = e.exists)
-- Being respects the hierarchies of ontological dependencies,
-- indeed it is the very **origin** of said hierarchies. The most
-- basic relation of ontological dependency is existential entailment,
-- so entities which depend existentially on another shall be less perfect than
-- that other, in any possible worlds in which they exist.
(axiom₂ : ∀ e₁ e₂ : ω.entity, e₁ ≠ e₂ → e₁.exists ⇒ e₂.exists → is e₁ < is e₂)
-- Being is complete, for if it were not then the maximum degree
-- of perfection attainable by an entity would necessarily fall short
-- of 100% of the maximum degree of perfection attainable by an entity, which is absurd.
-- i.e., an entity that has degree of perfection 1.0 in any possible world
-- is to be interpreted as having attained the maximum possible degree of perfection
-- any entity could possibly achieve.
(axiom₃ : is.complete)
-- A substance is more or less perfect across possible worlds only because of the variability
-- of the accidents inhering in it, so in worlds in which a substance has the same state
-- it should have the same degree of perfection also.
(axiom₄ : ∀ (s : ω.substance) (w₁ w₂), s.equiv w₁ w₂ → is s.up w₁ = is s.up w₂)
-- Furthermore, in worlds in which more things subsist in a substance, it should also
-- be more perfect, for the perfection of the subsistent entities should, in some
-- sense, "add up" to an increase in the overall perfection of the substance.
(axiom₅ : ∀ (s : ω.substance) (w₁ w₂), s.state w₁ ⊂ s.state w₂ → is s.up w₁ < is s.up w₂)
-- Furthermore, states were not defined for accidents since nothing subsists in them.
-- It should follow that accidents have being invariantly, for without variation of
-- accidents there can be no variation of perfection.
(axiom₆ : ∀ (a : ω.accident), a.up.invariantly is)
-- In any possible world in which a simple substance exists, it is more perfect
-- than all composite substances existing in the same world.
-- Note: this does not assume that it is possible for simple substances to exist,
-- only that **if** they do exist, they are more perfect than the composites.
-- We also do not need this axiom to show that God is more perfect than composite things,
-- for this follows from axiom 2, but rather we only need it to show that other, contingent,
-- simple substances are more perfect than composite substances.
(axiom₇ : ∀ (s₁ : ω.substance) (w), s₁.simple → s₁.exists w →
∀ (s₂ : ω.substance), s₂.composite → s₂.exists w → is s₂.up w < is s₁.up w)
-- If two entities are of the same species, it should be
-- possible for both to achieve exactly the same levels of
-- perfection. TODO: number this axiom and make sure the numbering is consistent
-- with pextensions.
(axiom_to_number : ∀ (e₁ e₂ : ω.entity) (p : ω.predicate), p.species_of e₁ → p.species_of e₂ → range (is e₁) = range (is e₂))
variable (b : ω.being)
open predicate
-- The necessary being is maximally perfectible w.r.t. any analogy of being.
-- Only needed axiom₂ for this proof.
lemma nbe_mperfectible : ω.nbe.mperfectible b.is :=
begin
simp [entity.mperfectible],
symmetry,
apply cSup_intro,
-- goal 1
obtain ⟨w⟩ := ω.wne,
use b.is ω.nbe w,
simp,
constructor,
exact (b.is (nbe ω) w).property,
use ω.nbe, use w,
-- goal 2
intros r H,
simp at H,
obtain ⟨e, hr, w, eq⟩ := H,
set rhs := apredicate.sup b.is (nbe ω),
let r₂ := b.is ω.nbe w,
transitivity r₂.val, swap,
apply le_cSup, swap,
simp [range, image],
constructor,
exact r₂.property,
use w,
simp [bdd_above, upper_bounds],
use 1, simp, intros r₃ hr₃ _,
simp [set.Icc] at hr₃,
exact hr₃.right,
by_cases h : e = ω.nbe,
simp [r₂],
rw [←h, eq],
have c := b.axiom₂ e ω.nbe h _, swap,
simp [ontology.nbe, set.subset],
replace c := c.left w,
rw eq at c,
exact c,
-- goal 3
intros r hr,
obtain ⟨r₂, hr₂, c⟩ := exists_lt_of_lt_cSup _ hr,
use r₂,
simp [range, image] at hr₂,
simp [hr₂, c],
use ω.nbe,
exact hr₂,
obtain ⟨w⟩ := ω.wne,
use b.is ω.nbe w,
simp,
constructor,
exact (b.is (nbe ω) w).property,
use w,
end
/-- Misery begets misery. A wholesome entity should not depend on a miserable one.
Analogies of `being` satisfying this principle are said to be **proportionally happy**, for they
satisfy a form of proportionate causality with respect to happiness. -/
def being.phappy : Prop := ∀ (e₁ e₂ : ω.entity) w, e₁.happy b.is w → (e₁.exists ⇒ e₂.exists) → e₂.happy b.is w
/-- An analogy of `being` is said to be **wholesome** if some `entity` is `wholesome` with respect to it. -/
def being.wholesome : Prop := ∃ e : ω.entity, e.wholesome b.is
/-- An analogy of `being` is said to be **absolutely exemplary** if some `entity` is `absolutely exemplary` with respect to it. -/
def being.absolutely_exemplary : Prop := ∃ e : ω.entity, e.absolutely_exemplary b.is
-- exemplary
/-- An analogy of `being` is said to be **exemplarily caused** if its underlying apredicate is
exemplarily caused, i.e. if some `entity` is an **exemplary cause** (`ecause`) of being. -/
def being.ecaused : Prop := b.is.ecaused
lemma ecaused_of_phappy_and_wholesome : b.phappy → b.wholesome → b.ecaused := sorry
lemma exemplar_nbe_of_ecaused : b.ecaused → ω.nbe.ecause b.is := sorry
/-- The perfection of a multitude of entities should increase with the number of entities.
Analogies of `being` satisfying this principle are called **composable**. -/
def being.composable := ∀ (s : set ω.entity) (e : ω.entity) (w₁ w₂ : ω.world),
e ∉ s →
e = Sup s →
w₁.entities ∩ s ⊂ w₂.entities ∩ s →
b.is e w₁ < b.is e w₂
/-- An analogy of `being` is said to be **essentially exemplary** if it is essential for entities to be exemplary
with respect to it.
Nothing could otherwise explain why an entity would be exemplary in one world
but not in some other.
-/
def being.eexemplary : Prop := dere (flip entity.exemplary b.is)
lemma abs_exemplary_intro {b : ω.being} : b.ecaused → b.eexemplary → b.absolutely_exemplary := sorry
lemma nbe_eq1_of_abs_exemplary {b : ω.being} : b.absolutely_exemplary → ∀ w, b.is ω.nbe w = 1 := sorry
/-- A **quasi-participated** `being` is essentially exemplary and exemplarily caused. -/
@[reducible, simp]
def being.qparticipated := b.ecaused ∧ b.eexemplary
def being.participated := b.composable ∧ b.qparticipated
def participated (ω : ontology) := ∃ b : ω.being, b.participated
def composable (ω : ontology) := ∃ b : ω.being, b.composable
def exemplary (ω : ontology) := ∃ b : ω.being, b.absolutely_exemplary
lemma exemplary_of_participated : ω.participated → ω.exemplary :=
begin
rintro ⟨b, hbc, hbs, hbes⟩,
use b,
exact abs_exemplary_intro hbs hbes,
end
section pextension
/-- A **Platonic Extension** equips every property of a certain kind (e.g. normal) with an analogical extension,
satisfying certain properties. These include possessing the predicate in a more perfect way
than whatever possesses it in the usual way, i.e. "formally".
This is done so that the concept of **eminence** can be defined. -/
structure pextension (ω : ontology) (prop : ω.predicate → Prop := predicate.normal) extends being ω :=
-- To every univocal, normal, predicate there corresponds some
-- analogical predicate
(extended : Π (p : ω.predicate) (h : prop p), ω.apredicate)
-- from which it was at least partially abstracted.
-- This means all entities having the predicate have the analogical predicate, in some
-- capacity, or to some extent.
(axiom₈ : ∀ (p : ω.predicate) (h : prop p) e, p e ⊆ ↑(extended p h e))
-- However, whatever has the apredicate to some capacity but does not have the predicate,
-- has the predicate to a strictly greater or more perfect extent than anything which can possibly
-- have the predicate.
(axiom₉ : ∀ (p : ω.predicate) (h : prop p) e₁ w₁, w₁ ∈ ↑(extended p h e₁) - p e₁ →
∀ e₂ w₂, p e₂ w₂ → extended p h e₂ w₂ < extended p h e₁ w₁)
-- The apredicate is always existential
(axiom₁₀ : ∀ (p : ω.predicate) (h : prop p), (extended p h).existential)
-- Equality in being implies equality in the perfection of the analogical extension
(axiom₁₁ : ∀ (p : ω.predicate) (h : prop p) e (w₁ w₂), is e w₁ = is e w₂ → extended p h e w₁ = extended p h e w₂)
-- We stipulate also that the class of predicates prop has to satisfy certain conditions in order to admit
-- a platonic extension. All of these conditions are satisfied by the default (predicate.normal).
(condition₁ : ∃ p, prop p)
(condition₂ : ∀ (p : ω.predicate), prop p → p.exemplifiable)
variables {prop : ω.predicate → Prop} (ext : ω.pextension prop) (p : ω.predicate) (h : prop p)
def entity.eminently (e : ω.entity) : ω.event := {w | ¬ p e w ∧ ext.extended p h e w ≠ 0 }
def entity.eminent (e : ω.entity) : Prop := e.eminently ext p h = e.exists
def pextension.non_trivial := ∃ (p : ω.predicate) [h : prop p] (e : ω.entity), ⋄e.eminently ext p h
end pextension
end being
end ontology
|
%2multibyte Version: 5.50.0.2953 CodePage: 1252
%% This document created by Scientific Word (R) Version 2.0
%\usepackage{sw20elba}
%\input tcilatex
\documentclass[thmsa,notitlepage,11pt]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%TCIDATA{TCIstyle=Article/art1.lat,elba,article}
%TCIDATA{OutputFilter=LATEX.DLL}
%TCIDATA{Version=5.50.0.2953}
%TCIDATA{Codepage=1252}
%TCIDATA{<META NAME="SaveForMode" CONTENT="1">}
%TCIDATA{BibliographyScheme=Manual}
%TCIDATA{Created=Tue Nov 13 06:24:53 2001}
%TCIDATA{LastRevised=Tuesday, August 25, 2015 17:04:11}
%TCIDATA{<META NAME="GraphicsSave" CONTENT="32">}
%TCIDATA{Language=American English}
\input{tcilatex}
\begin{document}
\author{Richard Dennis\thanks{%
Email: [email protected]} \\
%EndAName
University of Glasgow}
\title{\textbf{Package Guide for SolveDSGE}\vspace{0.2in}}
\date{August 25, 2015}
\maketitle
\thispagestyle{empty}\newpage \setlength{\baselineskip}{18.95pt}%
\setcounter{page}{1}
\section{Overview}
SolveDSGE is a Julia package aimed at macroeconomists interested in solving
Dynamic Stochastic General Equilibrium (DSGE) models. \ The package provides
routines for solving rational expectations models and for solving optimal
policy problems. \ Using this package, DSGE models can be solved in logs or
levels to first- or second-order accuracy and optimal policy problems can be
solved under discretion, commitment, timeless-perspective commitment, and
quasi-commitment. \ Routines that solve robust-control versions of these
policy problems are in the works. \ Although there is much that this package
does not do, SolveDSGE offers a broad array of solution methods that can be
applied provided the DSGE model can be expressed in one of several standard
dynamic representations.
\section{Installation}
To install SolveDSGE you will need to type the following into the Julia REPL
\bigskip
\textbf{Pkg.add("SolveDSGE")}
\bigskip
\section{Solving rational expectations models}
\subsection{First-order-accurate solution methods}
SolveDSGE provides a range of solution methods for computing first-order
accurate solutions. \ Exploiting Julia's multiple dispatch, all of these
solution methods are called via the single command \textbf{solve\_re}(). \
From this single command the particular solution method employed depends
principally on the model type that enters the \textbf{solve\_re()} function
call. \ Models are represented in various forms that are summarized by
types. \ The model types are
\begin{itemize}
\item \textbf{Blanchard\_Kahn\_Form}
\item \textbf{Klein\_Form}
\item \textbf{Sims\_Form}
\item \textbf{Binder\_Pesaran\_Form}
\end{itemize}
\subsubsection{Blanchard-Kahn form}
The Blanchard-Kahn Form is given by%
\[
\left[
\begin{array}{c}
\mathbf{x}_{t+1} \\
\text{E}_{t}\mathbf{y}_{t+1}%
\end{array}%
\right] =\mathbf{A}\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{y}_{t}%
\end{array}%
\right] +\mathbf{C}\left[ \mathbf{\epsilon }_{t+1}\right] ,
\]%
$\mathbf{\epsilon }_{t}\sim i.i.d.[\mathbf{0},\mathbf{\Sigma }]$, where $%
\mathbf{x}_{t}$ is an $n_{x}\times 1$ vector of predetermined variables and $%
\mathbf{y}_{t}$ is an $n_{y}\times 1$ vector of non-predetermined variables.
\ To solve models of this form we first create the \textbf{%
Blanchard\_Kahn\_Form} type for the model, then we use \textbf{solve\_re()}
to solve the model. \ The relevant lines of code would be something like
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Blanchard\_Kahn\_Form(nx,ny,a,c,sigma)}
\textbf{soln = solve\_re(model,cutoff)}
\bigskip
Here \textbf{soln} contains the solution, which is of the form%
\begin{eqnarray*}
\mathbf{x}_{t+1} &=&\mathbf{Px}_{t}+\mathbf{K\epsilon }_{t+1}, \\
\mathbf{y}_{t} &=&\mathbf{Fx}_{t},
\end{eqnarray*}%
and information about the number of eigenvalues with modulus greater than
cutoff and whether the \textquotedblleft solution\textquotedblright\
returned is determinate, indeterminate, or explosive.
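For concreteness, a hypothetical toy model with one predetermined and one
non-predetermined variable could be set up and solved as follows (the numbers
are purely illustrative, not taken from any particular model, and the array
shapes follow the form above):

\bigskip

\textbf{nx = 1}

\textbf{ny = 1}

\textbf{a = [0.9 0.0; 0.5 1.2]}

\textbf{c = reshape([1.0, 0.0],2,1)}

\textbf{sigma = reshape([0.01],1,1)}

\textbf{model = Blanchard\_Kahn\_Form(nx,ny,a,c,sigma)}

\textbf{soln = solve\_re(model,1.0)}

\bigskip

With one eigenvalue (1.2) of modulus greater than the cutoff and one
non-predetermined variable, the returned solution should be flagged as
determinate.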
Models of the \textbf{Blanchard\_Kahn\_Form} type can also be solved using
an iterative method to solve a non-symmetric, continuous, algebraic, Riccati
equation. \ In this case the relevant lines of code might look like
\bigskip
\textbf{tol = 1e-10}
\textbf{cutoff = 1.0}
\textbf{model = Blanchard\_Kahn\_Form(nx,ny,a,c,sigma)}
\textbf{soln = solve\_re(model,cutoff,tol)}
\bigskip
For this iterative method, the variable \textbf{cutoff} is still used to
establish determinacy, but is not used to order eigenvalues.
\subsubsection{Klein form}
The Klein Form for a model is given by%
\[
\mathbf{B}\left[
\begin{array}{c}
\mathbf{x}_{t+1} \\
\text{E}_{t}\mathbf{y}_{t+1}%
\end{array}%
\right] =\mathbf{A}\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{y}_{t}%
\end{array}%
\right] +\mathbf{C}\left[ \mathbf{\epsilon }_{t+1}\right] ,
\]%
$\mathbf{\epsilon }_{t}\sim i.i.d.[\mathbf{0},\mathbf{\Sigma }]$, where $%
\mathbf{x}_{t}$ is an $n_{x}\times 1$ vector of predetermined variables, $%
\mathbf{y}_{t}$ is an $n_{y}\times 1$ vector of non-predetermined variables,
and $\mathbf{B}$ need not have full rank. \ We can solve models of this form
using the code
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Klein\_Form(nx,ny,a,b,c,sigma)}
\textbf{soln = solve\_re(model,cutoff) }
\bigskip
The composite type \textbf{soln} contains the solution which is of the form%
\begin{eqnarray*}
\mathbf{x}_{t+1} &=&\mathbf{Px}_{t}+\mathbf{K\epsilon }_{t+1}, \\
\mathbf{y}_{t} &=&\mathbf{Fx}_{t},
\end{eqnarray*}%
as well as information about the number of eigenvalues that have modulus
greater than cutoff and whether the \textquotedblleft
solution\textquotedblright\ returned is determinate, indeterminate, or
explosive.
\subsubsection{Sims form}
An alternative model form is used by Sims (2001) and is given by%
\[
\mathbf{\Gamma }_{0}\mathbf{z}_{t}=\mathbf{\Gamma }_{1}\mathbf{z}_{t-1}+%
\mathbf{C}+\mathbf{\Psi v}_{t}+\mathbf{\Pi \eta }_{t},
\]%
where $\mathbf{v}_{t}$ is a shock process, possibly serially correlated,
with mean-zero innovations whose variance-covariance matrix is given by $%
\mathbf{\Sigma }$. \ To solve models that are in this form we would do
something like the following
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Sims\_Form(gamma0,gamma1,c,psi,pi,sigma)}
\textbf{soln = solve\_re(model,cutoff)}
\bigskip
Here the solution, summarized by \textbf{soln}, is of the form%
\[
\mathbf{z}_{t}=\mathbf{G}_{1}\mathbf{z}_{t-1}+\mathbf{c}+\mathbf{impact}%
\times \mathbf{v}_{t}+\mathbf{ywt}\times \left[ \mathbf{I}-\mathbf{fmat}%
\times L^{-1}\right] ^{-1}\times \mathbf{fwt}\times \mathbf{v}_{t+1}.
\]
\subsubsection{Binder-Pesaran form}
A model is in \textquotedblleft structural form\textquotedblright\ if it is
written as%
\[
\mathbf{Az}_{t}=\mathbf{A}_{1}\mathbf{z}_{t-1}+\mathbf{B}\text{E}_{t}\mathbf{%
z}_{t+1}+\mathbf{C\epsilon }_{t},
\]%
where $\mathbf{\epsilon }_{t}\sim i.i.d.[\mathbf{0},\mathbf{\Sigma }]$. \ We
have two ways of solving structural form models. \ The first recasts them in
terms of the Klein form and here the relevant code would look something like
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Binder\_Pesaran\_Form(a,a1,b,c,sigma)}
\textbf{soln = solve\_re(model,cutoff)}
\bigskip
The second method is iterative, implementing Binder and Pesaran's
\textquotedblleft brute force\textquotedblright\ method; here the code would
be something like
\bigskip
\textbf{tol = 1e-10}
\textbf{cutoff = 1.0}
\textbf{model = Binder\_Pesaran\_Form(a,a1,b,c,sigma)}
\textbf{soln = solve\_re(model,cutoff,tol)}
\bigskip
Regardless of which of the two methods is used, the solution, summarized in
\textbf{soln}, has the form%
\[
\mathbf{z}_{t}=\mathbf{Pz}_{t-1}+\mathbf{K\epsilon }_{t}.
\]%
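For intuition, substituting the conjectured solution into the structural form
gives $\left( \mathbf{A}-\mathbf{BP}\right) \mathbf{z}_{t}=\mathbf{A}_{1}%
\mathbf{z}_{t-1}+\mathbf{C\epsilon }_{t}$, so $\mathbf{P}$ solves the
quadratic matrix equation $\left( \mathbf{A}-\mathbf{BP}\right) \mathbf{P}=%
\mathbf{A}_{1}$ and $\mathbf{K}=\left( \mathbf{A}-\mathbf{BP}\right) ^{-1}%
\mathbf{C}$; the \textquotedblleft brute force\textquotedblright\ iteration
can be read as updating $\mathbf{P}_{k+1}=\left( \mathbf{A}-\mathbf{BP}%
_{k}\right) ^{-1}\mathbf{A}_{1}$ until convergence (a sketch of the logic,
assuming invertibility at each step, not a description of the package
internals).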
As earlier, \textbf{soln} is a composite type that in addition to the
solution itself also contains information about the number of eigenvalues
with modulus greater than cutoff and whether the solution returned is
determinate, indeterminate, or explosive.
\subsection{Second-order-accurate solution methods}
In addition to the first-order accurate solution methods documented above,
SolveDSGE also contains two methods for obtaining second-order-accurate
solutions to nonlinear DSGE models. \ As coded here, these two methods
employ the same model form but differ in how the solution is computed. \ To
employ either method the DSGE model is first expressed in the form%
\[
\text{E}_{t}\mathbf{G}\left( \mathbf{x}_{t},\mathbf{y}_{t},\mathbf{x}_{t+1},%
\mathbf{y}_{t+1}\right) =\mathbf{0}.
\]%
\ With $\mathbf{x}_{t}$ containing $n_{x}$ predetermined variables and $%
\mathbf{y}_{t}$ containing $n_{y}$ non-predetermined variables, $\mathbf{G}%
() $ is a vector containing $n_{x}+n_{y}$ functions. \ Bundling $x_{t}$ and $%
y_{t}$ into a new vector $\mathbf{z}_{t}=\left[
\begin{array}{cc}
\mathbf{x}_{t}^{^{\prime }} & \mathbf{y}_{t}^{^{\prime }}%
\end{array}%
\right] ^{^{\prime }}$ and bundling $\mathbf{z}_{t}$ and $\mathbf{z}_{t+1}$
into a new vector $\mathbf{p}_{t}=\left[
\begin{array}{cc}
\mathbf{z}_{t}^{^{\prime }} & \mathbf{z}_{t+1}^{^{\prime }}%
\end{array}%
\right] ^{^{\prime }}$ we get%
\[
\text{E}_{t}\mathbf{G}\left( \mathbf{p}_{t}\right) =\mathbf{0}.
\]%
We now approximate $\mathbf{G}\left( \mathbf{p}_{t}\right) $ around the
deterministic steady state, $\overline{\mathbf{p}}$, using a second-order
Taylor expansion giving%
\[
\mathbf{G}\left( \mathbf{p}_{t}\right) \simeq \mathbf{G}_{p}\left( \mathbf{p}%
_{t}-\overline{\mathbf{p}}\right) +\left[ \mathbf{I}\otimes \left( \mathbf{p}%
_{t}-\overline{\mathbf{p}}\right) \right] ^{^{\prime }}\mathbf{G}_{pp}\left[
\mathbf{I}\otimes \left( \mathbf{p}_{t}-\overline{\mathbf{p}}\right) \right]
=\mathbf{0},
\]%
where $\mathbf{G}_{p}$ is a matrix of first-derivatives and $\mathbf{G}_{pp}$
is a matrix of stacked Hessians, one Hessian for each of the $n_{x}+n_{y}$
equations.
We now recognize that some elements of $\mathbf{x}_{t}$ (usually the first $%
s$ elements) are shocks that have the form%
\[
\mathbf{s}_{t+1}=\mathbf{\Lambda s}_{t}+\mathbf{\eta \epsilon }_{t+1},
\]%
where $\mathbf{\epsilon }_{t}\sim i.i.d.[\mathbf{0},\mathbf{\Sigma }]$. \
The essential components required for a second-order-accurate solution are
now given by $n_{x}$, $n_{y}$, $\mathbf{G}_{p}$, $\mathbf{G}_{pp}$, $\mathbf{%
\eta }$, and $\mathbf{\Sigma }$.
The two model types that we consider for second-order-accurate solution
methods are
\begin{itemize}
\item \textbf{Gomme\_Klein\_Form}
\item \textbf{Lombardo\_Sutherland\_Form}
\end{itemize}
\subsubsection{Gomme-Klein form}
To compute a second-order accurate solution using the Gomme and Klein (2011)
method we summarize the model in the form of the \textbf{Gomme\_Klein\_Form}
composite type. \ Once this model type is constructed the model can be
solved. \ The code to compute the solution would be something like
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Gomme\_Klein\_Form(nx,ny,Gp,Gpp,eta,sigma)}
\textbf{soln = solve\_re(model,cutoff)}
\bigskip
Here \textbf{soln} is a composite type that contains the solution, which is
of the form%
\begin{eqnarray*}
\mathbf{x}_{t+1}-\overline{\mathbf{x}} &=&\frac{1}{2}\mathbf{ssh}+\mathbf{hx}%
\left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) +\frac{1}{2}\left[
\mathbf{I}\otimes \left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) \right]
^{^{\prime }}\mathbf{hxx}\left[ \mathbf{I}\otimes \left( \mathbf{x}_{t}-%
\overline{\mathbf{x}}\right) \right] +\mathbf{\eta \epsilon }_{t+1}, \\
\mathbf{y}_{t}-\overline{\mathbf{y}} &=&\frac{1}{2}\mathbf{ssg}+\mathbf{gx}%
\left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) +\frac{1}{2}\left[
\mathbf{I}\otimes \left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) \right]
^{^{\prime }}\mathbf{gxx}\left[ \mathbf{I}\otimes \left( \mathbf{x}_{t}-%
\overline{\mathbf{x}}\right) \right] ,
\end{eqnarray*}%
where $\mathbf{hxx}$ and $\mathbf{gxx}$ are stacked matrices containing the
second order coefficients for each of the $n_{x}$ and $n_{y}$ equations,
respectively. \ \textbf{soln} also contains information about the number of
eigenvalues with modulus greater than cutoff and the solution's determinacy
properties, where these properties are associated with the model first-order
dynamics.
\subsubsection{Lombardo-Sutherland form}
Implementing the Lombardo and Sutherland (2007) solution method is no
different than for Gomme and Klein (2011). \ The key difference is the form
in which the solution is presented. \ The code to implement the
Lombardo-Sutherland method would look something like
\bigskip
\textbf{cutoff = 1.0}
\textbf{model = Lombardo\_Sutherland\_Form(nx,ny,Gp,Gpp,eta,sigma)}
\textbf{soln = solve\_re(model,cutoff)}
\bigskip
where now the solution has the form%
\begin{eqnarray*}
\mathbf{x}_{t+1}-\overline{\mathbf{x}} &=&\frac{1}{2}\mathbf{ssh}+\mathbf{hx}%
\left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) +\frac{1}{2}\mathbf{hxx}%
\times \mathbf{v}_{t}+\mathbf{\eta \epsilon }_{t+1}, \\
\mathbf{y}_{t}-\overline{\mathbf{y}} &=&\frac{1}{2}\mathbf{ssg}+\mathbf{gx}%
\left( \mathbf{x}_{t}-\overline{\mathbf{x}}\right) +\frac{1}{2}\mathbf{gxx}%
\times \mathbf{v}_{t}, \\
\mathbf{v}_{t} &=&\mathbf{\Phi v}_{t-1}+\mathbf{\Gamma }vech\left( \mathbf{%
\epsilon }_{t}\mathbf{\epsilon }_{t}^{^{\prime }}\right) +\mathbf{\Psi }%
vec\left( \mathbf{x}_{t}\mathbf{\epsilon }_{t}^{^{\prime }}\right) ,
\end{eqnarray*}%
with $\mathbf{v}_{t}$ given by%
\[
\mathbf{v}_{t}=vech\left( \mathbf{x}_{t}\mathbf{x}_{t}^{\prime }\right) .
\]
The solution form produced by the Lombardo-Sutherland method can be
converted into that produced by the Gomme-Klein method by using the \textbf{%
convert\_second\_order} function as follows
\bigskip
\textbf{new\_soln = convert\_second\_order(soln)}
\bigskip
where \textbf{soln} is of type \textbf{Lombardo\_Sutherland\_Soln} and \textbf{%
new\_soln} is of type \textbf{Gomme\_Klein\_Soln}.
\section{Solving optimal policy problems}
SolveDSGE provides routines for solving Linear-Quadratic (LQ) optimal policy
problems. \ These LQ problems allow policy to be conducted under:
discretion; commitment; quasi-commitment; and timeless-perspective
commitment. \ The solutions to these four policy problems are obtained using
the commands \textbf{solve\_disc()}, \textbf{solve\_commit()}, \textbf{%
solve\_quasi()}, and \textbf{solve\_timeless()}, respectively. \ The optimal
policy routines are based around four model types and two solution types. \
At this stage not all of these policies are supported for all model-types. \
The four model-types and the optimal policies that they support are
documented below.
\subsection{State space form}
The LQ optimal policy problem in \textbf{State\_Space\_Form} is described by
the quadratic objective function%
\[
Loss=\text{E}\left[ \dsum\limits_{t=0}^{\infty }\beta ^{t}\left( \mathbf{z}%
_{t}^{^{\prime }}\mathbf{Qz}_{t}+\mathbf{z}_{t}^{^{\prime }}\mathbf{Uu}_{t}+%
\mathbf{u}_{t}^{^{\prime }}\mathbf{U}^{^{\prime }}\mathbf{z}_{t}+\mathbf{u}%
_{t}^{^{\prime }}\mathbf{Ru}_{t}\right) \right] ,
\]%
and the linear constraints%
\[
\left[
\begin{array}{c}
\mathbf{x}_{t+1} \\
\text{E}_{t}\mathbf{y}_{t+1}%
\end{array}%
\right] =\mathbf{A}\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{y}_{t}%
\end{array}%
\right] +\mathbf{Bu}_{t}+\mathbf{C}\left[ \mathbf{\epsilon }_{t+1}\right] ,
\]%
where $\mathbf{x}_{t}$ contains $n_{x}$ predetermined variables, $\mathbf{y}%
_{t}$ contains $n_{y}$ non-predetermined variables, $\mathbf{u}_{t}$
contains $n_{p}$ policy instruments, and $\mathbf{\epsilon }_{t}$ contains $%
n_{s}$ stochastic innovations.
For this model form the following policies are supported:
\begin{itemize}
\item Discretion $\left( \mathbf{s}_{t}=\mathbf{x}_{t}\right) $
\item Commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{\lambda }_{t}%
\end{array}%
\right] \right) $
\item Quasi-commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{\lambda }_{t}%
\end{array}%
\right] \right) $
\item Timeless-perspective commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{x}_{t-1} \\
\mathbf{u}_{t-1}%
\end{array}%
\right] \right) $
\end{itemize}
For each policy the solution returned is of the form%
\begin{eqnarray*}
\mathbf{s}_{t+1} &=&\mathbf{Ps}_{t}+\mathbf{K\epsilon }_{t+1}, \\
\mathbf{z}_{t} &=&\mathbf{Hs}_{t}, \\
\mathbf{u}_{t} &=&\mathbf{Fs}_{t}.
\end{eqnarray*}%
with this information summarized in the solution type, \textbf{%
State\_Space\_Soln}.
To solve a model for each of the policies above we would use code like the
following
\bigskip
\textbf{obj = State\_Space\_Objective(beta,q,u,r)}
\textbf{model = State\_Space\_Form(nx,ny,a,b,c,sigma)}
\textbf{tol = 1e-10}
\textbf{maxiters = 100}
\textbf{commit\_prob = 0.75}
\textbf{soln\_disc = solve\_disc(model,obj,tol,maxiters)}
\textbf{soln\_commit = solve\_commit(model,obj,tol,maxiters)}
\textbf{soln\_quasi = solve\_quasi(model,obj,commit\_prob,tol,maxiters)}
\textbf{soln\_timeless = solve\_timeless(model,obj,tol,maxiters)}
\bigskip
\subsection{Generalized state space form}
The LQ optimal policy problem in \textbf{Generalized\_State\_Space\_Form} is
described by the quadratic objective function%
\[
Loss=\text{E}\left[ \dsum\limits_{t=0}^{\infty }\beta ^{t}\left( \mathbf{z}%
_{t}^{^{\prime }}\mathbf{Qz}_{t}+\mathbf{z}_{t}^{^{\prime }}\mathbf{Uu}_{t}+%
\mathbf{u}_{t}^{^{\prime }}\mathbf{U}^{^{\prime }}\mathbf{z}_{t}+\mathbf{u}%
_{t}^{^{\prime }}\mathbf{Ru}_{t}\right) \right] ,
\]%
and the linear constraints%
\[
\left[
\begin{array}{c}
\mathbf{x}_{t+1} \\
\mathbf{A}_{0}\text{E}_{t}\mathbf{y}_{t+1}%
\end{array}%
\right] =\mathbf{A}\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{y}_{t}%
\end{array}%
\right] +\mathbf{Bu}_{t}+\mathbf{C}\left[ \mathbf{\epsilon }_{t+1}\right] ,
\]%
where $\mathbf{x}_{t}$ contains $n_{x}$ predetermined variables, $\mathbf{y}%
_{t}$ contains $n_{y}$ non-predetermined variables, $\mathbf{u}_{t}$
contains $n_{p}$ policy instruments, and $\mathbf{\epsilon }_{t}$ contains $%
n_{s}$ stochastic innovations. \ The \textbf{Generalized\_State\_Space\_Form}
differs from the \textbf{State\_Space\_Form} above through the presence of
the (usually) singular leading matrix $\mathbf{A}_{0}$.
For this model form the following policies are supported:
\begin{itemize}
\item Discretion $\left( \mathbf{s}_{t}=\mathbf{x}_{t}\right) $
\item Commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{\lambda }_{t}%
\end{array}%
\right] \right) $
\item Quasi-commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{\lambda }_{t}%
\end{array}%
\right] \right) $
\item Timeless-perspective commitment $\left( \mathbf{s}_{t}=\left[
\begin{array}{c}
\mathbf{x}_{t} \\
\mathbf{x}_{t-1} \\
\mathbf{u}_{t-1}%
\end{array}%
\right] \right) $
\end{itemize}
For each policy the solution returned is of the form%
\begin{eqnarray*}
\mathbf{s}_{t+1} &=&\mathbf{Ps}_{t}+\mathbf{K\epsilon }_{t+1}, \\
\mathbf{z}_{t} &=&\mathbf{Hs}_{t}, \\
\mathbf{u}_{t} &=&\mathbf{Fs}_{t}.
\end{eqnarray*}%
with this information summarized in the solution type, \textbf{%
State\_Space\_Soln}.
To solve a model for each of the policies above we would use code like the
following
\bigskip
\textbf{obj = State\_Space\_Objective(beta,q,u,r)}
\textbf{model = Generalized\_State\_Space\_Form(nx,ny,a0,a,b,c,sigma)}
\textbf{tol = 1e-10}
\textbf{maxiters = 100}
\textbf{commit\_prob = 0.75}
\textbf{soln\_disc = solve\_disc(model,obj,tol,maxiters)}
\textbf{soln\_commit = solve\_commit(model,obj,tol,maxiters)}
\textbf{soln\_quasi = solve\_quasi(model,obj,commit\_prob,tol,maxiters)}
\textbf{soln\_timeless = solve\_timeless(model,obj,tol,maxiters)}
\bigskip
\subsection{Structural form}
The LQ optimal policy problem in \textbf{Structural\_Form} is described by
the quadratic objective function%
\[
Loss=\text{E}\left[ \dsum\limits_{t=0}^{\infty }\beta ^{t}\left( \mathbf{y}%
_{t}^{^{\prime }}\mathbf{Qy}_{t}+\mathbf{u}_{t}^{^{\prime }}\mathbf{Ru}%
_{t}\right) \right] ,
\]%
and the linear constraints%
\[
\mathbf{A}_{0}\mathbf{y}_{t}=\mathbf{A}_{1}\mathbf{y}_{t-1}+\mathbf{A}_{2}%
\text{E}_{t}\mathbf{y}_{t+1}+\mathbf{A}_{3}\mathbf{u}_{t}+\mathbf{A}_{5}%
\mathbf{\epsilon }_{t},
\]%
where $\mathbf{y}_{t}$ contains $n$ variables, $\mathbf{u}_{t}$ contains $%
n_{p}$ policy instruments, and $\mathbf{\epsilon }_{t}$ contains $n_{s}$
stochastic innovations.
For this model form the following policies are supported:
\begin{itemize}
\item Discretion $\left( \mathbf{s}_{t-1}=\mathbf{y}_{t-1}\right) $
\item Commitment $\left( \mathbf{s}_{t-1}=\left[
\begin{array}{c}
\mathbf{y}_{t-1} \\
\mathbf{\lambda }_{t-1}%
\end{array}%
\right] \right) $
\end{itemize}
For each policy the returned solution is of the form%
\begin{eqnarray*}
\mathbf{s}_{t} &=&\mathbf{Ps}_{t-1}+\mathbf{K\epsilon }_{t}, \\
\mathbf{u}_{t} &=&\mathbf{Fs}_{t-1}.
\end{eqnarray*}%
with this information contained in the solution type, \textbf{%
Structural\_Soln}.
To solve a model for each of the policies above we would use code like the
following
\bigskip
\textbf{obj = Structural\_Objective(beta,q,r)}
\textbf{model = Structural\_Form(a0,a1,a2,a3,a5,sigma)}
\textbf{tol = 1e-10}
\textbf{maxiters = 100}
\textbf{soln\_disc = solve\_disc(model,obj,tol,maxiters)}
\textbf{soln\_commit = solve\_commit(model,obj,tol,maxiters)}
\bigskip
\subsection{Generalized structural form}
The LQ optimal policy problem in \textbf{Generalized\_Structural\_Form} is
described by the quadratic objective function%
\[
Loss=\text{E}\left[ \dsum\limits_{t=0}^{\infty }\beta ^{t}\left( \mathbf{y}%
_{t}^{^{\prime }}\mathbf{Qy}_{t}+\mathbf{u}_{t}^{^{\prime }}\mathbf{Ru}%
_{t}\right) \right] ,
\]%
and the linear constraints%
\[
\mathbf{A}_{0}\mathbf{y}_{t}=\mathbf{A}_{1}\mathbf{y}_{t-1}+\mathbf{A}_{2}%
\text{E}_{t}\mathbf{y}_{t+1}+\mathbf{A}_{3}\mathbf{u}_{t}+\mathbf{A}_{4}%
\text{E}_{t}\mathbf{u}_{t+1}+\mathbf{A}_{5}\mathbf{\epsilon }_{t},
\]%
where $\mathbf{y}_{t}$ contains $n$ variables, $\mathbf{u}_{t}$ contains $%
n_{p}$ policy instruments, and $\mathbf{\epsilon }_{t}$ contains $n_{s}$
stochastic innovations.
For this model form the following policies are supported:
\begin{itemize}
\item Discretion $\left( \mathbf{s}_{t-1}=\mathbf{y}_{t-1}\right) $
\item Commitment $\left( \mathbf{s}_{t-1}=\left[
\begin{array}{c}
\mathbf{y}_{t-1} \\
\mathbf{\lambda }_{t-1}%
\end{array}%
\right] \right) $
\end{itemize}
For each policy the returned solution is of the form%
\begin{eqnarray*}
\mathbf{s}_{t} &=&\mathbf{Ps}_{t-1}+\mathbf{K\epsilon }_{t}, \\
\mathbf{u}_{t} &=&\mathbf{Fs}_{t-1}.
\end{eqnarray*}%
with this information contained in the solution type, \textbf{%
Structural\_Soln}.
To solve a model for each of the policies above we would use code like the
following:
\bigskip
\textbf{obj = Structural\_Objective(beta,q,r)}
\textbf{model = Generalized\_Structural\_Form(a0,a1,a2,a3,a4,a5,sigma)}
\textbf{tol = 1e-10}
\textbf{maxiters = 100}
\textbf{soln\_disc = solve\_disc(model,obj,tol,maxiters)}
\textbf{soln\_commit = solve\_commit(model,obj,tol,maxiters)}
\bigskip
\setlength{\baselineskip}{10pt}
\begin{thebibliography}{9}
\bibitem{} Binder, M., and H. Pesaran, (1995), \textquotedblleft
Multivariate Rational Expectations Models and Macroeconomic Modeling: A
Review and Some New Results,\textquotedblright\ in: Pesaran, H., M. Wickens,
(Eds.), \textit{Handbook of Applied Econometrics}, Basil Blackwell, Oxford,
pp.139--187.
\bibitem{} Blanchard, O., and C. Kahn, (1980), \textquotedblleft The
Solution to Linear Difference Models under Rational
Expectations,\textquotedblright\ \textit{Econometrica}, 48, pp.1305--1311.
\bibitem{} Gomme, P., and P. Klein, (2011), \textquotedblleft Second-Order
Approximation of Dynamic Models Without the Use of
Tensors,\textquotedblright\ \textit{Journal of Economic Dynamics and Control}%
, 35, pp.604--615.
\bibitem{} Klein, P., (2000), \textquotedblleft Using the Generalized Schur
Form to Solve a Multivariate Linear Rational Expectations
Model,\textquotedblright\ \textit{Journal of Economic Dynamics and Control},
24, pp.1405--1423.
\bibitem{} Lombardo, G., and A. Sutherland, (2007), \textquotedblleft
Computing Second-Order-Accurate Solutions for Rational Expectations Models
Using Linear Solution Methods,\textquotedblright\ \textit{Journal of
Economic Dynamics and Control}, 31, pp.515--530.
\bibitem{} Sims, C., (2001), \textquotedblleft Solving Linear Rational
Expectations Models,\textquotedblright\ \textit{Computational Economics},
20, pp.1--20.
\end{thebibliography}
\end{document}
|
{-# OPTIONS --safe --experimental-lossy-unification #-}
module Cubical.Algebra.Polynomials.UnivariateList.Poly1-1Poly where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.HLevels
open import Cubical.Data.Nat renaming (_+_ to _+n_; _·_ to _·n_)
open import Cubical.Data.Vec renaming ( [] to <> ; _∷_ to _::_)
open import Cubical.Data.Vec.OperationsNat
open import Cubical.Algebra.DirectSum.DirectSumHIT.Base
open import Cubical.Algebra.Ring
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Polynomials.UnivariateList.Base renaming (Poly to Poly:)
open import Cubical.Algebra.Polynomials.UnivariateList.Properties
open import Cubical.Algebra.CommRing.Instances.Polynomials.UnivariatePolyList
open import Cubical.Algebra.CommRing.Instances.Polynomials.MultivariatePoly
private variable
ℓ : Level
module Equiv-Poly1-Poly:
(Acr@(A , Astr) : CommRing ℓ) where
private
PA = PolyCommRing Acr 1
PAstr = snd PA
PA: = UnivariatePolyList Acr
PA:str = snd PA:
open PolyMod Acr using (ElimProp)
open PolyModTheory Acr
using ( prod-Xn ; prod-Xn-sum ; prod-Xn-∷ ; prod-Xn-prod)
renaming
(prod-Xn-0P to prod-Xn-0P:)
open CommRingStr
open RingTheory
-- Notation P, Q, R... for Poly 1
-- x, y, w... for Poly:
-- a,b,c... for A
-----------------------------------------------------------------------------
-- direct
trad-base : (v : Vec ℕ 1) → A → Poly: Acr
trad-base (n :: <>) a = prod-Xn n (a ∷ [])
trad-base-neutral : (v : Vec ℕ 1) → trad-base v (0r Astr) ≡ []
trad-base-neutral (n :: <>) = cong (prod-Xn n) drop0 ∙ prod-Xn-0P: n
trad-base-add : (v : Vec ℕ 1) → (a b : A) → _+_ PA:str (trad-base v a) (trad-base v b) ≡ trad-base v (_+_ Astr a b)
trad-base-add (n :: <>) a b = prod-Xn-sum n (a ∷ []) (b ∷ [])
Poly1→Poly: : Poly Acr 1 → Poly: Acr
Poly1→Poly: = DS-Rec-Set.f _ _ _ _ (is-set PA:str)
[]
trad-base
(_+_ PA:str)
(+Assoc PA:str)
(+IdR PA:str)
(+Comm PA:str)
trad-base-neutral
trad-base-add
Poly1→Poly:-pres+ : (P Q : Poly Acr 1) → Poly1→Poly: (_+_ PAstr P Q) ≡ _+_ PA:str (Poly1→Poly: P) (Poly1→Poly: Q)
Poly1→Poly:-pres+ P Q = refl
-----------------------------------------------------------------------------
-- converse
Poly:→Poly1-int : (n : ℕ) → Poly: Acr → Poly Acr 1
Poly:→Poly1-int n [] = 0r PAstr
Poly:→Poly1-int n (a ∷ x) = _+_ PAstr (base (n :: <>) a) (Poly:→Poly1-int (suc n) x)
Poly:→Poly1-int n (drop0 i) = ((cong (λ X → _+_ PAstr X (0r PAstr)) (base-neutral (n :: <>))) ∙ (+IdR PAstr _)) i
Poly:→Poly1 : Poly: Acr → Poly Acr 1
Poly:→Poly1 x = Poly:→Poly1-int 0 x
Poly:→Poly1-int-pres+ : (x y : Poly: Acr) → (n : ℕ) →
Poly:→Poly1-int n (_+_ PA:str x y) ≡ _+_ PAstr (Poly:→Poly1-int n x) (Poly:→Poly1-int n y)
Poly:→Poly1-int-pres+ = ElimProp _
(λ y n → cong (Poly:→Poly1-int n) (+IdL PA:str y) ∙ sym (+IdL PAstr _))
(λ a x ind-x → ElimProp _
(λ n → sym (+IdR PAstr (Poly:→Poly1-int n (a ∷ x))))
(λ b y ind-y n → sym (+ShufflePairs (CommRing→Ring PA) _ _ _ _
∙ cong₂ (_+_ PAstr) (base-add _ _ _) (sym (ind-x y (suc n)))))
(isPropΠ (λ _ → is-set PAstr _ _)))
(isPropΠ2 (λ _ _ → is-set PAstr _ _))
Poly:→Poly1-pres+ : (x y : Poly: Acr) → Poly:→Poly1 (_+_ PA:str x y) ≡ _+_ PAstr (Poly:→Poly1 x) (Poly:→Poly1 y)
Poly:→Poly1-pres+ x y = Poly:→Poly1-int-pres+ x y 0
-----------------------------------------------------------------------------
-- section
e-sect-int : (x : Poly: Acr) → (n : ℕ) → Poly1→Poly: (Poly:→Poly1-int n x) ≡ prod-Xn n x
e-sect-int = ElimProp _
(λ n → sym (prod-Xn-0P: n))
(λ a x ind-x n → cong (λ X → _+_ PA:str (prod-Xn n (a ∷ [])) X) (ind-x (suc n))
∙ prod-Xn-∷ n a x)
(isPropΠ (λ _ → is-set PA:str _ _))
e-sect : (x : Poly: Acr) → Poly1→Poly: (Poly:→Poly1 x) ≡ x
e-sect x = e-sect-int x 0
-----------------------------------------------------------------------------
-- retraction
idde : (m n : ℕ) → (a : A) → Poly:→Poly1-int n (prod-Xn m (a ∷ [])) ≡ base ((n +n m) :: <>) a
idde zero n a = +IdR PAstr (base (n :: <>) a)
∙ cong (λ X → base (X :: <>) a) (sym (+-zero n))
idde (suc m) n a = cong (λ X → _+_ PAstr X (Poly:→Poly1-int (suc n) (prod-Xn m (a ∷ [])))) (base-neutral (n :: <>))
∙ +IdL PAstr (Poly:→Poly1-int (suc n) (prod-Xn m (a ∷ [])))
∙ idde m (suc n) a
∙ cong (λ X → base (X :: <>) a) (sym (+-suc n m))
idde-v : (v : Vec ℕ 1) → (a : A) → Poly:→Poly1-int 0 (trad-base v a) ≡ base v a
idde-v (n :: <>) a = (idde n 0 a)
e-retr : (P : Poly Acr 1) → Poly:→Poly1 (Poly1→Poly: P) ≡ P
e-retr = DS-Ind-Prop.f _ _ _ _ (λ _ → trunc _ _)
refl
(λ v a → idde-v v a)
λ {P Q} ind-P ind-Q → cong Poly:→Poly1 (Poly1→Poly:-pres+ P Q)
∙ Poly:→Poly1-pres+ (Poly1→Poly: P) (Poly1→Poly: Q)
∙ cong₂ (_+_ PAstr) ind-P ind-Q
-----------------------------------------------------------------------------
-- Ring morphism
Poly1→Poly:-pres1 : Poly1→Poly: (1r PAstr) ≡ 1r PA:str
Poly1→Poly:-pres1 = refl
trad-base-prod : (v v' : Vec ℕ 1) → (a a' : A) → trad-base (v +n-vec v') (Astr ._·_ a a') ≡
_·_ PA:str (trad-base v a) (trad-base v' a')
trad-base-prod (k :: <>) (l :: <>) a a' = sym ((prod-Xn-prod k l [ a ] [ a' ]) ∙ cong (λ X → prod-Xn (k +n l) [ X ]) (+IdR Astr _))
Poly1→Poly:-pres· : (P Q : Poly Acr 1) → Poly1→Poly: (_·_ PAstr P Q) ≡ _·_ PA:str (Poly1→Poly: P) (Poly1→Poly: Q)
Poly1→Poly:-pres· = DS-Ind-Prop.f _ _ _ _ (λ _ → isPropΠ λ _ → is-set PA:str _ _)
(λ Q → refl)
(λ v a → DS-Ind-Prop.f _ _ _ _ (λ _ → is-set PA:str _ _)
(sym (0RightAnnihilates (CommRing→Ring PA:) _))
(λ v' a' → trad-base-prod v v' a a')
λ {U V} ind-U ind-V → (cong₂ (_+_ PA:str) ind-U ind-V)
∙ sym (·DistR+ PA:str _ _ _))
λ {U V} ind-U ind-V Q → (cong₂ (_+_ PA:str) (ind-U Q) (ind-V Q))
∙ sym (·DistL+ PA:str _ _ _)
-----------------------------------------------------------------------------
-- Ring Equivalences
module _ (Acr : CommRing ℓ) where
open Equiv-Poly1-Poly: Acr
CRE-Poly1-Poly: : CommRingEquiv (PolyCommRing Acr 1) (UnivariatePolyList Acr)
fst CRE-Poly1-Poly: = isoToEquiv is
where
is : Iso _ _
Iso.fun is = Poly1→Poly:
Iso.inv is = Poly:→Poly1
Iso.rightInv is = e-sect
Iso.leftInv is = e-retr
snd CRE-Poly1-Poly: = makeIsRingHom
Poly1→Poly:-pres1
Poly1→Poly:-pres+
Poly1→Poly:-pres·
|
[STATEMENT]
lemma "the_or\<cdot>[x, y] = (x orelse y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the_or\<cdot>[x, y] = (x orelse y)
[PROOF STEP]
by (fixrec_simp) |
import numpy as np
import torch
from torch import Tensor
from optical_flow.visualization.utils import hsv_to_rgb
def flow2rgb_hsv(flow: Tensor) -> Tensor:
    """Convert an optical flow field to a colored representation in the form of an RGB image by
    representing the flow vectors in HSV color space and then converting them to RGB.

    The color hue is determined by the angle to the X-axis and the norm of the flow determines
    the saturation. White represents zero optical flow.

    Args:
        flow: the optical flow tensor of shape (B, 2, H, W)

    Returns:
        RGB image of shape (B, 3, H, W)
    """
    flow = flow.clone()
    flow[:, 1] *= -1
    dx, dy = flow[:, 0], flow[:, 1]
    angle = torch.atan2(dy, dx)
    angle = torch.where(angle < 0, angle + (2 * np.pi), angle)
    scale = torch.sqrt(dx ** 2 + dy ** 2)
    h = angle / (2 * np.pi)
    s = torch.clamp(scale, 0, 1)
    v = torch.ones_like(s)
    hsv = torch.stack((h, s, v), 1)
    rgb = hsv_to_rgb(hsv)
    return rgb
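
if __name__ == "__main__":
    # Minimal usage sketch (illustrative shapes only, not from the original
    # module): convert a batch of two random flow fields to RGB images.
    example_flow = torch.randn(2, 2, 64, 64)
    rgb_image = flow2rgb_hsv(example_flow)
    print(rgb_image.shape)  # torch.Size([2, 3, 64, 64])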
|
-- f is increasing iff ∀ x y, x < y → f x ≤ f y
-- ============================================
import data.real.basic
variable (f : ℝ → ℝ)
-- ----------------------------------------------------
-- Ejercicio 1. Definir la función
-- creciente : (ℝ → ℝ) → Prop
-- tal que (creciente f) espresa que f es creciente.
-- ----------------------------------------------------
def creciente (f : ℝ → ℝ) : Prop :=
∀ {x₁ x₂}, x₁ ≤ x₂ → f x₁ ≤ f x₂
-- ----------------------------------------------------
-- Exercise 2. Prove that f is increasing iff
-- ∀ x y, x < y → f x ≤ f y
-- ----------------------------------------------------
-- 1st proof
example :
creciente f ↔ ∀ {x y}, x < y → f x ≤ f y :=
begin
unfold creciente,
split,
{ intros hf x y hxy,
apply hf,
-- by library_search
exact le_of_lt hxy, },
{ intros h x y hxy,
have h1: x = y ∨ x < y,
apply eq_or_lt_of_le hxy,
cases h1 with h2 h3,
{ rw h2, },
{ apply h,
exact h3, }},
end
-- 2nd proof
example :
creciente f ↔ ∀ {x y}, x < y → f x ≤ f y :=
begin
split,
{ intros hf x y hxy,
apply hf,
exact le_of_lt hxy, },
{ intros h x y hxy,
have h1: x = y ∨ x < y,
apply eq_or_lt_of_le hxy,
cases h1 with h2 h3,
{ rw h2, },
{ apply h,
exact h3, }},
end
-- 3rd proof
example :
creciente f ↔ ∀ {x y}, x < y → f x ≤ f y :=
begin
split,
{ intros hf x y hxy,
apply hf,
linarith, },
{ intros h x y hxy,
cases (eq_or_lt_of_le hxy) with h2 h3,
{ rw h2, },
{ exact h h3, }},
end
|
function y=rotqrvec(q,x)
%ROTQRVEC applies a quaternion rotation ot a vector array y=[q,x]
%
% Inputs: q(4,1) quaternion rotation (possibly unnormalized)
% x(3n,...) array of 3D column vectors
%
% Outputs: y(3n,...) array of 3D column vectors
% Copyright (C) Mike Brookes 2011-2012
% Version: $Id: rotqrvec.m 1640 2012-03-16 07:43:08Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
s=size(x);
y=reshape(rotqr2ro(q)*reshape(x,3,[]),s); |
theory T118
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z)))
"
nitpick[card nat=7,timeout=86400]
oops
end |
WIKI WEDNESDAY APRIL 4TH 2007
6:00 PM TO 7:30 PM
Live music and complimentary wine and appetizers on the upper terrace!
ALL WIKI COMMUNITY MEMBERS WELCOME
It is our honor to invite all of you who have contributed to this site in the interest of public discourse to our Wiki Wednesday party next week. Our Owner, Matt Haines, Regional Manager, Dan DOrazio, and Marketing Manager, Joshua Vigil will be in attendance to thank you all for contributing to the establishment of Bistro 33 as a part of the Davis landscape and as a Daviswiki phenomenon.
We would like to invite you all to see the positive changes we are making at Bistro 33 and to hear about our plans for the future. Our goal is always to improve and always to take care of our guests; give us the opportunity to reaffirm that to you.
Personally, I'd like to put faces to some of the impassioned voices who are a part of the Wiki community (positive, negative, and in between). Even though I may not always agree with what is written, I admire and respect the format of the Daviswiki and continually marvel at the impact it has on the community as a whole. I applaud all of you who take part in this truly interactive environment. There are so many of you who have contributed; all of you have made a difference in how I view Bistro and the plans we make for its growth and success. From JarrettNoble to HughToppe, JeffSpeckles to LisaBeth, CraigBrozinsky to JessicaFu, and everyone in between; I sincerely look forward to meeting you!
GIVE US THE OPPORTUNITY TO SHOW OUR THANKS! XJOIN US NEXT WEEK FOR WIKI WEDNESDAY!X (note that this refers to a past event)
Users/ShanninSaulnier
Shannin and the folks from Bistro 33 welcomed a small group of people from Davis Wiki to a very nice discussion about both the wiki and Bistro 33. This was the first non potluck Wiki Gatherings Wiki Gathering. Conversations touched on Welcome to the Wiki/Business Owner business owners or managers can have a mutually beneficial relationship with the wiki to the history of Historic City Hall old City Hall.
Shannin shared a lot of her philosophy toward customer service and the wiki folks shared information about the April 2007 Wiki Update recent developments with http://wikispot.org and its relationship to daviswiki. As with any Wiki Gatherings Wiki gathering the process of putting faces with user names took a while, but helps everyone involved to better understand who else is involved in this cooperative effort on the wiki.
Users/JabberWokky was in attendance if only by instant messaging and was able to answer some critical questions as the evening wore on and the topics drifted from Pennyfarthing to the vulcanization process for rubber.
20070517 19:26:07 nbsp Hook, line, sinker. Users/JeffSpeckles
20070517 19:31:28 nbsp I would bite Users/StevenDaubert
|
function sample = SampleGaussian(mu, sigma)
% sample from the Gaussian distribution specified by mean value mu and standard deviation sigma
%
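% Example (one draw from the standard normal):
%   s = SampleGaussian(0, 1);
%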
% Copyright (C) Daphne Koller, Stanford Univerity, 2012
sample = mu + sigma*randn(1,1);
|
[STATEMENT]
lemma diff_fun_space_times: "f * g \<in> diff_fun_space"
if "f \<in> diff_fun_space" "g \<in> diff_fun_space"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f * g \<in> diff_fun_space
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
f \<in> diff_fun_space
g \<in> diff_fun_space
goal (1 subgoal):
1. f * g \<in> diff_fun_space
[PROOF STEP]
by (auto simp: diff_fun_space_def intro!: diff_fun_times) |
[STATEMENT]
lemma pat_elem_less_size:
"(pat, e) \<in> set pes \<Longrightarrow> size_exp' e < (size_list (size_prod size size_exp') pes)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (pat, e) \<in> set pes \<Longrightarrow> size_exp' e < size_list (size_prod size size_exp') pes
[PROOF STEP]
by (induction pes) auto |
As late as July 1997, U2 were pressured to accept an offer of approximately $4 million to perform in Basel, Switzerland on the date scheduled for the Sarajevo show. At the time, rumours about the region's instability persisted. To ensure the Sarajevo show was not canceled, Sacirbey appeared at many of the band's preceding shows to lobby on behalf of the city. For the stage to reach Sarajevo, the road crew had to drive the equipment and stage through war-torn Bosnia. Although the trip was without incident, they had to pass through towns such as Mostar, which had been "obliterated" during the war. Stage and lighting designer Willie Williams commented that "when the truck drivers arrived you could see that they were changed men". The only trouble in transporting the stage came when a border control agent prevented them from crossing the border for hours. The trucks reached Sarajevo two days prior to the concert, arriving to the cheers and applause of the city's residents; their arrival was the first concrete evidence that the band were keeping their promise to play there. McGuinness explained, "This is a city that's been disappointed so many times there were a lot of people who weren't prepared to believe the gig was going to take place until they saw the stage going up." Until then, tickets had sold very slowly, but within 24 hours of the trucks' arrival, another 8,000 tickets were sold. Despite this, a day before the concert, 15,000 tickets remained unsold. Three-hundred local residents were employed to help assemble the stage and promote the show.
|
theory T54
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
```python
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 9)
plt.rcParams["font.size"] = 18
```
# In Core Fuel Management
In-core fuel management focuses on the study of requirements and operational considerations impacting fuel performance in the reactor core, power history, core loading patterns, and refuelling activities.
## Learning Objectives
At the end of this lesson, you will be equipped to:
- List safety constraints driving in core fuel management decisions.
- Calculate capacity and availability factors.
- Calculate the mass required for each reactor year of operation.
- Calculate core and assembly discharge burnup based on power output.
- Analyze the reactivity evolution of a core based on burnup.
- Apply burnup calculations to multiple batch cores.
- Recognize the relationship between the number of batches and the final burnup.
- Understand the goals driving choices in various fuel loading patterns.
- Apply these lessons to pebble-fuelled and liquid-fueled advanced reactor designs.
- Recognize the impact of extended burnup on fuel utilization, SWU utilization, and fuel cycle cost.
- Understand how isotopic activities can be used to determine fuel burnup.
- Calculate burnup based on key activity ratios.
## Safety Constraints
- $\frac{P_{peak}}{P_{avg}}$, peak to average power ratio.
- $T_{max}$, maximum core temperature.
- Departure from Nucleate Boiling Ratio (DNBR)
- $\rho$, reactivity in the core.
- $\alpha_T$, temperature coefficient of reactivity
Primarily, there is a loss of coolant accident (LOCA) peak clad temperature (PCT) limit of 1205 $^\circ C$, which limits the maximum pellet linear power density to approximately 48 kW/m at Hot Full Power (HFP).
- Critical Heat Flux (CHF), which denotes departure from nucleate boiling (DNB) for a PWR and Dryout for a BWR, not being exceeded during anticipated transients, which limits the maximum average fuel pin linear power density to approximately 29 kW/m at HFP.
- Fuel cladding strain limit not exceeded during anticipated transients
### Safety Variables
- Fuel enrichment
- Re-load batch size & number of assemblies
- Fuel loading pattern of fresh and partially spent fuel assemblies
- Control mechanisms
## Mass Required
The simplest possible representation of the mass of fuel that must be added into a reactor is:
\begin{align}
M(t) &= \frac{Q}{BU}
\end{align}
where
\begin{align}
M &= \mbox{mass of heavy metal (e.g., uranium) in the core }[MTHM/yr]\\
Q &= \mbox{annual thermal energy output }[GWd/yr]\\
BU &= \mbox{burnup }[GWd/MTIHM]
\end{align}
But, Q itself typically needs to be back-calculated from energy produced.
\begin{align}
Q &= \frac{P_0\cdot CF\cdot T}{\eta_{th}}
\end{align}
where
\begin{align}
P_0 &= \mbox{installed electric capacity }[GWe]\\
CF &= \mbox{capacity factor }[-]\\
T &= \mbox{time in core } [days]\\
\eta_{th} &= \mbox{thermal efficiency }[GWe/GWth]\\
\end{align}
```python
def m(q, bu):
    return q/bu

def q(p0, cf, t, eta_th):
    return p0*cf*t/eta_th

p0 = 1.5       # installed electric capacity GWe
cf = 0.9       # capacity factor
t = 365        # days per year
eta_th = 0.33  # thermal efficiency GWe/GWth
bu = 50        # burnup GWd/MTIHM

print(m(q(p0, cf, t, eta_th), bu))
```
    29.863636363636363
## Capacity and Availability Factors
The capacity factor is representative of the plant's tendency to achieve its rated power capacity.
\begin{align}
CF &= \frac{\mbox{actual power generated over time T}}{\mbox{rated power potential over time T}}\\
&=\frac{\int_0^T P(t)dt}{P_0T}\\
P(t) &= \mbox{ thermal power at time t during period T}
\end{align}
The capacity factor, integrated over time, gives Effective Full Power Days (EFPD), the equivalent number of days at full power.
\begin{align}
EFPD &= \int_0^T CF(t)\,dt\\
&= \int_0^T \frac{P(t)}{P_0}\,dt
\end{align}
The availability factor is always at least as large as the capacity factor.
\begin{align}
AF &= \frac{\mbox{time during which the reactor was operational during time period T}}{T}
\end{align}
```python
# The reactor shuts down:
# for a few days during the 10th month
# for one month during month 18
shutdowns = {10: 10.1,
             18.5: 19.5}

import numpy as np

def A(t, shutdowns):
    to_ret = 1.0*(t > 0)
    for start, stop in shutdowns.items():
        if start < t and t < stop:
            to_ret = 0
    return to_ret

times = np.arange(0.0, 20.0, 0.01)
hist = np.arange(0.0, 20.0, 0.01)
cf = np.arange(0.0, 20.0, 0.01)

for i in range(0, times.size):
    hist[i] = A(times[i], shutdowns)
    cf[i] = A(times[i], shutdowns)*(1.-0.1*np.random.random())

plt.plot(times, hist, label='Availability')
plt.plot(times, cf, label='Capacity')
plt.ylim([-0.5, 1.5])
plt.title('Capacity and Availability')
plt.xlabel('Time (months)')
plt.ylabel('Factor [-]')
plt.legend()
```
We can do a quick numeric integral to get each factor as an integral over the 20 month cycle.
\begin{align}
AF &= \frac{\int_0^{20}A(t)dt}{T}\\
CF &= \frac{\int_0^{20}P(t)dt}{P_0T}\\
\end{align}
```python
print("Availability Factor = ", hist.sum()/hist.shape[0])
print("Capacity Factor = ", cf.sum()/cf.shape[0])
```
Availability Factor = 0.9455
Capacity Factor = 0.898687813941
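The integral of the capacity factor over the window gives the effective full-power time directly; since the simulation above runs in months, the result comes out as effective full power months rather than days:

```python
# Integrate CF(t) dt with dt = 0.01 months over the 20-month window.
efpm = cf.sum() * 0.01
print("Effective full power months = ", efpm)  # roughly 18 of the 20 months
```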
## Simple Reactivity Model
- On each cycle (1/n)th of the fuel is replaced
- Each fuel batch experiences a discharge burnup of Bd
- Each fuel batch on each cycle experiences a burnup of Bd/n
- $k_{reactor}$ is the uncontrolled multiplication factor (excess reactivity)
- $k_i$ is the infinite multiplication factor of a fuel batch (excess reactivity)
Each batch of fuel will have a different burn-up and $k_i(B)$ since each batch has been in the reactor a different length of time. The reactivity of the reactor is found by summing over the reactivities of all the batches of fuel, for n batches:
\begin{align}
k_{reactor} = \frac{1}{n}\sum_{i=1}^{n}k_i(B)
\end{align}
\begin{align}
k_i(B) = k_0 - \alpha B_n
\end{align}
- $k_0$ is the uncontrolled infinite multiplication factor of the fuel batch when it is fresh.
- $B_n$ is the burnup of the batch in a single cycle. The n refers to the number of batches that the reload scheme includes.
- $\alpha$ is a constant of proportionality with units of 1/Bn. Uniform linear depletion.
- $k_F$ is the uncontrolled infinite multiplication factor necessary to sustain a chain reaction at the end of an operating cycle
```python
def ki(k0, alpha, b):
    return k0 - alpha*b

def k(ki, n):
    return (1/n)*np.sum(ki)

n = 3
k0 = 4.5
alpha = (k0 - 1)/20000
bu = np.arange(0, 30000., 1000.)

plt.plot(bu, ki(k0, alpha, bu))
plt.plot(bu, np.zeros(bu.shape), color='r')
plt.ylabel(r'$k_i(B)$')
plt.xlabel(r'$B$')
plt.title('Excess Reactivity Using Linear Depletion Model')
```
This approximation is somewhat accurate and gives an intuition for the impact of reloading on excess reactivity in the core.
## Single Cycle Refuelling
\begin{align}
k_{reactor} = k_1(B_1)
\end{align}
\begin{align}
k_1(B_1) = k_0 - \alpha B_1
\end{align}
Therefore the fuel burnup capability is:
\begin{align}
B_1 &= \frac{k_0-k_F}{\alpha}
\end{align}
## Two Cycle Refuelling
At the end of each cycle one batch of fuel has been burned for one cycle and the other batch has been burned for two cycles. Thus:
\begin{align}
k_F &= \frac{k_0 - \alpha B_2}{2} + \frac{k_0 - 2\alpha B_2}{2}\\
&= k_0 - \frac{3\alpha B_2}{2}\\
B_2 &= \frac{2(k_0 - k_F)}{3\alpha}\\
&= \frac{2}{3}B_1
\end{align}
- Each batch in the two cycle reload scheme is burned for $2B_2$.
So, in terms of the single cycle reload burnup:
\begin{align}
2B_2 &= 2\left(\frac{2}{3}B_1\right)\\
&= \frac{4}{3}B_1\\
\end{align}
**This means there is 1/3 more burnup in the two cycle reload, for the same initial and final multiplication factors $k_0$ and $k_F$ (exactly the same fuel.)**
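A quick numeric check of this result under the linear depletion model (the values of $k_0$, $k_F$, and $\alpha$ below are illustrative placeholders, not data for a specific core):

```python
# Check B2 = (2/3) B1 and the two-cycle discharge burnup 2*B2 = (4/3) B1.
k0, kF = 1.12, 1.00        # illustrative fresh-fuel and end-of-cycle k
alpha = (k0 - kF) / 12000  # chosen so that B1 = 12000

b1 = (k0 - kF) / alpha            # single-batch burnup capability
b2 = 2 * (k0 - kF) / (3 * alpha)  # per-cycle batch burnup, two batches

print(b1, b2, 2 * b2)  # ~12000, ~8000 = (2/3) B1, ~16000 = (4/3) B1
```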
## N Cycle Reload Scheme
The relation between end-of-cycle core multiplication factor kF and the fresh fuel batch infinite multiplication factor k0 and the batch burnup in general is
\begin{align}
k_F &= k_0 - \frac{1}{n}\sum_{i=1}^{n}i\alpha B_n\\
\end{align}
Recall the arithmetic series identity:
\begin{align}
\sum_{i=1}^{n}i &= \frac{n(n + 1)}{2}\\
\end{align}
Therefore:
\begin{align}
k_F &= k_0 - \left(\frac{n + 1}{2}\right)\alpha B_n\\
\end{align}
The batch burnup in a single cycle is then the result of solving for $B_n$:
\begin{align}
B_n &= \frac{2(k_0 - k_F)}{\alpha(n + 1)}
\end{align}
The discharge burnup of batch n, is the batch burnup in a cycle times the number of cycles:
\begin{align}
B_n^d &= nB_n\\
&= \frac{2n(k_0 - k_F)}{\alpha(n + 1)}\\
&= \left(\frac{2n}{n + 1}\right)\frac{k_0 - k_F}{\alpha} \\
&= \left(\frac{2n}{n + 1}\right)B_1 \\
\end{align}
```python
def bd(n, b1):
    num = 2*n*b1
    denom = n+1
    return num/denom

b1 = 12000
n = np.arange(1, 50)
plt.plot(n, bd(n, b1))
```
### Discussion: What is the primary drawback of many batches per core?
## Fuel Loading Patterns
Various fuel loading patterns are used to acheive improved fuel utilization (higher burnup), better core control, and lower leakage to the pressure vessel.
## Many and $\infty$ Batch Reactor Designs
Infinite batch refuelling (a.k.a. online refuelling) is possible in liquid fuelled cores with online reprocessing.
What exactly is a pebble core, then, in terms of batches?
<center>Aufiero, 2016</center>
## Determining Burnup
- Direct methods occur while the fuel is still in the core (using ion chambers and in-core flux probes)
- Indirect methods use measurements of activity after the fuel has been removed.
\begin{align}
BU &= a + bA(^{137}Cs)\\
BU &= c(e, r) + d(e, r) \left[A(^{134}Cs)/A(^{137}Cs)\right]\\
BU &= a\cdot \exp\left[b\cdot \ln\left(\frac{A(^{106}Ru)\,A(^{137}Cs)}{A(^{134}Cs)^{2}}\right)\right]\\
a, b, c, d &= \mbox{calibration constants}\\
e &= \mbox{enrichment}\\
r &= \mbox{power rating}
\end{align}
|
import re
import networkx as nx
import numpy as np
from aocd.models import Puzzle
pattern = re.compile(r'((\d)?\s?(\w*\s\w+)\s(bags?))')
def solve_puzzle_one(graph):
    print(len(nx.ancestors(graph, 'shiny gold')))


def solve_puzzle_two(graph):
    print(count_bags(graph, 'shiny gold'))


def count_bags(graph, node):
    # Recursively count the bags contained in `node`: each successor bag
    # contributes its own contents times the edge weight, plus itself.
    total = 0
    for successor in graph.successors(node):
        weight = graph[node][successor]['weight']
        total += count_bags(graph, successor) * weight + weight
    return total


def parse_input(data):
    g = nx.DiGraph()
    for rule in np.array(data.splitlines()):
        matches = pattern.findall(rule)
        for i in range(1, len(matches)):
            g.add_edge(matches[0][2], matches[i][2], weight=int(matches[i][1]) if matches[i][1] else 0)
    return g
test_input = """light red bags contain 1 bright white bag, 2 muted yellow bags.
faded blue bags contain no other bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
dotted black bags contain no other bags.
"""
test_input2 = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags."""
if __name__ == '__main__':
    puzzle = Puzzle(year=2020, day=7)
    use_test_input = False  # flip to run against the sample rules above
    if use_test_input:
        graph = parse_input(test_input)
    else:
        graph = parse_input(puzzle.input_data)
    solve_puzzle_one(graph)
    solve_puzzle_two(graph)
|
(* Title: HOL/MicroJava/DFA/Err.thy
Author: Tobias Nipkow
Copyright 2000 TUM
*)
section {* The Error Type *}
theory Err
imports Semilat
begin
datatype 'a err = Err | OK 'a
type_synonym 'a ebinop = "'a \<Rightarrow> 'a \<Rightarrow> 'a err"
type_synonym 'a esl = "'a set * 'a ord * 'a ebinop"
primrec ok_val :: "'a err \<Rightarrow> 'a" where
"ok_val (OK x) = x"
definition lift :: "('a \<Rightarrow> 'b err) \<Rightarrow> ('a err \<Rightarrow> 'b err)" where
"lift f e == case e of Err \<Rightarrow> Err | OK x \<Rightarrow> f x"
definition lift2 :: "('a \<Rightarrow> 'b \<Rightarrow> 'c err) \<Rightarrow> 'a err \<Rightarrow> 'b err \<Rightarrow> 'c err" where
"lift2 f e1 e2 ==
case e1 of Err \<Rightarrow> Err
| OK x \<Rightarrow> (case e2 of Err \<Rightarrow> Err | OK y \<Rightarrow> f x y)"
definition le :: "'a ord \<Rightarrow> 'a err ord" where
"le r e1 e2 ==
case e2 of Err \<Rightarrow> True |
OK y \<Rightarrow> (case e1 of Err \<Rightarrow> False | OK x \<Rightarrow> x <=_r y)"
definition sup :: "('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> ('a err \<Rightarrow> 'b err \<Rightarrow> 'c err)" where
"sup f == lift2(%x y. OK(x +_f y))"
definition err :: "'a set \<Rightarrow> 'a err set" where
"err A == insert Err {x . ? y:A. x = OK y}"
definition esl :: "'a sl \<Rightarrow> 'a esl" where
"esl == %(A,r,f). (A,r, %x y. OK(f x y))"
definition sl :: "'a esl \<Rightarrow> 'a err sl" where
"sl == %(A,r,f). (err A, le r, lift2 f)"
abbreviation
err_semilat :: "'a esl \<Rightarrow> bool"
where "err_semilat L == semilat(Err.sl L)"
primrec strict :: "('a \<Rightarrow> 'b err) \<Rightarrow> ('a err \<Rightarrow> 'b err)" where
"strict f Err = Err"
| "strict f (OK x) = f x"
lemma strict_Some [simp]:
"(strict f x = OK y) = (\<exists> z. x = OK z \<and> f z = OK y)"
by (cases x, auto)
lemma not_Err_eq:
"(x \<noteq> Err) = (\<exists>a. x = OK a)"
by (cases x) auto
lemma not_OK_eq:
"(\<forall>y. x \<noteq> OK y) = (x = Err)"
by (cases x) auto
lemma unfold_lesub_err:
"e1 <=_(le r) e2 == le r e1 e2"
by (simp add: lesub_def)
lemma le_err_refl:
"!x. x <=_r x \<Longrightarrow> e <=_(Err.le r) e"
apply (unfold lesub_def Err.le_def)
apply (simp split: err.split)
done
lemma le_err_trans [rule_format]:
"order r \<Longrightarrow> e1 <=_(le r) e2 \<longrightarrow> e2 <=_(le r) e3 \<longrightarrow> e1 <=_(le r) e3"
apply (unfold unfold_lesub_err le_def)
apply (simp split: err.split)
apply (blast intro: order_trans)
done
lemma le_err_antisym [rule_format]:
"order r \<Longrightarrow> e1 <=_(le r) e2 \<longrightarrow> e2 <=_(le r) e1 \<longrightarrow> e1=e2"
apply (unfold unfold_lesub_err le_def)
apply (simp split: err.split)
apply (blast intro: order_antisym)
done
lemma OK_le_err_OK:
"(OK x <=_(le r) OK y) = (x <=_r y)"
by (simp add: unfold_lesub_err le_def)
lemma order_le_err [iff]:
"order(le r) = order r"
apply (rule iffI)
apply (subst Semilat.order_def)
apply (blast dest: order_antisym OK_le_err_OK [THEN iffD2]
intro: order_trans OK_le_err_OK [THEN iffD1])
apply (subst Semilat.order_def)
apply (blast intro: le_err_refl le_err_trans le_err_antisym
dest: order_refl)
done
lemma le_Err [iff]: "e <=_(le r) Err"
by (simp add: unfold_lesub_err le_def)
lemma Err_le_conv [iff]:
"Err <=_(le r) e = (e = Err)"
by (simp add: unfold_lesub_err le_def split: err.split)
lemma le_OK_conv [iff]:
"e <=_(le r) OK x = (? y. e = OK y & y <=_r x)"
by (simp add: unfold_lesub_err le_def split: err.split)
lemma OK_le_conv:
"OK x <=_(le r) e = (e = Err | (? y. e = OK y & x <=_r y))"
by (simp add: unfold_lesub_err le_def split: err.split)
lemma top_Err [iff]: "top (le r) Err"
by (simp add: top_def)
lemma OK_less_conv [rule_format, iff]:
"OK x <_(le r) e = (e=Err | (? y. e = OK y & x <_r y))"
by (simp add: lesssub_def lesub_def le_def split: err.split)
lemma not_Err_less [rule_format, iff]:
"~(Err <_(le r) x)"
by (simp add: lesssub_def lesub_def le_def split: err.split)
lemma semilat_errI [intro]:
assumes semilat: "semilat (A, r, f)"
shows "semilat(err A, Err.le r, lift2(%x y. OK(f x y)))"
apply(insert semilat)
apply (unfold semilat_Def closed_def plussub_def lesub_def
lift2_def Err.le_def err_def)
apply (simp split: err.split)
done
lemma err_semilat_eslI_aux:
assumes semilat: "semilat (A, r, f)"
shows "err_semilat(esl(A,r,f))"
apply (unfold sl_def esl_def)
apply (simp add: semilat_errI[OF semilat])
done
lemma err_semilat_eslI [intro, simp]:
"\<And>L. semilat L \<Longrightarrow> err_semilat(esl L)"
by(simp add: err_semilat_eslI_aux split_tupled_all)
lemma acc_err [simp, intro!]: "acc r \<Longrightarrow> acc(le r)"
apply (unfold acc_def lesub_def le_def lesssub_def)
apply (simp add: wf_eq_minimal split: err.split)
apply clarify
apply (case_tac "Err : Q")
apply blast
apply (erule_tac x = "{a . OK a : Q}" in allE)
apply (case_tac "x")
apply fast
apply blast
done
lemma Err_in_err [iff]: "Err : err A"
by (simp add: err_def)
lemma Ok_in_err [iff]: "(OK x : err A) = (x:A)"
by (auto simp add: err_def)
subsection {* lift *}
lemma lift_in_errI:
"\<lbrakk> e : err S; !x:S. e = OK x \<longrightarrow> f x : err S \<rbrakk> \<Longrightarrow> lift f e : err S"
apply (unfold lift_def)
apply (simp split: err.split)
apply blast
done
lemma Err_lift2 [simp]:
"Err +_(lift2 f) x = Err"
by (simp add: lift2_def plussub_def)
lemma lift2_Err [simp]:
"x +_(lift2 f) Err = Err"
by (simp add: lift2_def plussub_def split: err.split)
lemma OK_lift2_OK [simp]:
"OK x +_(lift2 f) OK y = x +_f y"
by (simp add: lift2_def plussub_def split: err.split)
subsection {* sup *}
lemma Err_sup_Err [simp]:
"Err +_(Err.sup f) x = Err"
by (simp add: plussub_def Err.sup_def Err.lift2_def)
lemma Err_sup_Err2 [simp]:
"x +_(Err.sup f) Err = Err"
by (simp add: plussub_def Err.sup_def Err.lift2_def split: err.split)
lemma Err_sup_OK [simp]:
"OK x +_(Err.sup f) OK y = OK(x +_f y)"
by (simp add: plussub_def Err.sup_def Err.lift2_def)
lemma Err_sup_eq_OK_conv [iff]:
"(Err.sup f ex ey = OK z) = (? x y. ex = OK x & ey = OK y & f x y = z)"
apply (unfold Err.sup_def lift2_def plussub_def)
apply (rule iffI)
apply (simp split: err.split_asm)
apply clarify
apply simp
done
lemma Err_sup_eq_Err [iff]:
"(Err.sup f ex ey = Err) = (ex=Err | ey=Err)"
apply (unfold Err.sup_def lift2_def plussub_def)
apply (simp split: err.split)
done
subsection {* semilat (err A) (le r) f *}
lemma semilat_le_err_Err_plus [simp]:
"\<lbrakk> x: err A; semilat(err A, le r, f) \<rbrakk> \<Longrightarrow> Err +_f x = Err"
by (blast intro: Semilat.le_iff_plus_unchanged [OF Semilat.intro, THEN iffD1]
Semilat.le_iff_plus_unchanged2 [OF Semilat.intro, THEN iffD1])
lemma semilat_le_err_plus_Err [simp]:
"\<lbrakk> x: err A; semilat(err A, le r, f) \<rbrakk> \<Longrightarrow> x +_f Err = Err"
by (blast intro: Semilat.le_iff_plus_unchanged [OF Semilat.intro, THEN iffD1]
Semilat.le_iff_plus_unchanged2 [OF Semilat.intro, THEN iffD1])
lemma semilat_le_err_OK1:
"\<lbrakk> x:A; y:A; semilat(err A, le r, f); OK x +_f OK y = OK z \<rbrakk>
\<Longrightarrow> x <=_r z"
apply (rule OK_le_err_OK [THEN iffD1])
apply (erule subst)
apply (simp add: Semilat.ub1 [OF Semilat.intro])
done
lemma semilat_le_err_OK2:
"\<lbrakk> x:A; y:A; semilat(err A, le r, f); OK x +_f OK y = OK z \<rbrakk>
\<Longrightarrow> y <=_r z"
apply (rule OK_le_err_OK [THEN iffD1])
apply (erule subst)
apply (simp add: Semilat.ub2 [OF Semilat.intro])
done
lemma eq_order_le:
"\<lbrakk> x=y; order r \<rbrakk> \<Longrightarrow> x <=_r y"
apply (unfold Semilat.order_def)
apply blast
done
lemma OK_plus_OK_eq_Err_conv [simp]:
assumes "x:A" and "y:A" and "semilat(err A, le r, fe)"
shows "((OK x) +_fe (OK y) = Err) = (~(? z:A. x <=_r z & y <=_r z))"
proof -
have plus_le_conv3: "\<And>A x y z f r.
\<lbrakk> semilat (A,r,f); x +_f y <=_r z; x:A; y:A; z:A \<rbrakk>
\<Longrightarrow> x <=_r z \<and> y <=_r z"
by (rule Semilat.plus_le_conv [OF Semilat.intro, THEN iffD1])
from assms show ?thesis
apply (rule_tac iffI)
apply clarify
apply (drule OK_le_err_OK [THEN iffD2])
apply (drule OK_le_err_OK [THEN iffD2])
apply (drule Semilat.lub [OF Semilat.intro, of _ _ _ "OK x" _ "OK y"])
apply assumption
apply assumption
apply simp
apply simp
apply simp
apply simp
apply (case_tac "(OK x) +_fe (OK y)")
apply assumption
apply (rename_tac z)
apply (subgoal_tac "OK z: err A")
apply (drule eq_order_le)
apply (erule Semilat.orderI [OF Semilat.intro])
apply (blast dest: plus_le_conv3)
apply (erule subst)
apply (blast intro: Semilat.closedI [OF Semilat.intro] closedD)
done
qed
subsection {* semilat (err(Union AS)) *}
(* FIXME? *)
lemma all_bex_swap_lemma [iff]:
  "(!x. (? y:A. x = f y) --> P x) = (!y:A. P(f y))"
  by blast
text {*
If @{term "AS = {}"} the thm collapses to
@{prop "order r & closed {Err} f & Err +_f Err = Err"}
which may not hold
*}
lemma err_semilat_UnionI:
"\<lbrakk> !A:AS. err_semilat(A, r, f); AS ~= {};
!A:AS.!B:AS. A~=B \<longrightarrow> (!a:A.!b:B. ~ a <=_r b & a +_f b = Err) \<rbrakk>
\<Longrightarrow> err_semilat(Union AS, r, f)"
apply (unfold semilat_def sl_def)
apply (simp add: closed_err_Union_lift2I)
apply (rule conjI)
apply blast
apply (simp add: err_def)
apply (rule conjI)
apply clarify
apply (rename_tac A a u B b)
apply (case_tac "A = B")
apply simp
apply simp
apply (rule conjI)
apply clarify
apply (rename_tac A a u B b)
apply (case_tac "A = B")
apply simp
apply simp
apply clarify
apply (rename_tac A ya yb B yd z C c a b)
apply (case_tac "A = B")
apply (case_tac "A = C")
apply simp
apply (rotate_tac -1)
apply simp
apply (rotate_tac -1)
apply (case_tac "B = C")
apply simp
apply (rotate_tac -1)
apply simp
done
end
|
State Before: Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
n i : ℕ
L : ListBlank Γ
⊢ nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i State After: case zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i✝ : ℕ
L✝ : ListBlank Γ
i : ℕ
L : ListBlank Γ
⊢ nth (modifyNth f Nat.zero L) i = if i = Nat.zero then f (nth L i) else nth L i
case succ
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i✝ : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
i : ℕ
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) i = if i = Nat.succ n then f (nth L i) else nth L i Tactic: induction' n with n IH generalizing i L State Before: case zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i✝ : ℕ
L✝ : ListBlank Γ
i : ℕ
L : ListBlank Γ
⊢ nth (modifyNth f Nat.zero L) i = if i = Nat.zero then f (nth L i) else nth L i State After: no goals Tactic: cases i <;> simp only [ListBlank.nth_zero, if_true, ListBlank.head_cons, ListBlank.modifyNth,
ListBlank.nth_succ, if_false, ListBlank.tail_cons, Nat.zero_eq] State Before: case succ
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i✝ : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
i : ℕ
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) i = if i = Nat.succ n then f (nth L i) else nth L i State After: case succ.zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) Nat.zero = if Nat.zero = Nat.succ n then f (nth L Nat.zero) else nth L Nat.zero
case succ.succ
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
n✝ : ℕ
⊢ nth (modifyNth f (Nat.succ n) L) (Nat.succ n✝) =
if Nat.succ n✝ = Nat.succ n then f (nth L (Nat.succ n✝)) else nth L (Nat.succ n✝) Tactic: cases i State Before: case succ.zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) Nat.zero = if Nat.zero = Nat.succ n then f (nth L Nat.zero) else nth L Nat.zero State After: case succ.zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) Nat.zero = nth L Nat.zero Tactic: rw [if_neg (Nat.succ_ne_zero _).symm] State Before: case succ.zero
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
⊢ nth (modifyNth f (Nat.succ n) L) Nat.zero = nth L Nat.zero State After: no goals Tactic: simp only [ListBlank.nth_zero, ListBlank.head_cons, ListBlank.modifyNth, Nat.zero_eq] State Before: case succ.succ
Γ : Type u_1
inst✝ : Inhabited Γ
f : Γ → Γ
i : ℕ
L✝ : ListBlank Γ
n : ℕ
IH : ∀ (i : ℕ) (L : ListBlank Γ), nth (modifyNth f n L) i = if i = n then f (nth L i) else nth L i
L : ListBlank Γ
n✝ : ℕ
⊢ nth (modifyNth f (Nat.succ n) L) (Nat.succ n✝) =
if Nat.succ n✝ = Nat.succ n then f (nth L (Nat.succ n✝)) else nth L (Nat.succ n✝) State After: no goals Tactic: simp only [IH, ListBlank.modifyNth, ListBlank.nth_succ, ListBlank.tail_cons, Nat.succ.injEq] |
open import Coinduction using ( ∞ ; ♭ ; ♯_ )
open import Data.Bool using ( Bool ; true ; false )
open import Data.Empty using ( ⊥ ; ⊥-elim )
open import Data.Unit using ( ⊤ ; tt )
open import System.IO.Transducers.Lazy using
( _⇒_ ; inp ; out ; done ; choice ) renaming
( ⟦_⟧ to ⟦_⟧' ; _⟫_ to _⟫'_
; _[&]_ to _[&]'_ ; _⟨&⟩_ to _⟨&⟩'_ ; assoc to assoc' )
open import System.IO.Transducers.Session using ( Session ; I ; Σ ; Γ ; _/_ ; IsΣ ; ⟨_⟩ ; _&_ ; ¿ ; _⊕_ )
open import System.IO.Transducers.Trace using ( Trace ; [] ; _∷_ )
module System.IO.Transducers.Strict where
infixr 4 _⇛_
infixr 6 _⟫_
infixr 8 _[&]_ _⟨&⟩_
-- Strict tranducers are ones which perform input before any output
data Strict : ∀ {S T} → (S ⇒ T) → Set₁ where
inp : ∀ {A V F T} P → (Strict (inp {A} {V} {F} {T} P))
done : ∀ {S} → (Strict (done {S}))
-- Slightly annoyingly, _≡_ has different cardinalities
-- in different versions of the standard library.
-- Until 1.4 of agda-stdlib is more widely distributed,
-- we define a specialized version of _≡_ here.
data _≡_ (S : Session) : Session → Set₁ where
refl : S ≡ S
-- S ⇛ T is the type of strict transducers regarded as functions
_⇛_ : Session → Session → Set₁
I ⇛ T = I ≡ T
Σ V F ⇛ T = ∀ a → (♭ F a) ⇒ T
-- Identity transducer
id : ∀ {S} → S ⇛ S
id {I} = refl
id {Σ V F} = λ a → out a done
-- Inclusion of strict in lazy transducers
ι : ∀ {S T} → (S ⇛ T) → (S ⇒ T)
ι {I} refl = done
ι {Σ V F} P = inp (♯ P)
-- Composition
_⟫_ : ∀ {S T U} → (S ⇛ T) → (T ⇛ U) → (S ⇛ U)
_⟫_ {I} refl refl = refl
_⟫_ {Σ V F} {I} P refl = P
_⟫_ {Σ V F} {Σ W G} P Q = λ a → (P a ⟫' ι Q)
-- & on transducers
_[&]_ : ∀ {S T U V} → (S ⇛ T) → (U ⇛ V) → ((S & U) ⇛ (T & V))
_[&]_ {I} refl Q = Q
_[&]_ {Σ V F} P Q = λ a → (P a [&]' ι Q)
-- Associativity of &
assoc : ∀ {S T U} → ((S & (T & U)) ⇛ ((S & T) & U))
assoc {I} {T} {U} = id {T & U}
assoc {Σ V F} {T} {U} = λ a → out a (assoc' {♭ F a})
-- Mediating morphism for &
_⟨&⟩_ : ∀ {S T U} → (S ⇛ T) → (S ⇛ U) → (S ⇛ T & U)
_⟨&⟩_ {I} refl refl = refl
_⟨&⟩_ {Σ V F} P Q = λ a → (P a ⟨&⟩' Q a)
|
State Before: F : Type ?u.48941
α : Type u_3
β : Type u_1
γ : Type u_2
δ : Type ?u.48953
inst✝³ : Bot α
inst✝² : Bot β
inst✝¹ : Bot γ
inst✝ : Bot δ
g : BotHom β γ
f₁ f₂ : BotHom α β
hg : Injective ↑g
h : comp g f₁ = comp g f₂
a : α
⊢ ↑g (↑f₁ a) = ↑g (↑f₂ a) State After: no goals Tactic: rw [← BotHom.comp_apply, h, BotHom.comp_apply] |
lemma continuous_at_right_real_increasing: fixes f :: "real \<Rightarrow> real" assumes nondecF: "\<And>x y. x \<le> y \<Longrightarrow> f x \<le> f y" shows "continuous (at_right a) f \<longleftrightarrow> (\<forall>e>0. \<exists>d>0. f (a + d) - f a < e)" |
Formal statement is: lemma integral_restrict_UNIV: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" assumes "S \<in> sets lebesgue" shows "integral\<^sup>L lebesgue (\<lambda>x. if x \<in> S then f x else 0) = integral\<^sup>L (lebesgue_on S) f" Informal statement is: If $f$ is a Lebesgue-integrable function on a set $S$, then the integral of $f$ over $S$ is equal to the integral of $f$ over the whole space, restricted to $S$. |
theory State_Machine
imports Main
begin
section \<open>Definitions\<close>
text \<open>Transition function\<close>
type_synonym ('q, 'a) trans =
"'q \<times> 'a \<Rightarrow> 'q"
text \<open>simple state-machine\<close>
record ('q, 'a) state_machine =
initial :: "'q"
delta :: "('q, 'a) trans"
text \<open>closure of transition function\<close>
fun hat1 :: "('q, 'a) trans \<Rightarrow> ('q, 'a list) trans" where
"hat1 t (q, []) = q" |
"hat1 t (q, (a#as)) = hat1 t (t (q, a), as)"
abbreviation delta_hat ::
"('q, 'a, 'e) state_machine_scheme \<Rightarrow> ('q, 'a list) trans" where
"delta_hat m \<equiv> hat1 (delta m)"
definition initial_in_states :: "['q set, 'q] \<Rightarrow> bool" where
"initial_in_states st q0 \<equiv> q0 \<in> st"
definition reachable :: "('q, 'a, 'e) state_machine_scheme \<Rightarrow> 'q \<Rightarrow> bool" where
"reachable m q \<equiv> (\<exists>w. q = delta_hat m (initial m, w))"
definition trim :: "('q, 'a, 'e) state_machine_scheme \<Rightarrow> bool" where
"trim m \<equiv> \<forall>q. reachable m q"
section \<open>Properties\<close>
lemma delta_append[simp]:
"hat1 t (q, u @ v) = hat1 t (hat1 t (q, u), v)"
by (induct u arbitrary: q, auto)
lemma delta_append_1:
"hat1 t (q, u @ [a]) = t (hat1 t (q, u), a)"
by (induct u rule: rev_induct, simp_all)
lemma reachable_initial:
"reachable m (initial m)"
unfolding reachable_def
by (rule exI[where x="[]"], simp)
lemma reachable_delta:
assumes "reachable m q"
shows "reachable m (delta m (q, a))"
proof -
obtain w where "q = delta_hat m (initial m, w)"
using assms unfolding reachable_def by (erule exE)
then show ?thesis
unfolding reachable_def by (intro exI[where x="w @ [a]"], simp)
qed
lemma reachable_delta_hat:
assumes "reachable m q"
shows "reachable m (delta_hat m (q, w))"
proof -
obtain v where "q = delta_hat m (initial m, v)"
using assms unfolding reachable_def by (erule exE)
then show ?thesis
unfolding reachable_def by (intro exI[where x="v @ w"], simp)
qed
end
|
{-# OPTIONS --safe #-}
open import Definition.Typed.EqualityRelation
module Definition.LogicalRelation.Properties {{eqrel : EqRelSet}} where
open import Definition.LogicalRelation.Properties.Reflexivity public
open import Definition.LogicalRelation.Properties.Symmetry public
open import Definition.LogicalRelation.Properties.Transitivity public
open import Definition.LogicalRelation.Properties.Conversion public
open import Definition.LogicalRelation.Properties.Escape public
open import Definition.LogicalRelation.Properties.Universe public
open import Definition.LogicalRelation.Properties.Neutral public
open import Definition.LogicalRelation.Properties.Reduction public
open import Definition.LogicalRelation.Properties.Successor public
open import Definition.LogicalRelation.Properties.MaybeEmb public
|
\chapter{Analysis}
\label{sec:analysis}
In order to be able to evaluate potential means of improving Intel's \gls{AVX} reclocking algorithm, we first need to obtain thorough knowledge of the algorithm as it is implemented in current Intel x86 \glspl{CPU}. We can then use this knowledge for the software-based reimplementation presented in \Cref{sec:design} and to understand the hardware-induced constraints Intel needs to keep within, which is in turn necessary for designing a feasible and implementable improved reclocking algorithm.
Intel regularly publishes optimization manuals~\cite{inteloptimizationmanual} intended for compiler developers and software engineers which contain a vague description of the mechanism used for deciding when to lower or raise the processor's frequency upon execution of \gls{AVX} instructions. Precisely, Intel defines three \textit{turbo license levels}, which designate frequency offsets for different instruction mix scenarios:
\begin{itemize}
\item Level~0: only non-demanding (i.e., scalar, \gls{SSE}, \gls{AVX1} or light \gls{AVX2}) instructions are being executed; a core may run at its maximum turbo frequency. This is the default state.
\item Level~1: active during the execution of heavy \gls{AVX2} and/or light \gls{AVX-512} instructions. The maximum frequency is lowered to a \gls{SKU}-specific value.
\item Level~2: used for the execution of heavy \gls{AVX-512} instructions. The maximum frequency is lowered to a \gls{SKU}-specific value that is further below the frequency used in level~1.
\end{itemize}
\noindent Here, \enquote{heavy} instructions are defined to be floating-point, integer multiplication or integer \gls{FMA} operations. Given these license levels, Intel states that it may take up to \SI{500}{\micro\second} until the new frequency is applied and about \SI{2}{\milli\second} until a core reverts to level~0 after executing the last \enquote{heavy} instruction. Before the frequency is lowered, a core operates at \enquote{a lower peak capability,} however, Intel does not further specify what that exactly means. Intel hints that the license decisions are not solely bound to the instruction types as given in the level descriptions, but rather depend on the mix of instructions executed within a certain time window.
In this chapter we will describe the design of a framework that allows us to analyze the actual behavior of an x86 processor during the execution of \gls{AVX} instructions. Afterwards, we will present and evaluate the results generated when executed on a system equipped with a modern Intel \gls{CPU}, and point out deviations between the actual behavior and what Intel maintains in their specification.
\section{Methodology}
\label{sec:analysis:methodology}
For our reimplementation, our goal is to create a model of the reclocking behavior of an \gls{AVX-512}-capable \gls{CPU} that is as complete as possible and reflects the decisions made by the hardware with high accuracy. Therefore, by conducting this analysis, we want to answer the following questions:
\begin{itemize}
\item When exactly does a \gls{CPU} core decide to reduce or raise its frequency during and after \gls{AVX} execution?
\item How much time do turbo license level switches need?
\item Do the \glspl{CPU} switch directly from level 0 to level 2 in case of heavy \gls{AVX-512} instructions or is there a step to level 1 in between?
\item What does Intel mean by \enquote{lower peak capability} while lowering the clock?
\item How complete is Intel's description of the reclocking algorithm?
\end{itemize}
In order to create a precise model we want to analyze these questions in different scenarios, i.e., for different instruction types, for different global load situations as well as with and without enabled turbo frequencies. To reach our goal, we run our analysis framework with synthetic code snippets that are designed to trigger the behavior to be analyzed.
\section{Design}
\label{sec:analysis:design}
Our analysis framework consists of a module for the \gls{Linux} kernel as well as a user-space component which interact with each other and make use of the \gls{PMU}, a unit commonly found in modern microprocessors that enables software to measure performance and bottlenecks on the hardware level. In the following sections, we will present the design and features of these components and describe how they contribute to our analysis purposes.
\subsection{Performance Monitoring Unit (PMU)}
\label{sec:analysis:design:pmu}
Modern \gls{x86} \glspl{CPU} commonly feature a \glsreset{PMU}\gls{PMU} which exposes a set of \textit{performance counters} that may be configured to count assertions of a large set of \textit{performance events} \cite{intelsdmsysprogguide}.
Precisely, we use version~3 of the \gls{x86} \textit{Architectural Performance Monitoring} facility, which features three \textit{fixed counters} per logical core that count retired instructions, cycles during which the core is not in a halt state, and \glsunset{TSC}\gls{TSC} cycles in unhalted state, respectively. The \glsreset{TSC}\gls{TSC} is a simple counter found in current \gls{x86} \glspl{CPU} that increments steadily with a fixed frequency and independent of the core clock, thus making it suitable for measuring wall-clock time. In addition to the fixed counters, eight freely configurable counters are available per physical core (four per logical core when \gls{SMT} is enabled). These counters may be set to count any of the performance events available for a specific microarchitecture, e.g., most architectures define events for cache hits/misses, execution stalls or load on specific execution units.
Each counter is exposed as an \gls{MSR} and configured through another one. More specifically, software may select the event to count (non-fixed counters only) and when to count (i.e., in user mode (ring~$\geq$~1) and/or kernel mode (ring~0)). Additionally, a counter can be configured to trigger an interrupt when it overflows. By presetting the counter to its maximum value less an offset, this serves as a mechanism to generate notifications once a certain number of events of a specific type has occurred. The interrupt vector used for delivery can be configured in the core's \gls{APIC}'s \gls{LVT}. Optionally, the \gls{PMU} may be instructed to freeze all counters at their current values as soon as an interrupt is triggered.
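To illustrate the mechanism, the following kernel-mode C sketch shows how a general-purpose counter could be armed to raise an interrupt after a given number of events. The MSR indices and the \texttt{IA32\_PERFEVTSEL} bit layout are architectural \cite{intelsdmsysprogguide}; the helper names are ours, and the sign-extension shortcut in the preload assumes small offsets:
\begin{minted}{c}
#include <linux/types.h>
#include <asm/msr.h>            /* wrmsrl()                     */

#define IA32_PERFEVTSEL0 0x186  /* event-select MSR, counter 0  */
#define IA32_PMC0        0x0c1  /* counter MSR, counter 0       */

/* Architectural IA32_PERFEVTSELx layout: event select (bits 0-7),
 * unit mask (8-15), USR (16), OS (17), INT = interrupt on
 * overflow (20), EN = enable (22). */
static u64 perfevtsel(u8 event, u8 umask, bool usr, bool os, bool irq)
{
    return (u64)event | ((u64)umask << 8)
         | ((u64)usr << 16) | ((u64)os << 17)
         | ((u64)irq << 20) | (1ULL << 22);
}

/* Arm counter 0 so that it overflows -- and, with INT set,
 * interrupts -- after `offset` events.  Writes to IA32_PMCx are
 * sign-extended from bit 31 to the counter width, so writing the
 * negated offset suffices for small offsets. */
static void arm_counter0(u8 event, u8 umask, u32 offset)
{
    wrmsrl(IA32_PMC0, (u64)(u32)-offset);
    wrmsrl(IA32_PERFEVTSEL0, perfevtsel(event, umask,
                                        true, false, true));
}
\end{minted}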
\subsection{Overview}
\label{sec:analysis:design:overview}
The analysis tool presented here is made up of a kernel and a user-space component, where the former provides the latter with means to configure the \gls{PMU} and with efficient handling of interrupts generated by performance counter overflows.
As depicted in a simplified way in \Cref{fig:analysis:design:overview}, the user-space component spawns $n\in\mathbb{N}$ \textit{execution threads} and $w\in\{1,n\}$ \textit{wait threads}, each corresponding to exactly one execution thread. The idea behind having multiple execution threads is to be able to make measurements on multiple cores simultaneously, thereby simulating parallel workloads. In \emph{pre-throttling mode}, only one execution thread has an associated wait thread ($w=1$): the other execution threads are started earlier to simulate an environment with pre-existing load, where global load is merely increased by utilizing one more core. Without pre-throttling, there is a wait thread for each execution thread ($w=n$).
Upon startup, each execution thread generates a \gls{PMU} configuration designed to produce the desired measurements, which is then applied by the kernel module. Now, the kernel module jumps back into user-space to an address previously defined by the execution thread. Then, the thread will execute \gls{AVX} instructions until preempted by an overflow interrupt generated by the \gls{PMU} according to its configuration (as described in \Cref{sec:analysis:design:pmu}). Each wait thread is initially suspended until an interrupt is triggered on its corresponding execution thread, at which point it is resumed by the kernel component and provided with the raw performance counter values.
\begin{figure*}
\centering
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\componentrectwidth}{4}
\pgfmathsetmacro{\componentrectheight}{4.75}
\pgfmathsetmacro{\separatordist}{2.5}
\pgfmathsetmacro{\arrowoverlength}{0.25}
\pgfmathsetmacro{\arrowlength}{2*(\arrowoverlength + \separatordist)}
\pgfmathsetmacro{\halfcomponentrectwidth}{\componentrectwidth*0.5}
\pgfmathsetmacro{\quartercomponentrectwidth}{\componentrectwidth*0.25}
% kernel
\draw (0cm,0cm) rectangle ++(\componentrectwidth cm,\componentrectheight) [ref=kernel-rect];
\node[color=kitblue] at ([yshift=-0.4 cm] kernel-rect south) {Kernel module};
%\draw[densely dotted] ([yshift=-1cm] kernel-rect north west) -- ++(\componentrectwidth,0) [ref=kernel-sep];
%\node at ([yshift=-0.5cm] kernel-rect north) {Linux};
% separator
\draw[dashed, color=kitdarkgrey] ([xshift=\separatordist cm] kernel-rect south east) -- ([xshift=\separatordist cm] kernel-rect north east) [ref=separator];
% user-space
\draw ([xshift=2*\separatordist cm +\componentrectwidth cm] 0, 0) rectangle ++(\componentrectwidth,\componentrectheight) [ref=user-rect];
\draw[densely dotted, color=kitdarkgrey] (user-rect south) -- ++(0,\componentrectheight);
\node[align=center,anchor=north] at ([xshift=-\quartercomponentrectwidth cm] user-rect north) {execution \\ thread};
\node[align=center,anchor=north] at ([xshift=+\quartercomponentrectwidth cm] user-rect north) {wait \\ thread};
\node[color=kitblue] at ([yshift=-0.4 cm] user-rect south) {User-space component};
% steps
\draw[<-] ([yshift=-0.85cm,xshift=-\arrowoverlength cm] kernel-rect north east) -- ++(\arrowlength,0) node[align=center,pos=.5,above=0] {1. Configuration instructions};
\node at ([yshift=-1.35cm] kernel-rect north) {2. Setup PMU};
\draw[->] ([yshift=-2.0cm,xshift=-\arrowoverlength cm] kernel-rect north east) -- ++(\arrowlength,0) node[align=center,pos=.5,above=0] {3. Return to user-space};
\node[align=center] at ([yshift=-2.5cm,xshift=-\quartercomponentrectwidth cm] user-rect north) {4. Execute \\ AVX};
\node[color=kityellow] at ([yshift=-3.12cm,xshift=-1cm] kernel-rect north) {\Huge\Lightning};
\node at ([yshift=-3.1cm] kernel-rect north) {5. Interrupt};
\draw ([yshift=-3.75cm,xshift=\arrowoverlength cm] user-rect north west) -- ++(-\arrowlength,0) node[align=center,pos=.5,above=0] {6. PMU values};
\draw[->] ([yshift=-3.75cm,xshift=\arrowoverlength cm] user-rect north west) -- ++(\halfcomponentrectwidth,0);
\node[align=center] at ([yshift=-4.25cm,xshift=\quartercomponentrectwidth cm] user-rect north) {7. Process \\ results};
\end{tikzpicture}
\caption{Simplified analysis framework architecture. The kernel module enables the user-space component to configure the PMU and handles interrupts.}
\label{fig:analysis:design:overview}
\end{figure*}
\subsection{Kernel Component}
\label{sec:analysis:design:kernel}
Our kernel component is not supposed to conduct any analysis tasks by itself, but is designed to aid the user-space component described later in \Cref{sec:analysis:design:userspace}. We chose to implement it as a module for version~5.1 of the \gls{Linux} kernel, which implies that it is written in the C programming language. Existence and design of this kernel module are motivated by our user-space component's need to configure the \gls{PMU} in order to conduct the measurements required for our analysis. This can only be done from kernel-space.
During module startup, the \gls{PMU} is reset to a default state and the performance counter overflow interrupt vector is set in the \gls{LVT} of each core's \gls{APIC}. Notably, this degrades the functionality of \gls{Linux}'s \texttt{perf} subsystem as \texttt{perf} partially relies on using the \gls{PMU}.
The module interfaces with user-space by defining a custom device class and then providing a virtual device of the previously defined class, exposed via \texttt{/dev/reclocking\_analysis} in the virtual file system. User-space may then \texttt{open()} the provided device file and interact with the module by using several offered \texttt{ioctl()} calls.
Execution threads, on the one hand, initiate their execution by using the \texttt{SETUP ioctl()} call. A C \texttt{struct} must be passed that contains a set of \glspl{MSR} to be written by the kernel module -- these are used to configure the \gls{PMU}. In order to increase the precision of our measurements, it is desirable to cut time spent in user-space without actually executing the specific code to be measured. Therefore, a value for the \gls{instptr} must also be passed that will be set in the thread's context before returning to user-space. Thus, the thread will not directly return at the previous position in the \gls{libc}'s \texttt{ioctl()} wrapper but rather be redirected to another location in memory (i.e., where the \gls{AVX} code for our measurements lies). Optionally, the \texttt{r12}~\gls{x86} architectural register may also be set so that the code executed in user-space upon returning is able to access data structures in an easy manner without needing to use the stack. As described further below, the interrupt action must also be defined beforehand by the execution thread. After applying the configuration, the \texttt{ioctl()} handler saves the current \gls{TSC} value (to be able to measure elapsed time later on) and returns to user-space.
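To make the interface concrete, the following user-space sketch illustrates a \texttt{SETUP} call; note that the structure layout, field names and request code shown here are simplified, hypothetical stand-ins for our actual definitions:
\begin{minted}{c}
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Hypothetical sketch of the SETUP argument described above. */
struct msr_write { uint32_t msr; uint64_t value; };

struct setup_config {
    struct msr_write msrs[16];   /* PMU MSRs to be written          */
    size_t           num_msrs;
    uint64_t         user_rip;   /* where to continue in user-space */
    uint64_t         r12;        /* optional data pointer           */
    int              irq_action; /* WAKE_WAIT_THREAD, SET_MSRS, GOTO */
};

#define RECLOCKING_SETUP _IOW('r', 1, struct setup_config) /* hypothetical */

int run_measurement(const struct setup_config *cfg)
{
    int fd = open("/dev/reclocking_analysis", O_RDWR);
    if (fd < 0)
        return -1;
    /* From the caller's view this blocks: the kernel redirects the
     * thread to cfg->user_rip, and the ioctl() only returns after
     * the PMU overflow interrupt has fired. */
    int ret = ioctl(fd, RECLOCKING_SETUP, cfg);
    close(fd);
    return ret;
}
\end{minted}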
Wait threads, on the other hand, start with the \texttt{WAIT\_FOR\_INTERRUPT} \texttt{ioctl()} call, which takes two arguments: first, a pointer to an \texttt{interrupt\_result} structure in the user-space component where the resulting performance counter values shall be stored later, and second, the numeric identifier of the \gls{CPU} core where an interrupt is expected to occur -- this is what effectively binds a wait thread to its corresponding execution thread. The calling thread is suspended by setting it into the \texttt{TASK\_UNINTERRUPTIBLE} state. This state in \gls{Linux}'s task state machine allows a thread to be woken up only by the kernel itself and not via any user-space mechanisms (e.g., \glspl{UNIXsig}) \cite{kernelschedheader}. This way, we ensure that the execution flow is not interrupted unexpectedly.
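Conceptually, suspension and resumption map onto standard \gls{Linux} kernel primitives, roughly as in the following sketch (locking and bookkeeping omitted):
\begin{minted}{c}
#include <linux/sched.h>

/* Wait-thread side: park the calling task until the interrupt
 * handler wakes it.  TASK_UNINTERRUPTIBLE ensures that no signal
 * can disturb the sleep. */
static void park_wait_thread(void)
{
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule();                  /* yields the CPU until woken */
}

/* Interrupt-handler side: after handing over the counter values,
 * wake the wait thread registered for this core. */
static void resume_wait_thread(struct task_struct *waiter)
{
    wake_up_process(waiter);
}
\end{minted}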
It is expected that all execution threads that have an associated wait thread trigger a performance counter overflow interrupt some time after setup. The interrupt handler will then proceed with one of multiple actions as instructed by the \texttt{SETUP} call:\clearpage
\begin{itemize}
\item \texttt{WAKE\_WAIT\_THREAD}: this action reads all performance counters and writes them along with the current \gls{TSC} value and the recorded \gls{TSC} value at \texttt{SETUP} to the \texttt{interrupt\_result} structure of the corresponding wait thread. Then, the wait thread is woken up, so that it may process and print the results. The execution thread that triggered the interrupt is returned to its previous \acrlong{instptr} (from before the \texttt{SETUP} call). Notably, from user-space's view, the original \texttt{SETUP} call returns only now.
\item \texttt{SET\_MSRS}: this is used for analysis tasks consisting of two consecutive steps, e.g., when two different \gls{AVX} phases are to be executed and we are only interested in performance events from the latter. The \gls{TSC} value is recorded and another set of \glspl{MSR} is configured on the thread's core. For the next interrupt, the action is unconditionally set to \texttt{WAKE\_WAIT\_THREAD}.
\item \texttt{GOTO}: exactly like \texttt{SET\_MSRS}, but also sets a new \acrlong{instptr} on the execution thread.
\end{itemize}
\noindent Each of these actions concludes with resetting the \gls{PMU}'s overflow bit and the \gls{APIC}'s state in order to be ready for further interrupts.
A practical software engineering issue arises from the fact that wait threads are suspended in an uninterruptible state after startup: they may easily get stuck due to programming errors that cause a lack of interrupts. For these cases, a third \texttt{ioctl()} call was implemented: \texttt{RESET\_WAIT\_THREADS}, which simply wakes all suspended wait threads and makes their pending \texttt{ioctl()} calls return with an error status.
Note that our implementation has a limitation: it does not work properly on processors with \gls{SMT} enabled. There are two reasons for this: first, as there is only one \gls{PMU} per physical core, only four performance counters are available per logical core with twofold \gls{SMT}. However, not all performance events we use are on a per-thread level; some are only per-core (e.g., all events that correspond to license levels), and thus it would not be required to measure them on both threads of a physical core. Second, as we will explain in \Cref{sec:analysis:design:measurementmodes}, all events we use to generate interrupts are on a per-core level; thus, our interrupt handling would need to know that an interrupt generated on one thread also affects the other thread on the same physical core (if there are two execution threads running on one core). Nevertheless, as we have not implemented any such \gls{SMT} awareness, this remains future work.
\subsection{User-Space Component}
\label{sec:analysis:design:userspace}
The user-space component of our analysis framework is the one that implements and performs the actual analysis tasks and is aided by the previously described kernel module by instructing it to configure the \gls{PMU} and handle performance interrupts. Akin to the kernel module, our user-space program is written in C with some additional helper tools implemented in the PHP scripting language for invocation and monitoring tasks and to generate spreadsheets containing the results. \gls{AVX} instructions included in the program are directly written in \gls{x86} assembly.
As briefly described in \Cref{sec:analysis:design:overview}, the user-space process spawns $n~\in~\mathbb{N}$ execution threads and $w\in\{1,n\}$ wait threads. The execution threads create the \gls{PMU} configurations to be applied by the kernel and then run the code to be measured. Wait threads are each associated with an execution thread and receive and process the raw performance counter values when interrupts are triggered on their corresponding execution threads. The number of execution threads ($n$) is specified as a command line argument, of which either all or just one have an accompanying wait thread. This depends on whether \emph{pre-throttling mode} is enabled: the idea of this mode is to create an artificial, pre-existing global load situation across several cores where load is already high and further increased by utilizing an additional core. Therefore, the startup of one specific execution thread is delayed by a moment, and, since we only want to collect results obtained from this thread, just one wait thread is required. In contrast, without pre-throttling, all execution threads start at precisely the same time and results are gathered from all of them. The code run on the threads used for pre-throttling may either be purely scalar or can use \gls{AVX}, too. This way we can test whether the load type (i.e., scalar or \gls{AVX}) on other cores makes a difference to \gls{AVX} reclocking on one core. Execution threads are bound to \gls{CPU} cores $1$ to $n$, respectively; the wait threads to the following cores. This also implies that at maximum $\lfloor{}\frac{C}{2}\rfloor{}$ execution threads may be run, where $C$ is the number of \gls{CPU} cores installed in the system, and a minimum of two cores must be available. With pre-throttling enabled, it is theoretically possible to have $C-1$ execution threads running. Note that pre-throttling mode does not have any effect when only one execution thread is used.
We want to be able to run tests with different instruction types. Therefore, an arbitrary number of \gls{ELF} sections containing \gls{AVX} instructions may be included in the component's compiled binary executable. The address of one of these sections and its length must be passed as arguments to the program (these values are easily obtainable using tools like \texttt{objdump}). On startup, one or more executable memory areas, each consisting of four pages, are mapped and filled with the content of the passed section, repeated until the area is full, or alternatively, filled only with a precisely set amount of repeated instructions. This allows our measurement modes to investigate the \gls{CPU}'s behavior when only a very specific amount of \gls{AVX} instructions is executed. For example, this can be used to determine how many instructions exactly are required to trigger a frequency reduction, whereas infinite loops are useful to measure the time taken to switch frequencies. In order to ensure the instruction flow does not run outside the allocated area, one of two different loop modes is used at the end of each memory area, depending on the measurement mode:
\begin{itemize}
\item \texttt{LOOP\_AVX}: a jump instruction to the beginning of the area is added in order to make the constructed code loop -- this allows for infinite \gls{AVX} execution until the executing thread is interrupted.
\item \texttt{LOOP\_R12\_CMP}: a spinlock-style loop is inserted that constantly compares the value referenced by the pointer stored in the \texttt{r12} register to $0$ and returns as soon as it is no longer equal to $0$:
\begin{minted}{gas}
loop:
    cmpq $0x0, (%r12)   # spin while the value at (%r12) is zero
    je loop
    ret                 # return once another thread sets it non-zero
\end{minted}
This way, after a specific amount of \gls{AVX} instructions has been executed, the executor may spin using only scalar instructions until it is instructed to return from outside when another thread updates the value underneath the pointer in \texttt{r12}. Note that our \gls{AVX} memory area does not have its own stack frame in any way, so, assuming that the executing thread jumped into the memory area by using the \texttt{SETUP ioctl()} described in \Cref{sec:analysis:design:kernel}, we actually return using the \texttt{ioctl()}'s stack frame here. This is a rather fragile and non-portable approach and may not work as desired with every \gls{libc} implementation.
\end{itemize}
Further, some measurement modes need to execute \gls{AVX} instructions until interrupted and then want to execute purely scalar code to wait for another event (e.g., until the clock speed is raised again). For this purpose, we also allow mapping memory areas that solely consist of an empty loop.
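Constructing such an executable area boils down to a handful of standard calls. The following user-space sketch shows the \texttt{LOOP\_AVX} variant (error handling is omitted, and systems enforcing a strict W\textasciicircum{}X policy may refuse the writable-and-executable mapping):
\begin{minted}{c}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define AREA_SIZE (4 * 4096)  /* four pages, as discussed above */

/* Map an executable area, fill it with `reps` copies of the AVX
 * code section, and append a 5-byte `jmp rel32` back to the start
 * (LOOP_AVX mode).  Sketch only. */
static void *map_avx_area(const uint8_t *section, size_t len, size_t reps)
{
    uint8_t *area = mmap(NULL, AREA_SIZE,
                         PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    size_t off = 0;
    for (size_t i = 0; i < reps && off + len + 5 <= AREA_SIZE;
         i++, off += len)
        memcpy(area + off, section, len);

    /* jmp rel32: opcode 0xe9, displacement relative to the end of
     * the jump instruction itself. */
    int32_t rel = -(int32_t)(off + 5);
    area[off] = 0xe9;
    memcpy(area + off + 1, &rel, sizeof(rel));
    return area;
}
\end{minted}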
The number of four pages per area was not chosen arbitrarily: on an \gls{x86} processor running in \SI[number-unit-product=-]{64}{\bit} mode \cite{intelsdmsysprogguide}, pages have a default size of \SI{4}{\kibi\byte}, thus four pages equate to a total size of \SI{16}{\kibi\byte}. We originally used an area size of \SI{2}{\mebi\byte} (512 pages), based on the idea of achieving a purely homogeneous workload that does not contain any jumps to increase the precision of our measurements. However, tests showed that the code would become approximately \SI{20}{\percent} faster when run on multiple cores in parallel. We believe this behavior to be caused by instruction cache misses -- although modern \glspl{CPU} commonly feature an \gls{instprefetcher}, there is a caveat: it does not load instructions across page boundaries, and thus, after every \SI{4}{\kibi\byte} of instructions, we would see a cache miss and a costly pipeline stall until the next instructions arrive from memory. By using parallel execution on multiple cores, this effect is mitigated as the fastest core would already have loaded the instructions into its \glslink{L1}{L1} and \glspl{L2}\footnote{Note that on \textit{Skylake (Server)} processors the \glspl{L2} are inclusive, whereas \glslink{L3}{L3} is a victim cache \cite{intelxeonscalabledeepdive}.}. Thus, when other cores try to fetch the instructions from memory, the requests are instead served by a core that already has them via the cache coherency protocol, thereby dramatically reducing the latency. We could verify this hypothesis by using performance events for measuring cache hits and misses; however, this is not of great interest for our purposes. It could be worthwhile to look into huge pages as an alternative solution, though that remains future work.
% TODO is this related to spectre?
In order to avoid inaccuracies caused by preemption, all execution threads use the \texttt{SCHED\_RR} real-time scheduling policy offered by the \gls{Linux} scheduler \cite{cfs}, which is designed for near-real-time execution and selects real-time threads ordered by their priority; threads of equal priority are executed in a round-robin fashion. The scheduler exposes additional configuration settings \cite{cfsrt} to control the fraction of time that may be consumed by real-time processes, namely \texttt{sched\_rt\_period\_us} and \texttt{sched\_rt\_runtime\_us}. The former sets a time window (\SI{1}{\second} per default) and the latter contains the absolute amount of time within that window that is available to real-time threads (\SI{950}{\milli\second} per default). In theory, we could configure these to allow for infinite real-time execution; however, practical tests have shown that this leads to unbearable system hangs that would require further work on our implementation to fix. We settled for a value of \SI{990}{\milli\second} for \texttt{sched\_rt\_runtime\_us} as a compromise.
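In code, switching an execution thread to this policy amounts to a single call; the priority value below is an arbitrary example:
\begin{minted}{c}
#include <sched.h>
#include <pthread.h>

/* Switch the calling thread to the SCHED_RR real-time class.
 * Requires CAP_SYS_NICE; the rt-throttling budget additionally
 * has to be raised, e.g.:
 *   echo 990000 > /proc/sys/kernel/sched_rt_runtime_us */
static int make_realtime(void)
{
    struct sched_param param = { .sched_priority = 42 };
    return pthread_setschedparam(pthread_self(), SCHED_RR, &param);
}
\end{minted}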
In all measurement modes, we configure the following performance events (as documented by Intel in \cite{intelsdmmsrs}):
\begin{itemize}
\item \texttt{CORE\_POWER.LVL0\_TURBO\_LICENSE}: counts core cycles spent in turbo license level 0.
\item \texttt{CORE\_POWER.LVL1\_TURBO\_LICENSE}: counts core cycles spent in turbo license level 1.
\item \texttt{CORE\_POWER.LVL2\_TURBO\_LICENSE}: counts core cycles spent in turbo license level 2.
\item \texttt{CORE\_POWER.THROTTLE}: counts core cycles during which the \gls{OoO} engine is throttled.
\item \texttt{INT\_MISC.CLEAR\_RESTEER\_CYCLES}: counts core cycles while the execution engine is stalled waiting for instructions to be delivered. This is used to estimate the time spent before the actual execution when switching from kernel-space to user-space in execution threads.
\item \texttt{FP\_ARITH\_INST\_RETIRED.PACKED}: counts retired packed floating-point \glspl{vectorinst}. Several variants for \SI[number-unit-product=-]{128}{\bit}, \SI[number-unit-product=-]{256}{\bit} and \SI[number-unit-product=-]{512}{\bit} vectors and single- and double-precision instructions are available, which we select according to the instruction type used in the \gls{AVX} code section passed at startup.
\item \texttt{UOPS\_DISPATCHED\_PORT.PORT\_0}: counts \glspl{microinst} dispatched by the processor's scheduler at execution port 0. The use of this performance event is motivated by the \textit{Skylake (Server)} microarchitecture on which the \gls{CPU} we used for our analysis is based. These processors have an \gls{AVX-512} unit fused from two \SI[number-unit-product=-]{256}{\bit} units at execution ports 0 and 1 \cite{intelxeonscalabledeepdive}. For other microarchitectures, other performance events may be appropriate.
\item \texttt{UOPS\_DISPATCHED\_PORT.PORT\_5}: counts \glspl{microinst} dispatched by the processor's scheduler at execution port 5. The motivation here is the same as for the performance event counting \glspl{microinst} at port 0, however, only some specific \textit{Skylake (Server)} \glspl{CPU} have an additional, dedicated (i.e., non-fused) \gls{AVX-512} unit at port 5.
\end{itemize}
\noindent At startup, wait threads simply set their core affinity and then block at a synchronization barrier. Execution threads, in contrast, need to set their core affinity, their scheduling policy and build up the configuration to pass to the \texttt{SETUP ioctl()} call provided by our kernel module. Afterwards, they block at the same synchronization barrier as the wait threads. As soon as all threads have reached the barrier, in order to ensure their respective cores are ramped up to their maximum turbo frequency before starting the test run, the execution threads will enter a \SI{150}{\milli\second} busy-wait loop before calling the \texttt{SETUP ioctl()}. The wait threads directly jump into the \texttt{WAIT\_FOR\_INTERRUPT ioctl()}. Execution threads used for the previously described pre-throttling mode are an exception here, as they do not synchronize with the others but rather start executing right away, thus giving them a head start of about \SI{150}{\milli\second}. This reflects the desired behavior as, again, pre-throttling mode is designed to create pre-existing load conditions where load is further increased, so that we can measure the impact the type of load on other cores has on \gls{AVX} reclocking.
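The ramp-up delay itself is conceptually just a \gls{TSC}-based busy-wait, as the following sketch illustrates; the \gls{TSC} frequency is a parameter here and must match the actual system:
\begin{minted}{c}
#include <stdint.h>
#include <x86intrin.h>  /* __rdtsc() */

/* Busy-wait for `ms` milliseconds so the core ramps up to its
 * maximum turbo frequency before the measurement starts.  The
 * spin uses scalar instructions only, so it cannot itself
 * trigger any AVX-induced reclocking. */
static void spin_ms(uint64_t ms, uint64_t tsc_hz)
{
    uint64_t deadline = __rdtsc() + ms * (tsc_hz / 1000);
    while (__rdtsc() < deadline)
        ;
}
\end{minted}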
After execution has completed, there is not much left to do for the execution threads: their \texttt{SETUP ioctl()} returns and then they simply exit. Wait threads, on the other hand, will again synchronize at a barrier and then output the results as provided by the kernel module one after another before they exit, too. As soon as all threads have completed, the program quits.
\subsection{Measurement Modes}
\label{sec:analysis:design:measurementmodes}
In order to answer the questions named in \Cref{sec:analysis:methodology}, we implemented several different measurement modes in our user-space component, which are to be presented hereafter.
\subsubsection{DOWNCLOCK}
\label{sec:analysis:design:measurementmodes:downclock}
Our first measurement mode is designed to measure the downclocking behavior~--~i.e., how long it takes for a \gls{CPU} to reduce its frequency and whether there is a step to turbo license level~1 before switching to level~2 for instructions that target level~2.
In this mode, we simply map a single memory area with \gls{AVX} code to be run by all execution threads and configure the \gls{PMU} to trigger an interrupt and freeze the performance counters as soon as one cycle is spent in either level~1 or level~2, depending on the target license level passed as command line argument to the program. We use the \texttt{WAKE\_WAIT\_THREAD} interrupt action provided by the kernel component. Thereby, we can measure the time taken for the frequency reduction. When running a test case using this measurement mode with level~2 as target, we will also see from the respective performance counter whether any cycles were spent in level~1. Thus, this measurement mode indeed answers the aforementioned questions.
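Expressed with the hypothetical helpers sketched in \Cref{sec:analysis:design:pmu}, the configuration for this mode is minimal; the event code \texttt{0x28} (\texttt{CORE\_POWER}) with unit masks \texttt{0x18} and \texttt{0x20} for levels~1 and~2 stems from Intel's event documentation for \textit{Skylake (Server)} \cite{intelsdmmsrs}:
\begin{minted}{c}
/* DOWNCLOCK: fire the interrupt on the very first cycle spent in
 * the target license level; counter freezing is configured
 * separately (via IA32_DEBUGCTL) and omitted in this sketch. */
static void setup_downclock(int target_level)
{
    u8 umask = (target_level == 1) ? 0x18   /* LVL1 cycles */
                                   : 0x20;  /* LVL2 cycles */
    arm_counter0(0x28 /* CORE_POWER */, umask, 1);
}
\end{minted}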
\begin{figure*}[h]
\centering
\begin{subfigure}[b]{0.5\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\levelheight}{0.2}
\pgfmathsetmacro{\linelength}{3}
\pgfmathsetmacro{\barlength}{0.3}
% legend
\draw[fill=kitgreen, color=kitgreen] (-2.5, 0.9) rectangle ++(\barlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1) {AVX};
\node[color=kityellow, align=center] at (-2.35, 0.3) {\large\Lightning};
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 0.3) {Interrupt};
% level 0
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 1) {Level~0};
\draw[color=kitdarkgrey] (1, 1) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 0.3) {Level~1};
\draw[color=kitdarkgrey] (1, 0.3) -- ++(\linelength cm,0);
% bars and transitions
\draw[<-] (1.1, 1.1) -- ++(0, 0.2);
\draw[fill=kitgreen, color=kitgreen] (1.1, 0.9) rectangle ++(2cm, \levelheight cm);
\draw[->, anchor=east] (3.1, 0.9) -- ++(0, -0.6) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=5pt, mirror}] (1.1, 0.2) -- ++(2cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ time};
\end{tikzpicture}
\caption{Level~1}
\end{subfigure}%
\begin{subfigure}[b]{0.5\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\levelheight}{0.2}
\pgfmathsetmacro{\linelength}{5}
% level 0
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 1) {Level~0};
\draw[color=kitdarkgrey] (1, 1) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 0.3) {Level~1};
\draw[color=kitdarkgrey] (1, 0.3) -- ++(\linelength cm,0);
% level 2
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, -0.4) {Level~2};
\draw[color=kitdarkgrey] (1, -0.4) -- ++(\linelength cm,0);
% bars and transitions
\draw[<-] (1.1, 1.1) -- ++(0, 0.2);
\draw[fill=kitgreen, color=kitgreen] (1.1, 0.9) rectangle ++(2cm, \levelheight cm);
\draw[->, anchor=east] (3.1, 0.9) -- ++(0, -0.5);
\draw[fill=kitgreen, color=kitgreen] (3.1, 0.2) rectangle ++(2cm, \levelheight cm);
\draw[->, anchor=east] (5.1, 0.2) -- ++(0, -0.6) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=5pt, mirror}] (1.1, -0.5) -- ++(4cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ time};
\end{tikzpicture}
\caption{Level~2}
\end{subfigure}
\caption{Illustration of the \texttt{DOWNCLOCK} measurement mode. This mode measures the time until the requested target license level is reached.}
\label{fig:analysis:design:measurementmodes:downclock}
\end{figure*}
\subsubsection{UPCLOCK}
\label{sec:analysis:design:measurementmodes:upclock}
After analyzing the downclocking times, the next logical step is to look at the reverse process: the upclocking. Here, we are mainly interested in the time the \gls{CPU} takes to return to its non-throttled frequency after the last \gls{AVX} instruction has retired.
As in the \hyperref[sec:analysis:design:measurementmodes:downclock]{\texttt{DOWNCLOCK}} mode, we map an \gls{AVX} memory area into which the execution threads jump after startup; however, we also map an additional page with an infinite loop. In the first step, we configure an interrupt to be fired after switching to either level~1 or level~2, depending on the input, in order to be able to measure upclocking from both throttle levels. Then, using the \texttt{GOTO} interrupt action, we move the execution thread to the infinite-loop page, reset our performance counters and configure the \gls{PMU} to trigger an interrupt when level~0 is reached again and to freeze the performance counters at this point. It is important not to simply leave the core in a completely idle state: the kernel would then eventually run the \texttt{MWAIT} \cite{intelsdminstructionreference} instruction on the core, causing it to enter a halt state. Our measurements would then be useless, both because an idle core does not reflect real-world heterogeneous applications and because the clock is disabled entirely while the core is halted (i.e., there is no frequency to measure anymore).
Using the described procedure, we measure only the time spent after reaching a turbo license level with reduced frequency until returning to nominal frequency, which is exactly what we are interested in. Notably, in this mode, we instruct the \gls{PMU} to also count cycles while running in kernel-space (i.e., ring~$0$) as this is also time spent without executing \gls{AVX} instructions, and thus must be measured to retrieve precise results.
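In terms of the hypothetical helpers from \Cref{sec:analysis:design:pmu}, the mode consists of two consecutive counter configurations, applied via the \texttt{GOTO} interrupt action (unit mask \texttt{0x07} selects the level-0 cycle event on \textit{Skylake (Server)} \cite{intelsdmmsrs}):
\begin{minted}{c}
/* UPCLOCK, phase 1: interrupt once the target license level is
 * reached, exactly as in the DOWNCLOCK mode. */
static void setup_upclock_phase1(int target_level)
{
    arm_counter0(0x28, (target_level == 1) ? 0x18 : 0x20, 1);
}

/* Phase 2 (applied by the GOTO action): interrupt on the first
 * cycle back in level 0.  In the real tool the OS bit is set as
 * well here, so that kernel-mode cycles are counted too. */
static void setup_upclock_phase2(void)
{
    arm_counter0(0x28, 0x07 /* LVL0 cycles */, 1);
}
\end{minted}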
\begin{figure*}[t]
\centering
\begin{subfigure}[b]{0.6\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\levelheight}{0.2}
\pgfmathsetmacro{\linelength}{3}
\pgfmathsetmacro{\legendbarlength}{0.3}
\pgfmathsetmacro{\barlength}{1.2}
% legend
\draw[fill=kitgreen, color=kitgreen] (-2.5, 1.6) rectangle ++(\legendbarlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1.7) {AVX};
\draw[fill=kitbrown, color=kitbrown] (-2.5, 0.9) rectangle ++(\legendbarlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1) {Scalar};
\node[color=kityellow, align=center] at (-2.35, 0.3) {\large\Lightning};
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 0.3) {Interrupt};
% level 0
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 1) {Level~0};
\draw[color=kitdarkgrey] (0.6, 1) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 0.3) {Level~1};
\draw[color=kitdarkgrey] (0.6, 0.3) -- ++(\linelength cm,0);
% bars and transitions
\draw[<-] (0.7, 1.1) -- ++(0, 0.2);
\draw[fill=kitgreen, color=kitgreen] (0.7, 0.9) rectangle ++(\barlength cm, \levelheight cm);
\draw[->] (1.9, 0.9) -- ++(0, -0.5) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[fill=kitbrown, color=kitbrown] (1.9, 0.2) rectangle ++(\barlength cm, \levelheight cm);
\draw[->] (3.1, 0.4) -- ++(0, 0.6) node[pos=0.41666, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=5pt, mirror}] (1.9, 0.2) -- ++(\barlength cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ time};
\end{tikzpicture}
\caption{Level~1}
\end{subfigure}%
\begin{subfigure}[b]{0.4\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\linelength}{4.2}
\pgfmathsetmacro{\barlength}{1.2}
\pgfmathsetmacro{\barheight}{0.2}
\pgfmathsetmacro{\linemargin}{0.6}
\pgfmathsetmacro{\linepadding}{0.1}
\pgfmathsetmacro{\linedistance}{0.7}
\pgfmathsetmacro{\linezeroy}{1}
\pgfmathsetmacro{\lineoney}{\linezeroy-\linedistance}
\pgfmathsetmacro{\linetwoy}{\lineoney-\linedistance}
\pgfmathsetmacro{\interbararrowheight}{\linedistance-\barheight}
\pgfmathsetmacro{\measurey}{\linetwoy-0.5*\barheight}
% level 0
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \linezeroy) {Level~0};
\draw[color=kitdarkgrey] (\linemargin, \linezeroy) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \lineoney) {Level~1};
\draw[color=kitdarkgrey] (\linemargin, \lineoney) -- ++(\linelength cm,0);
% level 2
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \linetwoy) {Level~2};
\draw[color=kitdarkgrey] (\linemargin, \linetwoy) -- ++(\linelength cm,0);
% bars and transitions
\pgfmathsetmacro{\baronex}{\linemargin+\linepadding}
\pgfmathsetmacro{\baroney}{\linezeroy-0.5*\barheight}
\draw[fill=kitgreen, color=kitgreen] (\baronex, \baroney) rectangle ++(\barlength cm, \barheight cm);
\pgfmathsetmacro{\bartwox}{\baronex+\barlength}
\pgfmathsetmacro{\bartwoy}{\lineoney-0.5*\barheight}
\draw[fill=kitgreen, color=kitgreen] (\bartwox, \bartwoy) rectangle ++(\barlength cm, \barheight cm);
\pgfmathsetmacro{\barthreex}{\bartwox+\barlength}
\pgfmathsetmacro{\barthreey}{\linetwoy-0.5*\barheight}
\draw[fill=kitbrown, color=kitbrown] (\barthreex, \barthreey) rectangle ++(\barlength cm, \barheight cm);
\pgfmathsetmacro{\arrowoney}{\baroney+\barheight}
\draw[<-] (\baronex, \arrowoney) -- ++(0, 0.2);
\draw[->, anchor=east] (\bartwox, \baroney) -- ++(0, -\interbararrowheight cm);
\draw[->, anchor=east] (\barthreex, \bartwoy) -- ++(0, -\interbararrowheight cm) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\pgfmathsetmacro{\arrowfourx}{\barthreex+\barlength}
\pgfmathsetmacro{\arrowfoury}{\barthreey+\barheight}
\pgfmathsetmacro{\arrowfourlength}{\interbararrowheight+0.5*\barheight+\linedistance}
\pgfmathsetmacro{\arrowfournodepos}{0.5*\interbararrowheight/\arrowfourlength}
\draw[->, anchor=east] (\arrowfourx,\arrowfoury) -- ++(0, \arrowfourlength cm) node[pos=\arrowfournodepos, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=5pt, mirror}] (\barthreex, \measurey) -- ++(\barlength cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ time};
\end{tikzpicture}
\caption{Level~2}
\end{subfigure}
\caption{The \texttt{UPCLOCK} mode measures how long it takes a core to return to its level~0 frequency after an \gls{AVX}-induced reduction.}
\label{fig:analysis:design:measurementmodes:upclock}
\end{figure*}
\subsubsection{PRE\_THROTTLE\_TIME}
\label{sec:analysis:design:measurementmodes:prethrottlethroughput}
As cited at the beginning of \hyperref[sec:analysis]{this chapter}, Intel talks in their optimization manual \cite{inteloptimizationmanual} about the \glspl{CPU} operating at a \enquote{lower peak capability} before the switch to a turbo license level with lower frequency is completed. Early experimentation suggested that this state is represented by the \texttt{CORE\_POWER.THROTTLE} performance event, which is described to count cycles where the \gls{OoO} engine is throttled \cite{intelsdmsysprogguide}. We want to find out when exactly this throttle state is activated and what instruction throughput the \gls{CPU} achieves before throttling, to get an idea of the theoretically possible performance if the frequency reduction did not exist.
For this purpose, the \texttt{PRE\_THROTTLE\_TIME} mode conceptually works very much the same way as the \hyperref[sec:analysis:design:measurementmodes:downclock]{\texttt{DOWNCLOCK}} mode: we configure an interrupt that fires and freezes the performance counters as soon as the first cycle has been spent in throttled mode and run our \gls{AVX} code using the \texttt{WAKE\_WAIT\_THREAD} interrupt action. Thereby, we obtain the desired information about the behavior during the time between starting execution and \gls{OoO} engine throttling.
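The corresponding counter setup again reduces to a single call with the hypothetical helpers from \Cref{sec:analysis:design:pmu}; unit mask \texttt{0x40} selects the throttle-cycle event on \textit{Skylake (Server)} \cite{intelsdmmsrs}:
\begin{minted}{c}
/* PRE_THROTTLE_TIME: interrupt on the first cycle in which the
 * out-of-order engine is throttled. */
static void setup_pre_throttle_time(void)
{
    arm_counter0(0x28, 0x40 /* CORE_POWER.THROTTLE */, 1);
}
\end{minted}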
\begin{figure*}[h]
\centering
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\levelheight}{0.2}
\pgfmathsetmacro{\linelength}{1.8}
\pgfmathsetmacro{\legendbarlength}{0.3}
\pgfmathsetmacro{\barlength}{1.2}
% legend
\draw[fill=kitgreen, color=kitgreen] (-2.5, 0.9) rectangle ++(\legendbarlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1) {AVX};
\node[color=kityellow, align=center] at (-2.35, 0.3) {\large\Lightning};
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 0.3) {Interrupt};
% level 0
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 1) {Level~0};
\draw[color=kitdarkgrey] (0.6, 1) -- ++(\linelength cm,0);
% throttle
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 0.3) {Throttle};
\draw[color=kitdarkgrey] (0.6, 0.3) -- ++(\linelength cm,0);
% bars and transitions
\draw[<-] (0.7, 1.1) -- ++(0, 0.2);
\draw[fill=kitgreen, color=kitgreen] (0.7, 0.9) rectangle ++(\barlength cm, \levelheight cm);
\draw[->] (1.9, 0.9) -- ++(0, -0.6) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=5pt, mirror}] (0.7, 0.2) -- ++(\barlength cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ instructions};
\end{tikzpicture}
\caption{After starting to execute demanding \gls{AVX} instructions, a core enters a state where the \gls{OoO} engine is throttled. The \texttt{PRE\_THROTTLE\_TIME} mode measures the time it takes until the throttling takes place.}
\label{fig:analysis:design:measurementmodes:prethrottlethroughput}
\end{figure*}
\subsubsection{REQUIRED\_INSTRUCTIONS}
\label{sec:analysis:design:measurementmodes:nonavxtime}
To obtain a model of the reclocking algorithm that is as complete as possible, we are not only interested in the time it takes for a \gls{CPU} to switch turbo license levels, but we also want to know how many instructions are precisely required to eventually trigger a frequency reduction.
The implementation of this measurement mode is more complex compared to the other modes and also partially depends on the license level transition to be examined. For this mode, the idea is to run multiple iterations in which the amount of executed \gls{AVX} instructions is incremented in every iteration until the generated code eventually triggers a license-level-switch interrupt on each execution thread.
In every case, we map an \gls{AVX} memory area in \texttt{LOOP\_R12\_CMP} mode which initially contains just one copy of the \gls{AVX} code in the selected \gls{ELF} section. As a reminder, this loop mode executes all \gls{AVX} instructions and then spins in purely scalar code until the value referenced by the pointer in \texttt{r12} becomes non-zero. When license level~2 is chosen as target, we additionally map an area in \texttt{LOOP\_AVX} mode that is executed until level~1 is reached, as in this case, we are interested in the amount of instructions required to cause a switch from level~1 to level~2. Unlike \texttt{LOOP\_R12\_CMP}, \texttt{LOOP\_AVX} keeps executing \gls{AVX} code forever until interrupted. In \gls{AVX} pre-throttling mode, another area is created in \texttt{LOOP\_AVX} mode to be run by all execution threads used for pre-throttling. This allows them to execute \gls{AVX} code indefinitely, as required to fulfill their purpose of creating an artificial, pre-existing load.
In case level~1 is targeted, all (non-pre-throttling) execution threads directly jump into the \texttt{LOOP\_R12\_CMP} area and use the \texttt{WAKE\_WAIT\_THREAD} interrupt action. For level~2 as target, we select the \texttt{GOTO} interrupt action and first jump into the aforementioned \texttt{LOOP\_AVX} area, configuring an interrupt to be triggered as soon as one cycle in level~1 has been completed. Afterwards, the execution threads are moved to the \texttt{LOOP\_R12\_CMP} area -- hence the use of the \texttt{GOTO} action.
\begin{figure*}
\centering
\begin{subfigure}[b]{0.6\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\levelheight}{0.2}
\pgfmathsetmacro{\linelength}{3}
\pgfmathsetmacro{\legendbarlength}{0.3}
\pgfmathsetmacro{\barlength}{1.5}
% legend
\draw[fill=kitgreen, color=kitgreen] (-2.5, 1.6) rectangle ++(\legendbarlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1.7) {AVX};
\draw[fill=kitbrown, color=kitbrown] (-2.5, 0.9) rectangle ++(\legendbarlength cm, \levelheight cm);
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 1) {Scalar};
\node[color=kityellow, align=center] at (-2.35, 0.3) {\large\Lightning};
\node[minimum height=\levelheight cm, text centered, anchor=west] at (-2.15, 0.3) {Interrupt};
% level 0
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 1) {Level~0};
\draw[color=kitdarkgrey] (0.6, 1) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\levelheight cm, text centered] at (0, 0.3) {Level~1};
\draw[color=kitdarkgrey] (0.6, 0.3) -- ++(\linelength cm,0);
% bars and transitions
\draw[<-] (0.7, 1.1) -- ++(0, 0.2);
\draw[fill=kitgreen, color=kitgreen] (0.7, 0.9) rectangle ++(0.6 cm, \levelheight cm);
\draw[fill=kitbrown, color=kitbrown] (1.3, 0.9) rectangle ++(0.9 cm, \levelheight cm);
\draw[->] (2.2, 0.9) -- ++(0, -0.6) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[draw=none] (0.7, 0.9) rectangle ++(\barlength cm, \levelheight cm) node[pos=0.5, align=center, color=black, font=\tiny] {\texttt{LOOP\_R12\_CMP}};
\draw[decorate, decoration={brace, amplitude=3pt, mirror}] (0.7, 0.2) -- ++(0.6 cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ instructions};
\end{tikzpicture}
\caption{Level~1}
\end{subfigure}%
\begin{subfigure}[b]{0.4\textwidth}
\begin{tikzpicture}[font=\scriptsize]
\sffamily
\pgfmathsetmacro{\linelength}{4.2}
\pgfmathsetmacro{\barlength}{1.5}
\pgfmathsetmacro{\barheight}{0.2}
\pgfmathsetmacro{\linemargin}{0.6}
\pgfmathsetmacro{\linepadding}{0.1}
\pgfmathsetmacro{\linedistance}{0.7}
\pgfmathsetmacro{\linezeroy}{1}
\pgfmathsetmacro{\lineoney}{\linezeroy-\linedistance}
\pgfmathsetmacro{\linetwoy}{\lineoney-\linedistance}
\pgfmathsetmacro{\interbararrowheight}{\linedistance-\barheight}
\pgfmathsetmacro{\measurey}{\linetwoy-0.5*\barheight}
% level 0
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \linezeroy) {Level~0};
\draw[color=kitdarkgrey] (\linemargin, \linezeroy) -- ++(\linelength cm,0);
% level 1
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \lineoney) {Level~1};
\draw[color=kitdarkgrey] (\linemargin, \lineoney) -- ++(\linelength cm,0);
% level 2
\node[color=kitblue, minimum height=\barheight cm, text centered] at (0, \linetwoy) {Level~2};
\draw[color=kitdarkgrey] (\linemargin, \linetwoy) -- ++(\linelength cm,0);
% bars and transitions
\pgfmathsetmacro{\baronex}{\linemargin+\linepadding}
\pgfmathsetmacro{\baroney}{\linezeroy-0.5*\barheight}
\draw[fill=kitgreen, color=kitgreen] (\baronex, \baroney) rectangle ++(\barlength cm, \barheight cm) node[pos=0.5, align=center, color=black, font=\tiny] {\texttt{LOOP\_AVX}};
\pgfmathsetmacro{\bartwox}{\baronex+\barlength}
\pgfmathsetmacro{\bartwoy}{\lineoney-0.5*\barheight}
\draw[fill=kitgreen, color=kitgreen] (\bartwox, \bartwoy) rectangle ++(0.6 cm, \barheight cm);
\draw[fill=kitbrown, color=kitbrown] (\bartwox+0.6, \bartwoy) rectangle ++(0.9 cm, \barheight cm);
\draw[draw=none] (\bartwox, \bartwoy) rectangle ++(\barlength cm, \barheight cm) node[pos=0.5, align=center, color=black, font=\tiny] {\texttt{LOOP\_R12\_CMP}};
\pgfmathsetmacro{\barthreex}{\bartwox+\barlength}
\pgfmathsetmacro{\barthreey}{\linetwoy-0.5*\barheight}
\pgfmathsetmacro{\arrowoney}{\baroney+\barheight}
\draw[<-] (\baronex, \arrowoney) -- ++(0, 0.2);
\draw[->, anchor=east] (\bartwox, \baroney) -- ++(0, -\interbararrowheight cm) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[->, anchor=east] (\barthreex, \bartwoy) -- ++(0, -0.6 cm) node[pos=0.5, anchor=west] {\large\color{kityellow}\Lightning};
\draw[decorate, decoration={brace, amplitude=3pt, mirror}] (\bartwox, \measurey) -- ++(0.6 cm,0) node[pos=0.5, anchor=north, yshift=-0.1cm, align=center] {measure \\ instructions};
\end{tikzpicture}
\caption{Level~2}
\end{subfigure}
\caption{A certain amount of \gls{AVX} instructions is required to actually trigger frequency switches. The \texttt{REQUIRED\_INSTRUCTIONS} mode measures precisely how many are needed.}
\label{fig:analysis:design:measurementmodes:nonavxtime}
\end{figure*}
As we only want to see whether the number of repeated instructions in the \texttt{LOOP\_R12\_CMP} area is sufficient to eventually cause a frequency reduction, execution threads instruct the kernel module to fill the \texttt{r12} register with a pointer to a global variable that is set to a non-zero value by the main thread after a delay of \SI{1}{\milli\second}, thereby making all (again, non-pre-throttling) execution threads return after this time -- generally assuming that \SI{1}{\milli\second} is enough time for a turbo license level change to happen. Then, the main thread checks whether an interrupt was triggered on all expected cores. If so, the test run has completed and the program quits. Otherwise, we remap the \texttt{LOOP\_R12\_CMP} memory with one more copy of the \gls{AVX} code than in the previous iteration. Wait threads that completed because an interrupt was triggered on their corresponding execution thread are respawned; afterwards, we reset the variable used for the pointer in \texttt{r12} and start all execution threads again. Unlike in the first iteration, where execution threads generally spin for \SI{150}{\milli\second} to ramp up the core frequency (as described in \Cref{sec:analysis:design:userspace}), we only have them spin for \SI{3}{\milli\second} in further iterations as their respective cores are already running at their maximum frequency but possibly need to return from an attained turbo license level. Leaving them spinning again for \SI{150}{\milli\second} would unnecessarily prolong the test run. This procedure repeats until we have enough \gls{AVX} code in our \texttt{LOOP\_R12\_CMP} area to trigger interrupts on all desired cores. Note that execution threads used for pre-throttling are started once at the beginning and keep running without disruptions until the program exits.
At the end, the number of copies of the \gls{AVX} code in the \texttt{LOOP\_R12\_CMP} memory reflects the amount of instructions needed to cause a frequency transition.
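In simplified form, the whole procedure is a linear search over the number of code copies; \texttt{remap\_loop\_r12\_cmp\_area()} and \texttt{run\_iteration()} are hypothetical stand-ins for the remapping and the spawn-wait-release sequence described above:
\begin{minted}{c}
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the machinery described above. */
void remap_loop_r12_cmp_area(const uint8_t *section, size_t len,
                             size_t copies);
int  run_iteration(void);   /* non-zero if all cores interrupted */

/* Increase the number of AVX-code copies until a license-level
 * switch is observed on every measured core. */
size_t find_required_instructions(const uint8_t *section, size_t len)
{
    for (size_t copies = 1; ; copies++) {
        remap_loop_r12_cmp_area(section, len, copies);
        if (run_iteration())
            return copies;  /* smallest count triggering a switch */
    }
}
\end{minted}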
\section{Results}
\label{sec:analysis:results}
We use the described analysis framework to conduct measurements with several different \gls{AVX} instructions using all available combinations of modes, i.e., with and without pre-throttling, different target turbo license levels, with turbo frequencies enabled and disabled, and with 1, 2, 3, and 4 execution threads. All measurements are executed 1000 times in order to obtain statistically robust results. In this section, we present our system setup, describe the precise instructions we used for testing, and present and discuss the results.
\subsection{System Setup}
\label{sec:analysis:results:systemsetup}
We performed our analysis on an Intel Core i9-7940X processor, which features 14 physical cores with twofold \gls{SMT}, running at a nominal base frequency\footnote{Note that the base frequency equals the \gls{TSC}'s frequency \cite{intelsdmsysprogguide}.} of \SI{3.1}{\giga\hertz} with a maximum turbo frequency of \SI{4.3}{\giga\hertz} \cite{intel7940x}. Additionally, the chip supports \textit{Intel Turbo Boost Max Technology 3.0}, essentially meaning that two specific cores may operate at a higher turbo frequency, in this case \SI{4.4}{\giga\hertz}. These cores are selected based on their electrical and thermal properties during the manufacturing process \cite{intelxeonscalabledeepdive} -- this technique is otherwise also known as \textit{speed binning} \cite{lopata2012speed}. The chip's nominal \gls{TDP} is specified at \SI{165}{\watt}, which is the maximum power consumption the chip will sustain over long time periods. Note that, as our analysis framework does not currently support running with \gls{SMT} enabled (as explained in \Cref{sec:analysis:design:kernel}), we have disabled \gls{SMT} in our system, and thus each physical core exposes only one logical core. Further, although our analysis tool would theoretically allow running with 7 execution threads on this \gls{CPU} (as described in \Cref{sec:analysis:design:userspace}), we only tested with a maximum of 4 due to time constraints. Nevertheless, we believe this is enough to generate representative results.
This \gls{CPU} is based on the \textit{Skylake (Server)} microarchitecture, the first \gls{x86} implementation featuring support for the \gls{AVX-512} extension \cite{intelxeonscalabledeepdive}, making it one of the oldest processors that expose the \gls{AVX} reclocking issue for heterogeneous workloads. However, not all \glspl{CPU} built with this microarchitecture feature the same number of \SI[number-unit-product=-]{512}{\bit} vector execution units: some have two, others only one. The i9-7940X used here has two.
The processor was installed on an ASUS TUF X299 MARK 2 motherboard along with \SI{32}{\gibi\byte} of DDR4 system memory operating at a frequency of \SI{2666}{\mega\hertz} and an \gls{NVMe} \acrlong{SSD}. The motherboard was not chosen arbitrarily: being designed for the needs of the overclocking community, it -- unlike most other motherboards for this platform -- allows customizing the frequency targets for \gls{AVX}-induced reclocking in its \gls{UEFI}'s configuration menu. For this analysis, the frequency offsets were configured to 3 and 7 for turbo license levels 1 and 2, respectively, resulting in target frequencies of \SI{3.4}{\giga\hertz} and \SI{2.8}{\giga\hertz}.
We opted to use Fedora 29 (Server Edition) as the operating system with a custom-built \gls{Linux} 5.1.0 kernel and glibc version 2.28-33. The kernel and all of our own code were compiled using GCC 8.3.1-2 with the default \texttt{-O2} optimization level. To minimize overhead and latencies caused by context switches from user-space to kernel-space and vice versa, we disabled all mitigations provided by the \gls{Linux} kernel for hardware vulnerabilities found in recent \glspl{CPU} (e.g., Spectre and Meltdown) as well as \gls{KASLR}. This improves the quality of our results as we only want to measure hardware behavior, and therefore want to avoid software overhead as much as possible.
\subsection{Tested Instructions}
\label{sec:analysis:results:testedinstructions}
Different instructions cause different switching activity and therefore need different amounts of energy. In order to create a precise model of the \gls{AVX} reclocking algorithm as it was implemented by Intel, we want to conduct our measurements with different kinds of \gls{AVX} instructions to find possible differences in the behavior -- especially with regard to what is documented. Note that we only tested homogeneous loads and did not run any tests with heterogeneous mixtures of different instruction classes. Characterizing the frequency scaling behavior for these remains future work.
We tried to select both floating-point and integer operations that reflect the \enquote{heavy} and \enquote{light} instruction types as defined in Intel's optimization manual \cite{inteloptimizationmanual} as well as instructions we expect to be implemented differently in the hardware's execution units. Consequently, we chose the following subset of \gls{AVX} instructions for our measurements (as obtained from Intel's manual for software developers \cite{intelsdminstructionreference}):
\begin{itemize}
\item \texttt{vfmaddsub132pd} (double-precision) and \texttt{vfmaddsub132ps} (single-precision) are floating-point \gls{FMA} instructions that alternatingly add and subtract the values from a third vector after multiplying the values from two other vectors. That is, for input vectors $a$, $b$ and $c$, they calculate the result vector $r$ according to the following rule:
\begin{displaymath}
\begin{pmatrix}
r_1 \\
r_2 \\
r_3 \\
\dots
\end{pmatrix}
\coloneqq
\begin{pmatrix}
a_1 \times b_1 + c_1 \\
a_2 \times b_2 - c_2 \\
a_3 \times b_3 + c_3 \\
\dots
\end{pmatrix}
\end{displaymath}
\item \texttt{vmulpd} (double-precision) and \texttt{vmulps} (single-precision) simply calculate the products of all corresponding floating-point members from two input vectors.
\item \texttt{vpmullq} multiplies corresponding \SI[number-unit-product=-]{64}{\bit} integers from two input vectors into \SI[number-unit-product=-]{128}{\bit} intermediate results and stores the lower \SI[number-unit-product=-]{64}{\bit} half of every intermediate result in the target result vector.
\item \texttt{vpackssdw} merges two vectors with signed \SI[number-unit-product=-]{32}{\bit} integers into one vector consisting of signed \SI[number-unit-product=-]{16}{\bit} integers by handling overflow conditions via saturation arithmetic, i.e., for values larger than $32767$ ($=2^{15}-1$) or smaller than $-32768$ ($=-2^{15}$), the conversion results in these extreme values. In mathematical terms, the operation may be described as follows for input vectors $a$, $b$ and result vector $r$:
\begin{displaymath}
\forall i \in \{1,\dots,|a|\} \cap \mathbb{N}\colon r_i \coloneqq saturate(a_i), r_{|a|+i} \coloneqq saturate(b_i)
\end{displaymath}
where $saturate$ is defined as
\begin{displaymath}
saturate\colon
\begin{cases}
\{-2^{31}, \dots, 2^{31}-1\} \cap \mathbb{Z} \longrightarrow \{-2^{15}, \dots, 2^{15}-1\} \cap \mathbb{Z}, \\
x \mapsto min(2^{15}-1, max(x, -2^{15})).
\end{cases}
\end{displaymath}
Note that $|a| = |b|$ and $|r| = |a| + |b|$.
\item \texttt{vpaddsw} adds signed \SI[number-unit-product=-]{16}{\bit} integers from two input vectors using saturation arithmetic as described above; a scalar reference sketch of this saturation is given after this list.
\item \texttt{vpmaddwd} is an \gls{FMA}-style operation that first multiplies corresponding signed \SI[number-unit-product=-]{16}{\bit} integers from two input vectors, thereby creating an equal amount of \SI[number-unit-product=-]{32}{\bit} temporary results. Afterwards, the adjacent results are added together to generate the result vector. For input vectors $a$ and $b$, this is the operation executed to obtain the result vector $r$:
\begin{displaymath}
\begin{pmatrix}
r_1 \\
r_2 \\
\dots
\end{pmatrix}
\coloneqq
\begin{pmatrix}
(a_1 \times b_1) + (a_2 \times b_2) \\
(a_3 \times b_3) + (a_4 \times b_4) \\
\dots
\end{pmatrix}
\end{displaymath}
\end{itemize}
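For reference, the saturation semantics of \texttt{vpackssdw} and \texttt{vpaddsw} are easy to state per element in scalar C; the actual instructions of course operate on whole vectors in parallel:
\begin{minted}{c}
#include <stdint.h>

/* Per-element semantics of the signed 32-to-16-bit saturation
 * used by vpackssdw: clamp the input to the int16_t range. */
static int16_t saturate(int32_t x)
{
    if (x > INT16_MAX) return INT16_MAX;  /*  2^15 - 1 */
    if (x < INT16_MIN) return INT16_MIN;  /* -2^15     */
    return (int16_t)x;
}

/* vpaddsw per element: saturating addition of signed 16-bit
 * integers; widening to 32 bits rules out intermediate overflow. */
static int16_t addsw(int16_t a, int16_t b)
{
    return saturate((int32_t)a + (int32_t)b);
}
\end{minted}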
We wrote \gls{ELF} sections for our user-space component (as described in \Cref{sec:analysis:design:userspace}) containing assembly code for all of these instructions in two variants with \SI[number-unit-product=-]{256}{\bit} \texttt{YMM} and \SI[number-unit-product=-]{512}{\bit} \texttt{ZMM} registers, respectively. Additionally, for each variant, there are two versions: an \enquote{unrolled} and a non-unrolled one. The non-unrolled versions simply contain a single instruction using the first three registers, e.g.:
\begin{center}
\begin{minted}{gas}
vfmaddsub132pd %zmm0, %zmm1, %zmm2
\end{minted}
\end{center}
By constantly executing the same instruction with the same operands, we create artificial register pressure that prevents a core's scheduler from maximizing utilization of the two \SI[number-unit-product=-]{512}{\bit} vector units available in the execution engine. The unrolled versions, on the other hand, alleviate this pressure by repeating the same instruction, but with different register operands:
\begin{center}
\begin{minted}{gas}
vfmaddsub132pd %zmm0, %zmm0, %zmm1
vfmaddsub132pd %zmm0, %zmm0, %zmm2
vfmaddsub132pd %zmm0, %zmm0, %zmm3
...
\end{minted}
\end{center}
Every unrolled section contains the same instruction repeated 31 times, always using \texttt{\%zmm0}/\texttt{\%ymm0} for the first two operands and \texttt{\%zmm\{1-31\}}/\texttt{\%ymm\{1-31\}} as last operand. Thereby, we exhaustively make use of all 32 \texttt{ZMM}/\texttt{YMM} architectural registers available on \gls{AVX-512}-capable processors \cite{intelsdmbasicarch}.
\subsection{Downclocking}
\label{sec:analysis:results:downclocking}
For our model of Intel's \gls{AVX} reclocking algorithm, the downclocking behavior -- i.e., the process of frequency reduction when executing \gls{AVX} instructions -- is an important puzzle piece. Here, we want to answer questions such as \enquote{How long does it take a \gls{CPU} to switch to its reduced frequency?} and \enquote{When is a frequency reduction triggered?}. We obtained the results to be presented in this section using the \hyperref[sec:analysis:design:measurementmodes:downclock]{\texttt{DOWNCLOCK}}, \hyperref[sec:analysis:design:measurementmodes:prethrottlethroughput]{\texttt{PRE\_THROTTLE\_TIME}} and \hyperref[sec:analysis:design:measurementmodes:nonavxtime]{\texttt{REQUIRED\_INSTRUCTIONS}} modes provided by our measurement system as described in \Cref{sec:analysis:design}.
Broadly, we found the \gls{CPU} to generally run through the following steps for its frequency reduction:
\begin{enumerate}
\item Throttle the \acrlong{OoO} engine
\item Switch to turbo license level 1 and alleviate \gls{OoO} engine throttling
\item Switch to turbo license level 2 (for \gls{AVX-512} \enquote{heavy} instructions)
\end{enumerate}
This already contains our first insight: even for \gls{AVX-512} instructions that Intel defines to be \enquote{heavy}, the processor will first switch to license level~1 and spend some time in that mode before performing another frequency shift to level~2 -- Intel does not mention this in their optimization manual \cite{inteloptimizationmanual}. This information is easily obtained by executing the \hyperref[sec:analysis:design:measurementmodes:downclock]{\texttt{DOWNCLOCK}} mode twice with both levels as targets -- if the \gls{CPU} did not take this intermediate step, the test would simply hang when executed with level~1 as the target, as this level would never be reached.
Similarly, we can confirm an observation previously made by \citeauthor{lemire2018avx512} \cite{lemire2018avx512}: even \gls{AVX-512} heavy instructions do not always trigger a switch to level~2. We observed this behavior with the \texttt{vfmaddsub132pd} instruction, for which only the unrolled version (i.e., the one without register pressure) will ever reach level~2. The very same behavior exists with the \SI[number-unit-product=-]{256}{\bit} version of this instruction, too: the core only switches to level~1 when unrolled. We can imagine two different factors that could potentially influence this decision: the load on the core's \gls{AVX} units and the register utilization itself.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Instructions per Cycle},
scale only axis,
width=\textwidth,
height=3.5cm,
axis lines=left,
ymin=0,
ymax=0.45,
xtick=\empty,
xmin=-10,
xmax=1010
]
\addplot[only marks, color=kitgreen] table {plots/avx_dp_fma_512_l1_1cpus_downclock_throughput.csv} node[pos=0.5,below=0.25cm] {non-unrolled};
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_512_unrolled_l1_1cpus_downclock_throughput.csv} node[pos=0.5,below=0.4cm] {unrolled};
\end{axis}
\end{tikzpicture}
\caption{Throughput of the \texttt{vfmaddsub132pd} instruction before switching from level~0 to level~1 is doubled when unrolled.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_downclock+avx_dp_fma_512_l1_1cpus_downclock:throughput}
\end{figure}
Before the first frequency reduction from level~0 to level~1 happens, we find that the instruction throughput (\acrlong{IPC}, \acrshort{IPC}\glsunset{IPC}) of the \SI[number-unit-product=-]{512}{\bit} variant is precisely doubled from $0.21$ to $0.42$ on average with the unrolled version compared to the non-unrolled implementation, as depicted in \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_downclock+avx_dp_fma_512_l1_1cpus_downclock:throughput}.
This is expected: as described in \Cref{sec:analysis:results:testedinstructions}, the \gls{CPU} we used for our tests features two \gls{AVX-512} units per core, and as such, when no register pressure prevents a core from parallelizing consecutive instructions, it can fully utilize both units. However, we also found that the cores always run roughly equal numbers of instructions through both units, even in the non-unrolled case. Most likely, Intel's scheduler uses a simple round-robin algorithm to assign \glspl{microinst} to the units. This is a sign that the load on the units is not the determining factor: in the non-unrolled case the units take turns and each one stays unloaded only for a few cycles at a time.
In addition, the theory of the register pressure being the culprit here is supported by a patent on local power gating in processors published by Intel \cite{bonen2016performing}: here, Intel describes a technique to dynamically cut and restore power to vector units as well as vector registers upon demand to save energy, which our system's processor likely implements as described. Notably, execution units and registers are controlled independently of each other, and it is also noted that the \texttt{vzeroupper} instruction directly impacts the power gating behavior. This instruction zeroes the upper \SI{384}{\bit} of each architectural \SI[number-unit-product=-]{512}{\bit} vector register \cite{intelsdminstructionreference}. Indeed, if we explicitly set the \SI[number-unit-product=-]{512}{\bit} register \texttt{ZMM0} to a non-zero value before starting execution, we find that heavy \SI[number-unit-product=-]{256}{\bit} (i.e., \gls{AVX2}) vector instructions -- which would normally only cause switches to level~1 -- suddenly trigger frequency switches to level~2, too, but only if we do not additionally execute \texttt{vzeroupper} after setting \texttt{ZMM0}. This is not documented in Intel's description of the reclocking algorithm, but hints that register usage directly impacts the turbo license level selection in addition to the types of the executed instructions.
Apart from this discrepancy, we found that Intel's description of the instruction types and their associated turbo license levels holds true for the instructions we selected for testing. Now that we know the steps for frequency reductions, the next logical step is to find out when each of them occurs, what is required to trigger them, and how much time a core spends in each state.
In \Cref{sec:analysis:design:measurementmodes:prethrottlethroughput}, we described the implementation of the \texttt{PRE\_THROTTLE\_TIME} measurement mode, designed to find out how many \gls{AVX} instructions may be executed before the \acrlong{OoO} engine is throttled. Additionally, we wanted to use it to measure what throughput could theoretically be achieved if there was no frequency reduction at all.
The answer here, however, is very simple: in all tested cases, the throttling occurs immediately after execution of the first instruction has completed -- no matter which instruction is tested, how many cores are used, or whether pre-throttling and turbo frequencies are enabled. This also means that we are unable to measure the theoretically achievable throughput here: with a duration of just one instruction, no reliable numbers can be obtained.
Next, we are interested in the number of instructions required to eventually trigger a switch to level~1. This is what the \hyperref[sec:analysis:design:measurementmodes:nonavxtime]{\texttt{REQUIRED\_INSTRUCTIONS}} measurement mode was built for: it incrementally builds and executes \gls{AVX} code with more and more instructions until a license level switch can be observed within a time window after an execution iteration.
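Conceptually, the mode boils down to a simple linear search. The following Python sketch illustrates the idea; \texttt{execute\_avx} and \texttt{switch\_observed\_within} are hypothetical stand-ins for our framework's primitives, not actual code from it:
\begin{center}
\begin{minted}{python}
def required_instructions(target_level, window):
    n = 1
    while True:
        execute_avx(n)  # run n AVX instructions back to back
        if switch_observed_within(window, target_level):
            return n    # license switch observed within the time window
        n += 1          # otherwise, grow the code block and try again
\end{minted}
\end{center}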
Here, we indeed found interesting differences between instruction types: all \SI[number-unit-product=-]{512}{\bit} instructions trigger a switch to level~1 after exactly one executed instruction. For \SI[number-unit-product=-]{256}{\bit} instructions, however, a quite different picture emerges: \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions} depicts the required number of instructions, exemplified by the \SI[number-unit-product=-]{256}{\bit} \texttt{vfmaddsub132pd} unrolled case, without pre-throttling and with one core executing \gls{AVX} instructions. The result varies between a minimum of $3317$ and a maximum of $30845$ instructions, while the average and median lie rather close together at $12982.8$ and $12431$, respectively. \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions:histogram} shows the same data, plotted as a histogram. It becomes clearly visible that the median is much closer to the minimum than to the maximum.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Instructions},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=0,
ymax=32000,
xtick=\empty,
xmin=-10,
xmax=1010
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time_avx_instructions.csv};
\end{axis}
\end{tikzpicture}
\caption{Required \SI[number-unit-product=-]{256}{\bit} \texttt{vfmaddsub132pd} instructions to trigger a turbo license switch to level~1. Unlike with \SI[number-unit-product=-]{512}{\bit} instructions, the number varies a lot.}
\label{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions}
\end{figure}
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
ybar,
ymin=0,
xlabel={Instructions},
scale only axis,
width=\textwidth-0.5cm,
height=4cm,
axis lines=left,
% xmin=2900,
% xmax=32000,
xmin=0, xmax=32000,
xtick=data,
ymax=290,
scaled ticks=base 10:0
]
\addplot[color=kitblue, fill=kitblue, hist] table {plots/avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time_avx_instructions.csv};
\draw[color=kitgreen, thick] ({axis cs:12431,0}) -- ({axis cs:12431,275.5}) node[anchor=west, pos=0.96, xshift=0.1cm] {median = 12431};
\end{axis}
% \begin{axis}[
% axis y line=none,
% axis x line=none,
% scale only axis,
% width=\textwidth,
% height=5cm,
% xmin=2900,
% xmax=32000
% ]
% \addplot[color=kitgreen, domain=2900:32000, smooth, thick] {x^(6.6985-1)/((1938.2)^(6.6985)*(6.6985)!)*exp(-x/1938.2)};
% \end{axis}
\end{tikzpicture}
\caption{Histogram of the data depicted in \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions}. The distance between median and maximum is much larger than between median and minimum.}
\label{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions:histogram}
\end{figure}
We find that the average and median values rise when executing the test with more cores. While this may seem surprising at first glance, it is expected: when run with multiple cores, the test only ends once a frequency switch has been triggered on all cores. Therefore, given the high variance of the results, it becomes less likely that all cores reach a frequency switch after only a few instructions.
We implemented the pre-throttling mode to be able to test what happens when additional load is created by utilizing a further core in an environment where other cores are already loaded, contrary to the default mode where the \gls{AVX} workload is started on all cores at the same time. However, pre-throttling does not seem to have any effect that cannot be attributed to statistical noise. Disabling turbo frequencies, on the other hand, does have one: we can still observe a high variance within the results, but all statistical measures are \emph{lower}: with a single core, we find a minimum of $527$, a maximum of $21793$, an average of $8642.49$, and a median of $8137.5$. This is intriguing, as one would expect a frequency drop to be less necessary when starting from a lower base frequency. We cannot be sure of an explanation; however, this observation may hint that voltage stability is a crucial factor here: in the previously cited patent \cite{bonen2016performing}, Intel notes that a voltage drop may occur due to falling electrical resistance (and, in turn, rising current) upon powering the vector units, and that a detector for this situation is in place. Given that, at a lower frequency, the core also runs at a lower voltage, it seems plausible that the voltage drops below the critical threshold earlier, as the resistance drop is the same regardless of voltage and frequency.
% todo this theory may be verified by testing different chips of the same model and perhaps also applying higher Vcore
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Downclocking Time (\si{\micro\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=15,
ymax=50,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_512_l1_1cpus_downclock_time.csv};
\end{axis}
\begin{axis}[
ylabel={Downclocking Time (\si{\micro\second})},
scale only axis,
width=\textwidth,
height=4cm,
hide axis,
ymin=15,
ymax=50,
xtick=\empty,
xmin=0,
xmax=1000
]
\addplot[color=kitgreen, domain=0:1000, thick] {24.593225806452} node[pos=0, below=0.55cm, anchor=west] {median = \SI{24.59}{\micro\second}};
\end{axis}
\end{tikzpicture}
\caption{Downclocking time to level~1 for the \SI[number-unit-product=-]{512}{\bit} \texttt{vfmaddsub132pd} instruction. The results are very homogeneous around a median of \SI{24.59}{\micro\second}.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_l1_1cpus_downclock:time}
\end{figure}
With these results, we have fully established all conditions required for \gls{AVX}-induced frequency reductions to level~1, so we are now interested in the time required for the actual switch (i.e., the time between starting execution and the moment the new frequency is applied). Again, we find \SI[number-unit-product=-]{512}{\bit} and \SI[number-unit-product=-]{256}{\bit} instructions to have noticeable differences. For example, with \SI[number-unit-product=-]{512}{\bit} \texttt{vfmaddsub132pd} instructions executed on a single core, all results are very homogeneously distributed around a median of \SI{24.59}{\micro\second} with only a few outliers, as depicted in \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_l1_1cpus_downclock:time}. There is no statistically relevant difference with other \SI[number-unit-product=-]{512}{\bit} instructions. However, median and deviation both rise with multiple cores: for example, with three cores (\Cref{fig:analysis:results:downclocking:avx_dp_fma_512_l1_3cpus_downclock:time}), we find the average of the medians across all three cores to be at \SI{27.39}{\micro\second} -- an increase of nearly three microseconds. Whereas the standard deviation with only one core is very low at \SI{0.0025}{\micro\second}, it quintuples with three cores and amounts to an average of \SI{0.013}{\micro\second}. Notably, this increase only happens with pre-throttling mode disabled, i.e., when all cores switch their license level simultaneously; with pre-throttling enabled, where only one core switches while the others are already running at a reduced frequency level, it does not occur. This is interesting because it tells us that the frequency is not the determining factor here: the maximum turbo frequency of a single core depends on the available electrical power budget as well as on how many cores are under load, and thus with three cores each core runs at a lower frequency compared to when only one core is active. If, however, the increase is not visible with pre-throttling -- i.e., when two cores are already at level~1 -- the lower frequency cannot be at fault for the increased latency. A simple and plausible explanation could be that the \gls{PCU} requires more time to make its decision when more license requests are pending.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Downclocking Time (\si{\micro\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=15,
ymax=50,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel
]
\addplot[only marks, color=kitblue, mark options={scale=0.5}] table {plots/avx_dp_fma_512_l1_3cpus_downclock_time_cpu0.csv};
\addplot[only marks, color=kitgreen, mark options={scale=0.5}] table {plots/avx_dp_fma_512_l1_3cpus_downclock_time_cpu1.csv};
\addplot[only marks, color=kityellow, mark options={scale=0.5}] table {plots/avx_dp_fma_512_l1_3cpus_downclock_time_cpu2.csv};
\end{axis}
\end{tikzpicture}
\caption{Downclocking time to level 1 for the \SI[number-unit-product=-]{512}{\bit} \texttt{vfmaddsub132pd} instruction when executed on three cores (each color represents a specific core). Compared to \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_l1_1cpus_downclock:time}, median and standard deviation are higher.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_l1_3cpus_downclock:time}
\end{figure}
Looking at \SI[number-unit-product=-]{256}{\bit} instructions, we find that the downclocking time to level~1 is still very homogeneous across all runs, albeit a lot higher than in the \SI[number-unit-product=-]{512}{\bit} case. As shown in \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_downclock:time}, with \SI[number-unit-product=-]{256}{\bit} \texttt{vfmaddsub132pd} instructions executed on one core, the median is at \SI{51.52}{\micro\second} -- more than doubled compared to the \SI{24.59}{\micro\second} of the \SI[number-unit-product=-]{512}{\bit} variant. The results do not correlate with the amount of instructions required to cause the frequency reduction previously depicted in \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions,fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_non_avx_time:avx_instructions:histogram}, which makes it seem likely that the difference in timing is induced by an algorithmic difference in the implementation.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Downclocking Time (\si{\micro\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=24,
ymax=78,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_256_unrolled_l1_1cpus_downclock_time.csv};
\end{axis}
\begin{axis}[
ylabel={\si{\micro\second}},
scale only axis,
width=\textwidth,
height=4cm,
hide axis,
ymin=24,
ymax=78,
xtick=\empty,
xmin=0,
xmax=1000
]
\addplot[color=kitgreen, domain=0:1000, thick] {51.516774193548} node[pos=0, right=0.2cm, below=1.55cm, anchor=west] {median = \SI{51.52}{\micro\second}};
\end{axis}
\end{tikzpicture}
\caption{Downclocking time to level 1 for the \SI[number-unit-product=-]{256}{\bit} \texttt{vfmaddsub132pd} instruction. This takes a lot longer than with the \SI[number-unit-product=-]{512}{\bit} version.}
\label{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_downclock:time}
\end{figure}
So far we have described the frequency reduction process until level~1 is reached. For cases that target this level (i.e., everything apart from \gls{AVX-512} heavy instructions with the notable exceptions outlined above), nothing further happens until the load that induced the license level switch ceases and the frequency is brought back to its previous level again, as described later in \Cref{sec:analysis:results:upclocking}.
Our findings about the second turbo license switch from level~1 to level~2 (where applicable) did not yield any surprises: in general, the behavior is similar to what happens during the switch from level~0 to level~1.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Downclocking Time (\si{\micro\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=42,
ymax=80,
xtick=\empty,
xmin=-10,
xmax=1010
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_512_unrolled_l2_1cpus_downclock_time.csv};
%\draw[color=red] ([xshift=-0.5cm] {axis cs:-50,43.35}) -- ([xshift=0.5cm] {axis cs:-10,43.35});
\end{axis}%
\begin{axis}[
ylabel={\si{\micro\second}},
scale only axis,
width=\textwidth,
height=4cm,
hide axis,
ymin=42,
ymax=80,
xtick=\empty,
xmin=0,
xmax=1000,
]
\addplot[color=kitgreen, domain=0:1000, thick] {51.433548387097} node[pos=0.02, below=0.7cm, anchor=west] {median = \SI{51.43}{\micro\second}};
\end{axis}
\fill[white] (-0.28cm,0.14cm) rectangle (0.28cm,0.28cm); % enabling the axis discontinuity in this plot breaks the layout, so we draw it manually
\draw[color=black] (-0.28cm,0.28cm) -- (0.28cm,0.28cm);
\draw[color=black] (-0.28cm,0.14cm) -- (0.28cm,0.14cm);
\end{tikzpicture}
\caption{Time taken from level~0 to level~2 with the \texttt{vfmaddsub132pd} instruction. Again, the results are very homogeneous.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l2_1cpus_downclock:time}
\end{figure}
\Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l2_1cpus_downclock:time} shows the time needed to reach level~2 using the unrolled \texttt{vfmaddsub132pd} instruction as an example. We find the data to be homogeneous with only a few outliers, similar to previous results. The median is located at \SI{51.43}{\micro\second}; note, however, that this also includes the time taken from level~0 to level~1. By subtracting the median of the transition to level~1 (\SI{24.59}{\micro\second}), we can deduce that the transition from level~1 to level~2 takes \SI{26.84}{\micro\second}, which is only slightly longer. For multiple cores and pre-throttling mode, the general behavior and the increases with multiple cores are about the same. This fits our theory that the \gls{PCU} takes longer to make decisions when multiple license transition requests are pending.
\subsection{Upclocking}
\label{sec:analysis:results:upclocking}
After a core's clock is reduced due to a license level transition, it runs at the lower frequency until no more heavy instructions are being executed. However, the frequency cannot be raised immediately, in order to avoid wasting time with too many frequency switches, and thus the core keeps executing further instructions at a lower speed for a while -- this is what essentially causes the performance issue for heterogeneous workloads that motivated this work. Further, the upclocking part of the reclocking algorithm is where we are most likely to find room for possible optimizations. Therefore, the process of raising the frequency (actually, reverting the reduction) deserves particular attention. According to Intel, as cited in this chapter's \hyperref[sec:analysis]{introduction}, the processor generally delays increasing the frequency again by about \SI{2}{\milli\second}. To verify this claim, we used our framework's \hyperref[sec:analysis:design:measurementmodes:upclock]{\texttt{UPCLOCK}} measurement mode, which executes \gls{AVX} instructions until a license level transition occurs on a given core and then keeps the core spinning in a scalar loop until it switches back to level~0.
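Conceptually, this amounts to the following loop, shown here as a Python sketch; \texttt{execute\_avx}, \texttt{license\_level}, and \texttt{now\_us} are hypothetical stand-ins for the framework's primitives:
\begin{center}
\begin{minted}{python}
def upclock_delay_us(target_level):
    while license_level() != target_level:
        execute_avx(1)           # force the downward license switch
    start = now_us()
    while license_level() != 0:  # spin in scalar code until level 0 returns
        pass
    return now_us() - start      # upclocking delay in microseconds
\end{minted}
\end{center}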
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Upclocking Delay (\si{\milli\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=0.65,
ymax=0.72,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_256_unrolled_l1_1cpus_upclock_time.csv};
\end{axis}
\begin{axis}[
ylabel={\si{\milli\second}},
scale only axis,
width=\textwidth,
height=4cm,
hide axis,
ymin=0.65,
ymax=0.72,
xtick=\empty,
xmin=0,
xmax=1000
]
\addplot[color=kitgreen, domain=0:1000, thick] {0.674503548387097} node[pos=0, below=0.72cm, anchor=west] {median = \SI{0.675}{\milli\second}};
\end{axis}
\end{tikzpicture}
\caption{Upclocking times after executing \SI[number-unit-product=-]{256}{\bit} \texttt{vfmaddsub132pd} instructions until level~1 is reached. Results show the upclocking delay to be uniformly around \SI[quotient-mode=fraction]{2/3}{\milli\second}.}
\label{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_upclock:time}
\end{figure}
Again using the results from the \texttt{vfmaddsub132pd} instruction as an example, we find that the upclocking behavior differs between several test configurations. For the \SI[number-unit-product=-]{256}{\bit} unrolled version executed on a single core with pre-throttling disabled and targeting level~1, we get the results depicted in \Cref{fig:analysis:results:downclocking:avx_dp_fma_256_unrolled_l1_1cpus_upclock:time}. These are very uniformly distributed around a median of \SI{0.675}{\milli\second}. Notably, this is suspiciously close to \SI[quotient-mode=fraction]{2/3}{\milli\second}. Most likely, this is the value Intel tried to approximate.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Upclocking Delay (\si{\milli\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=0.5,
ymax=1.35,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_512_unrolled_l1_1cpus_upclock_time.csv};
\end{axis}
\begin{axis}[
ylabel={\si{\milli\second}},
scale only axis,
width=\textwidth,
height=4cm,
hide axis,
ymin=0.5,
ymax=1.35,
xtick=\empty,
xmin=0,
xmax=1000
]
\addplot[color=kitgreen, domain=0:1000, thick] {0.674378709677419} node[pos=0, below=0.35cm, anchor=west] {median = \SI{0.674}{\milli\second}};
\end{axis}
\end{tikzpicture}
\caption{Upclocking times after executing \SI[number-unit-product=-]{512}{\bit} \texttt{vfmaddsub132pd} instructions until level~1 is reached. While most runs still yield a time of \SI[quotient-mode=fraction]{2/3}{\milli\second}, some are scattered within a range up to \SI[quotient-mode=fraction]{4/3}{\milli\second}.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_upclock:time}
\end{figure}
Looking at the very same instruction in its \SI[number-unit-product=-]{512}{\bit} variant under the same test conditions in \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_upclock:time}, a different picture emerges: while the median is still nearly the same at \SI{0.674}{\milli\second}, the maximum is at \SI{1.333}{\milli\second} -- about \SI[quotient-mode=fraction]{4/3}{\milli\second}. However, when going to level~2, all runs are again homogeneously distributed around \SI[quotient-mode=fraction]{2/3}{\milli\second}. The results are mostly the same when executed with multiple cores, save the notable exception of pre-throttling mode: in \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_upclock:time}, \SI{69.4}{\percent} of the results are below \SI{0.7}{\milli\second}. With two cores and \gls{AVX} pre-throttling enabled, as graphed in \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_2cpus_pre_throttle_avx_upclock:time}, this applies to \SI{94.7}{\percent} of the runs. Similar results are obtained with more cores.
\begin{figure}
\begin{tikzpicture}[trim axis left]
\sffamily
\begin{axis}[
xlabel={Runs ($n=1000$)},
ylabel={Upclocking Delay (\si{\milli\second})},
scale only axis,
width=\textwidth,
height=4cm,
axis lines=left,
ymin=0.5,
ymax=1.35,
xtick=\empty,
xmin=-10,
xmax=1010,
axis y discontinuity=parallel,
]
\addplot[only marks, color=kitblue] table {plots/avx_dp_fma_512_unrolled_l1_2cpus_pre_throttle_avx_upclock_time.csv};
\end{axis}
\end{tikzpicture}
\caption{Upclocking times after executing \SI[number-unit-product=-]{512}{\bit} instructions until level~1 is reached with two cores in \gls{AVX} pre-throttling mode. Compared to \Cref{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_1cpus_upclock:time}, where only one core was active, the variance is a lot smaller.}
\label{fig:analysis:results:downclocking:avx_dp_fma_512_unrolled_l1_2cpus_pre_throttle_avx_upclock:time}
\end{figure}
|
Formal statement is: lemma is_pole_divide: fixes f :: "'a :: t2_space \<Rightarrow> 'b :: real_normed_field" assumes "isCont f z" "filterlim g (at 0) (at z)" "f z \<noteq> 0" shows "is_pole (\<lambda>z. f z / g z) z" Informal statement is: If $f$ is a continuous function at $z$ and $g$ converges to $0$ at $z$, then $f/g$ has a pole at $z$. |
# Mie Scattering Function
**Scott Prahl**
**April 2021**
*If miepython is not installed, uncomment the following cell (i.e., delete the #) and run (shift-enter)*
```python
#!pip install --user miepython
```
```python
import numpy as np
import matplotlib.pyplot as plt
try:
import miepython
except ModuleNotFoundError:
print('miepython not installed. To install, uncomment and run the cell above.')
print('Once installation is successful, rerun this cell again.')
```
miepython not installed. To install, uncomment and run the cell above.
Once installation is successful, rerun this cell again.
Mie scattering describes the special case of the interaction of light passing through a non-absorbing medium with a single embedded spherical object. The sphere itself can be non-absorbing, moderately absorbing, or perfectly absorbing.
## Goals for this notebook:
* show how to plot the phase function
* explain the units for the scattering phase function
* describe normalization of the phase function
* show a few examples from classic Mie texts
## Geometry
Specifically, the scattering function $p(\theta_i,\phi_i,\theta_o,\phi_o)$ describes the amount of light scattered by a particle for light incident at an angle $(\theta_i,\phi_i)$ and exiting the particle (in the far field) at an angle $(\theta_o,\phi_o)$. For simplicity, the scattering function is often assumed to be rotationally symmetric (it is, obviously, for spherical scatterers) and the angle that the light is scattered into is assumed to depend only on $\theta=\theta_o-\theta_i$. In this case, the scattering function can be written as $p(\theta)$. Finally, the angle is often replaced by $\mu=\cos\theta$ and therefore the phase function becomes just $p(\mu)$.
The figure below shows the basic idea. An incoming monochromatic plane wave hits a sphere and produces *in the far field* two separate monochromatic waves — a slightly attenuated unscattered planar wave and an outgoing spherical wave.
Obviously, the scattered light will be cylindrically symmetric about the ray passing through the center of the sphere.
```python
t = np.linspace(0,2*np.pi,100)
xx = np.cos(t)
yy = np.sin(t)
fig,ax = plt.subplots(figsize=(10,8))
ax.set_aspect('equal')
plt.plot(xx,yy)
plt.plot([-5,7],[0,0],'--k')
plt.annotate('incoming irradiance', xy=(-4.5,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
y0 = i -2.5
plt.annotate('',xy=(-1.5,y0),xytext=(-5,y0),arrowprops=dict(arrowstyle="->",color='blue'))
plt.annotate('unscattered irradiance', xy=(3,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
y0 = i -2.5
plt.annotate('',xy=(7,y0),xytext=(3,y0),arrowprops=dict(arrowstyle="->",color='blue',ls=':'))
plt.annotate('scattered\nspherical\nwave', xy=(0,1.5),ha='left',color='red',fontsize=16)
plt.annotate('',xy=(2.5,2.5),xytext=(0,0),arrowprops=dict(arrowstyle="->",color='red'))
plt.annotate(r'$\theta$',xy=(2,0.7),color='red',fontsize=14)
plt.annotate('',xy=(2,2),xytext=(2.7,0),arrowprops=dict(connectionstyle="arc3,rad=0.2", arrowstyle="<->",color='red'))
plt.xlim(-5,7)
plt.ylim(-3,3)
plt.axis('off')
plt.show()
```
## Scattered Wave
```python
fig,ax = plt.subplots(figsize=(10,8))
ax.set_aspect('equal')
plt.scatter([0],[0],s=30)
m = 1.5
x = np.pi/3
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
scat = 15 * miepython.i_unpolarized(m,x,mu)
plt.plot(scat*np.cos(theta/180*np.pi),scat*np.sin(theta/180*np.pi))
for i in range(12):
ii = i*15
xx = scat[ii]*np.cos(theta[ii]/180*np.pi)
yy = scat[ii]*np.sin(theta[ii]/180*np.pi)
# print(xx,yy)
plt.annotate('',xy=(xx,yy),xytext=(0,0),arrowprops=dict(arrowstyle="->",color='red'))
plt.annotate('incident irradiance', xy=(-4.5,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
y0 = i -2.5
plt.annotate('',xy=(-1.5,y0),xytext=(-5,y0),arrowprops=dict(arrowstyle="->",color='blue'))
plt.annotate('unscattered irradiance', xy=(3,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
y0 = i -2.5
plt.annotate('',xy=(7,y0),xytext=(3,y0),arrowprops=dict(arrowstyle="->",color='blue',ls=':'))
plt.annotate('scattered\nspherical wave', xy=(0,1.5),ha='left',color='red',fontsize=16)
plt.xlim(-5,7)
plt.ylim(-3,3)
#plt.axis('off')
plt.show()
```
## Normalization of the scattered light
So the scattering function or phase function has at least three reasonable normalizations that involve integrating over all $4\pi$ steradians. Below, $d\Omega=\sin\theta\,d\theta\,d\phi$ is a differential solid angle.
$$
\begin{align}
\int_{4\pi} p(\theta,\phi) \,d\Omega &= 1\\[2mm]
\int_{4\pi} p(\theta,\phi) \,d\Omega &= 4\pi \\[2mm]
\int_{4\pi} p(\theta,\phi) \,d\Omega &= a \qquad\qquad \mbox{Used by miepython}\\[2mm]
\end{align}
$$
where $a$ is the single scattering albedo,
$$
a = \frac{\sigma_s}{\sigma_s+\sigma_a}
$$
and $\sigma_s$ is the scattering cross section, and $\sigma_a$ is the absorption cross section.
*The choice of normalization was made because it accounts for light lost through absorption by the sphere.*
If the incident light has units of watts, then the values from the scattering function $p(\theta,\phi)$ have units of radiant intensity or W/sr.
For example, a circular detector with radius $r_d$ at a distance $R$ will subtend an angle
$$
\Omega = \frac{\pi r_d^2}{R^2}
$$
(assuming $r_d\ll R$). Now if a power $P_0$ is scattered by a sphere, then the scattered power reaching the detector will be
$$
P_d = P_0 \cdot \Omega \cdot p(\theta,\phi)
$$
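As a quick numeric sketch (the detector geometry and $P_0$ below are made-up values; only `miepython.i_unpolarized` is the real API used throughout this notebook):

```python
import numpy as np
import miepython

m, x = 1.5, np.pi/3                    # sphere used in the next example
P0 = 1e-3                              # 1 mW scattered by the sphere (assumed)
r_d, R = 1e-3, 1.0                     # detector radius and distance [m]
omega = np.pi * r_d**2 / R**2          # solid angle subtended by the detector
mu = np.cos(np.radians([30.0]))        # detector placed at 30 degrees
p = miepython.i_unpolarized(m, x, mu)  # scattering function [1/sr]
P_d = P0 * omega * p[0]                # power reaching the detector [W]
print(P_d)
```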
## Examples
### Unpolarized Scattering Function
If unpolarized light hits the sphere, then there are no polarization effects to worry about. It is pretty easy to generate a plot to show how scattering changes with angle.
```python
m = 1.5
x = np.pi/3
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
scat = miepython.i_unpolarized(m,x,mu)
fig,ax = plt.subplots(1,2,figsize=(12,5))
ax=plt.subplot(121, projection='polar')
ax.plot(theta/180*np.pi,scat)
ax.set_rticks([0.05, 0.1,0.15])
ax.set_title(r"m=1.5, Sphere Diameter = $\lambda$/3")
plt.subplot(122)
plt.plot(theta,scat)
plt.xlabel('Exit Angle [degrees]')
plt.ylabel('Unpolarized Scattered light [1/sr]')
plt.title(r'm=1.5, Sphere Diameter = $\lambda$/3')
plt.ylim(0.00,0.2)
plt.show()
```
A similar calculation but using `ez_intensities()`
```python
m = 1.33
lambda0 = 632.8 # nm
d = 200 # nm
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
Ipar, Iper = miepython.ez_intensities(m, d, lambda0, mu)
fig,ax = plt.subplots(1,2,figsize=(12,5))
ax=plt.subplot(121, projection='polar')
ax.plot(theta/180*np.pi,Ipar)
ax.plot(theta/180*np.pi,Iper)
ax.set_rticks([0.05, 0.1, 0.15, 0.20])
plt.title(r"m=%.2f, Sphere Diameter = %.0f nm, $\lambda$=%.1f nm" % (m, d, lambda0))
plt.subplot(122)
plt.plot(theta,Ipar)
plt.plot(theta,Iper)
plt.xlabel('Exit Angle [degrees]')
plt.ylabel('Unpolarized Scattered light [1/sr]')
plt.title(r"m=%.2f, Sphere Diameter = %.0f nm, $\lambda$=%.1f nm" % (m, d, lambda0))
plt.ylim(0.00,0.2)
plt.show()
```
### Rayleigh Scattering
Classic Rayleigh scattering treats small particles with natural (unpolarized) light.
The solid black line denotes the total scattered intensity. The red dashed line is light scattered that is polarized perpendicular to the plane of the graph and the blue dotted line is for light parallel to the plane of the graph. (Compare with van de Hulst, Figure 10)
```python
m = 1.3
x = 0.01
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
ipar = miepython.i_par(m,x,mu)/2
iper = miepython.i_per(m,x,mu)/2
iun = miepython.i_unpolarized(m,x,mu)
fig,ax = plt.subplots(1,2,figsize=(12,5))
ax=plt.subplot(121, projection='polar')
ax.plot(theta/180*np.pi,iper,'r--')
ax.plot(theta/180*np.pi,ipar,'b:')
ax.plot(theta/180*np.pi,iun,'k')
ax.set_rticks([0.05, 0.1,0.15])
plt.title('m=%.2f, Sphere Parameter = %.2f' %(m,x))
plt.subplot(122)
plt.plot(theta,iper,'r--')
plt.plot(theta,ipar,'b:')
plt.plot(theta,iun,'k')
plt.xlabel('Exit Angle [degrees]')
plt.ylabel('Normalized Scattered light [1/sr]')
plt.title('m=%.2f, Sphere Parameter = %.2f' %(m,x))
plt.ylim(0.00,0.125)
plt.text(130,0.02,r"$0.5I_{par}$",color="blue", fontsize=16)
plt.text(120,0.062,r"$0.5I_{per}$",color="red", fontsize=16)
plt.text(30,0.11,r"$I_{unpolarized}$",color="black", fontsize=16)
plt.show()
```
## Differential Scattering Cross Section
Sometimes one would like the scattering function normalized so that the integral over all $4\pi$ steradians to be the scattering cross section
$$
\sigma_{sca} = \frac{\pi d^2}{4} Q_{sca}
$$
The *differential scattering cross section* $d\sigma_{sca}/d\Omega$ is defined so that
$$
\sigma_{sca} = \int_{4\pi} \frac{d\sigma_{sca}}{d\Omega}\,d\Omega
$$
Since the unpolarized scattering function is normalized so that its integral is the single scattering albedo, this means that
$$
\frac{Q_{sca}}{Q_{ext}} = \int_{4\pi} p(\mu) \sin\theta\,d\theta d\phi
$$
and therefore the differential scattering cross section can be obtained from `miepython` using
$$
\frac{d\sigma_{sca}}{d\Omega} = \frac{\pi d^2 Q_{ext}}{4} \cdot p(\theta,\phi)
$$
Note that this is $Q_{ext}$ and *not* $Q_{sca}$ because of the choice of normalization!
For example, here is a replica of [figure 4](http://plaza.ufl.edu/dwhahn/Rayleigh%20and%20Mie%20Light%20Scattering.pdf)
```python
m = 1.4-0j
lambda0 = 532e-9 # m
theta = np.linspace(0,180,1000)
mu = np.cos(theta* np.pi/180)
d = 1700e-9 # m
x = 2 * np.pi/lambda0 * d/2
geometric_cross_section = np.pi * d**2/4 * 1e4 # cm**2
qext, qsca, qback, g = miepython.mie(m,x)
sigma_sca = geometric_cross_section * qext * miepython.i_unpolarized(m,x,mu)
plt.semilogy(theta, sigma_sca*1e-3, color='blue')
plt.text(15, sigma_sca[0]*3e-4, "%.0fnm\n(x10$^{-3}$)" % (d*1e9), color='blue')
d = 170e-9 # m
x = 2 * np.pi/lambda0 * d/2
geometric_cross_section = np.pi * d**2/4 * 1e4 # cm**2
qext, qsca, qback, g = miepython.mie(m,x)
sigma_sca = geometric_cross_section * qext * miepython.i_unpolarized(m,x,mu)
plt.semilogy(theta, sigma_sca, color='red')
plt.text(110, sigma_sca[-1]/2, "%.0fnm" % (d*1e9), color='red')
d = 17e-9 # m
x = 2 * np.pi/lambda0 * d/2
geometric_cross_section = np.pi * d**2/4 * 1e4 # cm**2
qext, qsca, qback, g = miepython.mie(m,x)
sigma_sca = geometric_cross_section * qext * miepython.i_unpolarized(m,x,mu)
plt.semilogy(theta, sigma_sca*1e6, color='green')
plt.text(130, sigma_sca[-1]*1e6, "(x10$^6$)\n%.0fnm" % (d*1e9), color='green')
plt.title(r"Refractive index m=1.4, $\lambda$=532nm")
plt.xlabel("Scattering Angle (degrees)")
plt.ylabel("Diff. Scattering Cross Section (cm$^2$/sr)")
plt.grid(True)
plt.show()
```
## Normalization revisited
### Evenly spaced $\mu=\cos\theta$
Start with uniformly distributed scattering angles that are evenly spaced over the cosine of the scattered angle.
#### Verifying normalization numerically
Specifically, to ensure proper normalization, the integral of the scattering function over all solid angles must equal the single scattering albedo
$$
a = \int_0^{2\pi}\int_0^\pi \, p(\theta,\phi)\,\sin\theta\,d\theta\,d\phi
$$
or with a change of variables $\mu=\cos\theta$ and using the symmetry to the integral in $\phi$
$$
a = 2\pi \int_{-1}^1 \, p(\mu)\,d\mu
$$
This integral can be done numerically by simply summing all the rectangles
$$
a = 2\pi \sum_{i=0}^N p(\mu_i)\,\Delta\mu_i
$$
and if all the rectangles have the same width
$$
a = 2\pi\Delta\mu \sum_{i=0}^N p(\mu_i)
$$
#### Case 1: m=1.5, x=1
The total integral `total=` in the title should match the albedo `a=`.
For this non-strongly peaked scattering function, the simple integration remains close to the expected value.
```python
m = 1.5
x = 1
mu = np.linspace(-1,1,501)
intensity = miepython.i_unpolarized(m,x,mu)
qext, qsca, qback, g = miepython.mie(m,x)
a = qsca/qext
#integrate over all angles
dmu = mu[1] - mu[0]
total = 2 * np.pi * dmu * np.sum(intensity)
plt.plot(mu,intensity)
plt.xlabel(r'$\cos(\theta)$')
plt.ylabel('Unpolarized Scattering Intensity [1/sr]')
plt.title('m=%.3f%+.3fj, x=%.2f, a=%.3f, total=%.3f'%(m.real,m.imag,x,a, total))
plt.show()
```
#### Case 2: m=1.5-1.5j, x=1
Again, the total integral `total=` in the title should match the albedo `a=`.
For this non-strongly peaked scattering function, the simple integration remains close to the expected value.
```python
m = 1.5 - 1.5j
x = 1
mu = np.linspace(-1,1,501)
intensity = miepython.i_unpolarized(m,x,mu)
qext, qsca, qback, g = miepython.mie(m,x)
a = qsca/qext
#integrate over all angles
dmu = mu[1] - mu[0]
total = 2 * np.pi * dmu * np.sum(intensity)
plt.plot(mu,intensity)
plt.xlabel(r'$\cos(\theta)$')
plt.ylabel('Unpolarized Scattering Intensity [1/sr]')
plt.title('m=%.3f%+.3fj, x=%.2f, a=%.3f, total=%.3f'%(m.real,m.imag,x,a, total))
plt.show()
```
### Evenly spaced $\theta$
The total integral `total=` in the title should match the albedo `a=`.
For this non-strongly peaked scattering function, even spacing in $\theta$ improves the accuracy of the integration.
```python
m = 1.5-1.5j
x = 1
theta = np.linspace(0,180,361)*np.pi/180
mu = np.cos(theta)
intensity = miepython.i_unpolarized(m,x,mu)
qext, qsca, qback, g = miepython.mie(m,x)
a = qsca/qext
#integrate over all angles
dtheta = theta[1]-theta[0]
total = 2 * np.pi * dtheta * np.sum(intensity* np.sin(theta))
plt.plot(mu,intensity)
plt.xlabel(r'$\cos(\theta)$')
plt.ylabel('Unpolarized Scattering Intensity [1/sr]')
plt.title('m=%.3f%+.3fj, x=%.2f, a=%.3f, total=%.3f'%(m.real,m.imag,x,a, total))
plt.show()
```
## Comparison to Wiscombe's Mie Program
Wiscombe normalizes as
$$
\int_{4\pi} p(\theta,\phi) \,d\Omega = \pi x^2 Q_{sca}
$$
where $p(\theta)$ is the scattered light.
Once corrected for differences in phase function normalization, Wiscombe's test cases match those from `miepython` exactly.
### Wiscombe's Test Case 14
```python
"""
MIEV0 Test Case 14: Refractive index: real 1.500 imag -1.000E+00, Mie size parameter = 1.000
Angle Cosine S-sub-1 S-sub-2 Intensity Deg of Polzn
0.00 1.000000 5.84080E-01 1.90515E-01 5.84080E-01 1.90515E-01 3.77446E-01 0.0000
30.00 0.866025 5.65702E-01 1.87200E-01 5.00161E-01 1.45611E-01 3.13213E-01 -0.1336
60.00 0.500000 5.17525E-01 1.78443E-01 2.87964E-01 4.10540E-02 1.92141E-01 -0.5597
90.00 0.000000 4.56340E-01 1.67167E-01 3.62285E-02 -6.18265E-02 1.20663E-01 -0.9574
"""
x=1.0
m=1.5-1.0j
mu=np.cos(np.linspace(0,90,4) * np.pi/180)
qext, qsca, qback, g = miepython.mie(m,x)
albedo = qsca/qext
unpolar = miepython.i_unpolarized(m,x,mu) # normalized to a
unpolar /= albedo # normalized to 1
unpolar_miev = np.array([3.77446E-01,3.13213E-01,1.92141E-01,1.20663E-01])
unpolar_miev /= np.pi * qsca * x**2 # normalized to 1
ratio = unpolar_miev/unpolar
print("MIEV0 Test Case 14: m=1.500-1.000j, Mie size parameter = 1.000")
print()
print(" %9.1f°%9.1f°%9.1f°%9.1f°"%(0,30,60,90))
print("MIEV0 %9.5f %9.5f %9.5f %9.5f"%(unpolar_miev[0],unpolar_miev[1],unpolar_miev[2],unpolar_miev[3]))
print("miepython %9.5f %9.5f %9.5f %9.5f"%(unpolar[0],unpolar[1],unpolar[2],unpolar[3]))
print("ratio %9.5f %9.5f %9.5f %9.5f"%(ratio[0],ratio[1],ratio[2],ratio[3]))
```
### Wiscombe's Test Case 10
```python
"""
MIEV0 Test Case 10: Refractive index: real 1.330 imag -1.000E-05, Mie size parameter = 100.000
Angle Cosine S-sub-1 S-sub-2 Intensity Deg of Polzn
0.00 1.000000 5.25330E+03 -1.24319E+02 5.25330E+03 -1.24319E+02 2.76126E+07 0.0000
30.00 0.866025 -5.53457E+01 -2.97188E+01 -8.46720E+01 -1.99947E+01 5.75775E+03 0.3146
60.00 0.500000 1.71049E+01 -1.52010E+01 3.31076E+01 -2.70979E+00 8.13553E+02 0.3563
90.00 0.000000 -3.65576E+00 8.76986E+00 -6.55051E+00 -4.67537E+00 7.75217E+01 -0.1645
"""
x=100.0
m=1.33-1e-5j
mu=np.cos(np.linspace(0,90,4) * np.pi/180)
qext, qsca, qback, g = miepython.mie(m,x)
albedo = qsca/qext
unpolar = miepython.i_unpolarized(m,x,mu) # normalized to a
unpolar /= albedo # normalized to 1
unpolar_miev = np.array([2.76126E+07,5.75775E+03,8.13553E+02,7.75217E+01])
unpolar_miev /= np.pi * qsca * x**2 # normalized to 1
ratio = unpolar_miev/unpolar
print("MIEV0 Test Case 10: m=1.330-0.00001j, Mie size parameter = 100.000")
print()
print(" %9.1f°%9.1f°%9.1f°%9.1f°"%(0,30,60,90))
print("MIEV0 %9.5f %9.5f %9.5f %9.5f"%(unpolar_miev[0],unpolar_miev[1],unpolar_miev[2],unpolar_miev[3]))
print("miepython %9.5f %9.5f %9.5f %9.5f"%(unpolar[0],unpolar[1],unpolar[2],unpolar[3]))
print("ratio %9.5f %9.5f %9.5f %9.5f"%(ratio[0],ratio[1],ratio[2],ratio[3]))
```
### Wiscombe's Test Case 7
```python
"""
MIEV0 Test Case 7: Refractive index: real 0.750 imag 0.000E+00, Mie size parameter = 10.000
Angle Cosine S-sub-1 S-sub-2 Intensity Deg of Polzn
0.00 1.000000 5.58066E+01 -9.75810E+00 5.58066E+01 -9.75810E+00 3.20960E+03 0.0000
30.00 0.866025 -7.67288E+00 1.08732E+01 -1.09292E+01 9.62967E+00 1.94639E+02 0.0901
60.00 0.500000 3.58789E+00 -1.75618E+00 3.42741E+00 8.08269E-02 1.38554E+01 -0.1517
90.00 0.000000 -1.78590E+00 -5.23283E-02 -5.14875E-01 -7.02729E-01 1.97556E+00 -0.6158
"""
x=10.0
m=0.75
mu=np.cos(np.linspace(0,90,4) * np.pi/180)
qext, qsca, qback, g = miepython.mie(m,x)
albedo = qsca/qext
unpolar = miepython.i_unpolarized(m,x,mu) # normalized to a
unpolar /= albedo # normalized to 1
unpolar_miev = np.array([3.20960E+03,1.94639E+02,1.38554E+01,1.97556E+00])
unpolar_miev /= np.pi * qsca * x**2 # normalized to 1
ratio = unpolar_miev/unpolar
print("MIEV0 Test Case 7: m=0.75, Mie size parameter = 10.000")
print()
print(" %9.1f°%9.1f°%9.1f°%9.1f°"%(0,30,60,90))
print("MIEV0 %9.5f %9.5f %9.5f %9.5f"%(unpolar_miev[0],unpolar_miev[1],unpolar_miev[2],unpolar_miev[3]))
print("miepython %9.5f %9.5f %9.5f %9.5f"%(unpolar[0],unpolar[1],unpolar[2],unpolar[3]))
print("ratio %9.5f %9.5f %9.5f %9.5f"%(ratio[0],ratio[1],ratio[2],ratio[3]))
```
## Comparison to Bohren & Huffmans's Mie Program
Bohren & Huffman normalizes as
$$
\int_{4\pi} p(\theta,\phi) \,d\Omega = 4 \pi x^2 Q_{sca}
$$
### Bohren & Huffmans's Test Case 14
```python
"""
BHMie Test Case 14, Refractive index = 1.5000-1.0000j, Size parameter = 1.0000
Angle Cosine S1 S2
0.00 1.0000 -8.38663e-01 -8.64763e-01 -8.38663e-01 -8.64763e-01
0.52 0.8660 -8.19225e-01 -8.61719e-01 -7.21779e-01 -7.27856e-01
1.05 0.5000 -7.68157e-01 -8.53697e-01 -4.19454e-01 -3.72965e-01
1.57 0.0000 -7.03034e-01 -8.43425e-01 -4.44461e-02 6.94424e-02
"""
x=1.0
m=1.5-1j
mu=np.cos(np.linspace(0,90,4) * np.pi/180)
qext, qsca, qback, g = miepython.mie(m,x)
albedo = qsca/qext
unpolar = miepython.i_unpolarized(m,x,mu) # normalized to a
unpolar /= albedo # normalized to 1
s1_bh = np.empty(4,dtype=complex)
s1_bh[0] = -8.38663e-01 - 8.64763e-01*1j
s1_bh[1] = -8.19225e-01 - 8.61719e-01*1j
s1_bh[2] = -7.68157e-01 - 8.53697e-01*1j
s1_bh[3] = -7.03034e-01 - 8.43425e-01*1j
s2_bh = np.empty(4,dtype=complex)
s2_bh[0] = -8.38663e-01 - 8.64763e-01*1j
s2_bh[1] = -7.21779e-01 - 7.27856e-01*1j
s2_bh[2] = -4.19454e-01 - 3.72965e-01*1j
s2_bh[3] = -4.44461e-02 + 6.94424e-02*1j
# BHMie seems to normalize their intensities to 4 * pi * x**2 * Qsca
unpolar_bh = (abs(s1_bh)**2+abs(s2_bh)**2)/2
unpolar_bh /= np.pi * qsca * 4 * x**2 # normalized to 1
ratio = unpolar_bh/unpolar
print("BHMie Test Case 14: m=1.5000-1.0000j, Size parameter = 1.0000")
print()
print(" %9.1f°%9.1f°%9.1f°%9.1f°"%(0,30,60,90))
print("BHMIE %9.5f %9.5f %9.5f %9.5f"%(unpolar_bh[0],unpolar_bh[1],unpolar_bh[2],unpolar_bh[3]))
print("miepython %9.5f %9.5f %9.5f %9.5f"%(unpolar[0],unpolar[1],unpolar[2],unpolar[3]))
print("ratio %9.5f %9.5f %9.5f %9.5f"%(ratio[0],ratio[1],ratio[2],ratio[3]))
print()
print("Note that this test is identical to MIEV0 Test Case 14 above.")
print()
print("Wiscombe's code is much more robust than Bohren's so I attribute errors all to Bohren")
```
### Bohren & Huffman, water droplets
A tiny water droplet (0.26 microns) in a cloud has pretty strong forward scattering! A graph of this is figure 4.9 in Bohren and Huffman's *Absorption and Scattering of Light by Small Particles*.
A bizarre scaling factor of $16\pi$ is needed to make the `miepython` results match those in the figure 4.9.
```python
x=3
m=1.33-1e-8j
theta = np.linspace(0,180,181)
mu = np.cos(theta*np.pi/180)
scaling_factor = 16*np.pi
iper = scaling_factor*miepython.i_per(m,x,mu)
ipar = scaling_factor*miepython.i_par(m,x,mu)
P = (iper-ipar)/(iper+ipar)
plt.subplots(2,1,figsize=(8,8))
plt.subplot(2,1,1)
plt.semilogy(theta,ipar,label='$i_{par}$')
plt.semilogy(theta,iper,label='$i_{per}$')
plt.xlim(0,180)
plt.xticks(range(0,181,30))
plt.ylabel('i$_{par}$ and i$_{per}$')
plt.legend()
plt.title('Figure 4.9 from Bohren & Huffman')
plt.subplot(2,1,2)
plt.plot(theta,P)
plt.ylim(-1,1)
plt.xticks(range(0,181,30))
plt.xlim(0,180)
plt.ylabel('Polarization')
plt.plot([0,180],[0,0],':k')
plt.xlabel('Angle (Degrees)')
plt.show()
```
## van de Hulst Comparison
This graph (see figure 29 in *Light Scattering by Small Particles*) was obviously constructed by hand. In it, van de Hulst worked hard to convey as much information as possible.
```python
x=5
m=10000
theta = np.linspace(0,180,361)
mu = np.cos(theta*np.pi/180)
fig, ax = plt.subplots(figsize=(8,8))
x=10
s1,s2 = miepython.mie_S1_S2(m,x,mu)
sone = 2.5*abs(s1)
stwo = 2.5*abs(s2)
plt.plot(theta,sone,'b')
plt.plot(theta,stwo,'--r')
plt.annotate('x=%.1f '%x,xy=(theta[-1],sone[-1]),ha='right',va='bottom')
x=5
s1,s2 = miepython.mie_S1_S2(m,x,mu)
sone = 2.5*abs(s1) + 1
stwo = 2.5*abs(s2) + 1
plt.plot(theta,sone,'b')
plt.plot(theta,stwo,'--r')
plt.annotate('x=%.1f '%x,xy=(theta[-1],sone[-1]),ha='right',va='bottom')
x=3
s1,s2 = miepython.mie_S1_S2(m,x,mu)
sone = 2.5*abs(s1) + 2
stwo = 2.5*abs(s2) + 2
plt.plot(theta,sone,'b')
plt.plot(theta,stwo,'--r')
plt.annotate('x=%.1f '%x,xy=(theta[-1],sone[-1]),ha='right',va='bottom')
x=1
s1,s2 = miepython.mie_S1_S2(m,x,mu)
sone = 2.5*abs(s1) + 3
stwo = 2.5*abs(s2) + 3
plt.plot(theta,sone,'b')
plt.plot(theta,stwo,'--r')
plt.annotate('x=%.1f '%x,xy=(theta[-1],sone[-1]),ha='right',va='bottom')
x=0.5
s1,s2 = miepython.mie_S1_S2(m,x,mu)
sone = 2.5*abs(s1) + 4
stwo = 2.5*abs(s2) + 4
plt.plot(theta,sone,'b')
plt.plot(theta,stwo,'--r')
plt.annotate('x=%.1f '%x,xy=(theta[-1],sone[-1]),ha='right',va='bottom')
plt.xlim(0,180)
plt.ylim(0,5.5)
plt.xticks(range(0,181,30))
plt.yticks(np.arange(0,5.51,0.5))
plt.title('Figure 29 from van de Hulst, Non-Absorbing Spheres')
plt.xlabel('Angle (Degrees)')
ax.set_yticklabels(['0','1/2','0','1/2','0','1/2','0','1/2','0','1/2','5',' '])
plt.grid(True)
plt.show()
```
## Comparisons with Kerker, Angular Gain
Another interesting graph is figure 4.51 from [*The Scattering of Light* by Kerker](https://www.sciencedirect.com/book/9780124045507/the-scattering-of-light-and-other-electromagnetic-radiation).
The angular gain is
$$
G_1 = \frac{4}{x^2} |S_1(\theta)|^2
\qquad\mbox{and}\qquad
G_2 = \frac{4}{x^2} |S_2(\theta)|^2
$$
```python
## Kerker, Angular Gain
x=1
m=10000
theta = np.linspace(0,180,361)
mu = np.cos(theta*np.pi/180)
fig, ax = plt.subplots(figsize=(8,8))
s1,s2 = miepython.mie_S1_S2(m,x,mu)
G1 = 4*abs(s1)**2/x**2
G2 = 4*abs(s2)**2/x**2
plt.plot(theta,G1,'b')
plt.plot(theta,G2,'--r')
plt.annotate('$G_1$',xy=(50,0.36),color='blue',fontsize=14)
plt.annotate('$G_2$',xy=(135,0.46),color='red',fontsize=14)
plt.xlim(0,180)
plt.xticks(range(0,181,30))
plt.title('Figure 4.51 from Kerker, Non-Absorbing Spheres, x=1')
plt.xlabel('Angle (Degrees)')
plt.ylabel('Angular Gain')
plt.show()
```
```python
```
|
theory Op
imports Main
begin
section \<open>Keys, Versions, and Operations\<close>
text \<open>Our database is conceptually modeled as a map of keys to values. We don't demand anything of
our keys, other than that they exist and have equality.\<close>
typedecl key
text \<open>We're going to define our databases in terms of versions, arguments, and return value types
which are polymorphic. One option would be to have these as type parameters in... literally every
single function, but that's going to be exhausting, and it also means we can't use typeclasses,
because typeclasses can't return things with type variables. Another option is to define a version
explicitly as, say, a list of nats, but discussions with Galois engineers suggests that this is
counterproductive: we're forcing the solver to pull in a whole bunch of theorems that it doesn't
actually need, which makes automated proof search harder.
So... another option to try here might be to do this with *locales*, which... I honestly don't
understand even 10% of. I *think* they allow us to prove a bunch of properties about structures
involving type parameters without actually defining what those type parameters *are*, and... also
making those structures sort of... an implicitly available argument to every function we define? But
that leaves other questions, like... what if I have two arguments? Are locales meant to be more...
about universal structures? The Digraph library doesn't think so, but I don't understand half of
what it's doing. :(\<close>
text \<open>It'd be nice to define our versions, arguments, and return values as polymorphic type
parameters. However, owing to what I think is a limitation in Isabelle's typeclass system, we can't.
What we CAN do is fix our versions, arguments, and retvals as lists of naturals, naturals, and lists
of naturals, respectively. We can use this representation for lists, sets, counters, and registers
easily, by defining different types of graphs. Also, we won't have to carry these type parameters on
everything.\<close>
type_synonym "version" = "nat list"
type_synonym "writeArg" = "nat"
type_synonym "writeRet" = "nat list"
text \<open>Reads and writes are different types of operations. We're going to want to distinguish
them.\<close>
datatype opType = Read | Write
text \<open>Reads and writes have different types of arguments and return values. However, it's going to
be convenient to talk about and compare their arguments and return values without caring what type
of operation we performed. We define wrapper types for arguments and return values here.\<close>
datatype arg = WriteArg "writeArg" | ReadArg
datatype ret = WriteRet "writeRet" | ReadRet "version"
text \<open>An operation acts on the state of some key, taking a preversion of an object and, using an
argument, producing a postversion and a return value. In general, we don't know exactly what the
versions and return value are; we represent these as options.\<close>
class keyed =
fixes key :: "'a \<Rightarrow> key"
class op =
fixes op_type :: "'a \<Rightarrow> opType"
fixes pre_version :: "'a \<Rightarrow> version option"
fixes arg :: "'a \<Rightarrow> arg"
fixes post_version :: "'a \<Rightarrow> version option"
fixes ret :: "'a \<Rightarrow> ret option"
text \<open>We now define two types of operations. Abstract operations (beginning with a) have definite
versions and values. Observed operations may not know their versions and return values. Reads take
no argument and return their current version; writes may change their versions somehow.\<close>
datatype aop =
ARead "key" "version" |
AWrite "key" "version" "writeArg" "version" "writeRet"
datatype oop =
ORead "key" "version option" |
OWrite "key" "version option" "writeArg" "version option" "writeRet option"
text \<open>A few accessors for when we don't want to deal with optionals...\<close>
primrec apre_version :: "aop \<Rightarrow> version" where
"apre_version (ARead k v) = v" |
"apre_version (AWrite k v1 a v2 r) = v1"
primrec aret :: "aop \<Rightarrow> ret" where
"aret (ARead k v) = (ReadRet v)" |
"aret (AWrite k v1 a v2 r) = (WriteRet r)"
primrec apost_version :: "aop \<Rightarrow> version" where
"apost_version (ARead k v) = v" |
"apost_version (AWrite k v1 a v2 r) = v2"
definition aversions_in_op :: "aop \<Rightarrow> version set" where
"aversions_in_op op \<equiv> {apre_version op, apost_version op}"
text \<open>These accessors allow us to extract keys, versions, etc from all types of operations in
a uniform way.\<close>
instantiation aop :: keyed
begin
primrec key_aop :: "aop \<Rightarrow> key" where
"key_aop (ARead k v) = k" |
"key_aop (AWrite k v1 a v2 r) = k"
instance ..
end
instantiation aop :: op
begin
primrec op_type_aop :: "aop \<Rightarrow> opType" where
"op_type_aop (ARead k v) = Read" |
"op_type_aop (AWrite k v1 a v2 r) = Write"
primrec pre_version_aop :: "aop \<Rightarrow> version option" where
"pre_version_aop (ARead k v) = Some v" |
"pre_version_aop (AWrite k v1 a v2 r) = Some v1"
primrec arg_aop :: "aop \<Rightarrow> arg" where
"arg_aop (ARead k v) = ReadArg" |
"arg_aop (AWrite k v1 a v2 r) = WriteArg a"
primrec post_version_aop :: "aop \<Rightarrow> version option" where
"post_version_aop (ARead k v) = Some v" |
"post_version_aop (AWrite k v1 a v2 r) = Some v2"
primrec ret_aop :: "aop \<Rightarrow> ret option" where
"ret_aop (ARead k v) = Some (ReadRet v)" |
"ret_aop (AWrite k v1 a v2 r) = Some (WriteRet r)"
instance ..
end
text \<open>As a quick test of these accessors...\<close>
lemma "arg (ARead k v) = ReadArg"
by auto
lemma "pre_version (AWrite k v1 a v2 r) = Some v1"
by auto
lemma "post_version (AWrite k v1 a v2 r) = Some v2"
by auto
lemma "(key (ARead k v1)) = (key (AWrite k v2 a v3 r))"
by auto
text \<open>Moving on to accessors for observed operations...\<close>
instantiation oop :: keyed
begin
primrec key_oop :: "oop \<Rightarrow> key" where
"key_oop (ORead k v) = k" |
"key_oop (OWrite k v1 a v2 r) = k"
instance ..
end
instantiation oop :: op
begin
primrec op_type_oop :: "oop \<Rightarrow> opType" where
"op_type_oop (ORead k v) = Read" |
"op_type_oop (OWrite k v1 a v2 r) = Write"
primrec pre_version_oop :: "oop \<Rightarrow> version option" where
"pre_version_oop (ORead k v) = v" |
"pre_version_oop (OWrite k v1 a v2 r) = v1"
primrec arg_oop :: "oop \<Rightarrow> arg" where
"arg_oop (ORead k v) = ReadArg" |
"arg_oop (OWrite k v1 a v2 r) = WriteArg a"
primrec post_version_oop :: "oop \<Rightarrow> version option" where
"post_version_oop (ORead k v) = v" |
"post_version_oop (OWrite k v1 a v2 r) = v2"
primrec ret_oop :: "oop \<Rightarrow> ret option" where
"ret_oop (ORead k v) = (case v of Some v \<Rightarrow> Some (ReadRet v) | None \<Rightarrow> None)" |
"ret_oop (OWrite k v1 a v2 r) = (case r of Some r \<Rightarrow> Some (WriteRet r) | None \<Rightarrow> None)"
instance ..
end
text \<open>And as a quick check...\<close>
lemma "(post_version (ORead k1 (Some v))) =
(pre_version (OWrite k2 (Some v) a None None))"
by auto
text \<open>We're going to be asking a lot about "the set of all versions in <something>".\<close>
class all_versions =
fixes all_versions :: "'a \<Rightarrow> version set"
instantiation aop :: all_versions
begin
primrec all_versions_aop :: "aop \<Rightarrow> version set" where
"all_versions_aop (ARead k v) = {v}" |
"all_versions_aop (AWrite k v1 a v2 r) = {v1, v2}"
instance ..
end
instantiation oop :: all_versions
begin
primrec all_versions_oop :: "oop \<Rightarrow> version set" where
"all_versions_oop (ORead k v) = (case v of None \<Rightarrow> {} | (Some v) \<Rightarrow> {v})" |
"all_versions_oop (OWrite k v1 a v2 r) = (case v1 of
None \<Rightarrow> (case v2 of None \<Rightarrow> {} | (Some v2) \<Rightarrow> {v2}) |
(Some v1) \<Rightarrow> (case v2 of None \<Rightarrow> {v1} | (Some v2) \<Rightarrow> {v1, v2}))"
instance ..
end
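text \<open>A quick check (illustrative): a write with a known pre-version but an unknown post-version
contributes only its pre-version.\<close>
lemma "all_versions (OWrite k (Some v1) a None r) = {v1}"
by auto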
text \<open>... And all keys in something \<close>
class all_keys =
fixes all_keys :: "'a \<Rightarrow> key set"
text \<open>And similarly, we're going to want to talk about all operations in a transaction, version
graph, object, history, observation, etc...\<close>
class all_aops =
fixes all_aops :: "'a \<Rightarrow> aop set"
class all_oops =
fixes all_oops :: "'a \<Rightarrow> oop set"
text \<open>And if you have the set of all ops, you can filter that to the set of writes or reads.\<close>
definition all_owrites :: "'a::all_oops \<Rightarrow> oop set" where
"all_owrites a = {op. (op \<in> (all_oops a)) \<and> ((op_type op = Write))}"
definition all_oreads :: "'a::all_oops \<Rightarrow> oop set" where
"all_oreads a = {op. (op \<in> (all_oops a)) \<and> ((op_type op = Read))}"
definition all_awrites :: "'a::all_aops \<Rightarrow> aop set" where
"all_awrites a = {op. (op \<in> (all_aops a)) \<and> ((op_type op = Write))}"
definition all_areads :: "'a::all_aops \<Rightarrow> aop set" where
"all_areads a = {op. (op \<in> (all_aops a)) \<and> ((op_type op = Read))}"
text \<open>An observed operation is definite if its optional fields are known. Might want to break this
up later; it might be helpful to talk about postversion-definite, retval-definite, write-definite,
etc.\<close>
class definite =
fixes is_definite :: "'a \<Rightarrow> bool"
(* Huh, can't instantiate a typeclass over a polymorphic type?
instantiation "'a option" :: definite
begin
*)
primrec is_definite_option :: "'a option \<Rightarrow> bool" where
"is_definite_option None = False" |
"is_definite_option (Some x) = True"
instantiation oop :: definite
begin
primrec is_definite :: "oop \<Rightarrow> bool" where
"is_definite (ORead k v) = is_definite_option v" |
"is_definite (OWrite k v1 a v2 r) = (is_definite_option v1 \<and>
is_definite_option v2 \<and>
is_definite_option r)"
instance ..
end
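text \<open>Quick checks (illustrative): a read with a known version is definite, while a write
missing its post-version is not.\<close>
lemma "is_definite (ORead k (Some v))"
by auto
lemma "\<not> is_definite (OWrite k (Some v1) a None (Some r))"
by auto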
text \<open>We now define a notion of compatibility, which says whether an observed operation could
correspond to some abstract operation. The idea here is that the database executed the abstract
operation, but that we don't know exactly what happened, due to the client protocol, or perhaps
due to missing responses. We compare options to actual values, ensuring that either the option is
None (i.e. we don't know), or, if it's Some, that the values are equal.
I'd like to do this as a typeclass, but without multi-parameter type classes, we can't
write a generic function over a \<Rightarrow> b \<Rightarrow> bool. This seems like something I'm likely to mess up,
so instead, we write a family of compatibility functions named by type.\<close>
primrec is_compatible_option :: "'a option \<Rightarrow> 'a \<Rightarrow> bool" where
"is_compatible_option None y = True" |
"is_compatible_option (Some x) y = (x = y)"
text \<open>An observed operation is compatible with an abstract operation if their types, keys,
versions, arguments, and return values are all compatible.\<close>
definition is_compatible_op :: "oop \<Rightarrow> aop \<Rightarrow> bool" where
"is_compatible_op oop aop \<equiv>
(((op_type oop) = (op_type aop)) \<and>
((key oop) = (key aop)) \<and>
(is_compatible_option (pre_version oop) (apre_version aop)) \<and>
((arg oop) = (arg aop)) \<and>
(is_compatible_option (ret oop) (aret aop)) \<and>
(is_compatible_option (post_version oop) (apost_version aop)))"
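text \<open>Two quick checks (illustrative): an observed read with an unknown version is compatible
with any abstract read on the same key, but never with one on a different key.\<close>
lemma "is_compatible_op (ORead k None) (ARead k v)"
by (simp add: is_compatible_op_def)
lemma "k1 \<noteq> k2 \<Longrightarrow> \<not> is_compatible_op (ORead k1 None) (ARead k2 v)"
by (simp add: is_compatible_op_def)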
text \<open>Some basic lemmata around compatibility. These are... surprisingly expensive proofs for
sledgehammer to find...\<close>
lemma compatible_same_type: "is_compatible_op oop aop \<Longrightarrow> ((op_type oop) = (op_type aop))"
using is_compatible_op_def by blast
lemma compatible_same_key: "is_compatible_op oop aop \<Longrightarrow> ((key oop) = (key aop))"
using is_compatible_op_def by blast
lemma compatible_same_arg: "is_compatible_op oop aop \<Longrightarrow> ((arg oop) = (arg aop))"
using is_compatible_op_def by blast
lemma compatible_pre_version:
"is_compatible_op oop aop \<Longrightarrow> (((pre_version oop) = None) \<or>
((pre_version oop) = Some (apre_version aop)))"
by (metis is_compatible_op_def is_compatible_option.simps(2) not_Some_eq)
lemma compatible_post_version:
"is_compatible_op oop aop \<Longrightarrow> (((post_version oop) = None) \<or>
((post_version oop) = Some (apost_version aop)))"
by (metis is_compatible_op_def is_compatible_option.simps(2) not_Some_eq)
lemma compatible_ret:
"is_compatible_op oop aop \<Longrightarrow> (((ret oop) = None) \<or>
((ret oop) = Some (aret aop)))"
by (metis is_compatible_op_def is_compatible_option.simps(2) not_Some_eq)
lemma compatible_definite_same_pre_version:
"(is_compatible_op oop aop \<and> is_definite oop) \<Longrightarrow> ((pre_version oop) = (pre_version aop))"
by (smt aop.exhaust apre_version.simps(1) apre_version.simps(2) is_compatible_op_def is_compatible_option.simps(2) is_definite.simps(1) is_definite.simps(2) is_definite_option.simps(1) oop.exhaust option.exhaust pre_version_aop.simps(1) pre_version_aop.simps(2) pre_version_oop.simps(1) pre_version_oop.simps(2))
lemma compatible_definite_same_post_version:
"(is_compatible_op oop aop \<and> is_definite oop) \<Longrightarrow> ((post_version oop) = (post_version aop))"
by (smt aop.exhaust apost_version.simps(2) compatible_definite_same_pre_version is_compatible_op_def is_compatible_option.simps(2) is_definite.simps(2) is_definite_option.simps(1) oop.exhaust opType.distinct(1) op_type_aop.simps(1) op_type_aop.simps(2) op_type_oop.simps(1) op_type_oop.simps(2) option.exhaust post_version_aop.simps(1) post_version_aop.simps(2) post_version_oop.simps(1) post_version_oop.simps(2) pre_version_aop.simps(1) pre_version_oop.simps(1))
lemma compatible_definite_same_ret:
"(is_compatible_op oop aop \<and> is_definite oop) \<Longrightarrow> ((ret oop) = (ret aop))"
by (smt aop.exhaust aret.simps(1) aret.simps(2) is_compatible_op_def is_compatible_option.simps(2) is_definite.simps(1) is_definite.simps(2) is_definite_option.simps(1) not_None_eq oop.exhaust option.case(2) ret_aop.simps(1) ret_aop.simps(2) ret_oop.simps(1) ret_oop.simps(2))
text \<open>If two operations are compatible and the observed one is definite, they share exactly
the same values.\<close>
lemma definite_compatible_same:
"is_compatible_op oop aop \<and> is_definite oop \<Longrightarrow>
(((pre_version oop) = (pre_version aop)) \<and>
((post_version oop) = (post_version aop)) \<and>
((ret oop) = (ret aop)))"
by (simp add: compatible_definite_same_post_version compatible_definite_same_pre_version compatible_definite_same_ret)
end |
[STATEMENT]
lemma msubstltpos_nb: "tmbound0 t \<Longrightarrow> islin (Lt (CNP 0 a r)) \<Longrightarrow> bound0 (msubstltpos c t a r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>tmbound0 t; islin (Lt (CNP 0 a r))\<rbrakk> \<Longrightarrow> bound0 (msubstltpos c t a r)
[PROOF STEP]
by (simp add: msubstltpos_def) |
/-
Copyright (c) 2018 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
Adapted from the corresponding theory for complete lattices.
Theory of conditionally complete lattices.
A conditionally complete lattice is a lattice in which every non-empty bounded subset s
has a least upper bound and a greatest lower bound, denoted below by Sup s and Inf s.
Typical examples are real, nat, int with their usual orders.
The theory is very comparable to the theory of complete lattices, except that suitable
boundedness and non-emptiness assumptions have to be added to most statements.
We introduce two predicates bdd_above and bdd_below to express this boundedness, prove
their basic properties, and then go on to prove the most useful properties of Sup and Inf
in conditionally complete lattices.
To differentiate the statements between complete lattices and conditionally complete
lattices, we prefix Inf and Sup in the statements by c, giving cInf and cSup. For instance,
Inf_le is a statement in complete lattices ensuring Inf s ≤ x, while cInf_le is the same
statement in conditionally complete lattices with an additional assumption that s is
bounded below.
-/
import
order.lattice order.complete_lattice order.bounds
tactic.finish data.set.countable
set_option old_structure_cmd true
open preorder set lattice
universes u v w
variables {α : Type u} {β : Type v} {ι : Type w}
section preorder
variables [preorder α] [preorder β] {s t : set α} {a b : α}
/-Sets bounded above and bounded below.-/
def bdd_above (s : set α) := ∃x, ∀y∈s, y ≤ x
def bdd_below (s : set α) := ∃x, ∀y∈s, x ≤ y
/-Introduction rules for boundedness above and below.
Most of the time, it is more efficient to use ⟨w, P⟩ where P is a proof
that all elements of the set are bounded by w. However, these rules are sometimes handy.-/
lemma bdd_above.mk (a : α) (H : ∀y∈s, y≤a) : bdd_above s := ⟨a, H⟩
lemma bdd_below.mk (a : α) (H : ∀y∈s, a≤y) : bdd_below s := ⟨a, H⟩
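/-For instance (illustrative additions, not in the original file): the introduction rules
apply directly to any uniform bound.-/
example (h : ∀y∈s, y ≤ a) : bdd_above s := bdd_above.mk a h
example (h : ∀y∈s, a ≤ y) : bdd_below s := bdd_below.mk a h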
/-Empty sets and singletons are trivially bounded. For finite sets, we need
a notion of maximum and minimum, i.e., a lattice structure, see later on.-/
@[simp] lemma bdd_above_empty : ∀ [nonempty α], bdd_above (∅ : set α)
| ⟨x⟩ := ⟨x, by simp⟩
@[simp] lemma bdd_below_empty : ∀ [nonempty α], bdd_below (∅ : set α)
| ⟨x⟩ := ⟨x, by simp⟩
@[simp] lemma bdd_above_singleton : bdd_above ({a} : set α) :=
⟨a, by simp only [set.mem_singleton_iff, forall_eq]⟩
@[simp] lemma bdd_below_singleton : bdd_below ({a} : set α) :=
⟨a, by simp only [set.mem_singleton_iff, forall_eq]⟩
/-If a set is included in another one, boundedness of the second implies boundedness
of the first-/
lemma bdd_above_subset (st : s ⊆ t) : bdd_above t → bdd_above s
| ⟨w, hw⟩ := ⟨w, λ y ys, hw _ (st ys)⟩
lemma bdd_below_subset (st : s ⊆ t) : bdd_below t → bdd_below s
| ⟨w, hw⟩ := ⟨w, λ y ys, hw _ (st ys)⟩
/- Boundedness of intersections of sets, in different guises, deduced from the
monotonicity of boundedness.-/
lemma bdd_above_inter_left : bdd_above s → bdd_above (s ∩ t) :=
bdd_above_subset (set.inter_subset_left _ _)
lemma bdd_above_inter_right : bdd_above t → bdd_above (s ∩ t) :=
bdd_above_subset (set.inter_subset_right _ _)
lemma bdd_below_inter_left : bdd_below s → bdd_below (s ∩ t) :=
bdd_below_subset (set.inter_subset_left _ _)
lemma bdd_below_inter_right : bdd_below t → bdd_below (s ∩ t) :=
bdd_below_subset (set.inter_subset_right _ _)
/--The image under a monotone function of a set which is bounded above is bounded above-/
lemma bdd_above_of_bdd_above_of_monotone {f : α → β} (hf : monotone f) : bdd_above s → bdd_above (f '' s)
| ⟨C, hC⟩ := ⟨f C, by rintro y ⟨x, x_bnd, rfl⟩; exact hf (hC x x_bnd)⟩
/--The image under a monotone function of a set which is bounded below is bounded below-/
lemma bdd_below_of_bdd_below_of_monotone {f : α → β} (hf : monotone f) : bdd_below s → bdd_below (f '' s)
| ⟨C, hC⟩ := ⟨f C, by rintro y ⟨x, x_bnd, rfl⟩; exact hf (hC x x_bnd)⟩
end preorder
/--When there is a global maximum, every set is bounded above.-/
@[simp] lemma bdd_above_top [order_top α] (s : set α) : bdd_above s :=
⟨⊤, by intros; apply order_top.le_top⟩
/--When there is a global minimum, every set is bounded below.-/
@[simp] lemma bdd_below_bot [order_bot α] (s : set α): bdd_below s :=
⟨⊥, by intros; apply order_bot.bot_le⟩
/-When there is a max (i.e., in the class semilattice_sup), then the union of
two bounded sets is bounded, by the maximum of the bounds for the two sets.
With this, we deduce that finite sets are bounded by induction, and that a finite
union of bounded sets is bounded.-/
section semilattice_sup
variables [semilattice_sup α] {s t : set α} {a b : α}
/--The union of two sets is bounded above if and only if each of the sets is.-/
@[simp] lemma bdd_above_union : bdd_above (s ∪ t) ↔ bdd_above s ∧ bdd_above t :=
⟨show bdd_above (s ∪ t) → (bdd_above s ∧ bdd_above t), from
assume : bdd_above (s ∪ t),
have S : bdd_above s, by apply bdd_above_subset _ ‹bdd_above (s ∪ t)›; simp only [set.subset_union_left],
have T : bdd_above t, by apply bdd_above_subset _ ‹bdd_above (s ∪ t)›; simp only [set.subset_union_right],
and.intro S T,
show (bdd_above s ∧ bdd_above t) → bdd_above (s ∪ t), from
assume H : bdd_above s ∧ bdd_above t,
let ⟨⟨ws, hs⟩, ⟨wt, ht⟩⟩ := H in
/-hs : ∀ (y : α), y ∈ s → y ≤ ws    ht : ∀ (y : α), y ∈ t → y ≤ wt-/
have Bs : ∀b∈s, b ≤ ws ⊔ wt,
by intros; apply le_trans (hs b ‹b ∈ s›) _; simp only [lattice.le_sup_left],
have Bt : ∀b∈t, b ≤ ws ⊔ wt,
by intros; apply le_trans (ht b ‹b ∈ t›) _; simp only [lattice.le_sup_right],
show bdd_above (s ∪ t),
begin
apply bdd_above.mk (ws ⊔ wt),
intros b H_1,
cases H_1,
apply Bs _ ‹b ∈ s›,
apply Bt _ ‹b ∈ t›,
end⟩
/--Adding a point to a set preserves its boundedness above.-/
@[simp] lemma bdd_above_insert : bdd_above (insert a s) ↔ bdd_above s :=
⟨bdd_above_subset (by simp only [set.subset_insert]),
λ h, by rw [insert_eq, bdd_above_union]; exact ⟨bdd_above_singleton, h⟩⟩
/--A finite set is bounded above.-/
lemma bdd_above_finite [nonempty α] (hs : finite s) : bdd_above s :=
finite.induction_on hs bdd_above_empty $ λ a s _ _, bdd_above_insert.2
/--A finite union of sets which are all bounded above is still bounded above.-/
lemma bdd_above_finite_union [nonempty α] {β : Type v} {I : set β} {S : β → set α} (H : finite I) :
(bdd_above (⋃i∈I, S i)) ↔ (∀i ∈ I, bdd_above (S i)) :=
⟨λ (bdd : bdd_above (⋃i∈I, S i)) i (hi : i ∈ I),
bdd_above_subset (subset_bUnion_of_mem hi) bdd,
show (∀i ∈ I, bdd_above (S i)) → (bdd_above (⋃i∈I, S i)), from
finite.induction_on H
(λ _, by rw bUnion_empty; exact bdd_above_empty)
(λ x s hn hf IH h, by simp only [
set.mem_insert_iff, or_imp_distrib, forall_and_distrib, forall_eq] at h;
rw [set.bUnion_insert, bdd_above_union]; exact ⟨h.1, IH h.2⟩)⟩
end semilattice_sup
/-When there is a min (i.e., in the class semilattice_inf), then the union of
two sets which are bounded from below is bounded from below, by the minimum of
the bounds for the two sets. With this, we deduce that finite sets are
bounded below by induction, and that a finite union of sets which are bounded below
is still bounded below.-/
section semilattice_inf
variables [semilattice_inf α] {s t : set α} {a b : α}
/--The union of two sets is bounded below if and only if each of the sets is.-/
@[simp] lemma bdd_below_union : bdd_below (s ∪ t) ↔ bdd_below s ∧ bdd_below t :=
⟨show bdd_below (s ∪ t) → (bdd_below s ∧ bdd_below t), from
assume : bdd_below (s ∪ t),
have S : bdd_below s, by apply bdd_below_subset _ ‹bdd_below (s ∪ t)›; simp only [set.subset_union_left],
have T : bdd_below t, by apply bdd_below_subset _ ‹bdd_below (s ∪ t)›; simp only [set.subset_union_right],
and.intro S T,
show (bdd_below s ∧ bdd_below t) → bdd_below (s ∪ t), from
assume H : bdd_below s ∧ bdd_below t,
let ⟨⟨ws, hs⟩, ⟨wt, ht⟩⟩ := H in
/-hs : ∀ (y : α), y ∈ s → ws ≤ y    ht : ∀ (y : α), y ∈ t → wt ≤ y-/
have Bs : ∀b∈s, ws ⊓ wt ≤ b,
by intros; apply le_trans _ (hs b ‹b ∈ s›); simp only [lattice.inf_le_left],
have Bt : ∀b∈t, ws ⊓ wt ≤ b,
by intros; apply le_trans _ (ht b ‹b ∈ t›); simp only [lattice.inf_le_right],
show bdd_below (s ∪ t),
begin
apply bdd_below.mk (ws ⊓ wt),
intros b H_1,
cases H_1,
apply Bs _ ‹b ∈ s›,
apply Bt _ ‹b ∈ t›,
end⟩
/--Adding a point to a set preserves its boundedness below.-/
@[simp] lemma bdd_below_insert : bdd_below (insert a s) ↔ bdd_below s :=
⟨show bdd_below (insert a s) → bdd_below s, from bdd_below_subset (by simp only [set.subset_insert]),
show bdd_below s → bdd_below (insert a s),
by rw[insert_eq]; simp only [bdd_below_singleton, bdd_below_union, and_self, forall_true_iff] {contextual := tt}⟩
/--A finite set is bounded below.-/
lemma bdd_below_finite [nonempty α] (hs : finite s) : bdd_below s :=
finite.induction_on hs bdd_below_empty $ λ a s _ _, bdd_below_insert.2
/--A finite union of sets which are all bounded below is still bounded below.-/
lemma bdd_below_finite_union [nonempty α] {β : Type v} {I : set β} {S : β → set α} (H : finite I) :
(bdd_below (⋃i∈I, S i)) ↔ (∀i ∈ I, bdd_below (S i)) :=
⟨λ (bdd : bdd_below (⋃i∈I, S i)) i (hi : i ∈ I),
bdd_below_subset (subset_bUnion_of_mem hi) bdd,
show (∀i ∈ I, bdd_below (S i)) → (bdd_below (⋃i∈I, S i)), from
finite.induction_on H
(λ _, by rw bUnion_empty; exact bdd_below_empty)
(λ x s hn hf IH h, by simp only [
set.mem_insert_iff, or_imp_distrib, forall_and_distrib, forall_eq] at h;
rw [set.bUnion_insert, bdd_below_union]; exact ⟨h.1, IH h.2⟩)⟩
end semilattice_inf
namespace lattice
/-- A conditionally complete lattice is a lattice in which
every nonempty subset which is bounded above has a supremum, and
every nonempty subset which is bounded below has an infimum.
Typical examples are real numbers or natural numbers.
To differentiate the statements from the corresponding statements in (unconditional)
complete lattices, we prefix Inf and Sup by a c everywhere. The same statements should
hold in both worlds, sometimes with additional assumptions of non-emptiness or
boundedness.-/
class conditionally_complete_lattice (α : Type u) extends lattice α, has_Sup α, has_Inf α :=
(le_cSup : ∀s a, bdd_above s → a ∈ s → a ≤ Sup s)
(cSup_le : ∀s a, s ≠ ∅ → (∀b∈s, b ≤ a) → Sup s ≤ a)
(cInf_le : ∀s a, bdd_below s → a ∈ s → Inf s ≤ a)
(le_cInf : ∀s a, s ≠ ∅ → (∀b∈s, a ≤ b) → a ≤ Inf s)
class conditionally_complete_linear_order (α : Type u)
extends conditionally_complete_lattice α, decidable_linear_order α
class conditionally_complete_linear_order_bot (α : Type u)
extends conditionally_complete_lattice α, decidable_linear_order α, order_bot α :=
(cSup_empty : Sup ∅ = ⊥)
/- A complete lattice is a conditionally complete lattice, as there are no restrictions
on the properties of Inf and Sup in a complete lattice.-/
instance conditionally_complete_lattice_of_complete_lattice [complete_lattice α]:
conditionally_complete_lattice α :=
{ le_cSup := by intros; apply le_Sup; assumption,
cSup_le := by intros; apply Sup_le; assumption,
cInf_le := by intros; apply Inf_le; assumption,
le_cInf := by intros; apply le_Inf; assumption,
..‹complete_lattice α›}
instance conditionally_complete_linear_order_of_complete_linear_order [complete_linear_order α]:
conditionally_complete_linear_order α :=
{ ..lattice.conditionally_complete_lattice_of_complete_lattice, .. ‹complete_linear_order α› }
section conditionally_complete_lattice
variables [conditionally_complete_lattice α] {s t : set α} {a b : α}
theorem le_cSup (h₁ : bdd_above s) (h₂ : a ∈ s) : a ≤ Sup s :=
conditionally_complete_lattice.le_cSup s a h₁ h₂
theorem cSup_le (h₁ : s ≠ ∅) (h₂ : ∀b∈s, b ≤ a) : Sup s ≤ a :=
conditionally_complete_lattice.cSup_le s a h₁ h₂
theorem cInf_le (h₁ : bdd_below s) (h₂ : a ∈ s) : Inf s ≤ a :=
conditionally_complete_lattice.cInf_le s a h₁ h₂
theorem le_cInf (h₁ : s ≠ ∅) (h₂ : ∀b∈s, a ≤ b) : a ≤ Inf s :=
conditionally_complete_lattice.le_cInf s a h₁ h₂
theorem le_cSup_of_le (_ : bdd_above s) (hb : b ∈ s) (h : a ≤ b) : a ≤ Sup s :=
le_trans h (le_cSup ‹bdd_above s› hb)
theorem cInf_le_of_le (_ : bdd_below s) (hb : b ∈ s) (h : b ≤ a) : Inf s ≤ a :=
le_trans (cInf_le ‹bdd_below s› hb) h
theorem cSup_le_cSup (_ : bdd_above t) (_ : s ≠ ∅) (h : s ⊆ t) : Sup s ≤ Sup t :=
cSup_le ‹s ≠ ∅› (assume (a) (ha : a ∈ s), le_cSup ‹bdd_above t› (h ha))
theorem cInf_le_cInf (_ : bdd_below t) (_ :s ≠ ∅) (h : s ⊆ t) : Inf t ≤ Inf s :=
le_cInf ‹s ≠ ∅› (assume (a) (ha : a ∈ s), cInf_le ‹bdd_below t› (h ha))
theorem cSup_le_iff (_ : bdd_above s) (_ : s ≠ ∅) : Sup s ≤ a ↔ (∀b ∈ s, b ≤ a) :=
⟨assume (_ : Sup s ≤ a) (b) (_ : b ∈ s),
le_trans (le_cSup ‹bdd_above s› ‹b ∈ s›) ‹Sup s ≤ a›,
cSup_le ‹s ≠ ∅›⟩
theorem le_cInf_iff (_ : bdd_below s) (_ : s ≠ ∅) : a ≤ Inf s ↔ (∀b ∈ s, a ≤ b) :=
⟨assume (_ : a ≤ Inf s) (b) (_ : b ∈ s),
le_trans ‹a ≤ Inf s› (cInf_le ‹bdd_below s› ‹b ∈ s›),
le_cInf ‹s ≠ ∅›⟩
lemma cSup_upper_bounds_eq_cInf {s : set α} (h : bdd_below s) (hs : s ≠ ∅) :
Sup {a | ∀x∈s, a ≤ x} = Inf s :=
let ⟨b, hb⟩ := h, ⟨a, ha⟩ := ne_empty_iff_exists_mem.1 hs in
le_antisymm
(cSup_le (ne_empty_iff_exists_mem.2 ⟨b, hb⟩) $ assume a ha, le_cInf hs ha)
(le_cSup ⟨a, assume y hy, hy a ha⟩ $ assume x hx, cInf_le h hx)
lemma cInf_lower_bounds_eq_cSup {s : set α} (h : bdd_above s) (hs : s ≠ ∅) :
Inf {a | ∀x∈s, x ≤ a} = Sup s :=
let ⟨b, hb⟩ := h, ⟨a, ha⟩ := ne_empty_iff_exists_mem.1 hs in
le_antisymm
(cInf_le ⟨a, assume y hy, hy a ha⟩ $ assume x hx, le_cSup h hx)
(le_cInf (ne_empty_iff_exists_mem.2 ⟨b, hb⟩) $ assume a ha, cSup_le hs ha)
/--Introduction rule to prove that b is the supremum of s: it suffices to check that b
is larger than all elements of s, and that this is not the case for any w < b.-/
theorem cSup_intro (_ : s ≠ ∅) (_ : ∀a∈s, a ≤ b) (H : ∀w, w < b → (∃a∈s, w < a)) : Sup s = b :=
have bdd_above s := ⟨b, by assumption⟩,
have (Sup s < b) ∨ (Sup s = b) := lt_or_eq_of_le (cSup_le ‹s ≠ ∅› ‹∀a∈s, a ≤ b›),
have ¬(Sup s < b) :=
assume: Sup s < b,
let ⟨a, _, _⟩ := (H (Sup s) ‹Sup s < b›) in /- a ∈ s, Sup s < a-/
have Sup s < Sup s := lt_of_lt_of_le ‹Sup s < a› (le_cSup ‹bdd_above s› ‹a ∈ s›),
show false, by finish [lt_irrefl (Sup s)],
show Sup s = b, by finish
/--Introduction rule to prove that b is the infimum of s: it suffices to check that b
is smaller than all elements of s, and that this is not the case for any w > b.-/
theorem cInf_intro (_ : s ≠ ∅) (_ : ∀a∈s, b ≤ a) (H : ∀w, b < w → (∃a∈s, a < w)) : Inf s = b :=
have bdd_below s := ⟨b, by assumption⟩,
have (b < Inf s) ∨ (b = Inf s) := lt_or_eq_of_le (le_cInf ‹s ≠ ∅› ‹∀a∈s, b ≤ a›),
have ¬(b < Inf s) :=
assume: b < Inf s,
let ⟨a, _, _⟩ := (H (Inf s) ‹b < Inf s›) in /- a ∈ s, a < Inf s-/
have Inf s < Inf s := lt_of_le_of_lt (cInf_le ‹bdd_below s› ‹a ∈ s›) ‹a < Inf s› ,
show false, by finish [lt_irrefl (Inf s)],
show Inf s = b, by finish
/--When an element a of a set s is larger than all elements of the set, it is Sup s-/
theorem cSup_of_mem_of_le (_ : a ∈ s) (_ : ∀w∈s, w ≤ a) : Sup s = a :=
have bdd_above s := ⟨a, by assumption⟩,
have s ≠ ∅ := ne_empty_of_mem ‹a ∈ s›,
have A : a ≤ Sup s := le_cSup ‹bdd_above s› ‹a ∈ s›,
have B : Sup s ≤ a := cSup_le ‹s ≠ ∅› ‹∀w∈s, w ≤ a›,
le_antisymm B A
/--When an element a of a set s is smaller than all elements of the set, it is Inf s-/
theorem cInf_of_mem_of_le (_ : a ∈ s) (_ : ∀w∈s, a ≤ w) : Inf s = a :=
have bdd_below s := ⟨a, by assumption⟩,
have s ≠ ∅ := ne_empty_of_mem ‹a ∈ s›,
have A : Inf s ≤ a := cInf_le ‹bdd_below s› ‹a ∈ s›,
have B : a ≤ Inf s := le_cInf ‹s ≠ ∅› ‹∀w∈s, a ≤ w›,
le_antisymm A B
/--b < Sup s when there is an element a in s with b < a, when s is bounded above.
This is essentially an iff, except that the assumptions for the two implications are
slightly different (one needs boundedness above for one direction, nonemptiness and linear
order for the other one), so we formulate separately the two implications, contrary to
the complete_lattice case.-/
lemma lt_cSup_of_lt (_ : bdd_above s) (_ : a ∈ s) (_ : b < a) : b < Sup s :=
lt_of_lt_of_le ‹b < a› (le_cSup ‹bdd_above s› ‹a ∈ s›)
/--Inf s < b when there is an element a in s with a < b, when s is bounded below.
This is essentially an iff, except that the assumptions for the two implications are
slightly different (one needs boundedness below for one direction, nonemptiness and linear
order for the other one), so we formulate separately the two implications, contrary to
the complete_lattice case.-/
lemma cInf_lt_of_lt (_ : bdd_below s) (_ : a ∈ s) (_ : a < b) : Inf s < b :=
lt_of_le_of_lt (cInf_le ‹bdd_below s› ‹a ∈ s›) ‹a < b›
/--The supremum of a singleton is the element of the singleton-/
@[simp] theorem cSup_singleton (a : α) : Sup {a} = a :=
have A : a ≤ Sup {a} :=
by apply le_cSup _ _; simp only [set.mem_singleton,bdd_above_singleton],
have B : Sup {a} ≤ a :=
by apply cSup_le _ _; simp only [set.mem_singleton_iff, forall_eq,ne.def, not_false_iff, set.singleton_ne_empty],
le_antisymm B A
/--The infimum of a singleton is the element of the singleton-/
@[simp] theorem cInf_singleton (a : α) : Inf {a} = a :=
have A : Inf {a} ≤ a :=
by apply cInf_le _ _; simp only [set.mem_singleton,bdd_below_singleton],
have B : a ≤ Inf {a} :=
by apply le_cInf _ _; simp only [set.mem_singleton_iff, forall_eq,ne.def, not_false_iff, set.singleton_ne_empty],
le_antisymm A B
/--If a set is bounded below and above, and nonempty, its infimum is less than or equal to
its supremum.-/
theorem cInf_le_cSup (_ : bdd_below s) (_ : bdd_above s) (_ : s ≠ ∅) : Inf s ≤ Sup s :=
let ⟨w, hw⟩ := exists_mem_of_ne_empty ‹s ≠ ∅› in /-hw : w ∈ s-/
have Inf s ≤ w := cInf_le ‹bdd_below s› ‹w ∈ s›,
have w ≤ Sup s := le_cSup ‹bdd_above s› ‹w ∈ s›,
le_trans ‹Inf s ≤ w› ‹w ≤ Sup s›
/--The sup of a union of sets is the max of the suprema of each subset, under the assumptions
that all sets are bounded above and nonempty.-/
theorem cSup_union (_ : bdd_above s) (_ : s ≠ ∅) (_ : bdd_above t) (_ : t ≠ ∅) :
Sup (s ∪ t) = Sup s ⊔ Sup t :=
have A : Sup (s ∪ t) ≤ Sup s ⊔ Sup t :=
have s ∪ t ≠ ∅ := by simp only [not_and, set.union_empty_iff, ne.def] at *; finish,
have F : ∀b∈ s∪t, b ≤ Sup s ⊔ Sup t :=
begin
intros,
cases H,
apply le_trans (le_cSup ‹bdd_above s› ‹b ∈ s›) _, simp only [lattice.le_sup_left],
apply le_trans (le_cSup ‹bdd_above t› ‹b ∈ t›) _, simp only [lattice.le_sup_right]
end,
cSup_le this F,
have B : Sup s ⊔ Sup t ≤ Sup (s ∪ t) :=
have Sup s ≤ Sup (s ∪ t) := by apply cSup_le_cSup _ ‹s ≠ ∅›; simp only [bdd_above_union,set.subset_union_left]; finish,
have Sup t ≤ Sup (s ∪ t) := by apply cSup_le_cSup _ ‹t ≠ ∅›; simp only [bdd_above_union,set.subset_union_right]; finish,
by simp only [lattice.sup_le_iff]; split; assumption; assumption,
le_antisymm A B
/--The inf of a union of sets is the min of the infima of each subset, under the assumptions
that all sets are bounded below and nonempty.-/
theorem cInf_union (_ : bdd_below s) (_ : s ≠ ∅) (_ : bdd_below t) (_ : t ≠ ∅) :
Inf (s ∪ t) = Inf s ⊓ Inf t :=
have A : Inf s ⊓ Inf t ≤ Inf (s ∪ t) :=
have s ∪ t ≠ ∅ := by simp only [not_and, set.union_empty_iff, ne.def] at *; finish,
have F : ∀b∈ s∪t, Inf s ⊓ Inf t ≤ b :=
begin
intros,
cases H,
apply le_trans _ (cInf_le ‹bdd_below s› ‹b ∈ s›), simp only [lattice.inf_le_left],
apply le_trans _ (cInf_le ‹bdd_below t› ‹b ∈ t›), simp only [lattice.inf_le_right]
end,
le_cInf this F,
have B : Inf (s ∪ t) ≤ Inf s ⊓ Inf t :=
have Inf (s ∪ t) ≤ Inf s := by apply cInf_le_cInf _ ‹s ≠ ∅›; simp only [bdd_below_union,set.subset_union_left]; finish,
have Inf (s ∪ t) ≤ Inf t := by apply cInf_le_cInf _ ‹t ≠ ∅›; simp only [bdd_below_union,set.subset_union_right]; finish,
by simp only [lattice.le_inf_iff]; split; assumption; assumption,
le_antisymm B A
/--The supremum of an intersection of sets is bounded above by the minimum of the suprema of each
set, if all sets are bounded above and nonempty.-/
theorem cSup_inter_le (_ : bdd_above s) (_ : bdd_above t) (_ : s ∩ t ≠ ∅) :
Sup (s ∩ t) ≤ Sup s ⊓ Sup t :=
begin
apply cSup_le ‹s ∩ t ≠ ∅› _, simp only [lattice.le_inf_iff, and_imp, set.mem_inter_eq], intros b _ _, split,
apply le_cSup ‹bdd_above s› ‹b ∈ s›,
apply le_cSup ‹bdd_above t› ‹b ∈ t›
end
/--The infimum of an intersection of sets is bounded below by the maximum of the
infima of each set, if all sets are bounded below and nonempty.-/
theorem le_cInf_inter (_ : bdd_below s) (_ : bdd_below t) (_ : s ∩ t ≠ ∅) :
Inf s ⊔ Inf t ≤ Inf (s ∩ t) :=
begin
apply le_cInf ‹s ∩ t ≠ ∅› _, simp only [and_imp, set.mem_inter_eq, lattice.sup_le_iff], intros b _ _, split,
apply cInf_le ‹bdd_below s› ‹b ∈ s›,
apply cInf_le ‹bdd_below t› ‹b ∈ t›
end
/-- The supremum of insert a s is the maximum of a and the supremum of s, if s is
nonempty and bounded above.-/
theorem cSup_insert (_ : bdd_above s) (_ : s ≠ ∅) : Sup (insert a s) = a ⊔ Sup s :=
calc Sup (insert a s)
= Sup ({a} ∪ s) : by rw [insert_eq]
... = Sup {a} ⊔ Sup s : by apply cSup_union _ _ ‹bdd_above s› ‹s ≠ ∅›; simp only [ne.def, not_false_iff, set.singleton_ne_empty,bdd_above_singleton]
... = a ⊔ Sup s : by simp only [eq_self_iff_true, lattice.cSup_singleton]
/-- The infimum of insert a s is the minimum of a and the infimum of s, if s is
nonempty and bounded below.-/
theorem cInf_insert (_ : bdd_below s) (_ : s ≠ ∅) : Inf (insert a s) = a ⊓ Inf s :=
calc Inf (insert a s)
= Inf ({a} ∪ s) : by rw [insert_eq]
... = Inf {a} ⊓ Inf s : by apply cInf_union _ _ ‹bdd_below s› ‹s ≠ ∅›; simp only [ne.def, not_false_iff, set.singleton_ne_empty,bdd_below_singleton]
... = a ⊓ Inf s : by simp only [eq_self_iff_true, lattice.cInf_singleton]
@[simp] lemma cInf_interval [conditionally_complete_lattice α] : Inf {b | a ≤ b} = a :=
cInf_of_mem_of_le (by simp only [set.mem_set_of_eq]) (λw Hw, by simp only [set.mem_set_of_eq] at Hw; apply Hw)
@[simp] lemma cSup_interval [conditionally_complete_lattice α] : Sup {b | b ≤ a} = a :=
cSup_of_mem_of_le (by simp only [set.mem_set_of_eq]) (λw Hw, by simp only [set.mem_set_of_eq] at Hw; apply Hw)
/--The indexed suprema of two functions are comparable if the functions are pointwise comparable-/
lemma csupr_le_csupr {f g : β → α} (B : bdd_above (range g)) (H : ∀x, f x ≤ g x) : supr f ≤ supr g :=
begin
classical, by_cases nonempty β,
{ have Rf : range f ≠ ∅, {simpa},
apply cSup_le Rf,
rintros y ⟨x, rfl⟩,
have : g x ∈ range g := ⟨x, rfl⟩,
exact le_cSup_of_le B this (H x) },
{ have Rf : range f = ∅, {simpa},
have Rg : range g = ∅, {simpa},
unfold supr, rw [Rf, Rg] }
end
/--The indexed supremum of a function is bounded above by a uniform bound-/
lemma csupr_le [ne : nonempty β] {f : β → α} {c : α} (H : ∀x, f x ≤ c) : supr f ≤ c :=
cSup_le (by simp [not_not_intro ne]) (by rwa forall_range_iff)
/--The indexed supremum of a function is bounded below by the value taken at one point-/
lemma le_csupr {f : β → α} (H : bdd_above (range f)) {c : β} : f c ≤ supr f :=
le_cSup H (mem_range_self _)
/--The indexed infima of two functions are comparable if the functions are pointwise comparable-/
lemma cinfi_le_cinfi {f g : β → α} (B : bdd_below (range f)) (H : ∀x, f x ≤ g x) : infi f ≤ infi g :=
begin
classical, by_cases nonempty β,
{ have Rg : range g ≠ ∅, {simpa},
apply le_cInf Rg,
rintros y ⟨x, rfl⟩,
have : f x ∈ range f := ⟨x, rfl⟩,
exact cInf_le_of_le B this (H x) },
{ have Rf : range f = ∅, {simpa},
have Rg : range g = ∅, {simpa},
unfold infi, rw [Rf, Rg] }
end
/--The indexed infimum of a function is bounded below by a uniform lower bound-/
lemma le_cinfi [ne : nonempty β] {f : β → α} {c : α} (H : ∀x, c ≤ f x) : c ≤ infi f :=
le_cInf (by simp [not_not_intro ne]) (by rwa forall_range_iff)
/--The indexed infimum of a function is bounded above by the value taken at one point-/
lemma cinfi_le {f : β → α} (H : bdd_below (range f)) {c : β} : infi f ≤ f c :=
cInf_le H (mem_range_self _)
lemma is_lub_cSup {s : set α} (ne : s ≠ ∅) (H : bdd_above s) : is_lub s (Sup s) :=
⟨assume x, le_cSup H, assume x, cSup_le ne⟩
lemma is_glb_cInf {s : set α} (ne : s ≠ ∅) (H : bdd_below s) : is_glb s (Inf s) :=
⟨assume x, cInf_le H, assume x, le_cInf ne⟩
@[simp] theorem cinfi_const [nonempty ι] {a : α} : (⨅ b:ι, a) = a :=
begin
rcases exists_mem_of_nonempty ι with ⟨x, _⟩,
refine le_antisymm (@cinfi_le _ _ _ _ _ x) (le_cinfi (λi, _root_.le_refl _)),
rw range_const,
exact bdd_below_singleton
end
@[simp] theorem csupr_const [nonempty ι] {a : α} : (⨆ b:ι, a) = a :=
begin
rcases exists_mem_of_nonempty ι with ⟨x, _⟩,
refine le_antisymm (csupr_le (λi, _root_.le_refl _)) (@le_csupr _ _ _ (λ b:ι, a) _ x),
rw range_const,
exact bdd_above_singleton
end
end conditionally_complete_lattice
section conditionally_complete_linear_order
variables [conditionally_complete_linear_order α] {s t : set α} {a b : α}
/--When b < Sup s, there is an element a in s with b < a, if s is nonempty and the order is
a linear order.-/
lemma exists_lt_of_lt_cSup (_ : s ≠ ∅) (_ : b < Sup s) : ∃a∈s, b < a :=
begin
classical, by_contra h,
have : Sup s ≤ b :=
by apply cSup_le ‹s ≠ ∅› _; finish,
apply lt_irrefl b (lt_of_lt_of_le ‹b < Sup s› ‹Sup s ≤ b›)
end
/--When Inf s < b, there is an element a in s with a < b, if s is nonempty and the order is
a linear order.-/
lemma exists_lt_of_cInf_lt (_ : s ≠ ∅) (_ : Inf s < b) : ∃a∈s, a < b :=
begin
classical, by_contra h,
have : b ≤ Inf s :=
by apply le_cInf ‹s ≠ ∅› _; finish,
apply lt_irrefl b (lt_of_le_of_lt ‹b ≤ Inf s› ‹Inf s < b›)
end
/--Introduction rule to prove that b is the supremum of s: it suffices to check that
1) b is an upper bound
2) every other upper bound b' satisfies b ≤ b'.-/
theorem cSup_intro' (_ : s ≠ ∅)
(h_is_ub : ∀ a ∈ s, a ≤ b) (h_b_le_ub : ∀ub, (∀ a ∈ s, a ≤ ub) → (b ≤ ub)) : Sup s = b :=
le_antisymm
(show Sup s ≤ b, from cSup_le ‹s ≠ ∅› h_is_ub)
(show b ≤ Sup s, from h_b_le_ub _ $ assume a, le_cSup ⟨b, h_is_ub⟩)
end conditionally_complete_linear_order
section conditionally_complete_linear_order_bot
variables [conditionally_complete_linear_order_bot α]
lemma cSup_empty [conditionally_complete_linear_order_bot α] : (Sup ∅ : α) = ⊥ :=
conditionally_complete_linear_order_bot.cSup_empty α
end conditionally_complete_linear_order_bot
section
local attribute [instance] classical.prop_decidable
noncomputable instance : has_Inf ℕ :=
⟨λs, if h : ∃n, n ∈ s then @nat.find (λn, n ∈ s) _ h else 0⟩
noncomputable instance : has_Sup ℕ :=
⟨λs, if h : ∃n, ∀a∈s, a ≤ n then @nat.find (λn, ∀a∈s, a ≤ n) _ h else 0⟩
lemma Inf_nat_def {s : set ℕ} (h : ∃n, n ∈ s) : Inf s = @nat.find (λn, n ∈ s) _ h :=
dif_pos _
lemma Sup_nat_def {s : set ℕ} (h : ∃n, ∀a∈s, a ≤ n) :
Sup s = @nat.find (λn, ∀a∈s, a ≤ n) _ h :=
dif_pos _
/-- This instance is necessary, otherwise the lattice operations would be derived via
conditionally_complete_linear_order_bot and marked as noncomputable. -/
instance : lattice ℕ := infer_instance
noncomputable instance : conditionally_complete_linear_order_bot ℕ :=
{ Sup := Sup, Inf := Inf,
le_cSup := assume s a hb ha, by rw [Sup_nat_def hb]; revert a ha; exact @nat.find_spec _ _ hb,
cSup_le := assume s a hs ha, by rw [Sup_nat_def ⟨a, ha⟩]; exact nat.find_min' _ ha,
le_cInf := assume s a hs hb,
by rw [Inf_nat_def (ne_empty_iff_exists_mem.1 hs)]; exact hb _ (@nat.find_spec (λn, n ∈ s) _ _),
cInf_le := assume s a hb ha, by rw [Inf_nat_def ⟨a, ha⟩]; exact nat.find_min' _ ha,
cSup_empty :=
begin
simp only [Sup_nat_def, set.mem_empty_eq, forall_const, forall_prop_of_false, not_false_iff, exists_const],
apply bot_unique (nat.find_min' _ _),
trivial
end,
.. (infer_instance : order_bot ℕ), .. (infer_instance : lattice ℕ),
.. (infer_instance : decidable_linear_order ℕ) }
end
end lattice /-end of namespace lattice-/
namespace with_top
open lattice
local attribute [instance] classical.prop_decidable
variables [conditionally_complete_linear_order_bot α]
lemma has_lub (s : set (with_top α)) : ∃a, is_lub s a :=
begin
by_cases hs : s = ∅, { subst hs, exact ⟨⊥, is_lub_empty⟩, },
rcases ne_empty_iff_exists_mem.1 hs with ⟨x, hxs⟩,
by_cases bnd : ∃b:α, ↑b ∈ upper_bounds s,
{ rcases bnd with ⟨b, hb⟩,
have bdd : bdd_above {a : α | ↑a ∈ s}, from ⟨b, assume y hy, coe_le_coe.1 $ hb _ hy⟩,
refine ⟨(Sup {a : α | ↑a ∈ s} : α), _, _⟩,
{ assume a has,
rcases (le_coe_iff _ _).1 (hb _ has) with ⟨a, rfl, h⟩,
exact (coe_le_coe.2 $ le_cSup bdd has) },
{ assume a hs,
rcases (le_coe_iff _ _).1 (hb _ hxs) with ⟨x, rfl, h⟩,
refine (coe_le_iff _ _).2 (assume c hc, _), subst hc,
exact (cSup_le (ne_empty_of_mem hxs) $ assume b (hbs : ↑b ∈ s), coe_le_coe.1 $ hs _ hbs), } },
exact ⟨⊤, assume a _, le_top, assume a,
match a with
| some a, ha := (bnd ⟨a, ha⟩).elim
| none, ha := _root_.le_refl ⊤
end⟩
end
lemma has_glb (s : set (with_top α)) : ∃a, is_glb s a :=
begin
by_cases hs : ∃x:α, ↑x ∈ s,
{ rcases hs with ⟨x, hxs⟩,
refine ⟨(Inf {a : α | ↑a ∈ s} : α), _, _⟩,
exact (assume a has, (coe_le_iff _ _).2 $ assume x hx, cInf_le (bdd_below_bot _) $
show ↑x ∈ s, from hx ▸ has),
{ assume a has,
rcases (le_coe_iff _ _).1 (has _ hxs) with ⟨x, rfl, h⟩,
exact (coe_le_coe.2 $ le_cInf (ne_empty_of_mem hxs) $
assume b hbs, coe_le_coe.1 $ has _ hbs) } },
exact ⟨⊤, assume a, match a with
| some a, ha := (hs ⟨a, ha⟩).elim
| none, ha := _root_.le_refl _
end,
assume a _, le_top⟩
end
noncomputable instance : has_Sup (with_top α) := ⟨λs, classical.some $ has_lub s⟩
noncomputable instance : has_Inf (with_top α) := ⟨λs, classical.some $ has_glb s⟩
lemma is_lub_Sup (s : set (with_top α)) : is_lub s (Sup s) := classical.some_spec _
lemma is_glb_Inf (s : set (with_top α)) : is_glb s (Inf s) := classical.some_spec _
noncomputable instance : complete_linear_order (with_top α) :=
{ Sup := Sup, le_Sup := assume s, (is_lub_Sup s).1, Sup_le := assume s, (is_lub_Sup s).2,
Inf := Inf, le_Inf := assume s, (is_glb_Inf s).2, Inf_le := assume s, (is_glb_Inf s).1,
decidable_le := classical.dec_rel _,
.. with_top.linear_order, ..with_top.lattice, ..with_top.order_top, ..with_top.order_bot }
lemma coe_Sup {s : set α} (hb : bdd_above s) : (↑(Sup s) : with_top α) = (⨆a∈s, ↑a) :=
begin
by_cases hs : s = ∅,
{ rw [hs, cSup_empty], simp only [set.mem_empty_eq, lattice.supr_bot, lattice.supr_false], refl },
apply le_antisymm,
{ refine ((coe_le_iff _ _).2 $ assume b hb, cSup_le hs $ assume a has, coe_le_coe.1 $ hb ▸ _),
exact (le_supr_of_le a $ le_supr_of_le has $ _root_.le_refl _) },
{ exact (supr_le $ assume a, supr_le $ assume ha, coe_le_coe.2 $ le_cSup hb ha) }
end
lemma coe_Inf {s : set α} (hs : s ≠ ∅) : (↑(Inf s) : with_top α) = (⨅a∈s, ↑a) :=
let ⟨x, hx⟩ := ne_empty_iff_exists_mem.1 hs in
have (⨅a∈s, ↑a : with_top α) ≤ x, from infi_le_of_le x $ infi_le_of_le hx $ _root_.le_refl _,
let ⟨r, r_eq, hr⟩ := (le_coe_iff _ _).1 this in
le_antisymm
(le_infi $ assume a, le_infi $ assume ha, coe_le_coe.2 $ cInf_le (bdd_below_bot s) ha)
begin
refine (r_eq.symm ▸ coe_le_coe.2 $ le_cInf hs $ assume a has, coe_le_coe.1 $ _),
refine (r_eq ▸ infi_le_of_le a _),
exact (infi_le_of_le has $ _root_.le_refl _),
end
end with_top
section order_dual
open lattice
instance (α : Type*) [conditionally_complete_lattice α] :
conditionally_complete_lattice (order_dual α) :=
{ le_cSup := @cInf_le α _,
cSup_le := @le_cInf α _,
le_cInf := @cSup_le α _,
cInf_le := @le_cSup α _,
..order_dual.lattice.has_Inf α,
..order_dual.lattice.has_Sup α,
..order_dual.lattice.lattice α }
instance (α : Type*) [conditionally_complete_linear_order α] :
conditionally_complete_linear_order (order_dual α) :=
{ ..order_dual.lattice.conditionally_complete_lattice α,
..order_dual.decidable_linear_order α }
end order_dual
|
1
-- @@stderr --
dtrace: invalid probe specifier foounix: probe description :foounix:: does not match any probes
|
#ifndef RW_COMMON_PROGRAMOPTIONS_HPP_
#define RW_COMMON_PROGRAMOPTIONS_HPP_
#if !defined(SWIG)
#include <rw/core/PropertyMap.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/positional_options.hpp>
#include <boost/program_options/variables_map.hpp>
#endif
namespace rw { namespace common {
/**
* @brief A class for parsing the program command line into a PropertyMap.
*/
class ProgramOptions
{
public:
/**
* @brief Construct new set of program options.
* @param applicationName [in] the name of the application.
* @param version [in] the version of the application.
*/
ProgramOptions (const std::string& applicationName, const std::string& version) :
_appName (applicationName), _version (version), _optionDesc ("Options")
{}
/**
* @brief this initialize default options that can add simple properties to the propertymap.
*/
void initOptions ();
/**
* @brief add a string option that is only allowed to occur once on the command line
* @param name [in] name of option
* @param defval [in] the default string value if any
* @param desc [in] description of commandline option
*/
void addStringOption (const std::string& name, const std::string& defval,
const std::string& desc);
/**
* @brief Set \b name of option number \b i.
* @param name [in] the name.
* @param i [in] index of the option.
*/
void setPositionalOption (const std::string& name, int i);
/**
* @brief Parses command line input.
* @param argc [in] the argument count.
* @param argv [in] the argument values.
* @return if 0 is returned then help was printed or an error occurred.
*/
int parse (int argc, char** argv);
/**
* @brief Parses input from a string.
* @param string [in] input line.
* @return 0 if success.
*/
int parse (const std::string& string);
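/* Illustrative usage sketch (not part of the original header):
 *   rw::common::ProgramOptions po ("MyApp", "1.0");
 *   po.initOptions ();
 *   po.addStringOption ("input", "", "the input file");
 *   po.setPositionalOption ("input", 1);
 *   if (po.parse (argc, argv) == 0)
 *       return 1;    // per the doc above: help was printed or an error occurred
 *   rw::core::PropertyMap map = po.getPropertyMap ();
 */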
/**
* @brief Get the underlying program options description from boost.
* @return reference to options_description.
*/
boost::program_options::options_description& getOptionDescription () { return _optionDesc; }
/**
* @brief Get the underlying positional program options description from boost.
* @return reference to positional_options_description.
*/
boost::program_options::positional_options_description& getPosOptionDescription ()
{
return _posOptionDesc;
}
/**
* @brief Get parsed properties in RobWork format in the form of a PropertyMap.
* @return the property map with parsed options.
*/
rw::core::PropertyMap getPropertyMap () { return _pmap; }
private:
int checkVariablesMap (boost::program_options::variables_map& vm);
std::string _appName;
rw::core::PropertyMap _pmap;
std::string _inputFile, _version;
std::vector< std::string > _additionalStringOptions;
boost::program_options::options_description _optionDesc;
boost::program_options::positional_options_description _posOptionDesc;
};
}} // namespace rw::common
#endif
|
theory T175
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
{-# OPTIONS --without-K --safe #-}
module Fragment.Examples.Semigroup.Arith.Reasoning where
open import Fragment.Examples.Semigroup.Arith.Base
+-direct : ∀ {m n} → (m + 2) + (3 + n) ≡ m + (5 + n)
+-direct {m} {n} = begin
(m + 2) + (3 + n)
≡⟨ fragment SemigroupFrex +-semigroup ⟩
m + (5 + n)
∎
open import Data.Nat.Properties using (*-distribˡ-+)
+-inner : ∀ {m n k} → k * (m + 2) + k * (3 + n) ≡ k * (m + 5 + n)
+-inner {m} {n} {k} = begin
k * (m + 2) + k * (3 + n)
≡⟨ sym (*-distribˡ-+ k (m + 2) (3 + n)) ⟩
k * ((m + 2) + (3 + n))
≡⟨ cong (k *_) (fragment SemigroupFrex +-semigroup) ⟩
k * (m + 5 + n)
∎
|
`is_element/E/subdiv` := (p::nonnegint,n::nonnegint,m::nonnegint) -> proc(theta)
global reason;
local tag,src,tgt,i,t;
tag := "is_element/E/subdiv";
if p = 0 then
return evalb(n = m and theta = []);
fi;
if not(type(theta,list(list(nonnegint)))) then
reason := [tag,"theta is not a list of lists of natural numbers",theta];
return false;
fi;
if nops(theta) <> p then
reason := [tag,"theta is not a list of length p",theta,p];
return false;
fi;
tgt := NULL;
for i from 1 to p do
if theta[i] = [] then
reason := [tag,"theta[i] is empty",theta,i];
return false;
fi;
t := max(op(theta[i]));
if {op(theta[i])} <> {seq(j,j=0..t)} then
reason := [tag,"theta[i] does not give an epimorphism",theta,i,t];
return false;
fi;
tgt := tgt,t;
od:
tgt := [tgt];
src := map(nops,theta) -~ 1;
if src[1] <> n then
reason := [tag,"chain does not start with [n]",theta,src,n];
return false;
fi;
if tgt[p] <> m then
reason := [tag,"chain does not end with [m]",theta,tgt,m];
return false;
fi;
if [op(src),m] <> [n,op(tgt)] then
reason := [tag,"sources and targets do not match",theta,src,tgt,n,m];
return false;
fi;
return true;
end:
`is_leq/E/subdiv` := NULL;
`list_elements/E/subdiv/aux` := proc(n::nonnegint,m::nonnegint)
option remember;
local N,M,E;
N := {seq(i,i=0..n)};
M := {seq(i,i=0..m)};
E := `list_elements/epi`(N,M);
E := map(f -> [seq(f[i],i=0..n)],E);
return E;
end:
`list_elements/E/subdiv` := proc(p::nonnegint,n::nonnegint,m::nonnegint)
option remember;
local L,k,E0,E1;
if p = 0 then
if n = m then
return [[]];
else
return [];
fi;
else
L := NULL;
for k from m to n do
E0 := `list_elements/E/subdiv`(p-1,n,k);
E1 := `list_elements/E/subdiv/aux`(k,m);
L := L,seq(seq([op(e0),e1],e1 in E1),e0 in E0);
od:
fi;
return [L];
end:
`random_element/E/subdiv` := (p::nonnegint,n::nonnegint,m::nonnegint) -> proc()
local k,q,f,g;
if m > n then
return FAIL;
fi;
if p = 0 then
if m = n then
return [];
else
return FAIL;
fi;
elif p = 1 then
g := `random_element/epi`({seq(j,j=0..n)},{seq(j,j=0..m)})();
return [[seq(g[j],j=0..n)]];
else
k := rand(m..n)();
q := rand(1..p-1)();
f := `random_element/E/subdiv`(q,n,k)();
g := `random_element/E/subdiv`(p-q,k,m)();
return [op(f),op(g)];
fi;
end:
`count_elements/E/subdiv` := proc(p::nonnegint,n::nonnegint,m::nonnegint)
option remember;
if p = 0 then
return `if`(n = m,1,0);
else
return add(`count_elements/E/subdiv`(p-1,n,k) *
Stirling2(k+1,m+1) * (m+1)!,k=m..n);
fi;
end:
`degrees/E/subdiv` := (p,n,m) -> (theta) ->
[op(map(nops,theta) -~ 1),m];
######################################################################
`is_element/EE/subdiv` := (p::nonnegint,n::nonnegint) -> proc(theta)
global reason;
local tag,src,tgt,i,m,t,b;
tag := "is_element/EE/subdiv";
if p = 0 then
if theta = [] then
return true;
else
reason := [tag,"for p=0 the only element is []",theta];
return false;
fi;
fi;
if not(type(theta,list(list(nonnegint)))) then
reason := [tag,"theta is not a list of lists of natural numbers",theta];
return false;
fi;
if nops(theta) <> p then
reason := [tag,"theta is not a list of length p",theta,p];
return false;
fi;
if theta[p] = [] then
reason := [tag,"theta[p] = []",theta,p];
return false;
fi;
m := max(op(theta[p]));
b := `is_element/E/subdiv`(p,n,m)(theta);
if not(b) then
reason := [tag,"is_element/E/subdiv failed",m,reason];
return false;
fi;
return true;
end:
`is_leq/EE/subdiv` := NULL;
`list_elements/EE/subdiv` := proc(p::nonnegint,n::nonnegint)
option remember;
[seq(op(`list_elements/E/subdiv`(p,n,m)),m=0..n)];
end:
`random_element/EE/subdiv` := (p::nonnegint,n::nonnegint) -> proc()
local m;
m := rand(0..n)();
return `random_element/E/subdiv`(p,n,m)();
end:
`count_elements/EE/subdiv` := proc(p::nonnegint,n::nonnegint)
add(`count_elements/E/subdiv`(p,n,m),m=0..n);
end:
`find_count_coeffs/EE/subdiv` := proc(n)
local E,a,p,k;
E := [seq(`count_elements/EE/subdiv`(p,n) - add((-1)^(n+k)*a[n,k]*((k+1)!)^p,k=0..n),p=1..n+5)];
return solve(E);
end:
`degrees/EE/subdiv` := (p,n) -> (theta) ->
[op(map(nops,theta) -~ 1),max(op(theta[p]))];
######################################################################
`is_element/threads/subdiv` := (p::nonnegint,n::nonnegint) -> (theta) -> proc(i)
global reason;
local tag,k,m;
tag := "is_element/threads/subdiv";
if not(type(i,list(nonnegint)) and nops(i) = p+1) then
reason := [tag,"i is not a list of p+1 natural numbers",i,p];
return false;
fi;
k := `degrees/EE/subdiv`(p,n)(theta);
for m from 0 to p do
if i[m+1] > k[m+1] then
return false;
fi;
if m < p and theta[m+1][i[m+1]+1] > i[m+2] then
return false;
fi;
od:
return true;
end:
`is_leq/threads/subdiv` := NULL;
`list_elements/threads/subdiv` := (p::nonnegint,n::nonnegint) -> proc(theta)
local i,k,m,phi,L,M;
if p = 0 then
return [seq([i],i=0..n)];
else
m := max(op(theta[p]));
phi := [op(1..p-1,theta)];
L := `list_elements/threads/subdiv`(p-1,n)(phi);
M := NULL;
for i in L do
k := theta[p][i[p]+1];
M := M,seq([op(i),j],j=k..m);
od:
return [M];
fi;
end:
`random_element/threads/subdiv` := (p::nonnegint,n::nonnegint) -> (theta) -> proc()
local i,j,k,m0,m1;
k := `degrees/EE/subdiv`(p,n)(theta);
i := [rand(0..n)()];
for j from 1 to p do
m0 := theta[j][i[j]+1];
m1 := k[j+1];
i := [op(i),rand(m0..m1)()];
od;
return i;
end:
`count_elements/threads/subdiv` := NULL;
`mu/threads/subdiv` := (p::nonnegint,n::nonnegint) -> (theta) -> proc(i)
local k,u,m;
k := `degrees/EE/subdiv`(p,n)(theta);
u := k[p+1]+1;
for m from 0 to p-1 do
u := u * nops(select(j -> (j <= i[m+2]),theta[m+1]));
od:
return u;
end:
`xi/EE/subdiv` := (p::nonnegint,n::nonnegint) -> proc(theta)
local x,II,i;
if p = 0 then
return [1$(n+1)]/~(n+1);
fi;
x := table([seq(i=0,i=0..n)]);
II := `list_elements/threads/subdiv`(p,n)(theta);
for i in II do
x[i[1]] := x[i[1]] + 1/`mu/threads/subdiv`(p,n)(theta)(i);
od:
return [seq(x[i],i=0..n)];
end:
######################################################################
# F(p,n) is the set of vertices in the (p+1)-fold barycentric
# subdivision of the n-simplex
`is_element/FF/subdiv` := (p::nonnegint,n::nonnegint) -> proc(alpha_theta)
local alpha,theta,m;
if not(type(alpha_theta,list) and nops(alpha_theta) = 2) then
return false;
fi;
alpha,theta := op(alpha_theta);
if not(type(alpha,list(nonnegint)) and nops(alpha) > 0) then
return false;
fi;
m := nops(alpha) - 1;
if not(`is_element/simplicial_mono_alt`(m,n)(alpha)) then
return false;
fi;
if not(`is_element/EE/subdiv`(p,m)(theta)) then
return false;
fi;
return true;
end:
`list_elements/FF/subdiv` := proc(p,n)
local L,N,m,alpha,theta;
L := NULL:
N := [seq(i,i=0..n)];
for alpha in combinat[powerset](N) do
m := nops(alpha) - 1;
if m >= 0 then
L := L,seq([alpha,theta],theta in `list_elements/EE/subdiv`(p,m));
fi;
od:
return [L];
end:
`random_element/FF/subdiv` := (p,n) -> proc()
local d,m,alpha,theta;
d := rand(1..n)();
alpha := `random_subset_of`({seq(i,i=0..n)},d);
alpha := sort([op(alpha)]);
m := nops(alpha) - 1;
theta := `random_element/EE/subdiv`(p,m)();
return [alpha,theta];
end:
# This function is not quite right. It seems to find a correct morphism
# when one exists, but it claims that morphisms exist when that is not the case.
`find_morphism/FF/subdiv` := (p::nonnegint,n::nonnegint) -> proc(alpha_theta0,alpha_theta1)
local alpha0,alpha1,theta0,theta1,m0,m1,k0,k1,f,h,i,j,u,v,xi,V,W,G,H;
alpha0,theta0 := op(alpha_theta0);
alpha1,theta1 := op(alpha_theta1);
m0 := nops(alpha0) - 1;
m1 := nops(alpha1) - 1;
k0 := `degrees/EE/subdiv`(p,m0)(theta0);
k1 := `degrees/EE/subdiv`(p,m1)(theta1);
f := table([seq(i=FAIL,i=0..n)]);
for i from 0 to m1 do f[alpha1[i+1]] := i; od;
xi := [[seq(f[alpha0[i+1]],i=0..m0)]];
if member(FAIL,xi[1]) then return FAIL; fi;
for j from 0 to p-1 do
V := {seq(w,w=0..k0[j+1])};
W := {seq(w,w=0..k0[j+1+1])};
G := [seq(select(v -> theta0[j+1][v+1] <= w,V),w in W)];
H := map(A -> map(v -> theta1[j+1][xi[j+1][v+1]+1],A),G);
h := map(max,H);
if h <> sort([op({op(h)})]) then
return FAIL;
fi;
xi := [op(xi),h];
od;
return xi;
end:
`is_leq/FF/subdiv` := (p::nonnegint,n::nonnegint) -> proc(alpha_theta0,alpha_theta1)
evalb(`find_morphism/FF/subdiv`(p,n)(alpha_theta0,alpha_theta1) <> FAIL);
end:
`xi/FF/subdiv` := (p::nonnegint,n::nonnegint) -> proc(alpha_theta)
local alpha,theta,i,m,x,y,yt;
alpha,theta := op(alpha_theta);
m := nops(alpha) - 1;
x := `xi/EE/subdiv`(p,m)(theta);
yt := table([seq(i=0,i=0..n)]);
for i from 0 to m do
yt[alpha[i+1]] := x[i+1];
od:
y := [seq(yt[i],i=0..n)];
return y;
end:
# Every vertex of the (p+1)-fold subdivision is the barycentre of
# a d-simplex of the p-fold subdivision for some d. This function
# returns d.
`rank/FF/subdiv` := (p::nonnegint,n::nonnegint) -> proc(alpha_theta)
local alpha,theta,m;
alpha,theta := op(alpha_theta);
m := nops(alpha) - 1;
if p = 0 then
return m;
else
return max(op(theta[p]));
fi;
end:
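# Illustrative examples (added for clarity, not from the original source):
# for p = 0 the rank is just the dimension of the simplex spanned by alpha,
# e.g. `rank/FF/subdiv`(0,2)([[0,1],[]]) = 1; for p > 0 it is the largest
# value occurring in the final map of theta.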
######################################################################
# Here zeta is a strictly increasing map [j] -> [k] and theta is an
# unordered surjective map [m] -> [k]. The function returns a triple
# [n,beta,phi] where beta : [n] -> [m] is strictly increasing and
# phi : [n] -> [j] is surjective. In an appropriate context we will
# have alpha^*([x,theta]) = [beta^*(x),phi].
`rewrite/epi/subdiv` := proc(zeta,theta)
local i,j,m,J,M,k1,N,n,xi,phi;
j := nops(zeta) - 1;
m := nops(theta) - 1;
J := {seq(j0,j0=0..j)};
M := {seq(m0,m0=0..m)};
k1 := op(-1,zeta);
N := select(m0 -> theta[m0+1] <= k1,M);
n := nops(N) - 1;
xi := sort([op(N)]);
phi := NULL;
for i from 0 to n do
phi := phi,min(select(j0 -> zeta[j0+1] >= theta[xi[i+1]+1],J));
od:
phi := [phi];
return [n,xi,phi];
end:
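# Illustrative example (hand-computed, not from the original source): with
# zeta = [0,2] and theta = [0,1,1,2] we have
# `rewrite/epi/subdiv`([0,2],[0,1,1,2]) = [3,[0,1,2,3],[0,1,1,1]],
# since k1 = 2 bounds every value of theta (so all of [3] survives), and
# phi sends i to the least j0 with zeta[j0+1] >= theta[xi[i+1]+1].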
`rewrite/FF/subdiv` := (p,n) -> (zeta) -> proc(alpha_theta)
local alpha,theta,k0,m,xi,phi,alpha1,theta1;
alpha,theta := op(alpha_theta);
if p = 0 then
k0 := nops(zeta) - 1;
return [[seq(alpha[zeta[i+1]+1],i=0..k0)],[]];
else
k0 := nops(zeta) - 1;
m,xi,phi := op(`rewrite/epi/subdiv`(zeta,theta[p]));
alpha1,theta1 := op(`rewrite/FF/subdiv`(p-1,n)(xi)([alpha,[op(1..-2,theta)]]));
return [alpha1,[op(theta1),phi]];
fi;
end:
`rewrite_aux/FF/subdiv` := (p,n) -> (zeta) -> proc(alpha_theta)
local alpha,theta,k0,m,xi,phi,xi1,alpha1,theta1;
alpha,theta := op(alpha_theta);
if p = 0 then
k0 := nops(zeta) - 1;
return [[zeta],[seq(alpha[zeta[i+1]+1],i=0..k0)],[]];
else
k0 := nops(zeta) - 1;
m,xi,phi := op(`rewrite/epi/subdiv`(zeta,theta[p]));
xi1,alpha1,theta1 := op(`rewrite_aux/FF/subdiv`(p-1,n)(xi)([alpha,[op(1..-2,theta)]]));
return [[op(xi1),zeta],alpha1,[op(theta1),phi]];
fi;
end:
`vertices/FF/subdiv` := (p,n) -> proc(alpha_theta)
local r;
r := `rank/FF/subdiv`(p,n)(alpha_theta);
return [seq(`rewrite/FF/subdiv`(p,n)([i])(alpha_theta),i=0..r)];
end:
|
theory T167
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
theory T180
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=7,timeout=86400]
oops
end |
@testset "Aut(Σ₃.₀)" begin
genus = 3
π₁Σ = Groups.SurfaceGroup(genus, 0)
Groups.PermRightAut(p::Perm) = Groups.PermRightAut(p.d)
# Groups.PermLeftAut(p::Perm) = Groups.PermLeftAut(p.d)
autπ₁Σ = let autπ₁Σ = AutomorphismGroup(π₁Σ)
pauts = let p = perm"(1,3,5)(2,4,6)"
[Groups.PermRightAut(p^i) for i in 0:2]
end
T = eltype(KnuthBendix.letters(alphabet(autπ₁Σ)))
S = eltype(pauts)
A = Alphabet(Union{T,S}[KnuthBendix.letters(alphabet(autπ₁Σ)); pauts])
autG = AutomorphismGroup(
π₁Σ,
autπ₁Σ.gens,
A,
ntuple(i->inv(gens(π₁Σ, i)), 2Groups.genus(π₁Σ))
)
autG
end
Al = alphabet(autπ₁Σ)
S = [gens(autπ₁Σ); inv.(gens(autπ₁Σ))]
sautFn = let ltrs = KnuthBendix.letters(Al)
parent(first(ltrs).autFn_word)
end
τ = Groups.rotation_element(sautFn)
@testset "Twists" begin
A = KnuthBendix.alphabet(sautFn)
λ = Groups.ΡΛ(:λ, A, 2genus)
ϱ = Groups.ΡΛ(:ϱ, A, 2genus)
@test sautFn(Groups.Te_diagonal(λ, ϱ, 1)) ==
conj(sautFn(Groups.Te_diagonal(λ, ϱ, 2)), τ)
@test sautFn(Groups.Te_diagonal(λ, ϱ, 3)) == sautFn(Groups.Te(λ, ϱ, 3, 1))
end
z = let d = Groups.domain(τ)
Groups.evaluate(τ^genus)
end
@test π₁Σ.(word.(z)) == Groups.domain(first(S))
d = Groups.domain(first(S))
p = perm"(1,3,5)(2,4,6)"
@test Groups.evaluate!(deepcopy(d), τ) == d^inv(p)
@test Groups.evaluate!(deepcopy(d), τ^2) == d^p
E, sizes = Groups.wlmetric_ball(S, radius=3)
@test sizes == [49, 1813, 62971]
B2 = @view E[1:sizes[2]]
σ = autπ₁Σ(Word([Al[Groups.PermRightAut(p)]]))
@test conj(S[7], σ) == S[10]
@test conj(S[7], σ^2) == S[11]
@test conj(S[9], σ) == S[12]
@test conj(S[9], σ^2) == S[8]
@test conj(S[1], σ) == S[4]
@test conj(S[1], σ^2) == S[5]
@test conj(S[3], σ) == S[6]
@test conj(S[3], σ^2) == S[2]
B2ᶜ = [conj(b, σ) for b in B2]
@test B2ᶜ != B2
@test Set(B2ᶜ) == Set(B2)
end
|
1
-- @@stderr --
dtrace: invalid probe specifier :genunix:: probe description ::genunix: does not match any probes
|
[Open in Colab](https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb)
# Neuromatch Academy: Week 1, Day 5, Tutorial 1
# Dimensionality Reduction: Geometric view of data
__Content creators:__ Alex Cayco Gajic, John Murray
__Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom
---
# Tutorial Objectives
In this notebook we'll explore how multivariate data can be represented in different orthonormal bases. This will help us build the intuition we'll need for PCA in the following tutorial.
Overview:
- Generate correlated multivariate data.
- Define an arbitrary orthonormal basis.
- Project the data onto the new basis.
```python
# @title Video 1: Geometric view of data
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="THu9yHnpq9I", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=THu9yHnpq9I
---
# Setup
```python
# Import
import numpy as np
import matplotlib.pyplot as plt
```
```python
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
```
```python
# @title Helper functions
def get_data(cov_matrix):
"""
Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian.
Note that samples are sorted in ascending order for the first random variable
Args:
cov_matrix (numpy array of floats): desired covariance matrix
Returns:
(numpy array of floats) : samples from the bivariate Gaussian, with each
column corresponding to a different random
variable
"""
mean = np.array([0, 0])
X = np.random.multivariate_normal(mean, cov_matrix, size=1000)
indices_for_sorting = np.argsort(X[:, 0])
X = X[indices_for_sorting, :]
return X
def plot_data(X):
"""
Plots bivariate data. Includes a plot of each random variable, and a scatter
plot of their joint activity. The title indicates the sample correlation
calculated from the data.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
Nothing.
"""
fig = plt.figure(figsize=[8, 4])
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(X[:, 0], color='k')
plt.ylabel('Neuron 1')
plt.title('Sample var 1: {:.1f}'.format(np.var(X[:, 0])))
ax1.set_xticklabels([])
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(X[:, 1], color='k')
plt.xlabel('Sample Number')
plt.ylabel('Neuron 2')
plt.title('Sample var 2: {:.1f}'.format(np.var(X[:, 1])))
ax3 = fig.add_subplot(gs[:, 1])
ax3.plot(X[:, 0], X[:, 1], '.', markerfacecolor=[.5, .5, .5],
markeredgewidth=0)
ax3.axis('equal')
plt.xlabel('Neuron 1 activity')
plt.ylabel('Neuron 2 activity')
plt.title('Sample corr: {:.1f}'.format(np.corrcoef(X[:, 0], X[:, 1])[0, 1]))
plt.show()
def plot_basis_vectors(X, W):
"""
Plots bivariate data as well as new basis vectors.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
W (numpy array of floats) : Square matrix representing new orthonormal
basis each column represents a basis vector
Returns:
Nothing.
"""
plt.figure(figsize=[4, 4])
plt.plot(X[:, 0], X[:, 1], '.', color=[.5, .5, .5], label='Data')
plt.axis('equal')
plt.xlabel('Neuron 1 activity')
plt.ylabel('Neuron 2 activity')
plt.plot([0, W[0, 0]], [0, W[1, 0]], color='r', linewidth=3,
label='Basis vector 1')
plt.plot([0, W[0, 1]], [0, W[1, 1]], color='b', linewidth=3,
label='Basis vector 2')
plt.legend()
plt.show()
def plot_data_new_basis(Y):
"""
Plots bivariate data after transformation to new bases.
Similar to plot_data but with colors corresponding to projections onto
basis 1 (red) and basis 2 (blue). The title indicates the sample correlation
calculated from the data.
Note that samples are re-sorted in ascending order for the first
random variable.
Args:
Y (numpy array of floats): Data matrix in new basis each column
corresponds to a different random variable
Returns:
Nothing.
"""
fig = plt.figure(figsize=[8, 4])
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(Y[:, 0], 'r')
plt.ylabel('Projection \n basis vector 1')
plt.title('Sample var 1: {:.1f}'.format(np.var(Y[:, 0])))
ax1.set_xticklabels([])
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(Y[:, 1], 'b')
plt.xlabel('Sample number')
plt.ylabel('Projection \n basis vector 2')
plt.title('Sample var 2: {:.1f}'.format(np.var(Y[:, 1])))
ax3 = fig.add_subplot(gs[:, 1])
ax3.plot(Y[:, 0], Y[:, 1], '.', color=[.5, .5, .5])
ax3.axis('equal')
plt.xlabel('Projection basis vector 1')
plt.ylabel('Projection basis vector 2')
plt.title('Sample corr: {:.1f}'.format(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1]))
plt.show()
```
---
# Section 1: Generate correlated multivariate data
```python
# @title Video 2: Multivariate data
video = YouTubeVideo(id="jcTq2PgU5Vw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=jcTq2PgU5Vw
To gain intuition, we will first use a simple model to generate multivariate data. Specifically, we will draw random samples from a *bivariate normal distribution*. This is an extension of the one-dimensional normal distribution to two dimensions, in which each $x_i$ is marginally normal with mean $\mu_i$ and variance $\sigma_i^2$:
\begin{align}
x_i \sim \mathcal{N}(\mu_i,\sigma_i^2).
\end{align}
Additionally, the joint distribution for $x_1$ and $x_2$ has a specified correlation coefficient $\rho$. Recall that the correlation coefficient is a normalized version of the covariance, and ranges between -1 and +1:
\begin{align}
\rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}}.
\end{align}
For simplicity, we will assume that the mean of each variable has already been subtracted, so that $\mu_i=0$. The remaining parameters can be summarized in the covariance matrix, which for two dimensions has the following form:
\begin{equation*}
{\bf \Sigma} =
\begin{pmatrix}
\text{var}(x_1) & \text{cov}(x_1,x_2) \\
\text{cov}(x_1,x_2) &\text{var}(x_2)
\end{pmatrix}.
\end{equation*}
In general, $\bf \Sigma$ is a symmetric matrix with the variances $\text{var}(x_i) = \sigma_i^2$ on the diagonal, and the covariances on the off-diagonal. Later, we will see that the covariance matrix plays a key role in PCA.
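As a quick numerical sanity check (with illustrative values, not part of the exercise below), we can assemble $\bf \Sigma$ from the variances and correlation coefficient and then recover $\rho$ from it:
```python
import numpy as np

# Illustrative values: unit variances and a correlation of 0.8
var_1, var_2, rho = 1.0, 1.0, 0.8
cov = rho * np.sqrt(var_1 * var_2)             # off-diagonal entry
Sigma = np.array([[var_1, cov], [cov, var_2]])

# Recover the correlation coefficient from the covariance matrix
print(Sigma[0, 1] / np.sqrt(Sigma[0, 0] * Sigma[1, 1]))  # 0.8
```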
## Exercise 1: Draw samples from a distribution
We have provided code to draw random samples from a zero-mean bivariate normal distribution. Throughout this tutorial, we'll imagine these samples represent the activity (firing rates) of two recorded neurons on different trials. Fill in the function below to calculate the covariance matrix given the desired variances and correlation coefficient. The covariance can be found by rearranging the equation above:
\begin{align}
\text{cov}(x_1,x_2) = \rho \sqrt{\sigma_1^2 \sigma_2^2}.
\end{align}
Use these functions to generate and plot data while varying the parameters. You should get a feel for how changing the correlation coefficient affects the geometry of the simulated data.
**Steps**
* Fill in the function `calculate_cov_matrix` to calculate the desired covariance.
* Generate and plot the data for $\sigma_1^2 = 1$, $\sigma_2^2 = 1$, and $\rho = .8$. Try plotting the data for different values of the correlation coefficient: $\rho = -1, -.5, 0, .5, 1$.
```python
help(plot_data)
help(get_data)
```
Help on function plot_data in module __main__:
plot_data(X)
Plots bivariate data. Includes a plot of each random variable, and a scatter
plot of their joint activity. The title indicates the sample correlation
calculated from the data.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
Nothing.
Help on function get_data in module __main__:
get_data(cov_matrix)
Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian.
Note that samples are sorted in ascending order for the first random variable
Args:
cov_matrix (numpy array of floats): desired covariance matrix
Returns:
(numpy array of floats) : samples from the bivariate Gaussian, with each
column corresponding to a different random
variable
```python
def calculate_cov_matrix(var_1, var_2, corr_coef):
"""
Calculates the covariance matrix based on the variances and correlation
coefficient.
Args:
var_1 (scalar) : variance of the first random variable
var_2 (scalar) : variance of the second random variable
corr_coef (scalar) : correlation coefficient
Returns:
(numpy array of floats) : covariance matrix
"""
#################################################
## TODO for students: calculate the covariance matrix
# Fill out function and remove
# raise NotImplementedError("Student excercise: calculate the covariance matrix!")
#################################################
# Calculate the covariance from the variances and correlation
cov = corr_coef*np.sqrt(var_2*var_1)
cov_matrix = np.array([[var_1, cov], [cov, var_2]])
return cov_matrix
###################################################################
## TO DO for students: generate and plot bivariate Gaussian data with variances of 1
## and a correlation coefficient of 0.8
## repeat while varying the correlation coefficient from -1 to 1
###################################################################
np.random.seed(2020) # set random seed
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
# Uncomment to test your code and plot
cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef)
X = get_data(cov_matrix)
plot_data(X)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_57497711.py)
*Example output:*
---
# Section 2: Define a new orthonormal basis
```python
# @title Video 3: Orthonormal bases
video = YouTubeVideo(id="PC1RZELnrIg", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=PC1RZELnrIg
Next, we will define a new orthonormal basis of vectors ${\bf u} = [u_1,u_2]$ and ${\bf w} = [w_1,w_2]$. As we learned in the video, two vectors are orthonormal if:
1. They are orthogonal (i.e., their dot product is zero):
\begin{equation}
{\bf u\cdot w} = u_1 w_1 + u_2 w_2 = 0
\end{equation}
2. They have unit length:
\begin{equation}
||{\bf u} || = ||{\bf w} || = 1
\end{equation}
In two dimensions, it is easy to make an arbitrary orthonormal basis. All we need is an arbitrary vector ${\bf u}$, which we normalize. If we now define the second basis vector to be ${\bf w} = [-u_2,u_1]$, we can check that both conditions are satisfied:
\begin{equation}
{\bf u\cdot w} = - u_1 u_2 + u_2 u_1 = 0
\end{equation}
and
\begin{equation}
{|| {\bf w} ||} = \sqrt{(-u_2)^2 + u_1^2} = \sqrt{u_1^2 + u_2^2} = 1,
\end{equation}
where we used the fact that ${\bf u}$ is normalized. So, with an arbitrary input vector, we can define an orthonormal basis, which we will write in matrix form by stacking the basis vectors horizontally:
\begin{equation}
{{\bf W} } =
\begin{pmatrix}
u_1 & w_1 \\
u_2 & w_2
\end{pmatrix}.
\end{equation}
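The construction is easy to verify numerically. Here is a minimal sketch (independent of the exercise code below, using an arbitrary illustrative vector):
```python
import numpy as np

u = np.array([3.0, 1.0])
u = u / np.linalg.norm(u)                 # normalize u
w = np.array([-u[1], u[0]])               # w = [-u2, u1]
W = np.column_stack([u, w])

print(np.isclose(u @ w, 0.0))                        # True: orthogonal
print(np.allclose(np.linalg.norm(W, axis=0), 1.0))   # True: unit length
print(np.allclose(W.T @ W, np.eye(2)))               # True: W is orthonormal
```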
## Exercise 2: Find an orthonormal basis
In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary 2-dimensional vector as an input.
**Steps**
* Modify the function `define_orthonormal_basis` to first normalize the first basis vector $\bf u$.
* Then complete the function by finding a basis vector $\bf w$ that is orthogonal to $\bf u$.
* Test the function using initial basis vector ${\bf u} = [3,1]$. Plot the resulting basis vectors on top of the data scatter plot using the function `plot_basis_vectors`. (For the data, use $\sigma_1^2 =1$, $\sigma_2^2 =1$, and $\rho = .8$).
```python
help(plot_basis_vectors)
```
Help on function plot_basis_vectors in module __main__:
plot_basis_vectors(X, W)
Plots bivariate data as well as new basis vectors.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
W (numpy array of floats) : Square matrix representing new orthonormal
basis each column represents a basis vector
Returns:
Nothing.
```python
def define_orthonormal_basis(u):
"""
Calculates an orthonormal basis given an arbitrary vector u.
Args:
u (numpy array of floats) : arbitrary 2-dimensional vector used for new
basis
Returns:
(numpy array of floats) : new orthonormal basis
columns correspond to basis vectors
"""
#################################################
## TODO for students: calculate the orthonormal basis
# Fill out function and remove
# raise NotImplementedError("Student excercise: implement the orthonormal basis function")
#################################################
# normalize vector u
u = np.array(u)
u = u/np.sqrt(u.T@u)
# calculate vector w that is orthogonal to u
w = [-u[1],u[0]]
W = np.column_stack([u, w])
return W
np.random.seed(2020) # set random seed
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef)
X = get_data(cov_matrix)
u = np.array([3, 1])
# Uncomment and run below to plot the basis vectors
W = define_orthonormal_basis(u)
plot_basis_vectors(X, W)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_7a9640ef.py)
*Example output:*
---
# Section 3: Project data onto new basis
```python
# @title Video 4: Change of basis
video = YouTubeVideo(id="Mj6BRQPKKUc", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=Mj6BRQPKKUc
Finally, we will express our data in the new basis that we have just found. Since $\bf W$ is orthonormal, we can project the data into our new basis using simple matrix multiplication:
\begin{equation}
{\bf Y = X W}.
\end{equation}
We will explore the geometry of the transformed data $\bf Y$ as we vary the choice of basis.
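One property worth checking before the exercise: because $\bf W$ is orthonormal, the change of basis preserves the total variance of the data ($\mathrm{tr}({\bf W}^\top {\bf \Sigma} {\bf W}) = \mathrm{tr}({\bf \Sigma})$); only the split of variance across the two projections changes. A self-contained sketch with illustrative data:
```python
import numpy as np

rng = np.random.default_rng(0)
X_demo = rng.multivariate_normal([0, 0], [[1.0, 0.8], [0.8, 1.0]], size=1000)

theta = np.deg2rad(30)
W_demo = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])   # a rotation is orthonormal

Y_demo = X_demo @ W_demo
print(np.allclose(np.var(X_demo, axis=0).sum(),
                  np.var(Y_demo, axis=0).sum()))       # True
```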
## Exercise 3: Project data onto the new basis
In this exercise you will fill in the function below to project the data onto the new basis defined in the previous exercise.
**Steps**
* Complete the function `change_of_basis` to project the data onto the new basis.
* Plot the projected data using the function `plot_data_new_basis`.
* What happens to the correlation coefficient in the new basis? Does it increase or decrease?
* What happens to variance?
```python
def change_of_basis(X, W):
"""
Projects data onto new basis W.
Args:
X (numpy array of floats) : Data matrix each column corresponding to a
different random variable
W (numpy array of floats) : new orthonormal basis columns correspond to
basis vectors
Returns:
(numpy array of floats) : Data matrix expressed in new basis
"""
#################################################
## TODO for students: project the data onto a new basis W
# Fill out function and remove
# raise NotImplementedError("Student exercise: implement change of basis")
#################################################
# project data onto new basis described by W
Y = X@W
return Y
# Uncomment below to transform the data by projecting it into the new basis
Y = change_of_basis(X, W)
plot_data_new_basis(Y)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_a1124bbc.py)
*Example output:*
## Interactive Demo: Play with the basis vectors
To see what happens to the correlation as we change the basis vectors, run the cell below. The parameter $\theta$ controls the angle of $\bf u$ in degrees. Use the slider to rotate the basis vectors.
```python
# @title
# @markdown Make sure you execute this cell to enable the widget!
def refresh(theta=0):
u = [1, np.tan(theta * np.pi / 180)]
W = define_orthonormal_basis(u)
Y = change_of_basis(X, W)
plot_basis_vectors(X, W)
plot_data_new_basis(Y)
_ = widgets.interact(refresh, theta=(0, 90, 5))
```
## Questions
* What happens to the projected data as you rotate the basis?
* How does the correlation coefficient change? How does the variance of the projection onto each basis vector change?
* Are you able to find a basis in which the projected data is **uncorrelated**?
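If you could not find the decorrelating basis by hand: for equal variances it occurs at $\theta = 45°$, and in general it is given by the eigenvectors of the sample covariance matrix. This is exactly the idea behind PCA, which the next tutorial develops. A minimal sketch (assuming `X` from the cells above):
```python
import numpy as np

Sigma_hat = np.cov(X.T)                   # 2x2 sample covariance
evals, evecs = np.linalg.eigh(Sigma_hat)  # columns of evecs are orthonormal
Y_decorr = X @ evecs
print(np.round(np.corrcoef(Y_decorr[:, 0], Y_decorr[:, 1])[0, 1], 3))  # approximately 0
```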
---
# Summary
- In this tutorial, we learned that multivariate data can be visualized as a cloud of points in a high-dimensional vector space. The geometry of this cloud is shaped by the covariance matrix.
- Multivariate data can be represented in a new orthonormal basis using the dot product. These new basis vectors correspond to specific mixtures of the original variables - for example, in neuroscience, they could represent different ratios of activation across a population of neurons.
- The projected data (after transforming into the new basis) will generally have a different geometry from the original data. In particular, taking basis vectors that are aligned with the spread of the cloud of points decorrelates the data.
- These concepts (covariance, projections, and orthonormal bases) are key for understanding PCA, which will be our focus in the next tutorial.
|
Formal statement is: lemma tendsto_add_filterlim_at_infinity': fixes c :: "'b::real_normed_vector" and F :: "'a filter" assumes "filterlim f at_infinity F" and "(g \<longlongrightarrow> c) F" shows "filterlim (\<lambda>x. f x + g x) at_infinity F" Informal statement is: If $f$ tends to infinity and $g$ converges, then $f + g$ tends to infinity. |
% [INPUT]
% va = A vector of floats [0,Inf) of length n representing the market values of assets.
% vap = Input argument representing the distributional parameters of assets whose type depends on the chosen option pricing model:
% - for Black-Scholes-Merton, a float [0,Inf) representing the annualized volatility of assets;
% - for Gram-Charlier, a row vector of floats (-Inf,Inf) of length 3 whose values represent respectively the annualized volatility, skewness and excess kurtosis of assets.
% cds = A vector of floats [0,Inf) of length n representing the credit default swap spreads.
% db = A float or a vector of floats [0,Inf) of length n representing the default barrier.
% r = A float or a vector of floats (-Inf,Inf) of length n representing the annualized risk-free interest rate.
% t = A float or a vector of floats (0,Inf) of length n representing the time to maturity of default barrier.
%
% [OUTPUT]
% el = A column vector of floats [0,Inf) of length n representing the expected losses.
% cl = A column vector of floats [0,Inf) of length n representing the contingent liabilities.
% a = A column vector of floats [0,1] of length n representing the contingent alphas.
function [el,cl,a] = contingent_claims_analysis(varargin)
persistent ip;
if (isempty(ip))
ip = inputParser();
ip.addRequired('va',@(x)validateattributes(x,{'double'},{'real' 'finite' 'nonnegative' '2d' 'nonempty'}));
ip.addRequired('vap',@(x)validateattributes(x,{'double'},{'real' 'finite' 'vector' 'nonempty'}));
ip.addRequired('cds',@(x)validateattributes(x,{'double'},{'real' 'nonnegative' 'vector' 'nonempty'}));
ip.addRequired('db',@(x)validateattributes(x,{'double'},{'real' 'finite' 'nonnegative' 'vector' 'nonempty'}));
ip.addRequired('r',@(x)validateattributes(x,{'double'},{'real' 'finite' 'vector' 'nonempty'}));
ip.addRequired('t',@(x)validateattributes(x,{'double'},{'real' 'finite' '>' 0 'vector' 'nonempty'}));
end
ip.parse(varargin{:});
ipr = ip.Results;
[va,vap,cds,db,r,t] = validate_input(ipr.va,ipr.vap,ipr.cds,ipr.db,ipr.r,ipr.t);
nargoutchk(3,3);
[el,cl,a] = contingent_claims_analysis_internal(va,vap,cds,db,r,t);
end
function [el,cl,a] = contingent_claims_analysis_internal(va,vap,cds,db,r,t)
s = vap(1);
st = s * sqrt(t);
dbd = db .* exp(-r .* t);
d1 = (log(va ./ db) + ((r + (0.5 * s^2)) .* t)) ./ st;
d2 = d1 - st;
put_price = (dbd .* normcdf(-d2)) - (va .* normcdf(-d1));
if (numel(vap) == 3)
g = vap(2);
k = vap(3);
t1 = (g / 6) .* ((2 * s) - d1);
t2 = (k / 24) .* (1 - d1.^2 + (3 .* d1 .* s) - (3 * s^2));
put_price = put_price - (va .* normcdf(d1) .* s .* (t1 - t2));
end
put_price = max(0,put_price);
rd = dbd - put_price;
cds_put_price = dbd .* (1 - exp(-cds .* max(0.5,((db ./ rd) - 1)) .* t));
cds_put_price = min(cds_put_price,put_price);
a = max(0,min(1 - (cds_put_price ./ put_price),1));
a(~isreal(a)) = 0;
el = put_price;
cl = el .* a;
end
function [va,vap,cds,db,r,t] = validate_input(va,vap,cds,db,r,t)
va = va(:);
va_len = numel(va);
if (va_len < 5)
error('The value of ''va'' is invalid. Expected input to be a vector containing at least 5 elements.');
end
vap_len = numel(vap);
if ((vap_len ~= 1) && (vap_len ~= 3))
error('The value of ''vap'' is invalid. Expected input to be a vector containing either 1 or 3 elements.');
end
if (vap(1) < 0)
error('The value of ''vap'' is invalid. Expected input first element to be greater than or equal to 0.');
end
cds = cds(:);
if (numel(cds) ~= va_len)
error(['The value of ''cds'' is invalid. Expected input to be a vector containing ' num2str(va_len) ' elements.']);
end
if (all(cds >= 1))
cds = cds ./ 10000;
end
data = {db(:) r(:) t(:)};
l = unique(cellfun(@numel,data));
l_scalar = (l == 1);
if (any(l_scalar))
if (any(l(~l_scalar) ~= va_len))
error(['The number of elements of ''db'', ''r'' and ''t'' must be either 1 or equal to ' num2str(va_len) '.']);
end
else
if (any(l ~= va_len))
error(['The number of elements of ''db'', ''r'' and ''t'' must be either 1 or equal to ' num2str(va_len) '.']);
end
end
for i = 1:numel(data)
data_i = data{i};
if (numel(data_i) == 1)
data{i} = repmat(data_i,va_len,1);
end
end
[db,r,t] = deal(data{:});
end
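% Example usage (illustrative inputs, not taken from any dataset; assumes the
% Black-Scholes-Merton variant, i.e. a scalar volatility parameter):
%
%   va = [100; 102; 105; 103; 108]; % market values of assets
%   vap = 0.25; % annualized volatility of assets
%   cds = [0.012; 0.013; 0.012; 0.014; 0.013]; % CDS spreads in decimal form
%   [el,cl,a] = contingent_claims_analysis(va,vap,cds,80,0.02,1);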
|
Formal statement is: lemma limitin_atin_self: "limitin Y f (f a) (atin X a) \<longleftrightarrow> f a \<in> topspace Y \<and> (a \<in> topspace X \<longrightarrow> (\<forall>V. openin Y V \<and> f a \<in> V \<longrightarrow> (\<exists>U. openin X U \<and> a \<in> U \<and> f ` U \<subseteq> V)))" Informal statement is: A function $f$ from a topological space $X$ to a topological space $Y$ has a limit at a point $a \in X$ if and only if $f(a) \in Y$ and for every open set $V$ containing $f(a)$, there exists an open set $U$ containing $a$ such that $f(U) \subseteq V$. |
Kamma matrimony site - search lakhs of Kamma matrimonial profiles of brides & grooms by ID number and photo. Log in to the free Kamma matrimonial service to find the best Kamma matches for marriage!
Get thousands of the newest coupons, deals and sales for all your favorite brands, from all your favorite stores.
sampleFromAGroup<-function(x, y, nsize, samp_options=list(replacement=FALSE, sample_all_available=FALSE, sample_all_available_warning = TRUE)){
# 2015-2016 WP2 FishPI
# Adapted by Nuno Prista from great original work of Liz Clarke, Marine Scotland.
# 2016-10-17 Nuno Prista: added option sample all when sampling without replacement [see comments]
# 2016-10-17 Nuno Prista: added sample2 [correction of behaviour of "sample" when only 1 element is being sampled]
# 2016-10-17 Nuno Prista: improved identification of samples in result
# 2016-10-18 Nuno Prista: improved code at samp_options level [made independent from position in list - > now easier to add options]
# 2016-10-18 Nuno Prista: added suppress.warnings to samp_options
# 2018-05-29 Nuno Prista: adapted so that one of the group names can be NA (useful in, e.g., stratifying by maturity)
nGroup<-length(nsize)
xSamp<-NULL
for (i in 1:nGroup) {
#print(i)
if(!is.na(names(nsize)[i]))
{
indx<-unique(x[which(y==names(nsize)[i])])
} else {indx <- x [which(is.na(y))] } # nuno 20180529 [handles NAs in stratification]
# nuno 20161017: condition to sample all available units when sampling without replacement
if(samp_options$replacement == FALSE & samp_options$sample_all_available == TRUE & nsize[i]>length(indx)){
nsize[i] <- length(indx)
if(samp_options$sample_all_available_warning==TRUE) {print(paste("sampling all available in group",names(nsize)[i]))}
}
# nuno 20161017: handles the case where only 1 element is available to sample (corrects the behaviour of "sample")
if(length(indx)>1){
samp<-sample(indx, size=nsize[i], replace = samp_options$replacement)
} else {samp<-sample2(indx, size=nsize[i], replace = samp_options$replacement)}
# nuno 20161017: assign group names to the sampled elements
names(samp)<-rep(names(nsize)[i], length(samp))
if(i == 1) {
xSamp <-samp
} else {
xSamp <- c(xSamp, samp)
}
}
return(xSamp)
}
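# Example (illustrative): draw 2 ids per sex group without replacement, taking
# all available ids if a group holds fewer than requested; note that "sample2"
# is an external helper assumed to be defined elsewhere in this codebase.
# x <- 1:10
# y <- rep(c("M","F"), each = 5)
# sampleFromAGroup(x, y, nsize = c(M = 2, F = 2),
#                  samp_options = list(replacement = FALSE,
#                                      sample_all_available = TRUE,
#                                      sample_all_available_warning = TRUE))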
|
-- examples in "Type-Driven Development with Idris"
-- chapter 7
-- check that all functions are total
%default total
--
-- Binary Search Trees
--
data BST elem = Empty
| Node (BST elem) elem (BST elem)
-- from Haskell's Data.Tree
shift : String -> String -> List String -> List String
shift str1 str2 strings
= zipWith (++) (take (length strings) $ str1 :: repeat str2) strings
-- from Haskell's Data.Tree
partial
draw : Show elem => BST elem -> List String
draw Empty = ["*"]
draw (Node left x right) = (show x) :: helper [left, right]
where
partial
helper : List (BST elem) -> List String
helper [] = []
helper (t :: []) = "|" :: shift "`- " " " (draw t)
helper (t :: ts) = "|" :: shift "+- " "| " (draw t) ++ helper ts
-- from Haskell's Data.Tree
partial
drawTree : Show elem => BST elem -> String
drawTree = unlines . draw
partial
Show elem => Show (BST elem) where
show x = drawTree x
insert : Ord elem => elem -> BST elem -> BST elem
insert x Empty = Node Empty x Empty
insert x orig@(Node left val right)
= case compare x val of
LT => Node (insert x left) val right
EQ => orig
GT => Node left val (insert x right)
listToTree : Ord a => List a -> BST a
listToTree [] = Empty
listToTree (x :: xs) = insert x $ listToTree xs
treeToList : BST a -> List a
treeToList Empty = []
treeToList (Node left val right) = treeToList left ++ [val] ++ treeToList right
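-- Illustrative REPL check (in-order traversal of a BST yields a sorted list):
-- > treeToList (listToTree [1,4,6,3,7])
-- [1, 3, 4, 6, 7] : List Integer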
Functor BST where
map func Empty = Empty
map func (Node left val right) =
Node (map func left) (func val) (map func right)
-- we have two alternatives to implement Foldable for BST
-- one here: following https://hackage.haskell.org/package/base-4.11.1.0/docs/Data-Foldable.html
-- one in chapter 7 of the book
--
-- > :let tree = listToTree [1,4,6,7,3]
-- > foldr (::) [] tree
-- [1, 3, 4, 6, 7] : List Integer
-- > foldl (\x,y => x ++ [y]) [] tree
-- [1, 3, 4, 6, 7] : List Integer
--
-- > foldr @{chapter} (::) [] tree
-- [3, 7, 6, 4, 1] : List Integer
-- > foldl @{chapter} (\x,y => x ++ [y]) [] tree
-- [3, 7, 6, 4, 1] : List Integer
-- following https://hackage.haskell.org/package/base-4.11.1.0/docs/Data-Foldable.html
-- foldr func (func e (foldr func acc right)) left
--
Foldable BST where
foldr func acc Empty = acc
foldr func acc (Node left e right) =
let rightVal = foldr func acc right
eVal = func e rightVal
in
foldr func eVal left
-- following the chapter
-- func e (foldr func (foldr func acc left) right)
--
[chapter] Foldable BST where
foldr func acc Empty = acc
foldr func acc (Node left e right) =
let leftVal = foldr func acc left
rightVal = foldr func leftVal right
in
func e rightVal
-- call with:
-- > :exec printTree $ listToTree [1,4,6,3,7]
-- 7
-- |
-- +- 3
-- | |
-- | +- 1
-- | | |
-- | | +- *
-- | | |
-- | | `- *
-- | |
-- | `- 6
-- | |
-- | +- 4
-- | | |
-- | | +- *
-- | | |
-- | | `- *
-- | |
-- | `- *
-- |
-- `- *
partial
printTree : Show elem => (BST elem) -> IO ()
printTree t = do putStrLn (show t)
|
!------------------------------------------------------------------------!
! The Community Multiscale Air Quality (CMAQ) system software is in !
! continuous development by various groups and is based on information !
! from these groups: Federal Government employees, contractors working !
! within a United States Government contract, and non-Federal sources !
! including research institutions. These groups give the Government !
! permission to use, prepare derivative works of, and distribute copies !
! of their work in the CMAQ system to the public and to permit others !
! to do so. The United States Environmental Protection Agency !
! therefore grants similar permission to use the CMAQ system software, !
! but users are requested to provide copies of derivative works or !
! products designed to operate in the CMAQ system to the United States !
! Government without restrictions as to use by others. Software !
! that is used with the CMAQ system but distributed under the GNU !
! General Public License or the GNU Lesser General Public License is !
! subject to their copyright restrictions. !
!------------------------------------------------------------------------!
C RCS file, release, date & time of last delta, author, state, [and locker]
C $Header: /project/work/rep/STENEX/src/se_snl/se_comm_info_ext.f,v 1.1 2004/03/26 16:16:47 yoj Exp $
C what(1) key, module and SID; SCCS file; date and time of last delta:
C %W% %P% %G% %U%
C --------------------------------------------------------------------------
C Note: all these variables with prefix se_ are for stencil exchange library
C only
C
C to define communication info variables:
C
C se_ngb_pe -- an array to indicate a communication with a certain
C processor is required base upon near-neighbour
C communication pattern: -1 denotes no communication is
C needed, and a non -1 number denotes processor number with
C which communication is formed
C se_numdim -- dimensionality of a data structure which requires
C communication
C se_decompstr -- indicator of which dimenion(s) of data is/are decomposed,
C 0 (not decomposed), 1 (decomposed)
C --------------------------------------------------------------------------
module se_comm_info_ext
integer :: se_ngb_pe(8)
integer :: se_numdim
character (len=10) :: se_decompstr
integer :: se_twoway_npcol, se_twoway_nprow
end module se_comm_info_ext
|
module Control.Monad.Id
%default total
%access public export
data Id : (a : Type)-> Type where
IdF : (x : a) -> Id a
implementation Functor Id where
map f (IdF a) = IdF (f a)
implementation Applicative Id where
pure a = IdF a
(IdF f) <*> (IdF a) = IdF $ f a
implementation Monad Id where
(IdF x) >>= f = f x
implementation (Show a)=>Show (Id a) where
show (IdF a) = show a
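-- Illustrative REPL check of the Functor/Monad plumbing:
-- > show (map (+1) (IdF 41) >>= \n => IdF (n * 2))
-- "84"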
|
import cv2
import pykitti
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import math
class GPS:
def __init__(self, dataset):
self.dataset = dataset
self.initial = self.cartesian(0)
def cartesian(self, frame):
oxts_packet = self.dataset.oxts[frame].packet
elevation = oxts_packet.alt
longitude = oxts_packet.lon
latitude = oxts_packet.lat
R = 6378137.0 + elevation # distance from the centre of the earth
# Standard geocentric conversion: the horizontal radius is R*cos(latitude),
# split into x/y components by the longitude.
X = R * math.cos(latitude * math.pi / 180) * math.cos(longitude * math.pi / 180)
Y = R * math.cos(latitude * math.pi / 180) * math.sin(longitude * math.pi / 180)
if(frame != 0):
y = Y - self.initial[1]
x = X - self.initial[0]
else:
x = X
y = Y
return (x,y)
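# Illustrative usage (assumes the KITTI raw data loaded below is available):
#   gps = GPS(dataset)
#   x, y = gps.cartesian(10)  # metres relative to the frame-0 position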
basedir = './'
date = '2011_09_30'
drive = '0018'
dataset = pykitti.raw(basedir, date, drive, imformat='cv2')
# dataset.calib: Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of datetime objects
# dataset.oxts: List of OXTS packets and 6-dof poses as named tuples
# dataset.camN: Returns a generator that loads individual images from camera N
# dataset.get_camN(idx): Returns the image from camera N at idx
# dataset.gray: Returns a generator that loads monochrome stereo pairs (cam0, cam1)
# dataset.get_gray(idx): Returns the monochrome stereo pair at idx
# dataset.rgb: Returns a generator that loads RGB stereo pairs (cam2, cam3)
# dataset.get_rgb(idx): Returns the RGB stereo pair at idx
# dataset.velo: Returns a generator that loads velodyne scans as [x,y,z,reflectance]
# dataset.get_velo(idx): Returns the velodyne scan at idx
gps = GPS(dataset)
n_frames = len(dataset.cam0_files)
gps_data = []
imu_data = []
for frame in tqdm(range(1,n_frames-1)):
gps_data.append(gps.cartesian(frame))
plt.plot(*zip(*gps_data))
plt.show()
plt.close()
# first_gray = dataset.get_gray(0)
# print(dataset.oxts[0].packet)
# img_np = np.array(first_gray[0])
# img_cv2 = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
# debug = Debugger((2,1))
# debug.collect(img_cv2,("image","gray"))
# debug.collect(first_gray[1],("image","gray"))
# debug.display(plot=True)
# cv2.namedWindow("Input")
# cv2.imshow("Input",img_cv2)
# cv2.waitKey(0)
# cv2.destroyAllWindows() |