lemma scaleR_right_mono_neg: "b \<le> a \<Longrightarrow> c \<le> 0 \<Longrightarrow> a *\<^sub>R c \<le> b *\<^sub>R c" for c :: "'a::ordered_real_vector" |
Formal statement is: corollary compact_uniformly_continuous: fixes f :: "'a :: metric_space \<Rightarrow> 'b :: metric_space" assumes f: "continuous_on S f" and S: "compact S" shows "uniformly_continuous_on S f" Informal statement is: If $f$ is a continuous function from a compact set $S$ to a metric space, then $f$ is uniformly continuous on $S$. |
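A sketch of the standard finite-subcover argument behind this corollary (an informal outline, not the Isabelle proof script): Fix $\varepsilon > 0$. For each $x \in S$ choose $\delta_x > 0$ with $d(x,y) < \delta_x \Longrightarrow d(f(x), f(y)) < \varepsilon/2$. The balls $B(x, \delta_x/2)$ cover $S$, so compactness yields finitely many centers $x_1, \dots, x_n$ whose balls $B(x_i, \delta_{x_i}/2)$ still cover $S$. Let $\delta = \min_i \delta_{x_i}/2$. If $d(x,y) < \delta$, pick $i$ with $x \in B(x_i, \delta_{x_i}/2)$; then $d(y, x_i) \le d(y,x) + d(x,x_i) < \delta_{x_i}$, so $d(f(x), f(y)) \le d(f(x), f(x_i)) + d(f(x_i), f(y)) < \varepsilon$. |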
import Mathlib.Data.Nat.Basic
import Mathlib.Init.Algebra.Order
import Mathlib.Init.Data.Nat.Basic
import Mathlib.Init.Data.Nat.Lemmas
import Mathlib.Init.Data.Int.Basic
import Mathlib.Data.String.Defs
import Mathlib.Data.String.Lemmas
import Mathlib.Data.Equiv.Basic
import Timelib.Util
import Timelib.NanoPrecision.Duration.SignedDuration
import Timelib.NanoPrecision.Duration.UnsignedDuration
import Timelib.NanoPrecision.DateTime.NaiveDateTime
import Timelib.NanoPrecision.DateTime.DateTime
import Timelib.NanoPrecision.TimeZone.Basic
structure HDateTime where
offset : Offset
dateTime : DateTime offset
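/-- `HDateTime` pairs an offset with a `DateTime` at that offset, so it is a
sigma type in disguise; `EquivSigma` below witnesses this, with both maps
being the evident repackaging. -/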
def HDateTime.EquivSigma : Equiv HDateTime (Sigma DateTime) := {
toFun := fun dt => ⟨dt.offset, dt.dateTime⟩
invFun := fun sig => ⟨sig.fst, sig.snd⟩
left_inv := by simp [Function.LeftInverse]
right_inv := by simp [Function.RightInverse, Function.LeftInverse]
}
section HDateTimeStuff
variable (t : HDateTime)
/- Show that this is an equivalence relation -/
@[reducible]
def HDateTime.simultaneous : HDateTime → HDateTime → Prop
| ⟨_, ⟨naive_dt₁⟩⟩, ⟨_, ⟨naive_dt₂⟩⟩ => naive_dt₁ = naive_dt₂
def HDateTime.simultaneous.equivalence : Equivalence HDateTime.simultaneous := {
refl := fun d => rfl
symm := fun h => h.symm
trans := fun h h' => Eq.trans h h'
}
instance instHDateTimeSetoid : Setoid HDateTime :=
⟨HDateTime.simultaneous, HDateTime.simultaneous.equivalence⟩
/--
LT compares the underlying naive DateTime.
-/
instance : LT HDateTime where
lt := InvImage instLTNaiveDateTime.lt (fun t => t.dateTime.naive)
/--
LE compares the underlying naive DateTime
-/
instance : LE HDateTime where
le := InvImage instLENaiveDateTime.le (fun t => t.dateTime.naive)
@[simp] theorem HDateTime.le_def (d₁ d₂ : HDateTime) : (d₁ <= d₂) = (d₁.dateTime.naive <= d₂.dateTime.naive) := rfl
@[simp] theorem HDateTime.lt_def (d₁ d₂ : HDateTime) : (d₁ < d₂) = (d₁.dateTime.naive < d₂.dateTime.naive) := rfl
instance instDecidableLTHDateTime (a b : HDateTime) : Decidable (a < b) := inferInstanceAs (Decidable (a.dateTime.naive < b.dateTime.naive))
instance instDecidableLEHDateTime (a b : HDateTime) : Decidable (a <= b) := inferInstanceAs (Decidable (a.dateTime.naive <= b.dateTime.naive))
/--
HDateTime is only a Preorder since it does not satisfy antisymmetry:
t₁ <= t₂ ∧ t₂ <= t₁ does not imply t₁ = t₂, since they may have different offsets/timezones.
-/
instance : Preorder HDateTime where
le_refl (a) := le_refl a.dateTime.naive
le_trans (a b c) := Int.le_trans
lt_iff_le_not_le (a b) := Int.lt_iff_le_not_le
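/- Concretely: two `HDateTime`s with the same underlying naive timestamp but
   different offsets satisfy both `t₁ <= t₂` and `t₂ <= t₁` while `t₁ ≠ t₂`,
   so antisymmetry fails and `Preorder` is the most we can get. -/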
instance : HAdd HDateTime SignedDuration HDateTime where
hAdd da du := ⟨da.offset, da.dateTime + du⟩
instance : HAdd SignedDuration HDateTime HDateTime where
hAdd du da := da + du
theorem HDateTime.hAdd_def (d : HDateTime) (dur : SignedDuration) : d + dur = ⟨d.offset, d.dateTime + dur⟩ := rfl
instance : HSub HDateTime SignedDuration HDateTime where
hSub da du := ⟨da.offset, da.dateTime + -du⟩
theorem HDateTime.hSub_def (d : HDateTime) (dur : SignedDuration) : d - dur = ⟨d.offset, d.dateTime + -dur⟩ := rfl
instance : HAdd HDateTime UnsignedDuration HDateTime where
hAdd da du := ⟨da.offset, da.dateTime + du⟩
instance : HAdd UnsignedDuration HDateTime HDateTime where
hAdd du da := da + du
theorem HDateTime.hAdd_def_unsigned (d : HDateTime) (dur : UnsignedDuration) : d + dur = ⟨d.offset, d.dateTime + dur⟩ := rfl
instance : HSub HDateTime UnsignedDuration HDateTime where
hSub da du := ⟨da.offset, da.dateTime - du⟩
theorem HDateTime.hSub_def_unsigned (d : HDateTime) (dur : UnsignedDuration) : d - dur = ⟨d.offset, d.dateTime - dur⟩ := rfl
|
-- Some tests for the Agda Abstract Machine.
open import Agda.Builtin.Nat
open import Agda.Builtin.Equality
open import Agda.Builtin.Sigma
_×_ : Set → Set → Set
A × B = Σ A λ _ → B
id : Nat → Nat
id x = x
-- Applying id should not break sharing
double : Nat → Nat
double n = id n + id n
pow : Nat → Nat
pow zero = 1
pow (suc n) = double (pow n)
test-pow : pow 64 ≡ 18446744073709551616
test-pow = refl
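-- With sharing, `double (pow n)` evaluates `pow n` once and adds the result
-- to itself, so `pow 64` needs only 64 doublings; if `id` broke sharing, the
-- call tree would have 2^64 leaves and this `refl` check would never finish.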
-- Projections should not break sharing
addPair : Nat × Nat → Nat
addPair p = fst p + snd p
dup : Nat → Nat × Nat
dup x .fst = x
dup x .snd = x
smush : Nat × Nat → Nat × Nat
smush p = dup (addPair p)
iter : {A : Set} → (A → A) → Nat → A → A
iter f zero x = x
iter f (suc n) x = f (iter f n x)
pow' : Nat → Nat
pow' n = addPair (iter smush n (0 , 1))
test-pow' : pow' 64 ≡ pow 64
test-pow' = refl
-- Should be linear (not quadratic) for neutral n.
builtin : Nat → Nat → Nat
builtin k n = k + n - k
test-builtin : ∀ n → builtin 50000 n ≡ n
test-builtin n = refl
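-- Here `n` is neutral, so the machine must normalise `50000 + n - 50000`
-- back to `n`; the `refl` check above is only fast if that normalisation is
-- linear (rather than quadratic) in the size of the literal.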
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Data.Nat.Lower where
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Function
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Path
open import Cubical.Foundations.Transport
open import Cubical.Data.Bool
open import Cubical.Data.Empty as Empty
open import Cubical.Data.Nat.Base
open import Cubical.Data.Nat.Properties
open import Cubical.Data.Sigma
open import Cubical.Data.Sum
open import Cubical.Data.Unit
open import Cubical.Relation.Nullary
isMonotone : (ℕ → Bool) → Type
isMonotone f = ∀ n → f n ≥ f (suc n)
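-- Example: the sequence true, true, true, false, false, … is monotone and
-- encodes the natural number 3 (the number of leading `true`s); the
-- constantly-true sequence encodes the point at infinity `∞` defined below.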
isPropIsMonotone : ∀ f → isProp (isMonotone f)
isPropIsMonotone f = isPropΠ λ n → isProp≥ (f n) (f (suc n))
isPropDepIsMonotone : isPropDep isMonotone
isPropDepIsMonotone = isOfHLevel→isOfHLevelDep 1 isPropIsMonotone {_}
Monotone : Type
Monotone = Σ _ isMonotone
isSetMonotone : isSet Monotone
isSetMonotone = isSetΣ (isSet→ isSetBool) (isProp→isSet ∘ isPropIsMonotone)
private
variable
ℓ : Level
m n : Monotone
private
mz : ℕ → Bool
mz _ = false
ms : (ℕ → Bool) → (ℕ → Bool)
ms _ zero = true
ms f (suc m) = f m
msm : ∀{f} → isMonotone f → isMonotone (ms f)
msm _ zero = _
msm mf (suc m) = mf m
mp : (ℕ → Bool) → (ℕ → Bool)
mp f k = f (suc k)
ms-mp : ∀ f → f 0 ≡ true → ms (mp f) ≡ f
ms-mp f p i 0 = p (~ i)
ms-mp f p i (suc k) = f (suc k)
mz-lemma : ∀ f → isMonotone f → f 0 ≡ false → ∀ k → false ≡ f k
mz-lemma f _ p zero = sym p
mz-lemma f mf p (suc k)
with f 1
| inspect f 1
| subst (_≥ f 1) p (mf 0)
... | false | [ q ]ᵢ | _ = mz-lemma (mp f) (mf ∘ suc) q k
msuc : Monotone → Monotone
msuc m .fst = ms (fst m)
msuc m .snd = msm (snd m)
mpred : Monotone → Monotone
mpred f .fst k = f .fst (suc k)
mpred f .snd k = f .snd (suc k)
data MView : (ℕ → Bool) → Type where
mzv : MView mz
msv : ∀ n → MView (ms n)
mview : ∀ f → isMonotone f → MView f
mview f mf with f 0 | inspect f 0
... | true | [ p ]ᵢ = subst MView (ms-mp f p) (msv (mp f))
... | false | [ p ]ᵢ = subst MView (funExt (mz-lemma f mf p)) mzv
∞ : Monotone
∞ .fst _ = true
∞ .snd _ = _
Detached : (ℕ → Bool) → Type
Detached p = Σ[ n ∈ ℕ ] Bool→Type (p n)
Lower : Monotone → Type
Lower m = Detached (fst m)
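-- `Detached p` carves out the subset of ℕ on which `p` holds; for a
-- monotone `m` this is an initial segment of ℕ (or all of it), which is why
-- `Lower ∞ ≃ ℕ` below.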
Detached-ext
: ∀{p : ℕ → Bool} (k l : Detached p) → k .fst ≡ l .fst → k ≡ l
Detached-ext {p} (k , q) (l , r) s
= ΣPathP (s , isPropDep∘ p isPropDep-Bool→Type q r s)
Lower∞≃ℕ : Lower ∞ ≃ ℕ
Lower∞≃ℕ = isoToEquiv λ where
.fun → fst
.inv n → n , _
.rightInv _ → refl
.leftInv _ → refl
where open Iso
private
apart : ℕ → ℕ → Type
apart zero zero = ⊥
apart (suc m) (suc n) = apart m n
apart _ _ = Unit
≢→apart : (i j : ℕ) → ¬ i ≡ j → apart i j
≢→apart zero zero ¬p = ¬p refl
≢→apart (suc i) (suc j) ¬p = ≢→apart i j (¬p ∘ cong suc)
≢→apart zero (suc _) _ = _
≢→apart (suc _) zero _ = _
apart→≢ : (i j : ℕ) → apart i j → ¬ i ≡ j
apart→≢ (suc _) zero _ = snotz
apart→≢ zero (suc _) _ = znots
apart→≢ (suc i) (suc j) i#j = apart→≢ i j i#j ∘ cong predℕ
isPropApart : ∀ i j → isProp (apart i j)
isPropApart 0 0 = isProp⊥
isPropApart (suc i) (suc j) = isPropApart i j
isPropApart 0 (suc _) = isPropUnit
isPropApart (suc _) 0 = isPropUnit
_#_ : ∀{P : ℕ → Type ℓ} → Σ ℕ P → Σ ℕ P → Type
u # v = apart (fst u) (fst v)
_#?_ : ∀{P : ℕ → Type ℓ} → (u v : Σ ℕ P) → (u # v) ⊎ (fst u ≡ fst v)
u #? v = decide (fst u) (fst v) where
decide : (m n : ℕ) → apart m n ⊎ (m ≡ n)
decide zero zero = inr refl
decide zero (suc _) = inl _
decide (suc _) zero = inl _
decide (suc m) (suc n) = map (idfun _) (cong suc) (decide m n)
#→≢ : ∀{P : ℕ → Type ℓ} (u v : Σ ℕ P) → u # v → ¬ u ≡ v
#→≢ u v d = apart→≢ (fst u) (fst v) d ∘ cong fst
isProp# : ∀{P : ℕ → Type ℓ} (u v : Σ ℕ P) → isProp (u # v)
isProp# u v = isPropApart (fst u) (fst v)
isProp#Depᵣ : ∀{P : ℕ → Type ℓ} (v : Σ ℕ P) → isPropDep (λ u → u # v)
isProp#Depᵣ v = isOfHLevel→isOfHLevelDep 1 (λ u → isProp# u v) {_} {_}
≢→# : ∀{p} (u v : Detached p) → ¬ u ≡ v → u # v
≢→# u v ¬p = ≢→apart (fst u) (fst v) (¬p ∘ Detached-ext u v)
dzero : ∀{f} → Detached (ms f)
dzero = zero , _
dsuc : ∀{f} → Detached f → Detached (ms f)
dsuc (l , p) = suc l , p
module Untangle
{α β}
(f : Detached (ms α) → Detached (ms β))
(g : Detached (ms β) → Detached (ms α))
(rinv : section f g)
(linv : retract f g)
where
default : ∀{γ} → (v d : Detached (ms γ)) → v # d → Detached γ
default (suc l , p) _ _ = l , p
default (0 , _) (suc l , p) _ = l , p
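-- `Untangle` turns an isomorphism between `Detached (ms α)` and
-- `Detached (ms β)` into one between `Detached α` and `Detached β` by
-- stripping the distinguished zero on both sides; `default` returns the
-- predecessor of `v`, falling back to the predecessor of `d` when `v` is zero.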
#-f : ∀ u v → u # v → f u # f v
#-f u v u#v with f u #? f v
... | inl fu#fv = fu#fv
... | inr fu≡fv = Empty.rec (#→≢ u v u#v u≡v)
where
u≡v : u ≡ v
u≡v = sym (linv u)
∙∙ cong g (Detached-ext (f u) (f v) fu≡fv)
∙∙ linv v
#-g : ∀ u v → u # v → g u # g v
#-g u v u#v with g u #? g v
... | inl gu#gv = gu#gv
... | inr gu≡gv = Empty.rec (#→≢ u v u#v u≡v)
where
u≡v : u ≡ v
u≡v = sym (rinv u)
∙∙ cong f (Detached-ext (g u) (g v) gu≡gv)
∙∙ rinv v
f- : Detached α → Detached β
f- v = default (f (dsuc v)) (f dzero) (#-f (dsuc v) dzero _)
g- : Detached β → Detached α
g- v = default (g (dsuc v)) (g dzero) (#-g (dsuc v) dzero _)
g-f-z : ∀ v u → g dzero ≡ dsuc v → g (dsuc u) ≡ dzero → g- u ≡ v
g-f-z v u r s with g (dsuc u) | g dzero | #-g (dsuc u) dzero _
... | zero , _ | suc k , q | #gf
= Detached-ext (k , q) v (cong (predℕ ∘ fst) r)
... | w@(suc k , _) | _ | #gf = Empty.rec (snotz (cong fst s))
g-f-s : ∀ v u → g (dsuc u) ≡ dsuc v → g- u ≡ v
g-f-s v u r with g (dsuc u) | #-g (dsuc u) dzero _
... | suc k , q | #gf = Detached-ext (k , q) v (cong (predℕ ∘ fst) r)
... | zero , _ | #gf = Empty.rec (znots (cong fst r))
g-f- : ∀(v : Detached α) → g- (f- v) ≡ v
g-f- v with f (dsuc v) | linv (dsuc v) | #-f (dsuc v) dzero _
g-f- v | suc j , p | r | #f = g-f-s v (j , p) r
... | zero , _ | r | #f with f dzero | linv dzero
... | suc j , p | s = g-f-z v (j , p) r s
f-g-z : ∀ v u → f dzero ≡ dsuc v → f (dsuc u) ≡ dzero → f- u ≡ v
f-g-z v u r s with f (dsuc u) | f dzero | #-f (dsuc u) dzero _
... | zero , _ | suc k , q | #fg
= Detached-ext (k , q) v (cong (predℕ ∘ fst) r)
... | (suc _ , _) | _ | _ = Empty.rec (snotz (cong fst s))
f-g-s : ∀ v u → f (dsuc u) ≡ dsuc v → f- u ≡ v
f-g-s v u r with f (dsuc u) | #-f (dsuc u) dzero _
... | suc k , q | _ = Detached-ext (k , q) v (cong (predℕ ∘ fst) r)
... | zero , _ | _ = Empty.rec (znots (cong fst r))
f-g- : ∀ v → f- (g- v) ≡ v
f-g- v with g (dsuc v) | rinv (dsuc v) | #-g (dsuc v) dzero _
... | suc j , q | r | _ = f-g-s v (j , q) r
... | zero , _ | r | _ with g dzero | rinv dzero
... | suc k , q | s = f-g-z v (k , q) r s
open Iso
iso- : Iso (Detached α) (Detached β)
iso- .fun = f-
iso- .inv = g-
iso- .rightInv = f-g-
iso- .leftInv = g-f-
iso-pred
: ∀{α β}
→ Iso (Detached (ms α)) (Detached (ms β))
→ Iso (Detached α) (Detached β)
iso-pred i = Untangle.iso- fun inv rightInv leftInv
where open Iso i
isInjectiveLower : Lower m ≡ Lower n → m ≡ n
isInjectiveLower {m} {n} P =
curry ΣPathP
(lemma (m .fst) (n .fst) (m .snd) (n .snd) (pathToIso P))
(isPropDepIsMonotone (m .snd) (n .snd) _)
where
lemma
: ∀ α β → isMonotone α → isMonotone β
→ Iso (Detached α) (Detached β)
→ α ≡ β
lemma α β mα mβ I i k with mview α mα | mview β mβ
... | mzv | mzv = mz k
lemma α β mα mβ I i 0 | msv _ | msv _
= true
lemma α β mα mβ I i (suc k) | msv α' | msv β'
= lemma α' β' (mα ∘ suc) (mβ ∘ suc) (iso-pred I) i k
lemma α β mα mβ I i k | mzv | msv β'
= Empty.rec {A = α k ≡ β k} (Iso.inv I dzero .snd) i
lemma α β mα mβ I i k | msv _ | mzv
= Empty.rec {A = α k ≡ β k} (Iso.fun I dzero .snd) i
|
module Twist.Move
import Control.Algebra
import Twist.Cycle
import Twist.Face
import Twist.Rotation
%default total
%access export
data Move : (f : Type) -> Type where
Rest : Move f
Turn : Rotation f c face -> Move f -> Move f
Show (Move f) where
show Rest = ""
show (Turn r Rest) = show r
show (Turn r (Turn s m)) = show m ++ " " ++ show s ++ show r
|
== Taxonomy, naming, and phylogeny ==
|
State Before: R : Type u_2
R₂ : Type ?u.109627
K : Type ?u.109630
M : Type u_1
M₂ : Type ?u.109636
V : Type ?u.109639
S : Type ?u.109642
inst✝⁵ : Semiring R
inst✝⁴ : AddCommMonoid M
inst✝³ : Module R M
x : M
p p' : Submodule R M
inst✝² : Semiring R₂
σ₁₂ : R →+* R₂
inst✝¹ : AddCommMonoid M₂
inst✝ : Module R₂ M₂
s t : Set M
⊢ (p ⊔ p').toAddSubmonoid = p.toAddSubmonoid ⊔ p'.toAddSubmonoid State After: case h
R : Type u_2
R₂ : Type ?u.109627
K : Type ?u.109630
M : Type u_1
M₂ : Type ?u.109636
V : Type ?u.109639
S : Type ?u.109642
inst✝⁵ : Semiring R
inst✝⁴ : AddCommMonoid M
inst✝³ : Module R M
x✝ : M
p p' : Submodule R M
inst✝² : Semiring R₂
σ₁₂ : R →+* R₂
inst✝¹ : AddCommMonoid M₂
inst✝ : Module R₂ M₂
s t : Set M
x : M
⊢ x ∈ (p ⊔ p').toAddSubmonoid ↔ x ∈ p.toAddSubmonoid ⊔ p'.toAddSubmonoid Tactic: ext x State Before: case h
R : Type u_2
R₂ : Type ?u.109627
K : Type ?u.109630
M : Type u_1
M₂ : Type ?u.109636
V : Type ?u.109639
S : Type ?u.109642
inst✝⁵ : Semiring R
inst✝⁴ : AddCommMonoid M
inst✝³ : Module R M
x✝ : M
p p' : Submodule R M
inst✝² : Semiring R₂
σ₁₂ : R →+* R₂
inst✝¹ : AddCommMonoid M₂
inst✝ : Module R₂ M₂
s t : Set M
x : M
⊢ x ∈ (p ⊔ p').toAddSubmonoid ↔ x ∈ p.toAddSubmonoid ⊔ p'.toAddSubmonoid State After: case h
R : Type u_2
R₂ : Type ?u.109627
K : Type ?u.109630
M : Type u_1
M₂ : Type ?u.109636
V : Type ?u.109639
S : Type ?u.109642
inst✝⁵ : Semiring R
inst✝⁴ : AddCommMonoid M
inst✝³ : Module R M
x✝ : M
p p' : Submodule R M
inst✝² : Semiring R₂
σ₁₂ : R →+* R₂
inst✝¹ : AddCommMonoid M₂
inst✝ : Module R₂ M₂
s t : Set M
x : M
⊢ (∃ y, y ∈ p ∧ ∃ z, z ∈ p' ∧ y + z = x) ↔ ∃ y, y ∈ p.toAddSubmonoid ∧ ∃ z, z ∈ p'.toAddSubmonoid ∧ y + z = x Tactic: rw [mem_toAddSubmonoid, mem_sup, AddSubmonoid.mem_sup] State Before: case h
R : Type u_2
R₂ : Type ?u.109627
K : Type ?u.109630
M : Type u_1
M₂ : Type ?u.109636
V : Type ?u.109639
S : Type ?u.109642
inst✝⁵ : Semiring R
inst✝⁴ : AddCommMonoid M
inst✝³ : Module R M
x✝ : M
p p' : Submodule R M
inst✝² : Semiring R₂
σ₁₂ : R →+* R₂
inst✝¹ : AddCommMonoid M₂
inst✝ : Module R₂ M₂
s t : Set M
x : M
⊢ (∃ y, y ∈ p ∧ ∃ z, z ∈ p' ∧ y + z = x) ↔ ∃ y, y ∈ p.toAddSubmonoid ∧ ∃ z, z ∈ p'.toAddSubmonoid ∧ y + z = x State After: no goals Tactic: rfl |
#If X ~ Exp(λ) (rate λ) then kX ~ Exp(λ/k).
#Equivalently, in the scale parameterization used below: if X ~ Exp(α) (scale/mean α) then kX ~ Exp(kα).
#Proof:
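# Change of variables: if Y = kX and X has density f_X, then
# f_Y(x) = f_X(x/k)/k; for the scale-alpha exponential below this gives
# exp(-x/(k*alpha))/(k*alpha), i.e. the Exp(scale k*alpha) density.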
PDF_exponential := (alpha, x) -> exp(-x/alpha)/alpha;
PDF_kX := (alpha, x, k) -> PDF_exponential(alpha, x/k)/k;
simplify(PDF_kX(alpha, x, k));
PDF_exp_scaled := (alpha, x, k) -> PDF_exponential(k*alpha, x);
simplify(PDF_exp_scaled(alpha, x, k));
# evalb does not simplify, so test equality via the simplified difference:
evalb(simplify(PDF_kX(alpha, x, k) - PDF_exp_scaled(alpha, x, k)) = 0);
|
Set Implicit Arguments.
Set Strict Implicit.
Require Export Setoid.
Require Omega.
(** * Sets.v: Definition of sets as predicates over a type A *)
Section sets.
Variable A : Type.
Variable decA : forall x y :A, {x=y}+{x<>y}.
Definition set := A -> Prop.
Definition full : set := fun (x:A) => True.
Definition empty : set := fun (x:A) => False.
Definition add (a:A) (P:set) : set := fun (x:A) => x=a \/ (P x).
Definition singl (a:A) :set := fun (x:A) => x=a.
Definition union (P Q:set) :set := fun (x:A) => (P x) \/ (Q x).
Definition compl (P:set) :set := fun (x:A) => ~P x.
Definition inter (P Q:set) :set := fun (x:A) => (P x) /\ (Q x).
Definition rem (a:A) (P:set) :set := fun (x:A) => x<>a /\ (P x).
(** ** Equivalence *)
Definition eqset (P Q:set) := forall (x:A), P x <-> Q x.
Implicit Arguments full [].
Implicit Arguments empty [].
Lemma eqset_refl : forall P:set, eqset P P.
unfold eqset; intuition.
Save.
Lemma eqset_sym : forall P Q:set, eqset P Q -> eqset Q P.
unfold eqset; firstorder.
Save.
Lemma eqset_trans : forall P Q R:set,
eqset P Q -> eqset Q R -> eqset P R.
unfold eqset; firstorder.
Save.
Hint Resolve eqset_refl.
Hint Immediate eqset_sym.
(** ** Setoid structure *)
Lemma set_setoid : Setoid_Theory set eqset.
split; red; auto.
exact eqset_trans.
Qed.
Add Setoid set eqset set_setoid as Set_setoid.
Add Morphism add : eqset_add.
unfold eqset,add; firstorder.
Save.
Add Morphism rem : eqset_rem.
unfold eqset,rem; firstorder.
Save.
Hint Resolve eqset_add eqset_rem.
Add Morphism union : eqset_union.
unfold eqset,union; firstorder.
Save.
Hint Immediate eqset_union.
Lemma eqset_union_left :
forall P1 Q P2,
eqset P1 P2 -> eqset (union P1 Q) (union P2 Q).
auto.
Save.
Lemma eqset_union_right :
forall P Q1 Q2 ,
eqset Q1 Q2 -> eqset (union P Q1) (union P Q2).
auto.
Save.
Hint Resolve eqset_union_left eqset_union_right.
Add Morphism inter : eqset_inter.
unfold eqset,inter; firstorder.
Save.
Hint Immediate eqset_inter.
Add Morphism compl : eqset_compl.
unfold eqset,compl; firstorder.
Save.
Hint Resolve eqset_compl.
Lemma eqset_add_empty : forall (a:A) (P:set), ~eqset (add a P) empty.
red; unfold eqset,empty,add; intros a P eqH; assert (H:=eqH a); intuition.
Save.
(** ** Finite sets given as an enumeration of elements *)
Inductive finite (P: set) : Type :=
fin_eq_empty : eqset P empty -> finite P
| fin_eq_add : forall (x:A)(Q:set),
~ Q x-> finite Q -> eqset P (add x Q) -> finite P.
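(* Example: [singl a] is finite via [fin_eq_add]: take [x := a] and
   [Q := empty], so [~ empty a] and [finite empty] hold and [singl a] is
   [eqset] to [add a empty]. *)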
Hint Constructors finite.
Lemma fin_empty : (finite empty).
auto.
Defined.
Lemma fin_add : forall (x:A)(P:set),
~ P x -> finite P -> finite (add x P).
eauto.
Defined.
Lemma fin_eqset: forall (P Q : set), (eqset P Q)->(finite P)->(finite Q).
induction 2.
apply fin_eq_empty.
apply eqset_trans with P; auto.
apply fin_eq_add with x Q0; auto.
apply eqset_trans with P; auto.
Defined.
Hint Resolve fin_empty fin_add.
(** *** Emptiness is decidable for finite sets *)
Definition isempty (P:set) := eqset P empty.
Definition notempty (P:set) := not (eqset P empty).
Lemma isempty_dec : forall P, finite P -> {isempty P}+{notempty P}.
unfold isempty,notempty; destruct 1; auto.
right; red; intros.
apply (@eqset_add_empty x Q); auto.
apply eqset_trans with P; auto.
Save.
(** *** Size of a finite set *)
Fixpoint size (P:set) (f:finite P) {struct f}: nat :=
match f with fin_eq_empty _ => 0%nat
| fin_eq_add _ Q _ f' _ => S (size f')
end.
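(* [size] counts the elements listed by the enumeration [f]; [size_eqset]
   below (and [size_unique] later) show the count does not depend on the
   particular enumeration. *)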
Lemma size_eqset : forall P Q (f:finite P) (e:eqset P Q),
(size (fin_eqset e f)) = (size f).
induction f; simpl; intros; auto.
Save.
(** ** Inclusion *)
Definition incl (P Q:set) := forall x, P x -> Q x.
Lemma incl_refl : forall (P:set), incl P P.
unfold incl; intuition.
Save.
Lemma incl_trans : forall (P Q R:set),
incl P Q -> incl Q R -> incl P R.
unfold incl; intuition.
Save.
Lemma eqset_incl : forall (P Q : set), eqset P Q -> incl P Q.
unfold eqset, incl; firstorder.
Save.
Lemma eqset_incl_sym : forall (P Q : set), eqset P Q -> incl Q P.
unfold eqset, incl; firstorder.
Save.
Lemma eqset_incl_intro :
forall (P Q : set), incl P Q -> incl Q P -> eqset P Q.
unfold eqset, incl; firstorder.
Save.
Hint Resolve incl_refl incl_trans eqset_incl_intro.
Hint Immediate eqset_incl eqset_incl_sym.
(** ** Properties of operations on sets *)
Lemma incl_empty : forall P, incl empty P.
unfold incl,empty; intuition.
Save.
Lemma incl_empty_false : forall P a, incl P empty -> ~ P a.
unfold incl; firstorder.
Save.
Lemma incl_add_empty : forall (a:A) (P:set), ~ incl (add a P) empty.
red; unfold incl,empty,add; intros a P eqH; assert (H:=eqH a); intuition.
Save.
Lemma eqset_empty_false : forall P a, eqset P empty -> P a -> False.
unfold eqset; firstorder.
Save.
Hint Immediate incl_empty_false eqset_empty_false incl_add_empty.
Lemma incl_rem_stable : forall a P Q, incl P Q -> incl (rem a P) (rem a Q).
unfold incl,rem;intuition.
Save.
Lemma incl_add_stable : forall a P Q, incl P Q -> incl (add a P) (add a Q).
unfold incl,add;intuition.
Save.
Lemma incl_rem_add_iff :
forall a P Q, incl (rem a P) Q <-> incl P (add a Q).
unfold rem, add, incl; intuition.
case (decA x a); auto.
case (H x); intuition.
Save.
Lemma incl_rem_add:
forall (a:A) (P Q:set),
(P a) -> incl Q (rem a P) -> incl (add a Q) P.
unfold rem, add, incl; intros; auto.
case H1; intro; subst; auto.
case (H0 x); auto.
Save.
Lemma incl_add_rem :
forall (a:A) (P Q:set),
~ Q a -> incl (add a Q) P -> incl Q (rem a P) .
unfold rem, add, incl; intros; auto.
case (decA x a); intros; auto.
subst; case H; auto.
Save.
Hint Immediate incl_rem_add incl_add_rem.
Lemma eqset_rem_add :
forall (a:A) (P Q:set),
(P a) -> eqset Q (rem a P) -> eqset (add a Q) P.
intros; assert (incl Q (rem a P)); auto.
assert (incl (rem a P) Q); auto.
case (incl_rem_add_iff a P Q); auto.
Save.
Lemma eqset_add_rem :
forall (a:A) (P Q:set),
~ Q a -> eqset (add a Q) P -> eqset Q (rem a P).
intros; assert (incl (add a Q) P); auto.
assert (incl P (add a Q)); auto.
case (incl_rem_add_iff a P Q); auto.
Save.
Hint Immediate eqset_rem_add eqset_add_rem.
Lemma add_rem_eq_eqset :
forall x (P:set), eqset (add x (rem x P)) (add x P).
unfold eqset, add, rem; intuition.
case (decA x0 x); intuition.
Save.
Lemma add_rem_diff_eqset :
forall x y (P:set),
x<>y -> eqset (add x (rem y P)) (rem y (add x P)).
unfold eqset, add, rem; intuition.
subst; auto.
Save.
Lemma add_eqset_in :
forall x (P:set), P x -> eqset (add x P) P.
unfold eqset, add; intuition.
subst;auto.
Save.
Hint Resolve add_rem_eq_eqset add_rem_diff_eqset add_eqset_in.
Lemma add_rem_eqset_in :
forall x (P:set), P x -> eqset (add x (rem x P)) P.
intros; apply eqset_trans with (add x P); auto.
Save.
Hint Resolve add_rem_eqset_in.
Lemma rem_add_eq_eqset :
forall x (P:set), eqset (rem x (add x P)) (rem x P).
unfold eqset, add, rem; intuition.
Save.
Lemma rem_add_diff_eqset :
forall x y (P:set),
x<>y -> eqset (rem x (add y P)) (add y (rem x P)).
intros; apply eqset_sym; auto.
Save.
Lemma rem_eqset_notin :
forall x (P:set), ~P x -> eqset (rem x P) P.
unfold eqset, rem; intuition.
subst;auto.
Save.
Hint Resolve rem_add_eq_eqset rem_add_diff_eqset rem_eqset_notin.
Lemma rem_add_eqset_notin :
forall x (P:set), ~P x -> eqset (rem x (add x P)) P.
intros; apply eqset_trans with (rem x P); auto.
Save.
Hint Resolve rem_add_eqset_notin.
Lemma rem_not_in : forall x (P:set), ~ rem x P x.
unfold rem; intuition.
Save.
Lemma add_in : forall x (P:set), add x P x.
unfold add; intuition.
Save.
Lemma add_in_eq : forall x y P, x=y -> add x P y.
unfold add; intuition.
Save.
Lemma add_intro : forall x (P:set) y, P y -> add x P y.
unfold add; intuition.
Save.
Lemma add_incl : forall x (P:set), incl P (add x P).
unfold incl,add; intuition.
Save.
Lemma add_incl_intro : forall x (P Q:set), (Q x) -> (incl P Q) -> (incl (add x P) Q).
unfold incl,add; intuition; subst; intuition.
Save.
Lemma rem_incl : forall x (P:set), incl (rem x P) P.
unfold incl, rem; intuition.
Save.
Hint Resolve rem_not_in add_in rem_incl add_incl.
Lemma union_sym : forall P Q : set,
eqset (union P Q) (union Q P).
unfold eqset, union; intuition.
Save.
Lemma union_empty_left : forall P : set,
eqset P (union P empty).
unfold eqset, union, empty; intuition.
Save.
Lemma union_empty_right : forall P : set,
eqset P (union empty P).
unfold eqset, union, empty; intuition.
Save.
Lemma union_add_left : forall (a:A) (P Q: set),
eqset (add a (union P Q)) (union P (add a Q)).
unfold eqset, union, add; intuition.
Save.
Lemma union_add_right : forall (a:A) (P Q: set),
eqset (add a (union P Q)) (union (add a P) Q).
unfold eqset, union, add; intuition.
Save.
Hint Resolve union_sym union_empty_left union_empty_right
union_add_left union_add_right.
Lemma union_incl_left : forall P Q, incl P (union P Q).
unfold incl,union; intuition.
Save.
Lemma union_incl_right : forall P Q, incl Q (union P Q).
unfold incl,union; intuition.
Save.
Lemma union_incl_intro : forall P Q R, incl P R -> incl Q R -> incl (union P Q) R.
unfold incl,union; intuition.
Save.
Hint Resolve union_incl_left union_incl_right union_incl_intro.
Lemma incl_union_stable : forall P1 P2 Q1 Q2,
incl P1 P2 -> incl Q1 Q2 -> incl (union P1 Q1) (union P2 Q2).
intros; apply union_incl_intro; unfold incl,union; intuition.
Save.
Hint Immediate incl_union_stable.
Lemma inter_sym : forall P Q : set,
eqset (inter P Q) (inter Q P).
unfold eqset, inter; intuition.
Save.
Lemma inter_empty_left : forall P : set,
eqset empty (inter P empty).
unfold eqset, inter, empty; intuition.
Save.
Lemma inter_empty_right : forall P : set,
eqset empty (inter empty P).
unfold eqset, inter, empty; intuition.
Save.
Lemma inter_add_left_in : forall (a:A) (P Q: set),
(P a) -> eqset (add a (inter P Q)) (inter P (add a Q)).
unfold eqset, inter, add; split; intuition.
subst; auto.
Save.
Lemma inter_add_left_out : forall (a:A) (P Q: set),
~ P a -> eqset (inter P Q) (inter P (add a Q)).
unfold eqset, inter, add; split; intuition.
subst; case H; auto.
Save.
Lemma inter_add_right_in : forall (a:A) (P Q: set),
Q a -> eqset (add a (inter P Q)) (inter (add a P) Q).
unfold eqset, inter, add; split; intuition.
subst; auto.
Save.
Lemma inter_add_right_out : forall (a:A) (P Q: set),
~ Q a -> eqset (inter P Q) (inter (add a P) Q).
unfold eqset, inter, add; split; intuition.
subst; case H; auto.
Save.
Hint Resolve inter_sym inter_empty_left inter_empty_right
inter_add_left_in inter_add_left_out inter_add_right_in inter_add_right_out.
(** ** Generalized union *)
Definition gunion (I:Type)(F:I->set) : set := fun z => exists i, F i z.
Lemma gunion_intro : forall I (F:I->set) i, incl (F i) (gunion F).
red; intros; exists i; auto.
Save.
Lemma gunion_elim : forall I (F:I->set) (P:set), (forall i, incl (F i) P) -> incl (gunion F) P.
red; intros I F P H x (i,Hi).
apply (H i x); auto.
Save.
Lemma gunion_monotonic : forall I (F G : I -> set),
(forall i, incl (F i) (G i))-> incl (gunion F) (gunion G).
intros I F G H x (i,Hi).
exists i; apply (H i x); trivial.
Save.
(** ** Decidable sets *)
Definition dec (P:set) := forall x, {P x}+{ ~ P x}.
Definition dec2bool (P:set) : dec P -> A -> bool :=
fun p x => if p x then true else false.
Lemma compl_dec : forall P, dec P -> dec (compl P).
intros P dP x; destruct (dP x); auto.
Defined.
Lemma inter_dec : forall P Q, dec P -> dec Q -> dec (inter P Q).
intros P Q dP dQ x; unfold inter; destruct (dP x).
destruct (dQ x); intuition.
right; intuition.
Defined.
Lemma union_dec : forall P Q, dec P -> dec Q -> dec (union P Q).
intros P Q dP dQ x; unfold union; destruct (dP x); auto.
destruct (dQ x); intuition.
Defined.
Hint Resolve compl_dec inter_dec union_dec.
(** ** Removing an element from a finite set *)
Lemma finite_rem : forall (P:set) (a:A),
finite P -> finite (rem a P).
induction 1; intuition.
apply fin_eq_empty.
unfold rem,empty,eqset; intuition.
apply (eqset_empty_false x e); auto.
case (decA x a); intros.
apply fin_eqset with Q; subst; auto.
apply eqset_add_rem; auto.
apply fin_eq_add with x (rem a Q); auto.
subst; unfold rem; intuition.
apply eqset_trans with (rem a (add x Q)); auto.
Defined.
Lemma size_finite_rem:
forall (P:set) (a:A) (f:finite P),
(P a) -> size f = S (size (finite_rem a f)).
induction f; intros.
case (eqset_empty_false a e H).
simpl; case (decA x a); simpl; intros.
case e0; unfold eq_rect_r;simpl; auto.
rewrite size_eqset; auto.
rewrite IHf; auto.
case (e a); unfold add; intuition.
case n0; auto.
Save.
(* bug related to intuition
Lemma size_finite_rem:
forall (P:set) (a:A) (f:finite P),
(P a) -> size f = S (size (finite_rem a f)).
induction f; intuition.
case (eqset_empty_false a e H).
simpl; case (decA x a); simpl; intros.
case e0; unfold eq_rect_r;simpl; auto.
rewrite size_eqset; auto.
rewrite IHf; auto.
case (e a); unfold add; intuition.
case f0; auto.
Save.
*)
Require Import Arith.
Lemma size_incl :
forall (P:set)(f:finite P) (Q:set)(g:finite Q),
(incl P Q)-> size f <= size g.
induction f; simpl; intros; auto with arith.
apply le_trans with (S (size (finite_rem x g))).
apply le_n_S.
apply IHf with (g:= finite_rem x g); auto.
apply incl_trans with (rem x P); auto.
apply incl_add_rem; auto.
apply incl_rem_stable; auto.
rewrite <- size_finite_rem; auto.
case (e x); intuition.
Save.
Lemma size_unique :
forall (P:set)(f:finite P) (Q:set)(g:finite Q),
(eqset P Q)-> size f = size g.
intros; apply le_antisym; apply size_incl; auto.
Save.
Lemma finite_incl : forall P:set,
finite P -> forall Q:set, dec Q -> incl Q P -> finite Q.
intros P FP; elim FP; intros; auto.
apply fin_eq_empty.
unfold empty,eqset in *|-*; intuition.
case (e x); auto.
case (X0 x); intros.
apply fin_eq_add with (x:=x) (Q:=(rem x Q0)); auto.
apply X.
unfold dec,rem.
intro y; case (decA x y); intro.
case (X0 y); subst; intuition.
case (X0 y); intuition.
case (incl_rem_add_iff x Q0 Q); intuition.
apply H1; apply incl_trans with P0; auto.
apply eqset_sym; auto.
apply X; auto.
red; intros.
case (e x0); intuition.
case H1; intuition; subst; auto.
case n0; auto.
Save.
Lemma finite_dec : forall P:set, finite P -> dec P.
red; intros P FP; elim FP; intros.
right; intro; apply (eqset_empty_false x e); auto.
case (e x0); unfold add; intuition.
case (X x0); intuition.
case (decA x0 x); intuition.
Save.
Lemma fin_add_in : forall (a:A) (P:set), finite P -> finite (add a P).
intros a P FP; case (finite_dec FP a); intro.
apply fin_eqset with P; auto.
apply eqset_sym; auto.
apply fin_add; auto.
Defined.
Lemma finite_union :
forall P Q, finite P -> finite Q -> finite (union P Q).
intros P Q FP FQ; elim FP; intros.
apply fin_eqset with Q; auto.
apply eqset_trans with (union empty Q); auto.
apply fin_eqset with (add x (union Q0 Q)); auto.
apply eqset_trans with (union (add x Q0) Q); auto.
apply fin_add_in; auto.
Defined.
Lemma finite_full_dec : forall P:set, finite full -> dec P -> finite P.
intros; apply finite_incl with full; auto.
unfold full,incl; auto.
Save.
Require Import Lt.
(** *** Filter operation *)
Lemma finite_inter : forall P Q, dec P -> finite Q -> finite (inter P Q).
intros P Q decP FQ.
induction FQ.
constructor 1.
apply eqset_trans with (inter P empty); auto.
case (decP x); intro.
constructor 2 with x (inter P Q); auto.
unfold inter; intuition.
rewrite e.
unfold add,inter; red; intuition.
subst; auto.
apply fin_eqset with (inter P Q); auto.
rewrite e.
unfold add,inter; red; intuition.
subst; intuition.
Defined.
Lemma size_inter_empty : forall P Q (decP:dec P) (e:eqset Q empty),
size (finite_inter decP (fin_eq_empty e))=O.
trivial.
Save.
Lemma size_inter_add_in :
forall P Q R (decP:dec P)(x:A)(nq:~Q x)(FQ:finite Q)(e:eqset R (add x Q)),
P x ->size (finite_inter decP (fin_eq_add nq FQ e))=S (size (finite_inter decP FQ)).
intros; simpl.
case (decP x); intro; trivial; contradiction.
Save.
Lemma size_inter_add_notin :
forall P Q R (decP:dec P)(x:A)(nq:~Q x)(FQ:finite Q)(e:eqset R (add x Q)),
~ P x -> size (finite_inter decP (fin_eq_add nq FQ e))=size (finite_inter decP FQ).
intros; simpl.
case (decP x); intro; try contradiction.
rewrite size_eqset; trivial.
Save.
Lemma size_inter_incl : forall P Q (decP:dec P)(FP:finite P)(FQ:finite Q),
(incl P Q) -> size (finite_inter decP FQ)=size FP.
intros; apply size_unique.
unfold inter; intro.
generalize (H x); intuition.
Save.
(** *** Selecting elements in a finite set *)
Fixpoint nth_finite (P:set) (k:nat) (PF : finite P) {struct PF}: (k < size PF) -> A :=
match PF as F return (k < size F) -> A with
fin_eq_empty H => (fun (e : k<0) => match lt_n_O k e with end)
| fin_eq_add x Q nqx fq eqq =>
match k as k0 return k0<S (size fq)->A with
O => fun e => x
| (S k1) => fun (e:S k1<S (size fq)) => nth_finite fq (lt_S_n k1 (size fq) e)
end
end.
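(* [nth_finite] selects an element by walking the enumeration: index [O]
   returns the most recently added element [x], and [S k1] recurses into the
   enumeration [fq] of the remaining set. *)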
(** A set with size > 1 contains at least 2 different elements **)
Lemma select_non_empty : forall (P:set), finite P -> notempty P -> sigT P.
destruct 1; intros.
case H; auto.
exists x; case (e x); intuition.
Defined.
Lemma select_diff : forall (P:set) (FP:finite P),
(1 < size FP)%nat -> sigT (fun x => sigT (fun y => P x /\ P y /\ x<>y)).
destruct FP; simpl; intros.
absurd (1<0); omega.
exists x; destruct FP; simpl in H.
absurd (1<1); omega.
exists x0; intuition.
case (e x); auto.
case (e0 x0); case (e x0); unfold add; intuition.
subst; case (e0 x0); intuition.
Save.
End sets.
Hint Resolve eqset_refl.
Hint Resolve eqset_add eqset_rem.
Hint Immediate eqset_sym finite_dec finite_full_dec eqset_incl eqset_incl_sym eqset_incl_intro.
Hint Resolve incl_refl.
Hint Immediate incl_union_stable.
Hint Resolve union_incl_left union_incl_right union_incl_intro incl_empty rem_incl
incl_rem_stable incl_add_stable.
Hint Constructors finite.
Hint Resolve add_in add_in_eq add_intro add_incl add_incl_intro union_sym union_empty_left union_empty_right
union_add_left union_add_right finite_union eqset_union_left
eqset_union_right.
Implicit Arguments full [].
Implicit Arguments empty [].
Add Parametric Relation (A:Type) : (set A) (eqset (A:=A))
reflexivity proved by (eqset_refl (A:=A))
symmetry proved by (eqset_sym (A:=A))
transitivity proved by (eqset_trans (A:=A))
as eqset_rel.
Add Parametric Relation (A:Type) : (set A) (incl (A:=A))
reflexivity proved by (incl_refl (A:=A))
transitivity proved by (incl_trans (A:=A))
as incl_rel.
|
The UN has declared that asylum is an inalienable human right, and most countries offer it. The principle is that nations should safeguard people who face persecution or danger when their own countries cannot or will not protect them. There have long been debates about who deserves sanctuary, but today the discord runs deeper. In the wake of violence in the Middle East, Afghanistan, and parts of Africa and Central America, the number of asylum seekers has risen to record levels. While most of them are hosted by neighboring countries, a crackdown on refugees in the United States and Europe is raising doubts about whether support for the concept of asylum can survive.
The total number of refugees has increased steadily since 2012, reaching 19.9 million by the middle of 2018 and fueling antipathy toward outsiders in some host countries. Of the 1.9 million new applicants for refugee status in 2017, the United States received the largest number – 332,000 – with 43% coming from Central America, where gang violence has become widespread. US President Donald Trump has coupled his drive to curb immigration with an effort to radically reshape the nation's asylum system. He has banned entry for citizens of six countries, five of them majority-Muslim, and has reduced the number of refugees who can be admitted to the United States to 30,000, a historic low. His administration has barred asylum for people fleeing domestic violence and gangs, and for those who have crossed the US border with Mexico illegally. It imposed a policy of detaining anyone caught crossing the border illegally, including those seeking refugee status. Parents were separated from their children, provoking a public outcry that led Trump to back down. In October, Trump threatened to cut foreign aid to Guatemala, Honduras and El Salvador in retaliation for a so-called caravan of migrants traveling through Mexico toward the United States. In the European Union, resentment of the influx of refugees has led leaders to consider creating holding centers, probably in Africa, to process asylum seekers. Officials have discouraged groups from rescuing such people in the Mediterranean Sea. Hungary, led by populist Prime Minister Viktor Orban, has made it a crime to help migrants seek asylum.
The Office of the United Nations High Commissioner for Refugees calls the concept of asylum one of the "earliest hallmarks of civilization", citing references to it in texts 3,500 years old. The word derives from the ancient Greek term for freedom from seizure. The 1951 UN Convention Relating to the Status of Refugees and its 1967 Protocol form the modern legal framework for asylum; they define refugees as people who can show they face persecution at home on the basis of race, religion, nationality, political opinion or social group. Agreements in Europe, Africa and South America have broadened the definition to include those fleeing generalized violence. Among today's refugees, Syrians are the largest group; they are fleeing a civil war, as are the Afghans and South Sudanese who make up the next largest groups. Victims of persecution include Christians escaping forced conversion to Islam in Arab countries and the Rohingya, a Muslim ethnic group in Myanmar fleeing abuse by their compatriots. In 2017, around 732,500 asylum applications were accepted globally and slightly more – 754,100 – were rejected. Asylum has also been used as a political tool, as when Americans welcomed Cubans and Vietnamese seeking refuge from communism. Requests for safe haven by gay, bisexual and transgender people have increased in recent years.
Attacks in Europe and the United States by killers linked to or inspired by foreign jihadist groups have stoked fears that future terrorists will hide among those seeking refuge. Critics of pro-asylum policies also worry that taking in refugees can lead to higher rates of crime and unemployment. Trump administration officials have claimed that the asylum system is abused by fraudulent claimants. Other critics say asylum assessments in the United States are so arbitrary that they amount to "refugee roulette", which has fostered a cottage industry supplying asylum seekers with compelling personal stories that may be exaggerated or false. Defenders of the vetting process say it is rigorous, even if no system is infallible. Asylum advocates stress the universal obligation to protect the vulnerable and note that many of the people nationalists like Trump would keep out are themselves fleeing terrorism. The debate over asylum in the United States and Europe can overshadow the fact that the burden of hosting the world's refugees falls mostly on poorer countries closer to major conflicts, such as Turkey, Pakistan and Uganda. |
State Before: α : Type u_1
a b : α
l : List α
⊢ getLastD (b :: l) a = getLastD l b State After: no goals Tactic: cases l <;> rfl |
[STATEMENT]
lemma (in infinite_coin_toss_space) pseudo_proj_True_stake_image:
assumes "(stake n w) = stake n x"
shows "pseudo_proj_True n w = pseudo_proj_True n x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pseudo_proj_True n w = pseudo_proj_True n x
[PROOF STEP]
by (simp add: assms pseudo_proj_True_def) |
-- --------------------------------------------------------- [ Model.idr<Code> ]
-- Module : UML.Code.Model
-- Description : Data types for common code constructs.
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module UML.Code.Model
%access public
data DType : Type where
MkSType : (name : String) -> DType
MkCType : (name : String) -> (attrs : List (String, String)) -> DType
-- MkLType : (name : String) -> (itemTy : DType) -> DType
DTypes : Type
DTypes = List DType
||| Defines a function in an interface.
data Function : Type where
||| Constructs a new function.
MkPFunc : (name : String)
-> (ps : List (Pair String String))
-> (retTy : String) -> Function
MkFunc : (name : String)
-> (rety : String)
-> Function
-- ---------------------------------------------------------------------- [ Eq ]
instance Eq DType where
(==) (MkSType x) (MkSType y) = x == y
(==) (MkCType x xs) (MkCType y ys) = x == y && xs == ys
(==) _ _ = False
instance Eq Function where
(==) (MkPFunc x xs xr) (MkPFunc y ys yr) = x == y && xs == ys && xr == yr
(==) (MkFunc x xr) (MkFunc y yr) = x == y && xr == yr
(==) _ _ = False
-- -------------------------------------------------------------------- [ Show ]
instance Show DType where
show (MkSType n) = unwords ["[Data Simple", show n, "]\n"]
show (MkCType n as) = unwords ["[Data Complex", show n, show as, "]\n"]
instance Show Function where
show (MkPFunc n ps rty) = unwords ["[Func", show n, show ps, show rty, " ]\n"]
show (MkFunc n rty) = unwords ["[Func", show n, show rty, "]\n"]
-- --------------------------------------------------------------------- [ EOF ]
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.Matrix where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Structure
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Transport
open import Cubical.Functions.FunExtEquiv
import Cubical.Data.Empty as ⊥
open import Cubical.Data.Bool
open import Cubical.Data.Nat hiding (_+_ ; _·_; +-comm ; +-assoc; ·-assoc)
open import Cubical.Data.Vec
open import Cubical.Data.Sigma.Base
open import Cubical.Data.FinData
open import Cubical.Relation.Nullary
open import Cubical.Algebra.Group
open import Cubical.Algebra.AbGroup
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.Ring
open import Cubical.Algebra.Ring.BigOps
open Iso
private
variable
ℓ : Level
A : Type ℓ
-- Equivalence between Vec matrix and Fin function matrix
FinMatrix : (A : Type ℓ) (m n : ℕ) → Type ℓ
FinMatrix A m n = FinVec (FinVec A n) m
VecMatrix : (A : Type ℓ) (m n : ℕ) → Type ℓ
VecMatrix A m n = Vec (Vec A n) m
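-- A 2 × 2 matrix over A is a function Fin 2 → Fin 2 → A in the first
-- encoding and a length-2 vector of length-2 vectors in the second; the
-- conversions below mediate between the two.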
FinMatrix→VecMatrix : {m n : ℕ} → FinMatrix A m n → VecMatrix A m n
FinMatrix→VecMatrix M = FinVec→Vec (λ fm → FinVec→Vec (M fm))
VecMatrix→FinMatrix : {m n : ℕ} → VecMatrix A m n → FinMatrix A m n
VecMatrix→FinMatrix M fn fm = lookup fm (lookup fn M)
FinMatrix→VecMatrix→FinMatrix : {m n : ℕ} (M : FinMatrix A m n)
→ VecMatrix→FinMatrix (FinMatrix→VecMatrix M) ≡ M
FinMatrix→VecMatrix→FinMatrix {m = zero} M = funExt (⊥.rec ∘ ¬Fin0)
FinMatrix→VecMatrix→FinMatrix {n = zero} M = funExt₂ (λ _ → ⊥.rec ∘ ¬Fin0)
FinMatrix→VecMatrix→FinMatrix {m = suc m} {n = suc n} M = funExt₂ goal
where
goal : (fm : Fin (suc m)) (fn : Fin (suc n))
→ VecMatrix→FinMatrix (_ ∷ FinMatrix→VecMatrix (M ∘ suc)) fm fn ≡ M fm fn
goal zero zero = refl
goal zero (suc fn) i = FinVec→Vec→FinVec (M zero ∘ suc) i fn
goal (suc fm) fn i = FinMatrix→VecMatrix→FinMatrix (M ∘ suc) i fm fn
VecMatrix→FinMatrix→VecMatrix : {m n : ℕ} (M : VecMatrix A m n)
→ FinMatrix→VecMatrix (VecMatrix→FinMatrix M) ≡ M
VecMatrix→FinMatrix→VecMatrix {m = zero} [] = refl
VecMatrix→FinMatrix→VecMatrix {m = suc m} (M ∷ MS) i =
Vec→FinVec→Vec M i ∷ VecMatrix→FinMatrix→VecMatrix MS i
FinMatrixIsoVecMatrix : (A : Type ℓ) (m n : ℕ) → Iso (FinMatrix A m n) (VecMatrix A m n)
fun (FinMatrixIsoVecMatrix A m n) = FinMatrix→VecMatrix
inv (FinMatrixIsoVecMatrix A m n) = VecMatrix→FinMatrix
rightInv (FinMatrixIsoVecMatrix A m n) = VecMatrix→FinMatrix→VecMatrix
leftInv (FinMatrixIsoVecMatrix A m n) = FinMatrix→VecMatrix→FinMatrix
FinMatrix≃VecMatrix : {m n : ℕ} → FinMatrix A m n ≃ VecMatrix A m n
FinMatrix≃VecMatrix {_} {A} {m} {n} = isoToEquiv (FinMatrixIsoVecMatrix A m n)
FinMatrix≡VecMatrix : (A : Type ℓ) (m n : ℕ) → FinMatrix A m n ≡ VecMatrix A m n
FinMatrix≡VecMatrix _ _ _ = ua FinMatrix≃VecMatrix
-- Define abelian group structure on matrices
module FinMatrixAbGroup (G' : AbGroup ℓ) where
open AbGroupStr (snd G') renaming ( is-set to isSetG )
private G = ⟨ G' ⟩
zeroFinMatrix : ∀ {m n} → FinMatrix G m n
zeroFinMatrix _ _ = 0g
negFinMatrix : ∀ {m n} → FinMatrix G m n → FinMatrix G m n
negFinMatrix M i j = - M i j
addFinMatrix : ∀ {m n} → FinMatrix G m n → FinMatrix G m n → FinMatrix G m n
addFinMatrix M N i j = M i j + N i j
isSetFinMatrix : ∀ {m n} → isSet (FinMatrix G m n)
isSetFinMatrix = isSetΠ2 λ _ _ → isSetG
addFinMatrixAssoc : ∀ {m n} → (M N K : FinMatrix G m n)
→ addFinMatrix M (addFinMatrix N K) ≡ addFinMatrix (addFinMatrix M N) K
addFinMatrixAssoc M N K i j k = assoc (M j k) (N j k) (K j k) i
addFinMatrix0r : ∀ {m n} → (M : FinMatrix G m n)
→ addFinMatrix M zeroFinMatrix ≡ M
addFinMatrix0r M i j k = rid (M j k) i
addFinMatrix0l : ∀ {m n} → (M : FinMatrix G m n)
→ addFinMatrix zeroFinMatrix M ≡ M
addFinMatrix0l M i j k = lid (M j k) i
addFinMatrixNegMatrixr : ∀ {m n} → (M : FinMatrix G m n)
→ addFinMatrix M (negFinMatrix M) ≡ zeroFinMatrix
addFinMatrixNegMatrixr M i j k = invr (M j k) i
addFinMatrixNegMatrixl : ∀ {m n} → (M : FinMatrix G m n)
→ addFinMatrix (negFinMatrix M) M ≡ zeroFinMatrix
addFinMatrixNegMatrixl M i j k = invl (M j k) i
addFinMatrixComm : ∀ {m n} → (M N : FinMatrix G m n)
→ addFinMatrix M N ≡ addFinMatrix N M
addFinMatrixComm M N i k l = comm (M k l) (N k l) i
FinMatrixAbGroup : (m n : ℕ) → AbGroup ℓ
FinMatrixAbGroup m n =
makeAbGroup {G = FinMatrix G m n} zeroFinMatrix addFinMatrix negFinMatrix
isSetFinMatrix addFinMatrixAssoc addFinMatrix0r
addFinMatrixNegMatrixr addFinMatrixComm
-- Define an abelian group structure on vector matrices and prove that
-- it is equal to FinMatrixAbGroup using the SIP
module _ (G' : AbGroup ℓ) where
open AbGroupStr (snd G')
private G = ⟨ G' ⟩
zeroVecMatrix : ∀ {m n} → VecMatrix G m n
zeroVecMatrix = replicate (replicate 0g)
negVecMatrix : ∀ {m n} → VecMatrix G m n → VecMatrix G m n
negVecMatrix = map (map (λ x → - x))
addVec : ∀ {m} → Vec G m → Vec G m → Vec G m
addVec [] [] = []
addVec (x ∷ xs) (y ∷ ys) = x + y ∷ addVec xs ys
addVecMatrix : ∀ {m n} → VecMatrix G m n → VecMatrix G m n → VecMatrix G m n
addVecMatrix [] [] = []
addVecMatrix (M ∷ MS) (N ∷ NS) = addVec M N ∷ addVecMatrix MS NS
open FinMatrixAbGroup
-- Proof that FinMatrix→VecMatrix is a group homomorphism
FinMatrix→VecMatrixHomAdd : (m n : ℕ) (M N : FinMatrix G m n)
→ FinMatrix→VecMatrix (addFinMatrix G' M N) ≡
addVecMatrix (FinMatrix→VecMatrix M) (FinMatrix→VecMatrix N)
FinMatrix→VecMatrixHomAdd zero n M N = refl
FinMatrix→VecMatrixHomAdd (suc m) n M N =
λ i → lem n (M zero) (N zero) i
∷ FinMatrix→VecMatrixHomAdd m n (λ i j → M (suc i) j) (λ i j → N (suc i) j) i
where
lem : (n : ℕ) (V W : FinVec G n)
→ FinVec→Vec (λ j → V j + W j) ≡ addVec (FinVec→Vec V) (FinVec→Vec W)
lem zero V W = refl
lem (suc n) V W = λ i → V zero + W zero ∷ lem n (V ∘ suc) (W ∘ suc) i
-- Combine everything to get an induced abelian group structure on
-- VecMatrix that is equal to the one on FinMatrix
VecMatrixAbGroup : (m n : ℕ) → AbGroup ℓ
VecMatrixAbGroup m n =
InducedAbGroup (FinMatrixAbGroup G' m n) addVecMatrix
FinMatrix≃VecMatrix (FinMatrix→VecMatrixHomAdd m n)
FinMatrixAbGroup≡VecMatrixAbGroup : (m n : ℕ) → FinMatrixAbGroup G' m n ≡ VecMatrixAbGroup m n
FinMatrixAbGroup≡VecMatrixAbGroup m n =
InducedAbGroupPath (FinMatrixAbGroup G' m n) addVecMatrix
FinMatrix≃VecMatrix (FinMatrix→VecMatrixHomAdd m n)
-- Define identity matrix and matrix multiplication for FinMatrix and
-- prove that square matrices form a ring
module _ (R' : Ring ℓ) where
open RingStr (snd R') renaming ( is-set to isSetR )
open RingTheory R'
open KroneckerDelta R'
open Sum R'
open FinMatrixAbGroup (_ , abgroupstr _ _ _ (snd R' .RingStr.+IsAbGroup))
private R = ⟨ R' ⟩
oneFinMatrix : ∀ {n} → FinMatrix R n n
oneFinMatrix i j = δ i j
mulFinMatrix : ∀ {m1 m2 m3} → FinMatrix R m1 m2 → FinMatrix R m2 m3 → FinMatrix R m1 m3
mulFinMatrix M N i k = ∑ λ j → M i j · N j k
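-- The usual row-by-column product: (mulFinMatrix M N) i k = ∑ⱼ M i j · N j k.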
∑Exchange : ∀ {m n} → (M : FinMatrix R m n) → ∑ (λ i → ∑ (λ j → M i j)) ≡ ∑ (λ j → ∑ (λ i → M i j))
∑Exchange {m = zero} {n = n} M = sym (∑0r n)
∑Exchange {m = suc m} {n = zero} M = cong (λ x → 0r + x) (∑0r m) ∙ +Rid 0r
∑Exchange {m = suc m} {n = suc n} M =
let a = M zero zero
L = ∑ λ j → M zero (suc j)
C = ∑ λ i → M (suc i) zero
N = ∑ λ i → ∑ λ j → M (suc i) (suc j)
-- N reindexed
N' = ∑ λ j → ∑ λ i → M (suc i) (suc j)
in a + L + ∑ (λ i → ∑ (λ j → M (suc i) j)) ≡⟨ (λ k → a + L + ∑Split (λ i → M (suc i) zero) (λ i → ∑ (λ j → M (suc i) (suc j))) k) ⟩
a + L + (C + N) ≡⟨ (λ k → a + L + (C + ∑Exchange (λ i j → M (suc i) (suc j)) k)) ⟩
a + L + (C + N') ≡⟨ sym (+Assoc _ _ _) ⟩
a + (L + (C + N')) ≡⟨ (λ k → a + +Assoc-comm1 L C N' k) ⟩
a + (C + (L + N')) ≡⟨ +Assoc _ _ _ ⟩
a + C + (L + N') ≡⟨ (λ k → a + C + ∑Split (λ j → M zero (suc j)) (λ j → ∑ (λ i → M (suc i) (suc j))) (~ k)) ⟩
a + C + ∑ (λ j → ∑ (λ i → M i (suc j))) ∎
mulFinMatrixAssoc : ∀ {m n k l} → (M : FinMatrix R m n) → (N : FinMatrix R n k) → (K : FinMatrix R k l)
→ mulFinMatrix M (mulFinMatrix N K) ≡ mulFinMatrix (mulFinMatrix M N) K
mulFinMatrixAssoc M N K = funExt₂ λ i j →
∑ (λ k → M i k · ∑ (λ l → N k l · K l j)) ≡⟨ ∑Ext (λ k → ∑Mulrdist (M i k) (λ l → N k l · K l j)) ⟩
∑ (λ k → ∑ (λ l → M i k · (N k l · K l j))) ≡⟨ ∑Ext (λ k → ∑Ext (λ l → ·Assoc (M i k) (N k l) (K l j))) ⟩
∑ (λ k → ∑ (λ l → M i k · N k l · K l j)) ≡⟨ ∑Exchange (λ k l → M i k · N k l · K l j) ⟩
∑ (λ l → ∑ (λ k → M i k · N k l · K l j)) ≡⟨ ∑Ext (λ l → sym (∑Mulldist (K l j) (λ k → M i k · N k l))) ⟩
∑ (λ l → ∑ (λ k → M i k · N k l) · K l j) ∎
mulFinMatrixr1 : ∀ {m n} → (M : FinMatrix R m n) → mulFinMatrix M oneFinMatrix ≡ M
mulFinMatrixr1 M = funExt₂ λ i j → ∑Mulr1 _ (M i) j
mulFinMatrix1r : ∀ {m n} → (M : FinMatrix R m n) → mulFinMatrix oneFinMatrix M ≡ M
mulFinMatrix1r M = funExt₂ λ i j → ∑Mul1r _ (λ x → M x j) i
mulFinMatrixrDistrAddFinMatrix : ∀ {n} (M N K : FinMatrix R n n)
→ mulFinMatrix M (addFinMatrix N K) ≡ addFinMatrix (mulFinMatrix M N) (mulFinMatrix M K)
mulFinMatrixrDistrAddFinMatrix M N K = funExt₂ λ i j →
∑ (λ k → M i k · (N k j + K k j)) ≡⟨ ∑Ext (λ k → ·Rdist+ (M i k) (N k j) (K k j)) ⟩
∑ (λ k → M i k · N k j + M i k · K k j) ≡⟨ ∑Split (λ k → M i k · N k j) (λ k → M i k · K k j) ⟩
∑ (λ k → M i k · N k j) + ∑ (λ k → M i k · K k j) ∎
mulFinMatrixlDistrAddFinMatrix : ∀ {n} (M N K : FinMatrix R n n)
→ mulFinMatrix (addFinMatrix M N) K ≡ addFinMatrix (mulFinMatrix M K) (mulFinMatrix N K)
mulFinMatrixlDistrAddFinMatrix M N K = funExt₂ λ i j →
∑ (λ k → (M i k + N i k) · K k j) ≡⟨ ∑Ext (λ k → ·Ldist+ (M i k) (N i k) (K k j)) ⟩
∑ (λ k → M i k · K k j + N i k · K k j) ≡⟨ ∑Split (λ k → M i k · K k j) (λ k → N i k · K k j) ⟩
∑ (λ k → M i k · K k j) + ∑ (λ k → N i k · K k j) ∎
FinMatrixRing : (n : ℕ) → Ring ℓ
FinMatrixRing n =
makeRing {R = FinMatrix R n n} zeroFinMatrix oneFinMatrix addFinMatrix
mulFinMatrix negFinMatrix isSetFinMatrix addFinMatrixAssoc
addFinMatrix0r addFinMatrixNegMatrixr addFinMatrixComm
mulFinMatrixAssoc mulFinMatrixr1 mulFinMatrix1r
mulFinMatrixrDistrAddFinMatrix mulFinMatrixlDistrAddFinMatrix
|
[STATEMENT]
lemma int_neq_iff:"((w::int) \<noteq> z) = (w < z) \<or> (z < w)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w \<noteq> z) = (w < z) \<or> z < w
[PROOF STEP]
by auto |
-- @@stderr --
dtrace: failed to compile script test/unittest/speculation/err.D_AGG_SPEC.SpeculateWithCount.d: [D_AGG_SPEC] line 28: aggregating actions may not follow speculate( )
|
Formal statement is: lemma path_connected_uncountable: fixes S :: "'a::metric_space set" assumes "path_connected S" "a \<in> S" "b \<in> S" "a \<noteq> b" shows "uncountable S" Informal statement is: If $S$ is a path-connected set containing two distinct points $a$ and $b$, then $S$ is uncountable. |
\section{Simply connected Riemann surfaces}
|
lemma LIMSEQ_le: "X \<longlonglongrightarrow> x \<Longrightarrow> Y \<longlonglongrightarrow> y \<Longrightarrow> \<exists>N. \<forall>n\<ge>N. X n \<le> Y n \<Longrightarrow> x \<le> y" for x y :: "'a::linorder_topology" |
module Palindrome
import Data.Strings
import System.REPL
%default total
palindrome : String -> Bool
palindrome s = toLower s == toLower (reverse s)
checkPalindrome : (str : String) -> String
checkPalindrome str = if palindrome str
then "Yes" ++ "\n"
else "No" ++ "\n"
partial
main : IO ()
main = repl "Enter a string: " checkPalindrome |
[STATEMENT]
lemma holdsIstate_phase_noPH_paperIDs: "holdsIstate phase_noPH_paperIDs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. holdsIstate phase_noPH_paperIDs
[PROOF STEP]
unfolding IO_Automaton.holdsIstate_def istate_def istate_def phase_noPH_paperIDs_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>confID. phase \<lparr>confIDs = [], conf = \<lambda>confID. emptyConf, userIDs = [voronkovUserID], pass = \<lambda>uID. emptyPass, user = \<lambda>uID. emptyUser, roles = \<lambda>confID uID. [], paperIDs = \<lambda>confID. [], paper = \<lambda>papID. emptyPaper, pref = \<lambda>uID papID. NoPref, voronkov = voronkovUserID, news = \<lambda>confID. [], phase = \<lambda>confID. noPH\<rparr> confID = noPH \<longrightarrow> paperIDs \<lparr>confIDs = [], conf = \<lambda>confID. emptyConf, userIDs = [voronkovUserID], pass = \<lambda>uID. emptyPass, user = \<lambda>uID. emptyUser, roles = \<lambda>confID uID. [], paperIDs = \<lambda>confID. [], paper = \<lambda>papID. emptyPaper, pref = \<lambda>uID papID. NoPref, voronkov = voronkovUserID, news = \<lambda>confID. [], phase = \<lambda>confID. noPH\<rparr> confID = []
[PROOF STEP]
by auto |
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.number_theory.pell
import Mathlib.data.pfun
import Mathlib.data.fin2
import Mathlib.PostPort
universes u u_1 u_2 u_3
namespace Mathlib
namespace int
theorem eq_nat_abs_iff_mul (x : ℤ) (n : ℕ) : nat_abs x = n ↔ (x - ↑n) * (x + ↑n) = 0 := sorry
end int
/-- Alternate definition of `vector` based on `fin2`. -/
def vector3 (α : Type u) (n : ℕ) :=
fin2 n → α
namespace vector3
/-- The empty vector -/
def nil {α : Type u_1} : vector3 α 0 :=
sorry
/-- The vector cons operation -/
def cons {α : Type u_1} {n : ℕ} (a : α) (v : vector3 α n) : vector3 α (Nat.succ n) :=
fun (i : fin2 (Nat.succ n)) => fin2.cases' a v i
infixr:67 " :: " => Mathlib.vector3.cons
/- We do not want to make the following notation global, because then these expressions will be
overloaded, and only the expected type will be able to disambiguate the meaning. Worse: Lean will
try to insert a coercion from `vector3 α _` to `list α`, if a list is expected. -/
@[simp] theorem cons_fz {α : Type u_1} {n : ℕ} (a : α) (v : vector3 α n) : cons a v fin2.fz = a :=
rfl
@[simp] theorem cons_fs {α : Type u_1} {n : ℕ} (a : α) (v : vector3 α n) (i : fin2 n) : cons a v (fin2.fs i) = v i :=
rfl
/-- Get the `i`th element of a vector -/
def nth {α : Type u_1} {n : ℕ} (i : fin2 n) (v : vector3 α n) : α :=
v i
/-- Construct a vector from a function on `fin2`. -/
def of_fn {α : Type u_1} {n : ℕ} (f : fin2 n → α) : vector3 α n :=
f
/-- Get the head of a nonempty vector. -/
def head {α : Type u_1} {n : ℕ} (v : vector3 α (Nat.succ n)) : α :=
v fin2.fz
/-- Get the tail of a nonempty vector. -/
def tail {α : Type u_1} {n : ℕ} (v : vector3 α (Nat.succ n)) : vector3 α n :=
fun (i : fin2 n) => v (fin2.fs i)
theorem eq_nil {α : Type u_1} (v : vector3 α 0) : v = nil := sorry
theorem cons_head_tail {α : Type u_1} {n : ℕ} (v : vector3 α (Nat.succ n)) : head v :: tail v = v :=
funext fun (i : fin2 (Nat.succ n)) => fin2.cases' rfl (fun (_x : fin2 n) => rfl) i
def nil_elim {α : Type u_1} {C : vector3 α 0 → Sort u} (H : C nil) (v : vector3 α 0) : C v :=
eq.mpr sorry H
def cons_elim {α : Type u_1} {n : ℕ} {C : vector3 α (Nat.succ n) → Sort u} (H : (a : α) → (t : vector3 α n) → C (a :: t)) (v : vector3 α (Nat.succ n)) : C v :=
eq.mpr sorry (H (head v) (tail v))
@[simp] theorem cons_elim_cons {α : Type u_1} {n : ℕ} {C : vector3 α (Nat.succ n) → Sort u_2} {H : (a : α) → (t : vector3 α n) → C (a :: t)} {a : α} {t : vector3 α n} : cons_elim H (a :: t) = H a t :=
rfl
protected def rec_on {α : Type u_1} {C : {n : ℕ} → vector3 α n → Sort u} {n : ℕ} (v : vector3 α n) (H0 : C nil) (Hs : {n : ℕ} → (a : α) → (w : vector3 α n) → C w → C (a :: w)) : C v :=
nat.rec_on n (fun (v : vector3 α 0) => nil_elim H0 v)
(fun (n : ℕ) (IH : (_a : vector3 α n) → C _a) (v : vector3 α (Nat.succ n)) =>
cons_elim (fun (a : α) (t : vector3 α n) => Hs a t (IH t)) v)
v
@[simp] theorem rec_on_nil {α : Type u_1} {C : {n : ℕ} → vector3 α n → Sort u_2} {H0 : C nil} {Hs : {n : ℕ} → (a : α) → (w : vector3 α n) → C w → C (a :: w)} : vector3.rec_on nil H0 Hs = H0 :=
rfl
@[simp] theorem rec_on_cons {α : Type u_1} {C : {n : ℕ} → vector3 α n → Sort u_2} {H0 : C nil} {Hs : {n : ℕ} → (a : α) → (w : vector3 α n) → C w → C (a :: w)} {n : ℕ} {a : α} {v : vector3 α n} : vector3.rec_on (a :: v) H0 Hs = Hs a v (vector3.rec_on v H0 Hs) :=
rfl
/-- Append two vectors -/
def append {α : Type u_1} {m : ℕ} (v : vector3 α m) {n : ℕ} (w : vector3 α n) : vector3 α (n + m) :=
nat.rec_on m (fun (_x : vector3 α 0) => w)
(fun (m : ℕ) (IH : vector3 α m → vector3 α (n + m)) (v : vector3 α (Nat.succ m)) =>
cons_elim (fun (a : α) (t : vector3 α m) => fin2.cases' a (IH t)) v)
v
@[simp] theorem append_nil {α : Type u_1} {n : ℕ} (w : vector3 α n) : append nil w = w :=
rfl
@[simp] theorem append_cons {α : Type u_1} (a : α) {m : ℕ} (v : vector3 α m) {n : ℕ} (w : vector3 α n) : append (a :: v) w = a :: append v w :=
rfl
@[simp] theorem append_left {α : Type u_1} {m : ℕ} (i : fin2 m) (v : vector3 α m) {n : ℕ} (w : vector3 α n) : append v w (fin2.left n i) = v i := sorry
@[simp] theorem append_add {α : Type u_1} {m : ℕ} (v : vector3 α m) {n : ℕ} (w : vector3 α n) (i : fin2 n) : append v w (fin2.add i m) = w i := sorry
/-- Insert `a` into `v` at index `i`. -/
def insert {α : Type u_1} (a : α) {n : ℕ} (v : vector3 α n) (i : fin2 (Nat.succ n)) : vector3 α (Nat.succ n) :=
fun (j : fin2 (Nat.succ n)) => cons a v (fin2.insert_perm i j)
@[simp] theorem insert_fz {α : Type u_1} (a : α) {n : ℕ} (v : vector3 α n) : insert a v fin2.fz = a :: v := sorry
@[simp] theorem insert_fs {α : Type u_1} (a : α) {n : ℕ} (b : α) (v : vector3 α n) (i : fin2 (Nat.succ n)) : insert a (b :: v) (fin2.fs i) = b :: insert a v i := sorry
theorem append_insert {α : Type u_1} (a : α) {k : ℕ} (t : vector3 α k) {n : ℕ} (v : vector3 α n) (i : fin2 (Nat.succ n)) (e : Nat.succ n + k = Nat.succ (n + k)) : insert a (append t v) (eq.rec_on e (fin2.add i k)) = eq.rec_on e (append t (insert a v i)) := sorry
end vector3
/-- "Curried" exists, i.e. ∃ x1 ... xn, f [x1, ..., xn] -/
def vector_ex {α : Type u_1} (k : ℕ) : (vector3 α k → Prop) → Prop :=
sorry
/-- "Curried" forall, i.e. ∀ x1 ... xn, f [x1, ..., xn] -/
def vector_all {α : Type u_1} (k : ℕ) : (vector3 α k → Prop) → Prop :=
sorry
theorem exists_vector_zero {α : Type u_1} (f : vector3 α 0 → Prop) : Exists f ↔ f vector3.nil := sorry
theorem exists_vector_succ {α : Type u_1} {n : ℕ} (f : vector3 α (Nat.succ n) → Prop) : Exists f ↔ ∃ (x : α), ∃ (v : vector3 α n), f (x :: v) := sorry
theorem vector_ex_iff_exists {α : Type u_1} {n : ℕ} (f : vector3 α n → Prop) : vector_ex n f ↔ Exists f := sorry
theorem vector_all_iff_forall {α : Type u_1} {n : ℕ} (f : vector3 α n → Prop) : vector_all n f ↔ ∀ (v : vector3 α n), f v := sorry
/-- `vector_allp p v` is equivalent to `∀ i, p (v i)`, but unfolds directly to a conjunction,
i.e. `vector_allp p [0, 1, 2] = p 0 ∧ p 1 ∧ p 2`. -/
def vector_allp {α : Type u_1} (p : α → Prop) {n : ℕ} (v : vector3 α n) :=
vector3.rec_on v True
fun (n : ℕ) (a : α) (v : vector3 α n) (IH : Prop) =>
vector3.rec_on v (p a) fun (n : ℕ) (b : α) (v' : vector3 α n) (_x : Prop) => p a ∧ IH
@[simp] theorem vector_allp_nil {α : Type u_1} (p : α → Prop) : vector_allp p vector3.nil = True :=
rfl
@[simp] theorem vector_allp_singleton {α : Type u_1} (p : α → Prop) (x : α) : vector_allp p (x :: vector3.nil) = p x :=
rfl
@[simp] theorem vector_allp_cons {α : Type u_1} (p : α → Prop) {n : ℕ} (x : α) (v : vector3 α n) : vector_allp p (x :: v) ↔ p x ∧ vector_allp p v :=
vector3.rec_on v (iff.symm (and_true (vector_allp p (x :: vector3.nil))))
fun (n : ℕ) (a : α) (v : vector3 α n) (IH : vector_allp p (x :: v) ↔ p x ∧ vector_allp p v) => iff.rfl
theorem vector_allp_iff_forall {α : Type u_1} (p : α → Prop) {n : ℕ} (v : vector3 α n) : vector_allp p v ↔ ∀ (i : fin2 n), p (v i) := sorry
theorem vector_allp.imp {α : Type u_1} {p : α → Prop} {q : α → Prop} (h : ∀ (x : α), p x → q x) {n : ℕ} {v : vector3 α n} (al : vector_allp p v) : vector_allp q v :=
iff.mpr (vector_allp_iff_forall q v) fun (i : fin2 n) => h (v i) (iff.mp (vector_allp_iff_forall p v) al i)
/-- `list_all p l` is equivalent to `∀ a ∈ l, p a`, but unfolds directly to a conjunction,
i.e. `list_all p [0, 1, 2] = p 0 ∧ p 1 ∧ p 2`. -/
@[simp] def list_all {α : Type u_1} (p : α → Prop) : List α → Prop :=
sorry
@[simp] theorem list_all_cons {α : Type u_1} (p : α → Prop) (x : α) (l : List α) : list_all p (x :: l) ↔ p x ∧ list_all p l :=
list.cases_on l (idRhs (list_all p [x] ↔ list_all p [x] ∧ True) (iff.symm (and_true (list_all p [x]))))
fun (l_hd : α) (l_tl : List α) => idRhs (list_all p (x :: l_hd :: l_tl) ↔ list_all p (x :: l_hd :: l_tl)) iff.rfl
theorem list_all_iff_forall {α : Type u_1} (p : α → Prop) (l : List α) : list_all p l ↔ ∀ (x : α), x ∈ l → p x := sorry
theorem list_all.imp {α : Type u_1} {p : α → Prop} {q : α → Prop} (h : ∀ (x : α), p x → q x) {l : List α} : list_all p l → list_all q l := sorry
@[simp] theorem list_all_map {α : Type u_1} {β : Type u_2} {p : β → Prop} (f : α → β) {l : List α} : list_all p (list.map f l) ↔ list_all (p ∘ f) l := sorry
theorem list_all_congr {α : Type u_1} {p : α → Prop} {q : α → Prop} (h : ∀ (x : α), p x ↔ q x) {l : List α} : list_all p l ↔ list_all q l :=
{ mp := list_all.imp fun (x : α) => iff.mp (h x), mpr := list_all.imp fun (x : α) => iff.mpr (h x) }
protected instance decidable_list_all {α : Type u_1} (p : α → Prop) [decidable_pred p] (l : List α) : Decidable (list_all p l) :=
decidable_of_decidable_of_iff (list.decidable_ball (fun (x : α) => p x) l) sorry
/- poly -/
/-- A predicate asserting that a function is a multivariate integer polynomial.
(We are being a bit lazy here by allowing many representations for multiplication,
rather than only allowing monomials and addition, but the definition is equivalent
and this is easier to use.) -/
inductive is_poly {α : Sort u_1} : ((α → ℕ) → ℤ) → Prop
where
| proj : ∀ (i : α), is_poly fun (x : α → ℕ) => ↑(x i)
| const : ∀ (n : ℤ), is_poly fun (x : α → ℕ) => n
| sub : ∀ {f g : (α → ℕ) → ℤ}, is_poly f → is_poly g → is_poly fun (x : α → ℕ) => f x - g x
| mul : ∀ {f g : (α → ℕ) → ℤ}, is_poly f → is_poly g → is_poly fun (x : α → ℕ) => f x * g x
/-- The type of multivariate integer polynomials -/
def poly (α : Type u) :=
Subtype fun (f : (α → ℕ) → ℤ) => is_poly f
namespace poly
protected instance has_coe_to_fun {α : Type u} : has_coe_to_fun (poly α) :=
has_coe_to_fun.mk (fun (f : poly α) => (α → ℕ) → ℤ) fun (f : poly α) => subtype.val f
/-- The underlying function of a `poly` is a polynomial -/
theorem isp {α : Type u} (f : poly α) : is_poly ⇑f :=
subtype.property f
/-- Extensionality for `poly α` -/
theorem ext {α : Type u} {f : poly α} {g : poly α} (e : ∀ (x : α → ℕ), coe_fn f x = coe_fn g x) : f = g :=
subtype.eq (funext e)
/-- Construct a `poly` given an extensionally equivalent `poly`. -/
def subst {α : Type u} (f : poly α) (g : (α → ℕ) → ℤ) (e : ∀ (x : α → ℕ), coe_fn f x = g x) : poly α :=
{ val := g, property := sorry }
@[simp] theorem subst_eval {α : Type u} (f : poly α) (g : (α → ℕ) → ℤ) (e : ∀ (x : α → ℕ), coe_fn f x = g x) (x : α → ℕ) : coe_fn (subst f g e) x = g x :=
rfl
/-- The `i`th projection function, `x_i`. -/
def proj {α : Type u} (i : α) : poly α :=
{ val := fun (x : α → ℕ) => ↑(x i), property := is_poly.proj i }
@[simp] theorem proj_eval {α : Type u} (i : α) (x : α → ℕ) : coe_fn (proj i) x = ↑(x i) :=
rfl
/-- The constant function with value `n : ℤ`. -/
def const {α : Type u} (n : ℤ) : poly α :=
{ val := fun (x : α → ℕ) => n, property := is_poly.const n }
@[simp] theorem const_eval {α : Type u} (n : ℤ) (x : α → ℕ) : coe_fn (const n) x = n :=
rfl
/-- The zero polynomial -/
def zero {α : Type u} : poly α :=
const 0
protected instance has_zero {α : Type u} : HasZero (poly α) :=
{ zero := zero }
@[simp] theorem zero_eval {α : Type u} (x : α → ℕ) : coe_fn 0 x = 0 :=
rfl
/-- The one polynomial -/
def one {α : Type u} : poly α :=
const 1
protected instance has_one {α : Type u} : HasOne (poly α) :=
{ one := one }
@[simp] theorem one_eval {α : Type u} (x : α → ℕ) : coe_fn 1 x = 1 :=
rfl
/-- Subtraction of polynomials -/
def sub {α : Type u} : poly α → poly α → poly α :=
sorry
protected instance has_sub {α : Type u} : Sub (poly α) :=
{ sub := sub }
@[simp] theorem sub_eval {α : Type u} (f : poly α) (g : poly α) (x : α → ℕ) : coe_fn (f - g) x = coe_fn f x - coe_fn g x := sorry
/-- Negation of a polynomial -/
def neg {α : Type u} (f : poly α) : poly α :=
0 - f
protected instance has_neg {α : Type u} : Neg (poly α) :=
{ neg := neg }
@[simp] theorem neg_eval {α : Type u} (f : poly α) (x : α → ℕ) : coe_fn (-f) x = -coe_fn f x := sorry
/-- Addition of polynomials -/
def add {α : Type u} : poly α → poly α → poly α :=
sorry
protected instance has_add {α : Type u} : Add (poly α) :=
{ add := add }
@[simp] theorem add_eval {α : Type u} (f : poly α) (g : poly α) (x : α → ℕ) : coe_fn (f + g) x = coe_fn f x + coe_fn g x := sorry
/-- Multiplication of polynomials -/
def mul {α : Type u} : poly α → poly α → poly α :=
sorry
protected instance has_mul {α : Type u} : Mul (poly α) :=
{ mul := mul }
@[simp] theorem mul_eval {α : Type u} (f : poly α) (g : poly α) (x : α → ℕ) : coe_fn (f * g) x = coe_fn f x * coe_fn g x := sorry
protected instance comm_ring {α : Type u} : comm_ring (poly α) :=
comm_ring.mk Add.add sorry 0 sorry sorry Neg.neg Sub.sub sorry sorry Mul.mul sorry 1 sorry sorry sorry sorry sorry
theorem induction {α : Type u} {C : poly α → Prop} (H1 : ∀ (i : α), C (proj i)) (H2 : ∀ (n : ℤ), C (const n)) (H3 : ∀ (f g : poly α), C f → C g → C (f - g)) (H4 : ∀ (f g : poly α), C f → C g → C (f * g)) (f : poly α) : C f := sorry
/-- The sum of squares of a list of polynomials. This is relevant for
Diophantine equations, because it means that a list of equations
can be encoded as a single equation: `x = 0 ∧ y = 0 ∧ z = 0` is
equivalent to `x^2 + y^2 + z^2 = 0`. -/
def sumsq {α : Type u} : List (poly α) → poly α :=
sorry
theorem sumsq_nonneg {α : Type u} (x : α → ℕ) (l : List (poly α)) : 0 ≤ coe_fn (sumsq l) x := sorry
theorem sumsq_eq_zero {α : Type u} (x : α → ℕ) (l : List (poly α)) : coe_fn (sumsq l) x = 0 ↔ list_all (fun (a : poly α) => coe_fn a x = 0) l := sorry
/-- Map the index set of variables, replacing `x_i` with `x_(f i)`. -/
def remap {α : Type u_1} {β : Type u_2} (f : α → β) (g : poly α) : poly β :=
{ val := fun (v : β → ℕ) => coe_fn g (v ∘ f), property := sorry }
@[simp] theorem remap_eval {α : Type u_1} {β : Type u_2} (f : α → β) (g : poly α) (v : β → ℕ) : coe_fn (remap f g) v = coe_fn g (v ∘ f) :=
rfl
end poly
namespace sum
/-- combine two functions into a function on the disjoint union -/
def join {α : Type u_1} {β : Type u_2} {γ : Sort u_3} (f : α → γ) (g : β → γ) : α ⊕ β → γ :=
sum.rec f g
end sum
namespace option
/-- Functions from `option` can be combined similarly to `vector.cons` -/
def cons {α : Type u_1} {β : Sort u_2} (a : β) (v : α → β) : Option α → β :=
Option.rec a v
infixr:67 " :: " => Mathlib.option.cons
@[simp] theorem cons_head_tail {α : Type u_1} {β : Sort u_2} (v : Option α → β) : v none :: v ∘ some = v := sorry
end option
/- dioph -/
/-- A set `S ⊆ ℕ^α` is diophantine if there exists a polynomial on
`α ⊕ β` such that `v ∈ S` iff there exists `t : ℕ^β` with `p (v, t) = 0`. -/
def dioph {α : Type u} (S : set (α → ℕ)) :=
Exists fun {β : Type u} => ∃ (p : poly (α ⊕ β)), ∀ (v : α → ℕ), S v ↔ ∃ (t : β → ℕ), coe_fn p (sum.join v t) = 0
namespace dioph
theorem ext {α : Type u} {S : set (α → ℕ)} {S' : set (α → ℕ)} (d : dioph S) (H : ∀ (v : α → ℕ), S v ↔ S' v) : dioph S' :=
Eq._oldrec d ((fun (this : S = S') => this) (set.ext H))
theorem of_no_dummies {α : Type u} (S : set (α → ℕ)) (p : poly α) (h : ∀ (v : α → ℕ), S v ↔ coe_fn p v = 0) : dioph S := sorry
theorem inject_dummies_lem {α : Type u} {β : Type u} {γ : Type u} (f : β → γ) (g : γ → Option β) (inv : ∀ (x : β), g (f x) = some x) (p : poly (α ⊕ β)) (v : α → ℕ) : (∃ (t : β → ℕ), coe_fn p (sum.join v t) = 0) ↔
∃ (t : γ → ℕ), coe_fn (poly.remap (sum.join sum.inl (sum.inr ∘ f)) p) (sum.join v t) = 0 := sorry
theorem inject_dummies {α : Type u} {β : Type u} {γ : Type u} {S : set (α → ℕ)} (f : β → γ) (g : γ → Option β) (inv : ∀ (x : β), g (f x) = some x) (p : poly (α ⊕ β)) (h : ∀ (v : α → ℕ), S v ↔ ∃ (t : β → ℕ), coe_fn p (sum.join v t) = 0) : ∃ (q : poly (α ⊕ γ)), ∀ (v : α → ℕ), S v ↔ ∃ (t : γ → ℕ), coe_fn q (sum.join v t) = 0 :=
Exists.intro (poly.remap (sum.join sum.inl (sum.inr ∘ f)) p)
fun (v : α → ℕ) => iff.trans (h v) (inject_dummies_lem f g inv p v)
theorem reindex_dioph {α : Type u} {β : Type u} {S : set (α → ℕ)} (d : dioph S) (f : α → β) : dioph fun (v : β → ℕ) => S (v ∘ f) := sorry
theorem dioph_list_all {α : Type u} (l : List (set (α → ℕ))) (d : list_all dioph l) : dioph fun (v : α → ℕ) => list_all (fun (S : set (α → ℕ)) => S v) l := sorry
theorem and_dioph {α : Type u} {S : set (α → ℕ)} {S' : set (α → ℕ)} (d : dioph S) (d' : dioph S') : dioph fun (v : α → ℕ) => S v ∧ S' v :=
dioph_list_all [S, S'] { left := d, right := d' }
theorem or_dioph {α : Type u} {S : set (α → ℕ)} {S' : set (α → ℕ)} (d : dioph S) (d' : dioph S') : dioph fun (v : α → ℕ) => S v ∨ S' v := sorry
/-- A partial function is Diophantine if its graph is Diophantine. -/
def dioph_pfun {α : Type u} (f : (α → ℕ) →. ℕ) :=
dioph fun (v : Option α → ℕ) => pfun.graph f (v ∘ some, v none)
/-- A function is Diophantine if its graph is Diophantine. -/
def dioph_fn {α : Type u} (f : (α → ℕ) → ℕ) :=
dioph fun (v : Option α → ℕ) => f (v ∘ some) = v none
theorem reindex_dioph_fn {α : Type u} {β : Type u} {f : (α → ℕ) → ℕ} (d : dioph_fn f) (g : α → β) : dioph_fn fun (v : β → ℕ) => f (v ∘ g) :=
reindex_dioph d (Functor.map g)
theorem ex_dioph {α : Type u} {β : Type u} {S : set (α ⊕ β → ℕ)} : dioph S → dioph fun (v : α → ℕ) => ∃ (x : β → ℕ), S (sum.join v x) := sorry
theorem ex1_dioph {α : Type u} {S : set (Option α → ℕ)} : dioph S → dioph fun (v : α → ℕ) => ∃ (x : ℕ), S (x :: v) := sorry
theorem dom_dioph {α : Type u} {f : (α → ℕ) →. ℕ} (d : dioph_pfun f) : dioph (pfun.dom f) :=
cast (congr_arg dioph (set.ext fun (v : α → ℕ) => iff.symm (pfun.dom_iff_graph f v))) (ex1_dioph d)
theorem dioph_fn_iff_pfun {α : Type u} (f : (α → ℕ) → ℕ) : dioph_fn f = dioph_pfun ↑f :=
congr_arg dioph (set.ext fun (v : Option α → ℕ) => iff.symm pfun.lift_graph)
theorem abs_poly_dioph {α : Type u} (p : poly α) : dioph_fn fun (v : α → ℕ) => int.nat_abs (coe_fn p v) :=
of_no_dummies (fun (v : Option α → ℕ) => (fun (v : α → ℕ) => int.nat_abs (coe_fn p v)) (v ∘ some) = v none)
((poly.remap some p - poly.proj none) * (poly.remap some p + poly.proj none))
fun (v : Option α → ℕ) => int.eq_nat_abs_iff_mul (coe_fn p (v ∘ some)) (v none)
theorem proj_dioph {α : Type u} (i : α) : dioph_fn fun (v : α → ℕ) => v i :=
abs_poly_dioph (poly.proj i)
theorem dioph_pfun_comp1 {α : Type u} {S : set (Option α → ℕ)} (d : dioph S) {f : (α → ℕ) →. ℕ} (df : dioph_pfun f) : dioph fun (v : α → ℕ) => ∃ (h : pfun.dom f v), S (pfun.fn f v h :: v) := sorry
theorem dioph_fn_comp1 {α : Type u} {S : set (Option α → ℕ)} (d : dioph S) {f : (α → ℕ) → ℕ} (df : dioph_fn f) : dioph fun (v : α → ℕ) => S (f v :: v) := sorry
theorem dioph_fn_vec_comp1 {n : ℕ} {S : set (vector3 ℕ (Nat.succ n))} (d : dioph S) {f : vector3 ℕ n → ℕ} (df : dioph_fn f) : dioph fun (v : vector3 ℕ n) => S (f v :: v) := sorry
theorem vec_ex1_dioph (n : ℕ) {S : set (vector3 ℕ (Nat.succ n))} (d : dioph S) : dioph fun (v : vector3 ℕ n) => ∃ (x : ℕ), S (x :: v) := sorry
theorem dioph_fn_vec {n : ℕ} (f : vector3 ℕ n → ℕ) : dioph_fn f ↔ dioph fun (v : vector3 ℕ (Nat.succ n)) => f (v ∘ fin2.fs) = v fin2.fz :=
{ mp := fun (h : dioph_fn f) => reindex_dioph h (fin2.fz :: fin2.fs),
mpr :=
fun (h : dioph fun (v : vector3 ℕ (Nat.succ n)) => f (v ∘ fin2.fs) = v fin2.fz) => reindex_dioph h (none :: some) }
theorem dioph_pfun_vec {n : ℕ} (f : vector3 ℕ n →. ℕ) : dioph_pfun f ↔ dioph fun (v : vector3 ℕ (Nat.succ n)) => pfun.graph f (v ∘ fin2.fs, v fin2.fz) := sorry
theorem dioph_fn_compn {α : Type} {n : ℕ} {S : set (α ⊕ fin2 n → ℕ)} (d : dioph S) {f : vector3 ((α → ℕ) → ℕ) n} (df : vector_allp dioph_fn f) : dioph fun (v : α → ℕ) => S (sum.join v fun (i : fin2 n) => f i v) := sorry
theorem dioph_comp {α : Type} {n : ℕ} {S : set (vector3 ℕ n)} (d : dioph S) (f : vector3 ((α → ℕ) → ℕ) n) (df : vector_allp dioph_fn f) : dioph fun (v : α → ℕ) => S fun (i : fin2 n) => f i v :=
dioph_fn_compn (reindex_dioph d sum.inr) df
theorem dioph_fn_comp {α : Type} {n : ℕ} {f : vector3 ℕ n → ℕ} (df : dioph_fn f) (g : vector3 ((α → ℕ) → ℕ) n) (dg : vector_allp dioph_fn g) : dioph_fn fun (v : α → ℕ) => f fun (i : fin2 n) => g i v := sorry
theorem proj_dioph_of_nat {n : ℕ} (m : ℕ) [fin2.is_lt m n] : dioph_fn fun (v : vector3 ℕ n) => v (fin2.of_nat' m) :=
proj_dioph (fin2.of_nat' m)
theorem const_dioph {α : Type} (n : ℕ) : dioph_fn (function.const (α → ℕ) n) :=
abs_poly_dioph (poly.const ↑n)
theorem dioph_comp2 {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) {S : ℕ → ℕ → Prop} (d : dioph fun (v : vector3 ℕ (bit0 1)) => S (v (fin2.of_nat' 0)) (v (fin2.of_nat' 1))) : dioph fun (v : α → ℕ) => S (f v) (g v) :=
dioph_comp d (f :: g :: vector3.nil) { left := df, right := dg }
theorem dioph_fn_comp2 {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) {h : ℕ → ℕ → ℕ} (d : dioph_fn fun (v : vector3 ℕ (bit0 1)) => h (v (fin2.of_nat' 0)) (v (fin2.of_nat' 1))) : dioph_fn fun (v : α → ℕ) => h (f v) (g v) :=
dioph_fn_comp d (f :: g :: vector3.nil) { left := df, right := dg }
theorem eq_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph fun (v : α → ℕ) => f v = g v := sorry
theorem add_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v + g v :=
dioph_fn_comp2 df dg (abs_poly_dioph (poly.proj (fin2.of_nat' 0) + poly.proj (fin2.of_nat' 1)))
theorem mul_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v * g v :=
dioph_fn_comp2 df dg (abs_poly_dioph (poly.proj (fin2.of_nat' 0) * poly.proj (fin2.of_nat' 1)))
theorem le_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph fun (v : α → ℕ) => f v ≤ g v := sorry
theorem lt_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph fun (v : α → ℕ) => f v < g v :=
le_dioph (add_dioph df (const_dioph 1)) dg
theorem ne_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph fun (v : α → ℕ) => f v ≠ g v :=
ext (or_dioph (lt_dioph df dg) (lt_dioph dg df)) fun (v : α → ℕ) => iff.symm ne_iff_lt_or_gt
theorem sub_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v - g v := sorry
theorem dvd_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph fun (v : α → ℕ) => f v ∣ g v := sorry
theorem mod_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v % g v := sorry
theorem modeq_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) {h : (α → ℕ) → ℕ} (dh : dioph_fn h) : dioph fun (v : α → ℕ) => nat.modeq (h v) (f v) (g v) :=
eq_dioph (mod_dioph df dh) (mod_dioph dg dh)
theorem div_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v / g v := sorry
theorem pell_dioph : dioph
fun (v : vector3 ℕ (bit0 (bit0 1))) =>
∃ (h : 1 < v (fin2.of_nat' 0)),
pell.xn h (v (fin2.of_nat' 1)) = v (fin2.of_nat' (bit0 1)) ∧
pell.yn h (v (fin2.of_nat' 1)) = v (fin2.of_nat' (bit1 1)) := sorry
theorem xn_dioph : dioph_pfun
fun (v : vector3 ℕ (bit0 1)) =>
roption.mk (1 < v (fin2.of_nat' 0)) fun (h : 1 < v (fin2.of_nat' 0)) => pell.xn h (v (fin2.of_nat' 1)) := sorry
theorem pow_dioph {α : Type} {f : (α → ℕ) → ℕ} {g : (α → ℕ) → ℕ} (df : dioph_fn f) (dg : dioph_fn g) : dioph_fn fun (v : α → ℕ) => f v ^ g v := sorry
|
# - [] - Fixing up energy_data ----
BTC_Energy[['Date']] <- as.Date(BTC_Energy[['Date']], tryFormats = '%Y/%m/%d')
ETH_Energy[['Date']] <- as.Date(ETH_Energy[['Date']], tryFormats = '%Y/%m/%d')
remove = c("Minimum TWh per Year")
BTC_Energy = BTC_Energy[, !colnames(BTC_Energy) %in% remove] # drop the unwanted column from the dataframe.
ETH_Energy = ETH_Energy[, !colnames(ETH_Energy) %in% remove] # drop the unwanted column from the dataframe.
# - [] - Subsetting Stuff ----
remove = c("open (USD)", "high (USD)", 'low (USD)', 'market cap (USD)')
BTC_data = BTC_data[, !colnames(BTC_data) %in% c(remove)] # conditionally select the variables to remove from the dataframe.
colnames(BTC_data) <- c('timestamp', 'BTC_Close', 'BTC_Volume')
ETH_data = ETH_data[, !colnames(ETH_data) %in% c(remove)] # conditionally select the variables to remove from the dataframe.
colnames(ETH_data) <- c('timestamp', 'ETH_Close', 'ETH_Volume')
data0 = ETH_data %>% inner_join(BTC_data, by="timestamp")
colnames(inflation_data) <- c('timestamp', 'breakeven_inflation')
# - Fixing up breakeven inflation values ----
inflation_data[['breakeven_inflation']] <- as.numeric(inflation_data[['breakeven_inflation']])
data1 = data0 %>% inner_join(inflation_data, by= 'timestamp')
colnames(BTC_Energy) <- c('timestamp', 'BTC_Energy')
data2 = data1 %>% inner_join(BTC_Energy, by= 'timestamp')
colnames(ETH_Energy) <- c('timestamp', 'ETH_Energy')
data = data2 %>% inner_join(ETH_Energy, by= 'timestamp')
print('Made the following DataFrame:')
print(head(data))
write.csv(data, "data/data.csv")
|
`is_element/digraphs` := (V::set) -> proc(E)
if not(`is_element/autorel`(V)(E)) then
return false;
fi;
if not(`is_antisymmetric/autorel`(V)(E)) then
return false;
fi;
return true;
end:
`is_leq/digraphs` := (V::set) -> (E0,E1) -> evalb(E0 minus E1 = {}):
`list_elements/digraphs` := proc(V::set)
local n,EE,E,i,j,a,b;
n := nops(V);
EE := [{}];
for i from 1 to n-1 do
for j from i+1 to n do
a := V[i];
b := V[j];
EE := [op(EE),
seq(E union {[a,b]},E in EE),
seq(E union {[b,a]},E in EE)];
od;
od;
return EE;
end:
`count_elements/digraphs` := (V::set) -> 3^(nops(V)*(nops(V)-1)/2);
`random_element/digraphs` := (V::set) -> proc()
local n,E,r,i,j,a,b,x;
n := nops(V);
E := NULL;
r := rand(-1..1);
for i from 1 to n-1 do
for j from i+1 to n do
a := V[i];
b := V[j];
x := r();
if x = -1 then
E := E,[b,a];
elif x = 1 then
E := E,[a,b];
fi;
od;
od;
return {E};
end:
`neighbour_table/digraphs` := (V) -> proc(E)
local N,v,e;
N := table();
for v in V do N[v] := {}; od:
for e in E do
N[e[1]] := {op(N[e[1]]),e[2]};
N[e[2]] := {op(N[e[2]]),e[1]};
od:
return eval(N);
end:
`in_neighbour_table/digraphs` := (V) -> proc(E)
local N,v,e;
N := table();
for v in V do N[v] := {}; od:
for e in E do
N[e[2]] := {op(N[e[2]]),e[1]};
od:
return eval(N);
end:
`out_neighbour_table/digraphs` := (V) -> proc(E)
local N,v,e;
N := table();
for v in V do N[v] := {}; od:
for e in E do
N[e[1]] := {op(N[e[1]]),e[2]};
od:
return eval(N);
end:
`component_relation/digraphs` := (V) -> (E) ->
`transitive_closure/autorel`(V)(
E union `op/autorel`(V)(E) union `id/autorel`(V)
);
`components/digraphs` := (V) -> (E) ->
`block_partition/equiv`(`component_relation/digraphs`(V)(E));
`is_connected/digraphs` := (V) -> (E) ->
evalb(`component_relation/digraphs`(V)(E) = `top/autorel`(V));
`skeleton/digraphs` := (V::set) -> proc(E)
local n,V0,E0,ix,i;
n := nops(V);
V0 := {seq(i,i=1..n)};
ix := table():
for i from 1 to n do ix[V[i]] := i; od;
E0 := map(e -> [ix[e[1]],ix[e[2]]],E);
return [V0,E0];
end:
`is_element/dipaths` := (V) -> (E) -> proc(p)
local i,n;
if not type(p,list) then
return false;
fi;
n := nops(p) - 1;
if n < 0 then
return false;
fi;
if {op(p)} minus V <> {} then
return false;
fi;
for i from 1 to n do
if not(member([p[i],p[i+1]],E)) then
return false;
fi;
od;
return true;
end:
`is_small_element/dipaths` := (V) -> (E) -> (n::nonnegint) -> proc(p)
return evalb(`is_element/dipaths`(V)(E)(p) and (nops(p) = n+1));
end:
`random_small_element/dipaths` := (V) -> (E) -> (n::nonnegint) -> proc(num_tries := 10)
local k,p,F;
if n = 0 then
if V = {} then
return FAIL;
else
return [random_element_of(V)()];
fi;
elif n = 1 then
if E = {} then
return FAIL;
else
return random_element_of(E)();
fi;
fi;
k := num_tries;
while k > 0 do
k := k-1;
p := `random_small_element/dipaths`(V)(E)(n-1)();
if p <> FAIL then
F := select(e -> e[2] = p[1],E);
if F <> {} then
return [random_element_of(F)()[1],op(p)];
fi;
fi;
od;
return FAIL;
end:
`list_small_elements/dipaths` := (V) -> (E) -> proc(n::nonnegint)
option remember;
local P,Q,p,v,F,e;
if n = 0 then
return map(v -> [v],V);
else
P := `list_small_elements/dipaths`(V)(E)(n-1);
Q := NULL;
for p in P do
v := p[1];
F := select(e -> e[2] = v,E);
Q := Q,seq([e[1],op(p)],e in F);
od:
return [Q];
fi;
end:
`is_leq/dipaths` := NULL;
`count_elements/dipaths` := NULL:
`count_small_elements/dipaths` := NULL:
`length/dipaths` := (V) -> (E) -> (p) -> nops(p) - 1;
`is_trail/dipaths` := proc(p)
local n,e,i;
n := nops(p) - 1;
e := {seq([p[i],p[i+1]],i=1..n)};
return evalb(nops(e) = n);
end:
`is_cycle/dipaths` := proc(p)
local n,i;
n := nops(p) - 1;
return evalb(p[1] = p[n+1] and nops({seq(p[i],i=1..n)}) = n);
end:
`is_small_element/ditrails` := (V) -> (E) -> (n::nonnegint) -> proc(p)
if not `is_small_element/dipaths`(V)(E)(n)(p) then
return false;
fi;
return `is_trail/dipaths`(p);
end:
`is_element/ditrails` := (V) -> (E) -> proc(p)
if not `is_element/dipaths`(V)(E)(p) then
return false;
fi;
return `is_trail/dipaths`(p);
end:
`list_small_elements/ditrails` := (V) -> (E) -> proc(n)
option remember;
local E0,E1,P,Q,p,v,F,i,e;
if n = 0 then
return map(v -> [v],V);
else
P := `list_small_elements/ditrails`(V)(E)(n-1);
Q := NULL;
for p in P do
E0 := {seq([p[i],p[i+1]],i=1..n-1)};
E1 := E minus E0;
v := p[1];
F := select(e -> e[2] = v,E1);
Q := Q,seq([e[1],op(p)],e in F);
od:
return [Q];
fi;
end:
`random_small_element/ditrails` := (V) -> (E) -> (n) -> proc(num_tries := 10)
local i,k,p,E0,E1,v,F,e;
k := num_tries;
if n = 0 then
if V = {} then
return FAIL;
else
return [random_element_of(V)()];
fi;
elif n = 1 then
if E = {} then
return FAIL;
else
return random_element_of(E)();
fi;
fi;
while k > 0 do
p := `random_small_element/ditrails`(V)(E)(n-1)();
k := k - 1;
if p <> FAIL then
E0 := {seq([p[i],p[i+1]],i=1..n-1)};
E1 := E minus E0;
v := p[1];
F := select(e -> e[2] = v,E1);
if F <> {} then
e := random_element_of(F)();
return [e[1],op(p)];
fi;
fi;
od;
return FAIL;
end:
`is_leq/ditrails` := NULL:
`count_elements/ditrails` := NULL:
`count_small_elements/ditrails` := NULL:
`roots/digraphs` := (V) -> proc(E)
V minus map(e -> e[2],E);
end:
`root/digraphs` := (V) -> proc(E)
local R;
R := `roots/digraphs`(V)(E);
if nops(R) = 1 then
return R[1];
else
error "Digraph does not have a unique root";
fi;
end:
`leaves/digraphs` := (V) -> proc(E)
V minus map(e -> e[1],E);
end:
`is_forest/digraphs` := (V::set) -> proc(E)
local NI,NO,R,v,U,U1;
NI := `in_neighbour_table/digraphs`(V)(E);
NO := `out_neighbour_table/digraphs`(V)(E);
R := NULL;
for v in V do
if nops(NI[v]) > 1 then
return false;
elif NI[v] = {} then
R := R,v;
fi;
od;
R := {R};
U := R;
U1 := {op(U),seq(op(NO[v]),v in U)};
while nops(U1) > nops(U) do
U := U1;
U1 := {op(U),seq(op(NO[v]),v in U)};
od:
if U <> V then return false; fi;
return true;
end:
`is_tree/digraphs` := (V) -> (E) ->
nops(`roots/digraphs`(V)(E)) = 1 and
`is_forest/digraphs`(V)(E);
`level_table/forest_digraphs` := (V) -> proc(E)
local X,N,L,k,v;
X := `roots/digraphs`(V)(E);
N := `out_neighbour_table/digraphs`(V)(E);
L := table():
k := 0;
while X <> {} do
for v in X do L[v] := k; od:
X := map(v -> op(N[v]),X);
k := k+1;
od:
return eval(L);
end:
`shadow_table/forest_digraphs` := (V) -> proc(E)
local X,Y,Z,S,x,z,N,u;
X := `leaves/digraphs`(V)(E);
Y := V minus X;
S := table():
for x in X do S[x] := {x}; od:
N := `out_neighbour_table/digraphs`(V)(E);
while Y <> {} do
Z := select(y -> ((N[y] minus X) = {}),Y);
if Z = {} then
error "Something wrong";
fi;
z := Z[1];
S[z] := map(op,{seq(S[u],u in N[z])});
X := X union {z};
Y := Y minus {z};
od:
return eval(S);
end:
`shadow_set/forest_digraphs` := (V) -> proc(E)
local S,v;
S := `shadow_table/forest_digraphs`(V)(E);
return {seq(S[v],v in V)};
end:
`is_element/stasheff_digraphs` := (V::set) -> proc(EL)
local E,L,L0,r,i,TT,rTT;
if not (type(EL,list) and nops(EL) = 2) then
return false;
fi;
E,L := op(EL);
if not(`is_tree/digraphs`(V)(E)) then
return false;
fi;
L0 := `leaves/digraphs`(V)(E);
if not({op(L)} = L0 and nops(L0) = nops(L)) then
return false;
fi;
r := table();
for i from 1 to nops(L) do r[L[i]] := i; od;
TT := `shadow_set/forest_digraphs`(V)(E);
rTT := map(T -> map(v -> r[v],T),TT);
return `and`(op(map(`is_element/posint_intervals`,rTT)));
end:
|
-- @@stderr --
dtrace: failed to compile script test/unittest/providers/err.D_PDESC_ZERO.nonprofile.d: [D_PDESC_ZERO] line 24: probe description profile:::test does not match any probes
|
#' dispensr.
#'
#' @name dispensr
#' @docType package
NULL
|
-- ---------------------------------------------------------------------
-- Exercise. Prove that in rings
-- a * 0 = 0
-- ----------------------------------------------------------------------
import algebra.ring
namespace my_ring
variables {R : Type*} [ring R]
variable (a : R)
-- 1st proof
-- ===============
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : (mul_add a 0 0).symm
... = a * 0 : congr_arg (λ x, a * x) (add_zero 0)
... = a * 0 + 0 : (add_zero (a * 0)).symm,
rw add_left_cancel h
end
-- 2nd proof
-- ===============
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : by rw ← mul_add
... = a * 0 : by rw add_zero
... = a * 0 + 0 : by rw add_zero,
rw add_left_cancel h
end
-- 3rd proof
-- ===============
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
{ rw [←mul_add, add_zero, add_zero] },
rw add_left_cancel h
end
-- 4th proof
-- ===============
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : by simp
... = a * 0 : by simp
... = a * 0 + 0 : by simp,
simp,
end
end my_ring
|
Best Paddock Surface Footing for Rainy Climate | Listen To Your .Jan 20, 2015 . "As a farrier, my favourite footing in this area is crusher dust. . I'd probably have them stand in mud over hogfuel because when hogfuel is new,.paddock stand for crusher,Motorcycle Paddock Stands for sale | Results 1 - 48 of 4162 . Shop for great deals on Motorcycle Paddock Stands. You'll find new or used products in Motorcycle Paddock Stands on .Rear Paddock Stand: Motorcycle Parts | Find great deals on for Rear Paddock Stand in Other Parts. Shop with confidence.
Amazon: Venom Sport Bike Motorcycle Front Fork & Rear .
Buy Venom Sport Bike Motorcycle Front Fork & Rear Paddle Combo Wheel Lift Stands Paddock Stands Universal Motorcycle Combo Yamaha Fits Honda.
Amazon: Safstar Motorcycle Stand Sport Bike Rear Forklift Rear .
Buy Safstar Motorcycle Stand Sport Bike Rear Forklift Rear Spoolift Paddock Swingarm Lift for Auto Bike Shop, Red: Stands - Amazon ✓ FREE DELIVERY.
Steel Motorbike Chock Front Wheel Paddock Stand Stay Motorcycle . Motorbike Heavy Duty Paddock Stand for For Front Or Rear Wheel Motorcycle Track.
module Interpreter
import Data.Fin
import Data.Vect
data Ty = TyInt | TyBool | TyFun Ty Ty
interpTy : Ty -> Type
interpTy TyInt = Integer
interpTy TyBool = Bool
interpTy (TyFun x y) = interpTy x -> interpTy y
using (G : Vect n Ty)
-- See 'De Bruijn index'
-- HasType i G t means variable i in context G has type t
-- This type acts like a Nat, like an index to variables
data HasType : (i : Fin n) -> Vect n Ty -> Ty -> Type where
Stop : HasType FZ (t :: G) t
Pop : HasType k G t -> HasType (FS k) (u :: G) t
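-- Well-typed expressions, indexed by the context G and the result type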
data Expr : Vect n Ty -> Ty -> Type where
Var : HasType i G t -> Expr G t
Val : (x : Integer) -> Expr G TyInt
Lam : Expr (a :: G) t -> Expr G (TyFun a t)
App : Expr G (TyFun a t) -> Expr G a -> Expr G t
Op : (interpTy a -> interpTy b -> interpTy c) ->
Expr G a -> Expr G b -> Expr G c
If : Expr G TyBool -> Lazy (Expr G a) -> Lazy (Expr G a) -> Expr G a
data Env : Vect n Ty -> Type where
Nil : Env Nil
(::) : interpTy a -> Env G -> Env (a :: G)
lookup : HasType i G t -> Env G -> interpTy t
lookup Stop (x :: xs) = x
lookup (Pop k) (x :: xs) = lookup k xs
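-- Evaluate a well-typed expression under an environment matching its context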
interp : Env G -> Expr G t -> interpTy t
interp env (Var i) = lookup i env
interp env (Val x) = x
interp env (Lam f) = \x => interp (x :: env) f
interp env (App f x) = (interp env f) (interp env x)
interp env (Op f x y) = f (interp env x) (interp env y)
interp env (If b x y) = if interp env b
then interp env x
else interp env y
|
function count = compute_counts(data, sz)
% COMPUTE_COUNTS Count the number of times each combination of discrete assignments occurs
% count = compute_counts(data, sz)
%
% data(i,t) is the value of variable i in case t
% sz(i) : values for variable i are assumed to be in [1:sz(i)]
%
% Example: to compute a transition matrix for an HMM from a sequence of labeled states:
% transmat = mk_stochastic(compute_counts([seq(1:end-1); seq(2:end)], [nstates nstates]));
assert(length(sz) == size(data, 1));
P = prod(sz);
indices = subv2ind(sz, data'); % each row of data' is a case
%count = histc(indices, 1:P);
count = hist(indices, 1:P);
count = myreshape(count, sz);
|
### Decision Trees and Random Forests
#### Packages
```python
import sys
import sklearn
import graphviz
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
print('The Python version is {}.\n'.format(sys.version))
print('The Numpy version is {}.\n'.format(np.__version__))
print('The Pandas version is {}.\n'.format(pd.__version__))
print('The Matplotlib version is {}.\n'.format(mpl.__version__))
print('The Scikit-Learn version is {}.\n'.format(sklearn.__version__))
```
The Python version is 3.8.5 (default, Jul 28 2020, 12:59:40)
[GCC 9.3.0].
The Numpy version is 1.19.1.
The Pandas version is 1.1.0.
The Matplotlib version is 3.3.3.
The Scikit-Learn version is 0.24.0.
### Decision Trees
Decision trees and the machine learning models that are based on them, in particular random forests and gradient boosted trees, are fundamentally different types of models from generalized linear models, such as logistic regression. GLMs are rooted in the theories of classical statistics, which have a long history. The mathematics behind linear regression was originally developed at the beginning of the 19th century by Legendre and Gauss. Because of this, the normal distribution is also called the Gaussian.
Decision trees have an intuitive structure and mimic the way that logical decisions might be made by humans. Therefore, they are a highly interpretable type of mathematical model, which can be a particularly desirable property depending on the audience.
Decision trees are able to capture non-linear effects of the features, as opposed to a linear relationship.
In general, it should be clear that in a tree with $n$ levels, where every node before the final level is split, there will be $2^n$ leaf nodes; for example, a tree of depth 3 has $2^3 = 8$ leaves. This is important to bear in mind, as the maximum depth is one of the hyperparameters that you can set for a decision tree classifier in scikit-learn.
#### Growing a tree for the study case
Let us load the data and select the desired columns:
```python
df = pd.read_csv('data/default_of_credit_card_clients_cleaned.csv')
features_response = df.columns.tolist()
to_remove = ['id', 'sex'] + ['pay_{}'.format(i) for i in range(2,7)] + features_response[-6:]
features_response = [feature for feature in features_response if feature not in to_remove]
```
Let us split the dataset into train and test sets:
```python
X_train, X_test, y_train, y_test = train_test_split(df[features_response].values,
df.default_payment_next_month.values,
test_size=0.2, random_state=24)
```
Now, let us instantiate the decision tree model:
```python
dt = tree.DecisionTreeClassifier(max_depth=2)
dt.fit(X_train, y_train)
```
DecisionTreeClassifier(max_depth=2)
Using the graphviz library, let us display a graphical representation of the tree:
```python
dot_data = tree.export_graphviz(dt, out_file=None, filled=True,
rounded=True, feature_names=features_response,
proportion=True, class_names=['Not Defaulted', 'Defaulted'])
```
```python
graph = graphviz.Source(dot_data)
graph
```
The trained decision tree started with all the training samples in the initial node at the top of the tree, and then split them into two groups based on a **threshold** in one of the features. The cut point is represented by the Boolean condition **pay_1 <= 1.5** in the top node, which holds all the samples ("samples = 100.0%"). Following the first split, 89.5% of the samples were sorted into the node on the left, while the remaining 10.5% went into the node on the right.
Let us confirm the proportions stated by the model:
```python
(features_response.index('pay_1'),
X_train.shape)
```
(4, (21331, 17))
```python
sum(X_train[:, features_response.index('pay_1')] <= 1.5)/X_train.shape[0]
```
0.8946134733486475
Let us confirm the values line (the class proportion for the first node):
```python
np.mean(y_train)
```
0.223102526838873
The nodes were also colored by class fraction: those with a higher proportion of the negative class than the positive class were orange, with darker orange signifying higher proportions, while those with a higher proportion of the positive class used a similar scheme in blue.
The line starting with "class" indicates how the decision tree made predictions from a given node, if that node were a leaf node.
Decision trees for classification make predictions by determining which leaf node a sample will be sorted in to, given the values of the features, and then predicting the class of the majority of the training samples in that leaf node.
#### Node Impurity
The goal of training decision trees is to make splits so that the next two nodes after the split have a higher purity, or, in other words, are closer to containing either only positive or only negative samples.
In practice, decision trees are actually trained using the inverse of purity, or node impurity. This is some measure of how far the node is from having 100% of the training samples belonging to one class and is analogous to the concept of a cost function, which signifies how far a given solution is from a theoretical perfect solution. For a given node $m$, the misclassification rate is simply the proportion of the less common class in that node, since all these samples will be misclassified when the majority class in that node is taken as the prediction.
Writing $p_{mk}$ for the fraction of class $k$ in node $m$, the binary case has just two class fractions, which must sum to one:
$p_{m1} = 1 - p_{m0}$
Let us plot the misclassification rate as a function of $p_{m0}$:
```python
p_m0 = np.linspace(0.01, 0.99, 101)
p_m1 = 1 - p_m0
misclassification_rate = np.minimum(p_m0, p_m1)
plt.plot(p_m0, misclassification_rate, label='Misclassification rate')
plt.xlabel('$p_{m0}$')
plt.legend()
```
Every time a node is split when growing a decision tree, two new nodes are created. Since the prediction from either of these new nodes is simply the majority class, an important goal will be to reduce the misclassification rate. Therefore, we will want to find a feature, from all the possible features, and a value of this feature at which to make a cut point, so that the misclassification rate, averaged across the two new nodes, will be as low as possible.
The algorithm then considers each possible threshold for every candidate feature and chooses the one that results in the lowest impurity, calculated as the average impurity across the two possible new nodes, weighted by the number of samples in each node. This process is repeated until a stopping criterion of the tree is met, such as reaching max_depth.
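As a sketch in symbols (the $n_{left}$ and $n_{right}$ notation here is assumed, not taken from the text above): a candidate split of a node holding $n$ samples into children holding $n_{left}$ and $n_{right}$ samples is scored by the weighted impurity
$$
\begin{equation}
\begin{split}
I_{split} = \frac{n_{left}}{n} I_{left} + \frac{n_{right}}{n} I_{right}
\end{split}
\end{equation}
$$
and the feature/threshold pair that minimizes $I_{split}$ is chosen.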
While the misclassification rate is an intuitive measure of impurity, it happens that there are better measures that can be used to find splits during the model training process. The two options that are available in scikit-learn for the impurity calculation, which you can specify with the criterion keyword argument, are the Gini impurity and the cross-entropy options.
#### Gini
The Gini impurity is calculated for a node $m$ using the following formula:
$$
\begin{equation}
\begin{split}
Gini = \sum_{k} p_{mk} (1 - p_{mk})
\end{split}
\end{equation}
$$
Here, the summation is taken over all classes. In the case of a binary classification problem, there are only two classes, and we can write
this programmatically as follows
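Reusing the `p_m0` grid defined earlier, a minimal sketch of the binary case:
```python
# With p_m1 = 1 - p_m0, the sum p_m0*(1 - p_m0) + p_m1*(1 - p_m1)
# collapses to 2 * p_m0 * (1 - p_m0).
gini_binary = 2 * p_m0 * (1 - p_m0)
```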
#### Cross Entropy
Cross entropy is calculated using this formula:
$$
\begin{equation}
\begin{split}
\text{cross entropy} = - \sum_{k} p_{mk} log(p_{mk})
\end{split}
\end{equation}
$$
Let us see these two distributions:
```python
def gini_impurity(**kwargs):
"""
Returns the gini impurity for a given array (or arrays)
Arguments:
**kwargs (array): Numpy Arrays
Returns:
Numpy Array
"""
return sum([i*(1-i) for i in kwargs.values()])
def cross_entropy_impurity(**kwargs):
"""
    Returns the cross entropy for a given array (or arrays)
Arguments:
**kwargs (array): Numpy Arrays
Returns:
Numpy Array
"""
return - sum([i*np.log(i) for i in kwargs.values()])
```
```python
gini = gini_impurity(x1=p_m0, x2=p_m1)
cross_entropy = cross_entropy_impurity(x1=p_m0, x2=p_m1)
```
```python
plt.plot(p_m0, misclassification_rate, label='Misclassification rate')
plt.plot(p_m0, gini, label='Gini impurity')
plt.plot(p_m0, cross_entropy, label='Cross entropy')
plt.xlabel('$p_{m0}$')
plt.legend()
```
Like the misclassification rate, both the Gini impurity and the cross entropy are highest when the class fractions are equal at 0.5, and they decrease as the node becomes purer – in other words, when it contains a higher proportion of just one of the classes. However, the Gini impurity is somewhat steeper than the misclassification rate in certain regions of the class fraction, which enables it to find the best split more effectively; cross entropy is steeper still.
It is necessary to consider both impurity metrics in a cross-validation search for hyperparameters in order to
determine the appropriate one. Note that in scikit-learn, Gini impurity can be specified with the criterion argument using the 'gini' string, while cross entropy is just referred to as 'entropy'.
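As a sketch, the criterion could be added to the hyperparameter grid alongside max_depth, so that the cross-validation search compares both metrics directly (the values and the `params_both` name here are illustrative):
```python
params_both = {'max_depth': [2, 4, 6, 8],
               'criterion': ['gini', 'entropy']}
```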
#### Greedy Algorithm
There is no guarantee that a decision tree trained by the process described previously will be the best possible decision tree for finding leaf nodes with the lowest impurity. This is because the algorithm used to train decision trees is what is called a greedy algorithm.
The reason why the algorithm is used this way is because it takes substantially longer to consider all possible splits in a way that enables finding the truly optimal tree. Despite this shortcoming of the decision tree training process, there are methods that you can use to reduce the possible harmful effects of the greedy algorithm. Instead of searching for the best split at each node, the splitter keyword argument to the decision tree class can be specified as random in order to choose a random feature to make a split on.
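A sketch of this option (`dt_random` is an illustrative name; the other settings mirror the tree above):
```python
# splitter='random' evaluates randomly drawn candidate splits instead of
# exhaustively searching for the single best one.
dt_random = tree.DecisionTreeClassifier(max_depth=2, splitter='random', random_state=24)
dt_random.fit(X_train, y_train)
```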
### Decision Trees Pros
#### No need to scale data
Decision trees are not trained by gradient descent, nor do they use L1 or L2 regularization to penalize coefficients. Algorithms that rely on those techniques need the features scaled so that their weights stay balanced during the training phase.
#### Non-linear relationships and interactions
Because each successive split in a decision tree is performed on a subset of the samples resulting from previous split(s), decision trees can describe complex non-linear relationships of a single feature, as well as interactions between features.
#### Predicted Probabilities
A decision tree makes predictions based on the majority class of the leaf nodes, and the predicted probability is the proportion of that class in the leaf node. If a leaf node consists of 75% positive-class samples, for example, the prediction for that node will be the positive class and the predicted probability will be 0.75. The predicted probabilities from decision trees are not considered to be as statistically rigorous as those from generalized linear models, but they are still commonly used to measure the performance of models by methods that depend on varying the classification threshold, such as the ROC curve or the precision-recall curve.
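A quick sketch of this, assuming the `dt` tree fitted above and the held-out test set:
```python
from sklearn.metrics import roc_auc_score

# Column 1 of predict_proba is P(positive class), i.e. the leaf-node fraction.
y_test_proba = dt.predict_proba(X_test)[:, 1]
print(roc_auc_score(y_test, y_test_proba))
```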
### Finding the best hyperparameters for a D3 model
Let us see the number of samples in the case study training set:
```python
X_train.shape
```
(21331, 17)
With 21,331 training samples and 4-fold cross-validation, there will be three-fourths of the samples, or about 16,000 samples, in each training fold.
A theoretical limitation is that at least one sample is needed in each leaf. The depth of the tree relates to the number of leaves: a tree split at every node down to depth $n$ has $2^n$ leaf nodes, so a tree with $L$ leaf nodes has a depth of approximately $\log_{2}(L)$. In the limiting case, if the tree grows deep enough that every leaf node holds one training sample of a given fold, the depth will be $\log_{2}(16{,}000) \approx 14$. So, 14 is the theoretical limit to the depth of a tree that could grow in this case.
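A quick check of this arithmetic:
```python
np.log2(16000)  # about 13.97, so a depth of 14 could isolate every sample
```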
The depth of 14 would certainly imply overfitting, so this depth is not desired. Let us define different values for the max depth parameter:
```python
params = {'max_depth': [1, 2, 4, 6, 8, 10, 12]}
```
And now instantiate the GridSearch object:
```python
cv = GridSearchCV(dt, param_grid=params, scoring='roc_auc',
n_jobs=None, refit=True, cv=4, verbose=1,
pre_dispatch=None, error_score=np.nan, return_train_score=True)
```
The other options were the ROC AUC metric (scoring='roc_auc'), 4-fold cross-validation (cv=4), and calculating training scores (return_train_score=True) to assess the bias-variance trade-off.
Let us fit the data:
```python
cv.fit(X_train, y_train)
```
Fitting 4 folds for each of 7 candidates, totalling 28 fits
GridSearchCV(cv=4, estimator=DecisionTreeClassifier(max_depth=2),
param_grid={'max_depth': [1, 2, 4, 6, 8, 10, 12]},
pre_dispatch=None, return_train_score=True, scoring='roc_auc',
verbose=1)
And save the dictionary of results in a DataFrame object:
```python
cv_results_df = pd.DataFrame(cv.cv_results_)
cv_results_df
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean_fit_time</th>
<th>std_fit_time</th>
<th>mean_score_time</th>
<th>std_score_time</th>
<th>param_max_depth</th>
<th>params</th>
<th>split0_test_score</th>
<th>split1_test_score</th>
<th>split2_test_score</th>
<th>split3_test_score</th>
<th>mean_test_score</th>
<th>std_test_score</th>
<th>rank_test_score</th>
<th>split0_train_score</th>
<th>split1_train_score</th>
<th>split2_train_score</th>
<th>split3_train_score</th>
<th>mean_train_score</th>
<th>std_train_score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0.032530</td>
<td>0.007054</td>
<td>0.004060</td>
<td>0.000895</td>
<td>1</td>
<td>{'max_depth': 1}</td>
<td>0.639514</td>
<td>0.643398</td>
<td>0.651891</td>
<td>0.650753</td>
<td>0.646389</td>
<td>0.005136</td>
<td>7</td>
<td>0.648680</td>
<td>0.647384</td>
<td>0.644553</td>
<td>0.644934</td>
<td>0.646388</td>
<td>0.001712</td>
</tr>
<tr>
<th>1</th>
<td>0.051850</td>
<td>0.001237</td>
<td>0.003802</td>
<td>0.000087</td>
<td>2</td>
<td>{'max_depth': 2}</td>
<td>0.695134</td>
<td>0.699022</td>
<td>0.713376</td>
<td>0.699510</td>
<td>0.701761</td>
<td>0.006917</td>
<td>5</td>
<td>0.704034</td>
<td>0.702700</td>
<td>0.698113</td>
<td>0.702535</td>
<td>0.701845</td>
<td>0.002232</td>
</tr>
<tr>
<th>2</th>
<td>0.101508</td>
<td>0.009389</td>
<td>0.003910</td>
<td>0.000102</td>
<td>4</td>
<td>{'max_depth': 4}</td>
<td>0.732720</td>
<td>0.741078</td>
<td>0.746946</td>
<td>0.743731</td>
<td>0.741119</td>
<td>0.005276</td>
<td>2</td>
<td>0.756882</td>
<td>0.752421</td>
<td>0.749368</td>
<td>0.753055</td>
<td>0.752932</td>
<td>0.002673</td>
</tr>
<tr>
<th>3</th>
<td>0.143802</td>
<td>0.002191</td>
<td>0.004772</td>
<td>0.000942</td>
<td>6</td>
<td>{'max_depth': 6}</td>
<td>0.743836</td>
<td>0.746910</td>
<td>0.751932</td>
<td>0.740517</td>
<td>0.745798</td>
<td>0.004201</td>
<td>1</td>
<td>0.782202</td>
<td>0.780044</td>
<td>0.775228</td>
<td>0.774776</td>
<td>0.778062</td>
<td>0.003158</td>
</tr>
<tr>
<th>4</th>
<td>0.188230</td>
<td>0.005357</td>
<td>0.004744</td>
<td>0.000730</td>
<td>8</td>
<td>{'max_depth': 8}</td>
<td>0.727023</td>
<td>0.732562</td>
<td>0.750618</td>
<td>0.731040</td>
<td>0.735311</td>
<td>0.009066</td>
<td>3</td>
<td>0.812061</td>
<td>0.808245</td>
<td>0.803415</td>
<td>0.802370</td>
<td>0.806523</td>
<td>0.003890</td>
</tr>
<tr>
<th>5</th>
<td>0.225156</td>
<td>0.002811</td>
<td>0.004701</td>
<td>0.000228</td>
<td>10</td>
<td>{'max_depth': 10}</td>
<td>0.706871</td>
<td>0.708786</td>
<td>0.717045</td>
<td>0.712074</td>
<td>0.711194</td>
<td>0.003857</td>
<td>4</td>
<td>0.848864</td>
<td>0.853907</td>
<td>0.841430</td>
<td>0.836463</td>
<td>0.845166</td>
<td>0.006704</td>
</tr>
<tr>
<th>6</th>
<td>0.259489</td>
<td>0.005075</td>
<td>0.004958</td>
<td>0.000604</td>
<td>12</td>
<td>{'max_depth': 12}</td>
<td>0.676996</td>
<td>0.661479</td>
<td>0.676430</td>
<td>0.675982</td>
<td>0.672722</td>
<td>0.006501</td>
<td>6</td>
<td>0.887915</td>
<td>0.903416</td>
<td>0.885891</td>
<td>0.875493</td>
<td>0.888179</td>
<td>0.009980</td>
</tr>
</tbody>
</table>
</div>
The DataFrame has one row for each combination of hyperparameters in the grid. Since only one hyperparameter was investigated, there is one row for each of the seven values searched. Each row carries a lot of output, such as the mean and standard deviation of the time in seconds that each of the four folds took for both training (fitting) and testing (scoring).
```python
cv_results_df.columns
```
Index(['mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time',
'param_max_depth', 'params', 'split0_test_score', 'split1_test_score',
'split2_test_score', 'split3_test_score', 'mean_test_score',
'std_test_score', 'rank_test_score', 'split0_train_score',
'split1_train_score', 'split2_train_score', 'split3_train_score',
'mean_train_score', 'std_train_score'],
dtype='object')
Generally speaking, the "best" combination of hyperparameters was the one with the highest average testing score. This was an estimation of how well the model, fit using these hyperparameters, could perform when scored on new data.
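Since the search was run with refit=True, scikit-learn also exposes the winner directly (the values shown follow from the table above):
```python
print(cv.best_params_)  # {'max_depth': 6}
print(cv.best_score_)   # mean cross-validated ROC AUC, about 0.746
```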
Let us display how the average testing score varies with the max_depth hyperparameter, including the standard deviations of the 4-fold training and testing scores as error bars:
```python
ax = plt.axes()
ax.errorbar(cv_results_df['param_max_depth'],
cv_results_df['mean_train_score'],
yerr=cv_results_df['std_train_score'],
label='Mean $\pm$ 1 SD training scores')
ax.errorbar(cv_results_df['param_max_depth'],
cv_results_df['mean_test_score'],
yerr=cv_results_df['std_test_score'],
label='Mean $\pm$ 1 SD testing scores')
ax.legend()
plt.xlabel('max_depth')
plt.ylabel('ROC AUC')
```
The standard deviations of the training and testing scores are shown as vertical lines at each value of max_depth that was tried; the distance above and below the average score is 1 standard deviation. (The standard deviation is in the same units as the score itself, which would not be true of the variance, the standard deviation squared.)
The error bars indicate how variable the scores were across folds. There was not much variability between the folds, since the standard deviations were not high, so this was not an issue.
It is possible to see that as the tree grows deeper and deeper, the model fits the training data better and better. As noted previously, if the trees grew deep enough for each leaf node to hold just one training sample, the model would become very specific to the training data; in fact, it would fit the training data perfectly. Such a model can be said to have extremely high variance.
Increasing max_depth only raised testing scores up to a point, after which deeper trees had lower testing performance. This is another example of how the bias-variance trade-off can be used to create a better predictive model. In summary, shallower trees had more bias, since they did not fit the training data as well. This is okay: some bias is acceptable as long as it yields better performance on the testing data.
The best depth for the study case was 6.
Compared with the previous logistic regression model, the D3 showed better performance, with an ROC AUC of 0.745; the best performance for the logistic regression, with feature engineering, was 0.740.
### Random Forests
Random forests are examples of what are called ensemble models, because they are formed by combining other models. By combining the predictions of many models, it is possible to improve upon the deficiencies of any given one of them.
Random forests are just ensembles of many decision trees; all the models in this kind of ensemble have the same mathematical form. One hyperparameter, **n_estimators**, needs to be specified when building a random forest model. This parameter sets the number of D3 estimators in the forest. Generally speaking, the more trees, the better. As the number of trees increases, the variance of the overall ensemble will decrease. This should result in the random forest model having better generalization to new data, which will be reflected in increased testing scores.
The variance diminishes when using random forests mainly because of two principles:
#### The number of features considered in each split
Using all the features to fit the training data makes overfitting possible. By limiting the number of features considered at each split, some of the decision trees in a random forest will potentially find better splits. This is because, although they are still greedily searching for the best split, they are doing it with a limited selection of features.
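A sketch of this setting (`rf_sqrt` is an illustrative name; max_features='sqrt' limits each split to roughly sqrt(17) ≈ 4 of the 17 features in this dataset):
```python
rf_sqrt = RandomForestClassifier(n_estimators=50, max_features='sqrt', random_state=4)
```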
#### The samples used to grow different trees
The other way that the trees in a random forest differ from each other is that they are usually grown with different training samples. To do this, a statistical procedure known as bootstrapping is used, which means to generate new synthetic datasets from the original data. The synthetic datasets are created by randomly selecting samples from the original dataset using replacement. Here, "replacement" means that if we select a certain sample, we will continue to consider it for selection; that is, it is "replaced" into the original dataset after we've sampled it.
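A minimal sketch of one bootstrap resample, assuming plain NumPy indexing is enough to illustrate the idea:
```python
# Draw n row indices with replacement: some rows repeat, and on average
# about 1 - 1/e (roughly 63%) of the distinct rows appear in the resample.
n = X_train.shape[0]
boot_idx = np.random.RandomState(24).choice(n, size=n, replace=True)
X_boot, y_boot = X_train[boot_idx], y_train[boot_idx]
```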
The procedure of using random sampling to create synthetic datasets, and training models on them separately, is called bagging, which is short for bootstrap aggregation.
### Selecting the Best Features
After model training, classification trees will take an input sample and produce a predicted class, for example, whether or not a credit account in our case study problem will default. One intuitive approach to combining the predictions of these trees into the ultimate prediction of the forest is to take a majority vote. Scikit-learn uses a somewhat different approach: averaging the predicted probabilities for each class and then choosing the one with the highest probability. This captures more information from each tree than just the predicted class.
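A minimal sketch of this soft-voting aggregation, using a small illustrative forest (`rf_demo` is a hypothetical name and its settings are arbitrary):
```python
# Fit a small forest, then reproduce its aggregation by hand: average the
# per-tree probabilities and predict the class with the highest mean.
rf_demo = RandomForestClassifier(n_estimators=10, max_depth=3, random_state=4)
rf_demo.fit(X_train, y_train)
tree_probs = [t.predict_proba(X_test) for t in rf_demo.estimators_]
forest_probs = np.mean(tree_probs, axis=0)
forest_pred = np.argmax(forest_probs, axis=1)
```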
#### Predictions and Interpretability
Random forests are difficult to understand and interpret beyond the ranking of the features (which is why they are sometimes used for feature selection). Although the ranking is made available, how it was computed is difficult to trace.
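For instance, the ranking of the `rf_demo` forest from the sketch above could be inspected like this (the importances are impurity-based; the exact numbers are not reproduced here):
```python
# feature_importances_ exposes the ranking, but not how it was derived.
feat_imp = pd.Series(rf_demo.feature_importances_, index=features_response)
print(feat_imp.sort_values(ascending=False).head())
```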
Let us instantiate a Random Forest model:
```python
rf = RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=3,
min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None,
random_state=4, verbose=0, warm_start=False, class_weight=None)
```
```python
rf_params = {'n_estimators': list(range(10, 110, 10))}
```
Let us instantiate the cv GridSearch object:
```python
cv_rf = GridSearchCV(rf, param_grid=rf_params, scoring='roc_auc',
n_jobs=None, refit=True, cv=4, verbose=1,
pre_dispatch=None, error_score=np.nan, return_train_score=True)
```
And train the model:
```python
cv_rf.fit(X_train, y_train)
```
Fitting 4 folds for each of 10 candidates, totalling 40 fits
GridSearchCV(cv=4,
estimator=RandomForestClassifier(max_depth=3, n_estimators=10,
random_state=4),
param_grid={'n_estimators': [10, 20, 30, 40, 50, 60, 70, 80, 90,
100]},
pre_dispatch=None, return_train_score=True, scoring='roc_auc',
verbose=1)
```python
import pandas as pd

cv_rf_results_df = pd.DataFrame(cv_rf.cv_results_)
cv_rf_results_df
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean_fit_time</th>
<th>std_fit_time</th>
<th>mean_score_time</th>
<th>std_score_time</th>
<th>param_n_estimators</th>
<th>params</th>
<th>split0_test_score</th>
<th>split1_test_score</th>
<th>split2_test_score</th>
<th>split3_test_score</th>
<th>mean_test_score</th>
<th>std_test_score</th>
<th>rank_test_score</th>
<th>split0_train_score</th>
<th>split1_train_score</th>
<th>split2_train_score</th>
<th>split3_train_score</th>
<th>mean_train_score</th>
<th>std_train_score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0.165941</td>
<td>0.006219</td>
<td>0.009214</td>
<td>0.000139</td>
<td>10</td>
<td>{'n_estimators': 10}</td>
<td>0.739183</td>
<td>0.746599</td>
<td>0.772127</td>
<td>0.771322</td>
<td>0.757308</td>
<td>0.014656</td>
<td>10</td>
<td>0.767973</td>
<td>0.767158</td>
<td>0.760550</td>
<td>0.763683</td>
<td>0.764841</td>
<td>0.002955</td>
</tr>
<tr>
<th>1</th>
<td>0.310278</td>
<td>0.004693</td>
<td>0.014494</td>
<td>0.000648</td>
<td>20</td>
<td>{'n_estimators': 20}</td>
<td>0.745612</td>
<td>0.753712</td>
<td>0.777616</td>
<td>0.771737</td>
<td>0.762169</td>
<td>0.012998</td>
<td>9</td>
<td>0.772819</td>
<td>0.771499</td>
<td>0.765944</td>
<td>0.765395</td>
<td>0.768914</td>
<td>0.003284</td>
</tr>
<tr>
<th>2</th>
<td>0.449935</td>
<td>0.012662</td>
<td>0.020149</td>
<td>0.002076</td>
<td>30</td>
<td>{'n_estimators': 30}</td>
<td>0.747259</td>
<td>0.754154</td>
<td>0.777954</td>
<td>0.771825</td>
<td>0.762798</td>
<td>0.012524</td>
<td>7</td>
<td>0.774655</td>
<td>0.772100</td>
<td>0.765026</td>
<td>0.766697</td>
<td>0.769620</td>
<td>0.003910</td>
</tr>
<tr>
<th>3</th>
<td>0.595663</td>
<td>0.013271</td>
<td>0.025158</td>
<td>0.002189</td>
<td>40</td>
<td>{'n_estimators': 40}</td>
<td>0.746682</td>
<td>0.754553</td>
<td>0.777709</td>
<td>0.773134</td>
<td>0.763020</td>
<td>0.012813</td>
<td>4</td>
<td>0.774300</td>
<td>0.772642</td>
<td>0.765051</td>
<td>0.765876</td>
<td>0.769467</td>
<td>0.004057</td>
</tr>
<tr>
<th>4</th>
<td>0.720641</td>
<td>0.005543</td>
<td>0.028281</td>
<td>0.000115</td>
<td>50</td>
<td>{'n_estimators': 50}</td>
<td>0.748009</td>
<td>0.755878</td>
<td>0.779465</td>
<td>0.771250</td>
<td>0.763651</td>
<td>0.012379</td>
<td>1</td>
<td>0.775241</td>
<td>0.774025</td>
<td>0.765732</td>
<td>0.766947</td>
<td>0.770486</td>
<td>0.004191</td>
</tr>
<tr>
<th>5</th>
<td>0.852607</td>
<td>0.001184</td>
<td>0.033063</td>
<td>0.000228</td>
<td>60</td>
<td>{'n_estimators': 60}</td>
<td>0.748437</td>
<td>0.755604</td>
<td>0.779478</td>
<td>0.770978</td>
<td>0.763624</td>
<td>0.012252</td>
<td>2</td>
<td>0.775419</td>
<td>0.773615</td>
<td>0.766133</td>
<td>0.767032</td>
<td>0.770550</td>
<td>0.004031</td>
</tr>
<tr>
<th>6</th>
<td>0.993426</td>
<td>0.003056</td>
<td>0.038019</td>
<td>0.000113</td>
<td>70</td>
<td>{'n_estimators': 70}</td>
<td>0.747491</td>
<td>0.754465</td>
<td>0.779104</td>
<td>0.771023</td>
<td>0.763021</td>
<td>0.012620</td>
<td>3</td>
<td>0.774420</td>
<td>0.773379</td>
<td>0.765960</td>
<td>0.766174</td>
<td>0.769983</td>
<td>0.003934</td>
</tr>
<tr>
<th>7</th>
<td>1.143173</td>
<td>0.005024</td>
<td>0.043118</td>
<td>0.001129</td>
<td>80</td>
<td>{'n_estimators': 80}</td>
<td>0.747424</td>
<td>0.754488</td>
<td>0.778753</td>
<td>0.770872</td>
<td>0.762884</td>
<td>0.012502</td>
<td>5</td>
<td>0.774234</td>
<td>0.773257</td>
<td>0.765978</td>
<td>0.766220</td>
<td>0.769922</td>
<td>0.003840</td>
</tr>
<tr>
<th>8</th>
<td>1.279748</td>
<td>0.004862</td>
<td>0.049479</td>
<td>0.003227</td>
<td>90</td>
<td>{'n_estimators': 90}</td>
<td>0.747097</td>
<td>0.753794</td>
<td>0.778507</td>
<td>0.770426</td>
<td>0.762456</td>
<td>0.012571</td>
<td>8</td>
<td>0.774095</td>
<td>0.772973</td>
<td>0.766022</td>
<td>0.765748</td>
<td>0.769709</td>
<td>0.003846</td>
</tr>
<tr>
<th>9</th>
<td>1.416619</td>
<td>0.002662</td>
<td>0.052569</td>
<td>0.000116</td>
<td>100</td>
<td>{'n_estimators': 100}</td>
<td>0.747221</td>
<td>0.753924</td>
<td>0.779195</td>
<td>0.770937</td>
<td>0.762819</td>
<td>0.012811</td>
<td>6</td>
<td>0.774340</td>
<td>0.773598</td>
<td>0.766103</td>
<td>0.766914</td>
<td>0.770238</td>
<td>0.003750</td>
</tr>
</tbody>
</table>
</div>
Let us plot the performance for each grid search parameter setting:
```python
import matplotlib.pyplot as plt

fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
axs[0].plot(cv_rf_results_df['param_n_estimators'],
            cv_rf_results_df['mean_fit_time'],
            '-o')
axs[0].set_xlabel('Number of trees')
axs[0].set_ylabel('Mean fit time (seconds)')
axs[1].errorbar(cv_rf_results_df['param_n_estimators'],
                cv_rf_results_df['mean_test_score'],
                yerr=cv_rf_results_df['std_test_score'])
axs[1].set_xlabel('Number of trees')
axs[1].set_ylabel(r'Mean testing ROC AUC $\pm$ 1 SD')
plt.tight_layout()
```
The plots show that, by using a random forest, model performance increased on the cross-validation testing folds compared to the previous models. Although no attempt was made to tune the random forest hyperparameters for the best possible performance, the results indicate that a random forest outperformed the previous efforts.
However, along with these higher model testing scores, notice that there was also more variability between the folds than was seen with the decision tree; this variability is visible as larger standard deviations in model testing scores across the folds.
The left plot shows a linear increase in training time as more trees were added to the forest, which is expected since every additional tree must be fit. The additional computation time was not matched by a corresponding gain in performance: beyond 20 trees it is not clear that adding more trees reliably improves testing performance. While the model with 50 trees had the highest score, the fact that adding more trees actually decreased the testing score somewhat suggests that the gain in ROC AUC at 50 trees may just be due to randomness.
Let us inspect the best hyperparameter value found by the grid search, as well as the feature importances of the best estimator:
```python
cv_rf.best_params_
```
{'n_estimators': 50}
```python
feat_imp_df = pd.DataFrame({
'Feature name':features_response,
'Importance':cv_rf.best_estimator_.feature_importances_
})
feat_imp_df.sort_values('Importance', ascending=False)
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Feature name</th>
<th>Importance</th>
</tr>
</thead>
<tbody>
<tr>
<th>4</th>
<td>pay_1</td>
<td>0.609609</td>
</tr>
<tr>
<th>11</th>
<td>pay_amt1</td>
<td>0.094123</td>
</tr>
<tr>
<th>0</th>
<td>limit_bal</td>
<td>0.079265</td>
</tr>
<tr>
<th>13</th>
<td>pay_amt3</td>
<td>0.047067</td>
</tr>
<tr>
<th>12</th>
<td>pay_amt2</td>
<td>0.035393</td>
</tr>
<tr>
<th>6</th>
<td>bill_amt2</td>
<td>0.022422</td>
</tr>
<tr>
<th>14</th>
<td>pay_amt4</td>
<td>0.021164</td>
</tr>
<tr>
<th>15</th>
<td>pay_amt5</td>
<td>0.015962</td>
</tr>
<tr>
<th>10</th>
<td>bill_amt6</td>
<td>0.014293</td>
</tr>
<tr>
<th>8</th>
<td>bill_amt4</td>
<td>0.013454</td>
</tr>
<tr>
<th>7</th>
<td>bill_amt3</td>
<td>0.013321</td>
</tr>
<tr>
<th>16</th>
<td>pay_amt6</td>
<td>0.011831</td>
</tr>
<tr>
<th>9</th>
<td>bill_amt5</td>
<td>0.011264</td>
</tr>
<tr>
<th>5</th>
<td>bill_amt1</td>
<td>0.008574</td>
</tr>
<tr>
<th>1</th>
<td>education</td>
<td>0.001630</td>
</tr>
<tr>
<th>3</th>
<td>age</td>
<td>0.000459</td>
</tr>
<tr>
<th>2</th>
<td>marriage</td>
<td>0.000170</td>
</tr>
</tbody>
</table>
</div>
The top 5 most important features from the random forest were the same as the top 5 chosen by the ANOVA F-test analysis done previously, though in a somewhat different order. This is a good confirmation of the importance of these features, since they were evaluated by different methods.
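For reference, a minimal sketch of such an F-test ranking with scikit-learn (assuming `features` is the list of feature column names in `X_train`) might look like:

```python
import pandas as pd
from sklearn.feature_selection import f_classif

F_vals, p_vals = f_classif(X_train, y_train)
f_test_df = pd.DataFrame({'Feature name': features, 'F-statistic': F_vals})
f_test_df.sort_values('F-statistic', ascending=False).head(5)
```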
### Cross-Validation GridSearch with Random Forest on two hyperparameters
Let us extend the previous exercise and conduct a grid search over the maximum depth of a tree. First, let us create a dictionary representing the grid for **n_estimators** and **max_depth**:
```python
rf_params = {'n_estimators': [10, 50, 100, 200],
'max_depth': [3, 6, 9, 12]}
```
And instantiate the GridSearch object using the same options that were applied before, then train the predictor (to use parallel computing, set **n_jobs=-1**, which automatically matches the number of available CPUs):
```python
cv_rf = GridSearchCV(rf, param_grid=rf_params, scoring='roc_auc',
n_jobs=None, refit=True, cv=4, verbose=2,
pre_dispatch=None, error_score=np.nan, return_train_score=True)
cv_rf.fit(X_train, y_train)
```
Fitting 4 folds for each of 16 candidates, totalling 64 fits
[CV] END .......................max_depth=3, n_estimators=10; total time= 0.2s
[CV] END .......................max_depth=3, n_estimators=10; total time= 0.2s
[CV] END .......................max_depth=3, n_estimators=10; total time= 0.2s
[CV] END .......................max_depth=3, n_estimators=10; total time= 0.2s
[CV] END .......................max_depth=3, n_estimators=50; total time= 0.8s
[CV] END .......................max_depth=3, n_estimators=50; total time= 0.8s
[CV] END .......................max_depth=3, n_estimators=50; total time= 0.8s
[CV] END .......................max_depth=3, n_estimators=50; total time= 0.8s
[CV] END ......................max_depth=3, n_estimators=100; total time= 1.5s
[CV] END ......................max_depth=3, n_estimators=100; total time= 1.5s
[CV] END ......................max_depth=3, n_estimators=100; total time= 1.5s
[CV] END ......................max_depth=3, n_estimators=100; total time= 1.5s
[CV] END ......................max_depth=3, n_estimators=200; total time= 3.0s
[CV] END ......................max_depth=3, n_estimators=200; total time= 3.0s
[CV] END ......................max_depth=3, n_estimators=200; total time= 2.9s
[CV] END ......................max_depth=3, n_estimators=200; total time= 2.9s
[CV] END .......................max_depth=6, n_estimators=10; total time= 0.3s
[CV] END .......................max_depth=6, n_estimators=10; total time= 0.3s
[CV] END .......................max_depth=6, n_estimators=10; total time= 0.3s
[CV] END .......................max_depth=6, n_estimators=10; total time= 0.3s
[CV] END .......................max_depth=6, n_estimators=50; total time= 1.3s
[CV] END .......................max_depth=6, n_estimators=50; total time= 1.3s
[CV] END .......................max_depth=6, n_estimators=50; total time= 1.3s
[CV] END .......................max_depth=6, n_estimators=50; total time= 1.2s
[CV] END ......................max_depth=6, n_estimators=100; total time= 2.5s
[CV] END ......................max_depth=6, n_estimators=100; total time= 2.5s
[CV] END ......................max_depth=6, n_estimators=100; total time= 2.5s
[CV] END ......................max_depth=6, n_estimators=100; total time= 2.5s
[CV] END ......................max_depth=6, n_estimators=200; total time= 5.0s
[CV] END ......................max_depth=6, n_estimators=200; total time= 5.0s
[CV] END ......................max_depth=6, n_estimators=200; total time= 5.0s
[CV] END ......................max_depth=6, n_estimators=200; total time= 5.0s
[CV] END .......................max_depth=9, n_estimators=10; total time= 0.4s
[CV] END .......................max_depth=9, n_estimators=10; total time= 0.4s
[CV] END .......................max_depth=9, n_estimators=10; total time= 0.4s
[CV] END .......................max_depth=9, n_estimators=10; total time= 0.4s
[CV] END .......................max_depth=9, n_estimators=50; total time= 1.7s
[CV] END .......................max_depth=9, n_estimators=50; total time= 1.7s
[CV] END .......................max_depth=9, n_estimators=50; total time= 1.7s
[CV] END .......................max_depth=9, n_estimators=50; total time= 1.7s
[CV] END ......................max_depth=9, n_estimators=100; total time= 3.4s
[CV] END ......................max_depth=9, n_estimators=100; total time= 3.4s
[CV] END ......................max_depth=9, n_estimators=100; total time= 3.4s
[CV] END ......................max_depth=9, n_estimators=100; total time= 3.4s
[CV] END ......................max_depth=9, n_estimators=200; total time= 6.9s
[CV] END ......................max_depth=9, n_estimators=200; total time= 6.9s
[CV] END ......................max_depth=9, n_estimators=200; total time= 6.9s
[CV] END ......................max_depth=9, n_estimators=200; total time= 6.9s
[CV] END ......................max_depth=12, n_estimators=10; total time= 0.4s
[CV] END ......................max_depth=12, n_estimators=10; total time= 0.4s
[CV] END ......................max_depth=12, n_estimators=10; total time= 0.4s
[CV] END ......................max_depth=12, n_estimators=10; total time= 0.4s
[CV] END ......................max_depth=12, n_estimators=50; total time= 2.1s
[CV] END ......................max_depth=12, n_estimators=50; total time= 2.1s
[CV] END ......................max_depth=12, n_estimators=50; total time= 2.1s
[CV] END ......................max_depth=12, n_estimators=50; total time= 2.2s
[CV] END .....................max_depth=12, n_estimators=100; total time= 4.3s
[CV] END .....................max_depth=12, n_estimators=100; total time= 4.3s
[CV] END .....................max_depth=12, n_estimators=100; total time= 4.2s
[CV] END .....................max_depth=12, n_estimators=100; total time= 4.3s
[CV] END .....................max_depth=12, n_estimators=200; total time= 8.7s
[CV] END .....................max_depth=12, n_estimators=200; total time= 8.5s
[CV] END .....................max_depth=12, n_estimators=200; total time= 8.5s
[CV] END .....................max_depth=12, n_estimators=200; total time= 8.6s
GridSearchCV(cv=4,
estimator=RandomForestClassifier(max_depth=3, n_estimators=10,
random_state=4),
param_grid={'max_depth': [3, 6, 9, 12],
'n_estimators': [10, 50, 100, 200]},
pre_dispatch=None, return_train_score=True, scoring='roc_auc',
verbose=2)
Now, let us store the results in a pandas DataFrame:
```python
cv_rf_results_df = pd.DataFrame(cv_rf.cv_results_)
cv_rf_results_df
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean_fit_time</th>
<th>std_fit_time</th>
<th>mean_score_time</th>
<th>std_score_time</th>
<th>param_max_depth</th>
<th>param_n_estimators</th>
<th>params</th>
<th>split0_test_score</th>
<th>split1_test_score</th>
<th>split2_test_score</th>
<th>split3_test_score</th>
<th>mean_test_score</th>
<th>std_test_score</th>
<th>rank_test_score</th>
<th>split0_train_score</th>
<th>split1_train_score</th>
<th>split2_train_score</th>
<th>split3_train_score</th>
<th>mean_train_score</th>
<th>std_train_score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0.160572</td>
<td>0.010295</td>
<td>0.009034</td>
<td>0.000137</td>
<td>3</td>
<td>10</td>
<td>{'max_depth': 3, 'n_estimators': 10}</td>
<td>0.739183</td>
<td>0.746599</td>
<td>0.772127</td>
<td>0.771322</td>
<td>0.757308</td>
<td>0.014656</td>
<td>15</td>
<td>0.767973</td>
<td>0.767158</td>
<td>0.760550</td>
<td>0.763683</td>
<td>0.764841</td>
<td>0.002955</td>
</tr>
<tr>
<th>1</th>
<td>0.739119</td>
<td>0.014972</td>
<td>0.028264</td>
<td>0.000112</td>
<td>3</td>
<td>50</td>
<td>{'max_depth': 3, 'n_estimators': 50}</td>
<td>0.748009</td>
<td>0.755878</td>
<td>0.779465</td>
<td>0.771250</td>
<td>0.763651</td>
<td>0.012379</td>
<td>12</td>
<td>0.775241</td>
<td>0.774025</td>
<td>0.765732</td>
<td>0.766947</td>
<td>0.770486</td>
<td>0.004191</td>
</tr>
<tr>
<th>2</th>
<td>1.433841</td>
<td>0.013277</td>
<td>0.052308</td>
<td>0.000071</td>
<td>3</td>
<td>100</td>
<td>{'max_depth': 3, 'n_estimators': 100}</td>
<td>0.747221</td>
<td>0.753924</td>
<td>0.779195</td>
<td>0.770937</td>
<td>0.762819</td>
<td>0.012811</td>
<td>13</td>
<td>0.774340</td>
<td>0.773598</td>
<td>0.766103</td>
<td>0.766914</td>
<td>0.770238</td>
<td>0.003750</td>
</tr>
<tr>
<th>3</th>
<td>2.848571</td>
<td>0.006656</td>
<td>0.100138</td>
<td>0.000516</td>
<td>3</td>
<td>200</td>
<td>{'max_depth': 3, 'n_estimators': 200}</td>
<td>0.747454</td>
<td>0.753057</td>
<td>0.777644</td>
<td>0.771729</td>
<td>0.762471</td>
<td>0.012550</td>
<td>14</td>
<td>0.774453</td>
<td>0.773534</td>
<td>0.766139</td>
<td>0.767404</td>
<td>0.770382</td>
<td>0.003653</td>
</tr>
<tr>
<th>4</th>
<td>0.252126</td>
<td>0.002849</td>
<td>0.010875</td>
<td>0.000209</td>
<td>6</td>
<td>10</td>
<td>{'max_depth': 6, 'n_estimators': 10}</td>
<td>0.752905</td>
<td>0.758904</td>
<td>0.780036</td>
<td>0.774017</td>
<td>0.766466</td>
<td>0.010980</td>
<td>11</td>
<td>0.795911</td>
<td>0.796476</td>
<td>0.793695</td>
<td>0.796446</td>
<td>0.795632</td>
<td>0.001141</td>
</tr>
<tr>
<th>5</th>
<td>1.219600</td>
<td>0.008659</td>
<td>0.036014</td>
<td>0.000334</td>
<td>6</td>
<td>50</td>
<td>{'max_depth': 6, 'n_estimators': 50}</td>
<td>0.760089</td>
<td>0.765251</td>
<td>0.788567</td>
<td>0.778669</td>
<td>0.773144</td>
<td>0.011193</td>
<td>8</td>
<td>0.807081</td>
<td>0.805109</td>
<td>0.799176</td>
<td>0.801229</td>
<td>0.803149</td>
<td>0.003113</td>
</tr>
<tr>
<th>6</th>
<td>2.418893</td>
<td>0.007093</td>
<td>0.067700</td>
<td>0.000225</td>
<td>6</td>
<td>100</td>
<td>{'max_depth': 6, 'n_estimators': 100}</td>
<td>0.760964</td>
<td>0.765515</td>
<td>0.788793</td>
<td>0.778936</td>
<td>0.773552</td>
<td>0.011004</td>
<td>7</td>
<td>0.808194</td>
<td>0.806130</td>
<td>0.800846</td>
<td>0.803123</td>
<td>0.804573</td>
<td>0.002808</td>
</tr>
<tr>
<th>7</th>
<td>4.845598</td>
<td>0.009769</td>
<td>0.131047</td>
<td>0.000598</td>
<td>6</td>
<td>200</td>
<td>{'max_depth': 6, 'n_estimators': 200}</td>
<td>0.761481</td>
<td>0.765319</td>
<td>0.787854</td>
<td>0.779745</td>
<td>0.773600</td>
<td>0.010682</td>
<td>6</td>
<td>0.808276</td>
<td>0.806853</td>
<td>0.800682</td>
<td>0.804006</td>
<td>0.804954</td>
<td>0.002906</td>
</tr>
<tr>
<th>8</th>
<td>0.345823</td>
<td>0.001968</td>
<td>0.012519</td>
<td>0.000102</td>
<td>9</td>
<td>10</td>
<td>{'max_depth': 9, 'n_estimators': 10}</td>
<td>0.757853</td>
<td>0.755444</td>
<td>0.781544</td>
<td>0.773823</td>
<td>0.767166</td>
<td>0.010899</td>
<td>10</td>
<td>0.854161</td>
<td>0.856197</td>
<td>0.850317</td>
<td>0.852039</td>
<td>0.853178</td>
<td>0.002211</td>
</tr>
<tr>
<th>9</th>
<td>1.677115</td>
<td>0.008941</td>
<td>0.045137</td>
<td>0.000182</td>
<td>9</td>
<td>50</td>
<td>{'max_depth': 9, 'n_estimators': 50}</td>
<td>0.763956</td>
<td>0.764420</td>
<td>0.786690</td>
<td>0.780255</td>
<td>0.773830</td>
<td>0.009908</td>
<td>4</td>
<td>0.867242</td>
<td>0.871380</td>
<td>0.868213</td>
<td>0.867390</td>
<td>0.868557</td>
<td>0.001672</td>
</tr>
<tr>
<th>10</th>
<td>3.346243</td>
<td>0.009928</td>
<td>0.086113</td>
<td>0.000312</td>
<td>9</td>
<td>100</td>
<td>{'max_depth': 9, 'n_estimators': 100}</td>
<td>0.763872</td>
<td>0.765788</td>
<td>0.789016</td>
<td>0.781199</td>
<td>0.774969</td>
<td>0.010530</td>
<td>3</td>
<td>0.872099</td>
<td>0.874119</td>
<td>0.870537</td>
<td>0.871196</td>
<td>0.871988</td>
<td>0.001350</td>
</tr>
<tr>
<th>11</th>
<td>6.705547</td>
<td>0.007114</td>
<td>0.167002</td>
<td>0.000740</td>
<td>9</td>
<td>200</td>
<td>{'max_depth': 9, 'n_estimators': 200}</td>
<td>0.764963</td>
<td>0.767541</td>
<td>0.790255</td>
<td>0.782034</td>
<td>0.776199</td>
<td>0.010402</td>
<td>1</td>
<td>0.873807</td>
<td>0.874909</td>
<td>0.871185</td>
<td>0.871246</td>
<td>0.872787</td>
<td>0.001619</td>
</tr>
<tr>
<th>12</th>
<td>0.426109</td>
<td>0.005192</td>
<td>0.014213</td>
<td>0.000121</td>
<td>12</td>
<td>10</td>
<td>{'max_depth': 12, 'n_estimators': 10}</td>
<td>0.754513</td>
<td>0.751116</td>
<td>0.764061</td>
<td>0.759223</td>
<td>0.757228</td>
<td>0.004884</td>
<td>16</td>
<td>0.922544</td>
<td>0.919157</td>
<td>0.926191</td>
<td>0.914636</td>
<td>0.920632</td>
<td>0.004263</td>
</tr>
<tr>
<th>13</th>
<td>2.093706</td>
<td>0.010107</td>
<td>0.053497</td>
<td>0.000099</td>
<td>12</td>
<td>50</td>
<td>{'max_depth': 12, 'n_estimators': 50}</td>
<td>0.763750</td>
<td>0.763663</td>
<td>0.780781</td>
<td>0.778086</td>
<td>0.771570</td>
<td>0.007921</td>
<td>9</td>
<td>0.940170</td>
<td>0.943000</td>
<td>0.942297</td>
<td>0.937049</td>
<td>0.940629</td>
<td>0.002315</td>
</tr>
<tr>
<th>14</th>
<td>4.155320</td>
<td>0.017221</td>
<td>0.107935</td>
<td>0.004810</td>
<td>12</td>
<td>100</td>
<td>{'max_depth': 12, 'n_estimators': 100}</td>
<td>0.765665</td>
<td>0.765061</td>
<td>0.783413</td>
<td>0.781119</td>
<td>0.773815</td>
<td>0.008493</td>
<td>5</td>
<td>0.942902</td>
<td>0.946968</td>
<td>0.946109</td>
<td>0.942448</td>
<td>0.944607</td>
<td>0.001962</td>
</tr>
<tr>
<th>15</th>
<td>8.371203</td>
<td>0.092187</td>
<td>0.208209</td>
<td>0.005270</td>
<td>12</td>
<td>200</td>
<td>{'max_depth': 12, 'n_estimators': 200}</td>
<td>0.765910</td>
<td>0.766963</td>
<td>0.785611</td>
<td>0.783513</td>
<td>0.775499</td>
<td>0.009101</td>
<td>2</td>
<td>0.945018</td>
<td>0.949378</td>
<td>0.946415</td>
<td>0.944689</td>
<td>0.946375</td>
<td>0.001851</td>
</tr>
</tbody>
</table>
</div>
Now, let us visualize three dimensions of data in a two-dimensional plot using a checkerboard graph. A checkerboard graph plots a two-dimensional grid with colored squares (or other shapes) on it as a way to show three dimensions of data. First, it is necessary to create a meshgrid for the *x*-axis and *y*-axis:
```python
xx_rf, yy_rf = np.meshgrid(range(5), range(5))
```
Let us set the color map scale and create the axes:
```python
cm = plt.cm.jet
plt.figure(figsize=(20,10))
ax = plt.axes()
pcolor_graph = ax.pcolormesh(xx_rf,
yy_rf,
cv_rf_results_df['mean_test_score'].values.reshape((4,4)),
cmap=cm)
plt.colorbar(pcolor_graph, label='Average testing ROC AUC')
ax.set_aspect('equal')
ax.set_xticks([0.5, 1.5, 2.5, 3.5])
ax.set_yticks([0.5, 1.5, 2.5, 3.5])
ax.set_xticklabels([str(tick_label) for tick_label in rf_params['n_estimators']])
ax.set_yticklabels([str(tick_label) for tick_label in rf_params['max_depth']])
ax.set_xlabel('Number of trees')
ax.set_ylabel('Maximum depth')
```
There was certainly an advantage to using trees with a depth of more than three. Of the parameter combinations that were tried, **max_depth=9** with **200 trees** yielded the best average testing score, with ROC AUC = 0.776.
This was the best model found so far (compared to the individual decision tree and the logistic regression).
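As a closing sketch (assuming a held-out split `X_test`, `y_test` exists), the refit best estimator can be evaluated directly:

```python
from sklearn.metrics import roc_auc_score

print(cv_rf.best_params_)  # e.g. {'max_depth': 9, 'n_estimators': 200}
print(cv_rf.best_score_)   # mean cross-validated ROC AUC of the best setting
# Because refit=True, cv_rf.predict_proba uses the best estimator retrained on all of X_train
test_auc = roc_auc_score(y_test, cv_rf.predict_proba(X_test)[:, 1])
```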
|
Formal statement is: corollary analytic_continuation_open: assumes "open s" and "open s'" and "s \<noteq> {}" and "connected s'" and "s \<subseteq> s'" assumes "f holomorphic_on s'" and "g holomorphic_on s'" and "\<And>z. z \<in> s \<Longrightarrow> f z = g z" assumes "z \<in> s'" shows "f z = g z" Informal statement is: If $f$ and $g$ are holomorphic functions on an open set $S'$ and agree on an open subset $S$ of $S'$, then $f$ and $g$ agree on all of $S'$. |
function lo=lpcrf2lo(rf)
%LPCRF2LO Convert reflection coefficients to log area ratios LO=(RF)
%the output values are limited to about +-14.5
% Copyright (C) Mike Brookes 1997
% Version: $Id: lpcrf2lo.m 713 2011-10-16 14:45:43Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
r=max(min(rf,1-1E-6),1E-6-1);
lo=log((1-r)./(1+r));
|
-- @@stderr --
dtrace: failed to compile script test/unittest/tracemem/err.D_PROTO_LEN.toofew.d: [D_PROTO_LEN] line 18: tracemem( ) prototype mismatch: 1 arg passed, at least 2 expected
|
State Before: 𝕜 : Type u_2
inst✝¹⁴ : NontriviallyNormedField 𝕜
D : Type uD
inst✝¹³ : NormedAddCommGroup D
inst✝¹² : NormedSpace 𝕜 D
E : Type uE
inst✝¹¹ : NormedAddCommGroup E
inst✝¹⁰ : NormedSpace 𝕜 E
F : Type uF
inst✝⁹ : NormedAddCommGroup F
inst✝⁸ : NormedSpace 𝕜 F
G : Type uG
inst✝⁷ : NormedAddCommGroup G
inst✝⁶ : NormedSpace 𝕜 G
X : Type ?u.2192953
inst✝⁵ : NormedAddCommGroup X
inst✝⁴ : NormedSpace 𝕜 X
s s₁ t u : Set E
f✝ f₁ : E → F
g✝ : F → G
x x₀ : E
c : F
b : E × F → G
m n : ℕ∞
p✝ : E → FormalMultilinearSeries 𝕜 E F
E' : Type u_1
inst✝³ : NormedAddCommGroup E'
inst✝² : NormedSpace 𝕜 E'
F' : Type u_3
inst✝¹ : NormedAddCommGroup F'
inst✝ : NormedSpace 𝕜 F'
f : E → F
g : E' → F'
p : E × E'
hf : ContDiffAt 𝕜 n f p.fst
hg : ContDiffAt 𝕜 n g p.snd
⊢ ContDiffAt 𝕜 n (Prod.map f g) p State After: case mk
𝕜 : Type u_2
inst✝¹⁴ : NontriviallyNormedField 𝕜
D : Type uD
inst✝¹³ : NormedAddCommGroup D
inst✝¹² : NormedSpace 𝕜 D
E : Type uE
inst✝¹¹ : NormedAddCommGroup E
inst✝¹⁰ : NormedSpace 𝕜 E
F : Type uF
inst✝⁹ : NormedAddCommGroup F
inst✝⁸ : NormedSpace 𝕜 F
G : Type uG
inst✝⁷ : NormedAddCommGroup G
inst✝⁶ : NormedSpace 𝕜 G
X : Type ?u.2192953
inst✝⁵ : NormedAddCommGroup X
inst✝⁴ : NormedSpace 𝕜 X
s s₁ t u : Set E
f✝ f₁ : E → F
g✝ : F → G
x x₀ : E
c : F
b : E × F → G
m n : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
E' : Type u_1
inst✝³ : NormedAddCommGroup E'
inst✝² : NormedSpace 𝕜 E'
F' : Type u_3
inst✝¹ : NormedAddCommGroup F'
inst✝ : NormedSpace 𝕜 F'
f : E → F
g : E' → F'
fst✝ : E
snd✝ : E'
hf : ContDiffAt 𝕜 n f (fst✝, snd✝).fst
hg : ContDiffAt 𝕜 n g (fst✝, snd✝).snd
⊢ ContDiffAt 𝕜 n (Prod.map f g) (fst✝, snd✝) Tactic: rcases p with ⟨⟩ State Before: case mk
𝕜 : Type u_2
inst✝¹⁴ : NontriviallyNormedField 𝕜
D : Type uD
inst✝¹³ : NormedAddCommGroup D
inst✝¹² : NormedSpace 𝕜 D
E : Type uE
inst✝¹¹ : NormedAddCommGroup E
inst✝¹⁰ : NormedSpace 𝕜 E
F : Type uF
inst✝⁹ : NormedAddCommGroup F
inst✝⁸ : NormedSpace 𝕜 F
G : Type uG
inst✝⁷ : NormedAddCommGroup G
inst✝⁶ : NormedSpace 𝕜 G
X : Type ?u.2192953
inst✝⁵ : NormedAddCommGroup X
inst✝⁴ : NormedSpace 𝕜 X
s s₁ t u : Set E
f✝ f₁ : E → F
g✝ : F → G
x x₀ : E
c : F
b : E × F → G
m n : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
E' : Type u_1
inst✝³ : NormedAddCommGroup E'
inst✝² : NormedSpace 𝕜 E'
F' : Type u_3
inst✝¹ : NormedAddCommGroup F'
inst✝ : NormedSpace 𝕜 F'
f : E → F
g : E' → F'
fst✝ : E
snd✝ : E'
hf : ContDiffAt 𝕜 n f (fst✝, snd✝).fst
hg : ContDiffAt 𝕜 n g (fst✝, snd✝).snd
⊢ ContDiffAt 𝕜 n (Prod.map f g) (fst✝, snd✝) State After: no goals Tactic: exact ContDiffAt.prod_map hf hg |
theorem Stone_Weierstrass_polynomial_function_subspace: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" assumes "compact S" and contf: "continuous_on S f" and "0 < e" and "subspace T" "f ` S \<subseteq> T" obtains g where "polynomial_function g" "g ` S \<subseteq> T" "\<And>x. x \<in> S \<Longrightarrow> norm(f x - g x) < e" |
Require Import FunctionalExtensionality JMeq.
Require Export SpecializedCategory Functor.
Require Import Common Notations FunctorAttributes FEqualDep.
Set Implicit Arguments.
Generalizable All Variables.
Set Asymmetric Patterns.
Set Universe Polymorphism.
Local Infix "==" := JMeq.
Local Ltac faithful_t :=
repeat (unfold Object in *; simpl in *; subst;
match goal with
| _ => intro
| _ => progress trivial
| [ |- _ = _ ] => (apply functional_extensionality_dep; intro)
| _ => progress simpl_eq
| [ H : _ = _ |- _ ] => fg_equal_in H
(*| [ |- projT2 ?a == projT2 ?b ] =>
(cut (projT1 a = projT1 b); [ (*generalize a b*) | faithful_t ](*;
intros [] [] ?*))*)
| _ => progress JMeq_eq
end).
Section sigT_obj_mor.
Context `(A : @SpecializedCategory objA).
Variable Pobj : objA -> Type.
Variable Pmor : forall s d : sigT Pobj, A.(Morphism) (projT1 s) (projT1 d) -> Type.
Variable Pidentity : forall x, @Pmor x x (Identity (C := A) _).
Variable Pcompose : forall s d d', forall m1 m2, @Pmor d d' m1 -> @Pmor s d m2 -> @Pmor s d' (Compose (C := A) m1 m2).
Hypothesis P_Associativity : forall o1 o2 o3 o4 m1 m2 m3 m1' m2' m3',
@Pcompose o1 o2 o4 _ m1 (@Pcompose o2 o3 o4 m3 m2 m3' m2') m1' ==
@Pcompose o1 o3 o4 m3 _ m3' (@Pcompose o1 o2 o3 m2 m1 m2' m1').
Hypothesis P_LeftIdentity : forall a b f f',
@Pcompose a b b _ f (@Pidentity b) f' ==
f'.
Hypothesis P_RightIdentity : forall a b f f',
@Pcompose a a b f _ f' (@Pidentity a) ==
f'.
Definition SpecializedCategory_sigT : @SpecializedCategory (sigT Pobj).
match goal with
| [ |- @SpecializedCategory ?obj ] =>
refine (@Build_SpecializedCategory obj
(fun s d => sigT (@Pmor s d))
(fun x => existT _ (Identity (C := A) (projT1 x)) (Pidentity x))
(fun s d d' m1 m2 => existT _ (Compose (C := A) (projT1 m1) (projT1 m2)) (Pcompose (projT2 m1) (projT2 m2)))
_
_
_
)
end;
abstract (intros; simpl_eq; auto with category).
Defined.
Definition projT1_functor : SpecializedFunctor SpecializedCategory_sigT A
:= Build_SpecializedFunctor SpecializedCategory_sigT A
(@projT1 _ _)
(fun _ _ => @projT1 _ _)
(fun _ _ _ _ _ => eq_refl)
(fun _ => eq_refl).
End sigT_obj_mor.
Arguments projT1_functor {objA A Pobj Pmor Pidentity Pcompose P_Associativity P_LeftIdentity P_RightIdentity}.
Section sigT_obj.
Context `(A : @SpecializedCategory objA).
Variable Pobj : objA -> Type.
Definition SpecializedCategory_sigT_obj : @SpecializedCategory (sigT Pobj).
match goal with
| [ |- @SpecializedCategory ?obj ] =>
refine (@Build_SpecializedCategory obj
(fun s d => A.(Morphism) (projT1 s) (projT1 d))
(fun x => Identity (C := A) (projT1 x))
(fun s d d' m1 m2 => Compose (C := A) m1 m2)
_
_
_
)
end;
abstract (intros; destruct_sig; simpl; auto with category).
Defined.
Definition projT1_obj_functor : SpecializedFunctor SpecializedCategory_sigT_obj A
:= Build_SpecializedFunctor SpecializedCategory_sigT_obj A
(@projT1 _ _)
(fun s d m => m)
(fun _ _ _ _ _ => eq_refl)
(fun _ => eq_refl).
Definition SpecializedCategory_sigT_obj_as_sigT : @SpecializedCategory (sigT Pobj).
refine (@SpecializedCategory_sigT _ A Pobj (fun _ _ _ => unit) (fun _ => tt) (fun _ _ _ _ _ _ _ => tt) _ _ _);
abstract (simpl; intros; trivial).
Defined.
Definition sigT_functor_obj : SpecializedFunctor SpecializedCategory_sigT_obj_as_sigT SpecializedCategory_sigT_obj
:= Build_SpecializedFunctor SpecializedCategory_sigT_obj_as_sigT SpecializedCategory_sigT_obj
(fun x => x)
(fun _ _ => @projT1 _ _)
(fun _ _ _ _ _ => eq_refl)
(fun _ => eq_refl).
Definition sigT_functor_obj_inv : SpecializedFunctor SpecializedCategory_sigT_obj SpecializedCategory_sigT_obj_as_sigT
:= Build_SpecializedFunctor SpecializedCategory_sigT_obj SpecializedCategory_sigT_obj_as_sigT
(fun x => x)
(fun _ _ m => existT _ m tt)
(fun _ _ _ _ _ => eq_refl)
(fun _ => eq_refl).
Lemma sigT_obj_eq : ComposeFunctors sigT_functor_obj sigT_functor_obj_inv = IdentityFunctor _ /\ ComposeFunctors sigT_functor_obj_inv sigT_functor_obj = IdentityFunctor _.
split; functor_eq; hnf in *; destruct_type @sigT; f_equal; trivial.
Qed.
Lemma sigT_obj_compat : ComposeFunctors projT1_obj_functor sigT_functor_obj = projT1_functor.
functor_eq.
Qed.
End sigT_obj.
Arguments projT1_obj_functor {objA A Pobj}.
Section sigT_mor.
Context `(A : @SpecializedCategory objA).
Variable Pmor : forall s d, A.(Morphism) s d -> Type.
Variable Pidentity : forall x, @Pmor x x (Identity (C := A) _).
Variable Pcompose : forall s d d', forall m1 m2, @Pmor d d' m1 -> @Pmor s d m2 -> @Pmor s d' (Compose (C := A) m1 m2).
Hypothesis P_Associativity : forall o1 o2 o3 o4 m1 m2 m3 m1' m2' m3',
@Pcompose o1 o2 o4 _ m1 (@Pcompose o2 o3 o4 m3 m2 m3' m2') m1' ==
@Pcompose o1 o3 o4 m3 _ m3' (@Pcompose o1 o2 o3 m2 m1 m2' m1').
Hypothesis P_LeftIdentity : forall a b f f',
@Pcompose a b b _ f (@Pidentity b) f' ==
f'.
Hypothesis P_RightIdentity : forall a b f f',
@Pcompose a a b f _ f' (@Pidentity a) ==
f'.
Definition SpecializedCategory_sigT_mor : @SpecializedCategory objA.
match goal with
| [ |- @SpecializedCategory ?obj ] =>
refine (@Build_SpecializedCategory obj
(fun s d => sigT (@Pmor s d))
(fun x => existT _ (Identity (C := A) x) (Pidentity x))
(fun s d d' m1 m2 => existT _ (Compose (C := A) (projT1 m1) (projT1 m2)) (Pcompose (projT2 m1) (projT2 m2)))
_
_
_
)
end;
abstract (intros; simpl_eq; auto with category).
Defined.
Definition projT1_mor_functor : SpecializedFunctor SpecializedCategory_sigT_mor A.
refine (Build_SpecializedFunctor SpecializedCategory_sigT_mor A
(fun x => x)
(fun s d m => projT1 m)
_
_
);
intros; reflexivity.
Defined.
Definition SpecializedCategory_sigT_mor_as_sigT : @SpecializedCategory (sigT (fun _ : objA => unit)).
apply (@SpecializedCategory_sigT _ A _ (fun s d => @Pmor (projT1 s) (projT1 d)) (fun _ => Pidentity _) (fun _ _ _ _ _ m1 m2 => Pcompose m1 m2));
abstract (intros; trivial).
Defined.
Definition sigT_functor_mor : SpecializedFunctor SpecializedCategory_sigT_mor_as_sigT SpecializedCategory_sigT_mor.
match goal with
| [ |- SpecializedFunctor ?C ?D ] =>
refine (Build_SpecializedFunctor C D
(@projT1 _ _)
(fun _ _ => @id _)
_
_
)
end;
simpl; intros; reflexivity.
Defined.
Definition sigT_functor_mor_inv : SpecializedFunctor SpecializedCategory_sigT_mor SpecializedCategory_sigT_mor_as_sigT.
match goal with
| [ |- SpecializedFunctor ?C ?D ] =>
refine (Build_SpecializedFunctor C D
(fun x => existT _ x tt)
(fun _ _ => @id _)
_
_
)
end;
abstract (simpl; intros; f_equal; trivial).
Defined.
Lemma sigT_mor_eq : ComposeFunctors sigT_functor_mor sigT_functor_mor_inv = IdentityFunctor _ /\ ComposeFunctors sigT_functor_mor_inv sigT_functor_mor = IdentityFunctor _.
split; functor_eq; simpl_eq; trivial.
Qed.
Lemma sigT_mor_compat : ComposeFunctors projT1_mor_functor sigT_functor_mor = projT1_functor.
functor_eq.
Qed.
End sigT_mor.
Arguments projT1_mor_functor {objA A Pmor Pidentity Pcompose P_Associativity P_LeftIdentity P_RightIdentity}.
|
#' rpdo
#'
#' Monthly Pacific Decadal Oscillation (PDO) index
#' values from January 1900.
#'
#' @seealso [pdo()] and [pdo_download()]
#' @name rpdo
#' @docType package
NULL
|
-- {-# OPTIONS -v tc.cover.split.con:20 #-}
open import Common.Prelude renaming (Nat to ℕ)
open import Common.Equality
infix 3 ¬_
¬_ : Set → Set
¬ P = P → ⊥
-- Decidable relations.
data Dec (P : Set) : Set where
yes : ( p : P) → Dec P
no : (¬p : ¬ P) → Dec P
_≟_ : (m n : ℕ) → Dec (m ≡ n)
zero ≟ zero = yes refl
zero ≟ suc n = no λ()
suc m ≟ zero = no λ()
suc m ≟ suc n with m ≟ n
suc m ≟ suc .m | yes refl = yes refl
suc m ≟ suc n | no prf = no (λ x → prf (cong pred x))
data Ty : Set where
data Cxt : Set where
ε : Cxt
_,_ : (Γ : Cxt) (a : Ty) → Cxt
len : Cxt → ℕ
len ε = 0
len (Γ , _) = suc (len Γ)
-- De Bruijn index
mutual
Var : Cxt → Ty → Set
Var Γ a = Var' a Γ
data Var' (a : Ty) : (Γ : Cxt) → Set where
zero : ∀ {Γ} → Var (Γ , a) a
suc : ∀ {Γ b} (x : Var Γ a) → Var (Γ , b) a
-- De Bruijn Level
Lev = ℕ
-- Valid de Bruijn levels.
data LookupLev : (x : Lev) (Γ : Cxt) (a : Ty) (i : Var Γ a) → Set where
lookupZero : ∀ {Γ a} →
LookupLev (len Γ) (Γ , a) a zero
lookupSuc : ∀ {Γ a b x i} →
LookupLev x Γ a i →
LookupLev x (Γ , b) a (suc i)
record ValidLev (x : Lev) (Γ : Cxt) : Set where
constructor validLev
field
{type } : Ty
{index} : Var Γ type
valid : LookupLev x Γ type index
weakLev : ∀ {x Γ a} → ValidLev x Γ → ValidLev x (Γ , a)
weakLev (validLev d) = validLev (lookupSuc d)
-- Looking up a de Bruijn level.
lookupLev : ∀ (x : Lev) (Γ : Cxt) → Dec (ValidLev x Γ)
lookupLev x ε = no λ { (validLev ()) }
lookupLev x (Γ , a) with x ≟ len Γ
lookupLev ._ (Γ , a) | yes refl = yes (validLev lookupZero)
lookupLev x (Γ , a) | no _ with lookupLev x Γ
lookupLev x (Γ , a) | no ¬p | yes d = yes (weakLev d)
lookupLev x (Γ , a) | no ¬p | no ¬d = no contra
where
contra : ¬ ValidLev x (Γ , a)
contra (validLev (lookupSuc valid)) = ?
{- Unbound indices showing up in error message:
I'm not sure if there should be a case for the constructor
lookupZero, because I get stuck when trying to solve the following
unification problems (inferred index ≟ expected index):
len Γ ≟ @8
Γ , a ≟ @7 , @4
a ≟ type
zero ≟ index
when checking the definition of contra -}
|
/-
Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import algebra.hom.freiman
import analysis.asymptotics.asymptotics
import analysis.convex.strict_convex_space
/-!
# Salem-Spencer sets and Roth numbers
This file defines Salem-Spencer sets and the Roth number of a set.
A Salem-Spencer set is a set without arithmetic progressions of length `3`. Equivalently, the
average of any two distinct elements is not in the set.
The Roth number of a finset is the size of its biggest Salem-Spencer subset. This is a more general
definition than the one often found in mathematical literature, where the `n`-th Roth number is
the size of the biggest Salem-Spencer subset of `{0, ..., n - 1}`.
## Main declarations
* `mul_salem_spencer`: Predicate for a set to be multiplicative Salem-Spencer.
* `add_salem_spencer`: Predicate for a set to be additive Salem-Spencer.
* `mul_roth_number`: The multiplicative Roth number of a finset.
* `add_roth_number`: The additive Roth number of a finset.
* `roth_number_nat`: The Roth number of a natural. This corresponds to
`add_roth_number (finset.range n)`.
## TODO
* Can `add_salem_spencer_iff_eq_right` be made more general?
* Generalize `mul_salem_spencer.image` to Freiman homs
## Tags
Salem-Spencer, Roth, arithmetic progression, average, three-free
-/
open finset function metric nat
open_locale pointwise
variables {F α β 𝕜 E : Type*}
section salem_spencer
open set
section monoid
variables [monoid α] [monoid β] (s t : set α)
/-- A multiplicative Salem-Spencer, aka non averaging, set `s` in a monoid is a set such that the
multiplicative average of any two distinct elements is not in the set. -/
@[to_additive "A Salem-Spencer, aka non averaging, set `s` in an additive monoid
is a set such that the average of any two distinct elements is not in the set."]
def mul_salem_spencer : Prop := ∀ ⦃a b c⦄, a ∈ s → b ∈ s → c ∈ s → a * b = c * c → a = b
/-- Whether a given finset is Salem-Spencer is decidable. -/
@[to_additive "Whether a given finset is Salem-Spencer is decidable."]
instance {α : Type*} [decidable_eq α] [monoid α] {s : finset α} :
decidable (mul_salem_spencer (s : set α)) :=
decidable_of_iff (∀ a ∈ s, ∀ b ∈ s, ∀ c ∈ s, a * b = c * c → a = b)
⟨λ h a b c ha hb hc, h a ha b hb c hc, λ h a ha b hb c hc, h ha hb hc⟩
variables {s t}
@[to_additive]
lemma mul_salem_spencer.mono (h : t ⊆ s) (hs : mul_salem_spencer s) : mul_salem_spencer t :=
λ a b c ha hb hc, hs (h ha) (h hb) (h hc)
@[simp, to_additive]
lemma mul_salem_spencer_empty : mul_salem_spencer (∅ : set α) := λ a _ _ ha, ha.elim
@[to_additive]
lemma set.subsingleton.mul_salem_spencer (hs : s.subsingleton) : mul_salem_spencer s :=
λ a b _ ha hb _ _, hs ha hb
@[simp, to_additive]
lemma mul_salem_spencer_singleton (a : α) : mul_salem_spencer ({a} : set α) :=
subsingleton_singleton.mul_salem_spencer
@[to_additive add_salem_spencer.prod]
lemma mul_salem_spencer.prod {t : set β} (hs : mul_salem_spencer s) (ht : mul_salem_spencer t) :
mul_salem_spencer (s ×ˢ t) :=
λ a b c ha hb hc h,
prod.ext (hs ha.1 hb.1 hc.1 (prod.ext_iff.1 h).1) (ht ha.2 hb.2 hc.2 (prod.ext_iff.1 h).2)
@[to_additive]
lemma mul_salem_spencer_pi {ι : Type*} {α : ι → Type*} [Π i, monoid (α i)] {s : Π i, set (α i)}
(hs : ∀ i, mul_salem_spencer (s i)) :
mul_salem_spencer ((univ : set ι).pi s) :=
λ a b c ha hb hc h, funext $ λ i, hs i (ha i trivial) (hb i trivial) (hc i trivial) $ congr_fun h i
end monoid
section comm_monoid
variables [comm_monoid α] [comm_monoid β] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer.of_image [fun_like F α (λ _, β)] [freiman_hom_class F s β 2] (f : F)
(hf : s.inj_on f) (h : mul_salem_spencer (f '' s)) :
mul_salem_spencer s :=
λ a b c ha hb hc habc, hf ha hb $ h (mem_image_of_mem _ ha) (mem_image_of_mem _ hb)
(mem_image_of_mem _ hc) $ map_mul_map_eq_map_mul_map f ha hb hc hc habc
-- TODO: Generalize to Freiman homs
@[to_additive]
lemma mul_salem_spencer.image [mul_hom_class F α β] (f : F) (hf : (s * s).inj_on f)
(h : mul_salem_spencer s) :
mul_salem_spencer (f '' s) :=
begin
rintro _ _ _ ⟨a, ha, rfl⟩ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ habc,
rw h ha hb hc (hf (mul_mem_mul ha hb) (mul_mem_mul hc hc) $ by rwa [map_mul, map_mul]),
end
end comm_monoid
section cancel_comm_monoid
variables [cancel_comm_monoid α] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer_insert :
mul_salem_spencer (insert a s) ↔ mul_salem_spencer s ∧
(∀ ⦃b c⦄, b ∈ s → c ∈ s → a * b = c * c → a = b) ∧
∀ ⦃b c⦄, b ∈ s → c ∈ s → b * c = a * a → b = c :=
begin
refine ⟨λ hs, ⟨hs.mono (subset_insert _ _),
λ b c hb hc, hs (or.inl rfl) (or.inr hb) (or.inr hc),
λ b c hb hc, hs (or.inr hb) (or.inr hc) (or.inl rfl)⟩, _⟩,
rintro ⟨hs, ha, ha'⟩ b c d hb hc hd h,
rw mem_insert_iff at hb hc hd,
obtain rfl | hb := hb;
obtain rfl | hc := hc,
{ refl },
all_goals { obtain rfl | hd := hd },
{ exact (mul_left_cancel h).symm },
{ exact ha hc hd h },
{ exact mul_right_cancel h },
{ exact (ha hb hd $ (mul_comm _ _).trans h).symm },
{ exact ha' hb hc h },
{ exact hs hb hc hd h }
end
@[simp, to_additive]
lemma mul_salem_spencer_pair (a b : α) : mul_salem_spencer ({a, b} : set α) :=
begin
rw mul_salem_spencer_insert,
refine ⟨mul_salem_spencer_singleton _, _, _⟩,
{ rintro c d (rfl : c = b) (rfl : d = c),
exact mul_right_cancel },
{ rintro c d (rfl : c = b) (rfl : d = c) _,
refl }
end
@[to_additive]
lemma mul_salem_spencer.mul_left (hs : mul_salem_spencer s) : mul_salem_spencer ((*) a '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm a d] at h,
rw hs hb hc hd (mul_left_cancel h),
end
@[to_additive]
lemma mul_salem_spencer.mul_right (hs : mul_salem_spencer s) : mul_salem_spencer ((* a) '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm d] at h,
rw hs hb hc hd (mul_right_cancel h),
end
@[to_additive]
lemma mul_salem_spencer_mul_left_iff : mul_salem_spencer ((*) a '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_left_cancel (hs (mem_image_of_mem _ hb) (mem_image_of_mem _ hc)
(mem_image_of_mem _ hd) $ by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
mul_salem_spencer.mul_left⟩
@[to_additive]
lemma mul_salem_spencer_mul_right_iff :
mul_salem_spencer ((* a) '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_right_cancel (hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc)
(set.mem_image_of_mem _ hd) $ by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
mul_salem_spencer.mul_right⟩
end cancel_comm_monoid
section ordered_cancel_comm_monoid
variables [ordered_cancel_comm_monoid α] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer_insert_of_lt (hs : ∀ i ∈ s, i < a) :
mul_salem_spencer (insert a s) ↔ mul_salem_spencer s ∧
∀ ⦃b c⦄, b ∈ s → c ∈ s → a * b = c * c → a = b :=
begin
refine mul_salem_spencer_insert.trans _,
rw ←and_assoc,
exact and_iff_left (λ b c hb hc h, ((mul_lt_mul_of_lt_of_lt (hs _ hb) (hs _ hc)).ne h).elim),
end
end ordered_cancel_comm_monoid
section cancel_comm_monoid_with_zero
variables [cancel_comm_monoid_with_zero α] [no_zero_divisors α] {s : set α} {a : α}
lemma mul_salem_spencer.mul_left₀ (hs : mul_salem_spencer s) (ha : a ≠ 0) :
mul_salem_spencer ((*) a '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm a d] at h,
rw hs hb hc hd (mul_left_cancel₀ (mul_ne_zero ha ha) h),
end
lemma mul_salem_spencer.mul_right₀ (hs : mul_salem_spencer s) (ha : a ≠ 0) :
mul_salem_spencer ((* a) '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm d] at h,
rw hs hb hc hd (mul_right_cancel₀ (mul_ne_zero ha ha) h),
end
lemma mul_salem_spencer_mul_left_iff₀ (ha : a ≠ 0) :
mul_salem_spencer ((*) a '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_left_cancel₀ ha
(hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc) (set.mem_image_of_mem _ hd) $
by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
λ hs, hs.mul_left₀ ha⟩
lemma mul_salem_spencer_mul_right_iff₀ (ha : a ≠ 0) :
mul_salem_spencer ((* a) '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_right_cancel₀ ha
(hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc) (set.mem_image_of_mem _ hd) $
by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
λ hs, hs.mul_right₀ ha⟩
end cancel_comm_monoid_with_zero
section nat
lemma add_salem_spencer_iff_eq_right {s : set ℕ} :
add_salem_spencer s ↔ ∀ ⦃a b c⦄, a ∈ s → b ∈ s → c ∈ s → a + b = c + c → a = c :=
begin
refine forall₄_congr (λ a b c _, forall₃_congr $ λ _ _ habc, ⟨_, _⟩),
{ rintro rfl,
simp_rw ←two_mul at habc,
exact mul_left_cancel₀ two_ne_zero habc },
{ rintro rfl,
exact (add_left_cancel habc).symm }
end
end nat
/-- The frontier of a closed strictly convex set only contains trivial arithmetic progressions.
The idea is that an arithmetic progression is contained on a line and the frontier of a strictly
convex set does not contain lines. -/
lemma add_salem_spencer_frontier [linear_ordered_field 𝕜] [topological_space E] [add_comm_monoid E]
[module 𝕜 E] {s : set E} (hs₀ : is_closed s) (hs₁ : strict_convex 𝕜 s) :
add_salem_spencer (frontier s) :=
begin
intros a b c ha hb hc habc,
obtain rfl : (1 / 2 : 𝕜) • a + (1 / 2 : 𝕜) • b = c,
{ rwa [←smul_add, one_div, inv_smul_eq_iff₀ (show (2 : 𝕜) ≠ 0, by norm_num), two_smul] },
exact hs₁.eq (hs₀.frontier_subset ha) (hs₀.frontier_subset hb) one_half_pos one_half_pos
(add_halves _) hc.2,
end
lemma add_salem_spencer_sphere [normed_add_comm_group E] [normed_space ℝ E]
[strict_convex_space ℝ E] (x : E) (r : ℝ) : add_salem_spencer (sphere x r) :=
begin
obtain rfl | hr := eq_or_ne r 0,
{ rw sphere_zero,
exact add_salem_spencer_singleton _ },
{ convert add_salem_spencer_frontier is_closed_ball (strict_convex_closed_ball ℝ x r),
exact (frontier_closed_ball _ hr).symm }
end
end salem_spencer
open finset
section roth_number
variables [decidable_eq α]
section monoid
variables [monoid α] [decidable_eq β] [monoid β] (s t : finset α)
/-- The multiplicative Roth number of a finset is the cardinality of its biggest multiplicative
Salem-Spencer subset. -/
@[to_additive "The additive Roth number of a finset is the cardinality of its biggest additive
Salem-Spencer subset. The usual Roth number corresponds to `add_roth_number (finset.range n)`, see
`roth_number_nat`. "]
def mul_roth_number : finset α →o ℕ :=
⟨λ s, nat.find_greatest (λ m, ∃ t ⊆ s, t.card = m ∧ mul_salem_spencer (t : set α)) s.card,
begin
rintro t u htu,
refine nat.find_greatest_mono (λ m, _) (card_le_of_subset htu),
rintro ⟨v, hvt, hv⟩,
exact ⟨v, hvt.trans htu, hv⟩,
end⟩
@[to_additive]
lemma mul_roth_number_le : mul_roth_number s ≤ s.card := by convert nat.find_greatest_le s.card
@[to_additive]
lemma mul_roth_number_spec : ∃ t ⊆ s, t.card = mul_roth_number s ∧ mul_salem_spencer (t : set α) :=
@nat.find_greatest_spec _ _ (λ m, ∃ t ⊆ s, t.card = m ∧ mul_salem_spencer (t : set α)) _
(nat.zero_le _) ⟨∅, empty_subset _, card_empty, mul_salem_spencer_empty⟩
variables {s t} {n : ℕ}
@[to_additive]
lemma mul_salem_spencer.le_mul_roth_number (hs : mul_salem_spencer (s : set α)) (h : s ⊆ t) :
s.card ≤ mul_roth_number t :=
le_find_greatest (card_le_of_subset h) ⟨s, h, rfl, hs⟩
@[to_additive]
lemma mul_salem_spencer.roth_number_eq (hs : mul_salem_spencer (s : set α)) :
mul_roth_number s = s.card :=
(mul_roth_number_le _).antisymm $ hs.le_mul_roth_number $ subset.refl _
@[simp, to_additive]
lemma mul_roth_number_empty : mul_roth_number (∅ : finset α) = 0 :=
nat.eq_zero_of_le_zero $ (mul_roth_number_le _).trans card_empty.le
@[simp, to_additive]
lemma mul_roth_number_singleton (a : α) : mul_roth_number ({a} : finset α) = 1 :=
begin
convert mul_salem_spencer.roth_number_eq _,
rw coe_singleton,
exact mul_salem_spencer_singleton a,
end
@[to_additive]
lemma mul_roth_number_union_le (s t : finset α) :
mul_roth_number (s ∪ t) ≤ mul_roth_number s + mul_roth_number t :=
let ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec (s ∪ t) in
calc
mul_roth_number (s ∪ t)
= u.card : hcard.symm
... = (u ∩ s ∪ u ∩ t).card
: by rw [←inter_distrib_left, (inter_eq_left_iff_subset _ _).2 hus]
... ≤ (u ∩ s).card + (u ∩ t).card : card_union_le _ _
... ≤ mul_roth_number s + mul_roth_number t
: add_le_add ((hu.mono $ inter_subset_left _ _).le_mul_roth_number $ inter_subset_right _ _)
((hu.mono $ inter_subset_left _ _).le_mul_roth_number $ inter_subset_right _ _)
@[to_additive]
lemma le_mul_roth_number_product (s : finset α) (t : finset β) :
mul_roth_number s * mul_roth_number t ≤ mul_roth_number (s ×ˢ t) :=
begin
obtain ⟨u, hus, hucard, hu⟩ := mul_roth_number_spec s,
obtain ⟨v, hvt, hvcard, hv⟩ := mul_roth_number_spec t,
rw [←hucard, ←hvcard, ←card_product],
refine mul_salem_spencer.le_mul_roth_number _ (product_subset_product hus hvt),
rw coe_product,
exact hu.prod hv,
end
@[to_additive]
lemma mul_roth_number_lt_of_forall_not_mul_salem_spencer
(h : ∀ t ∈ powerset_len n s, ¬mul_salem_spencer ((t : finset α) : set α)) :
mul_roth_number s < n :=
begin
obtain ⟨t, hts, hcard, ht⟩ := mul_roth_number_spec s,
rw [←hcard, ←not_le],
intro hn,
obtain ⟨u, hut, rfl⟩ := exists_smaller_set t n hn,
exact h _ (mem_powerset_len.2 ⟨hut.trans hts, rfl⟩) (ht.mono hut),
end
end monoid
section cancel_comm_monoid
variables [cancel_comm_monoid α] (s : finset α) (a : α)
@[simp, to_additive] lemma mul_roth_number_map_mul_left :
mul_roth_number (s.map $ mul_left_embedding a) = mul_roth_number s :=
begin
refine le_antisymm _ _,
{ obtain ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec (s.map $ mul_left_embedding a),
rw subset_map_iff at hus,
obtain ⟨u, hus, rfl⟩ := hus,
rw coe_map at hu,
rw [←hcard, card_map],
exact (mul_salem_spencer_mul_left_iff.1 hu).le_mul_roth_number hus },
{ obtain ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec s,
have h : mul_salem_spencer (u.map $ mul_left_embedding a : set α),
{ rw coe_map,
exact hu.mul_left },
convert h.le_mul_roth_number (map_subset_map.2 hus),
rw [card_map, hcard] }
end
@[simp, to_additive] lemma mul_roth_number_map_mul_right :
mul_roth_number (s.map $ mul_right_embedding a) = mul_roth_number s :=
by rw [←mul_left_embedding_eq_mul_right_embedding, mul_roth_number_map_mul_left s a]
end cancel_comm_monoid
end roth_number
section roth_number_nat
variables {s : finset ℕ} {k n : ℕ}
/-- The Roth number of a natural `N` is the largest integer `m` for which there is a subset of
`range N` of size `m` with no arithmetic progression of length 3.
Trivially, `roth_number_nat N ≤ N`, but Roth's theorem (proved in 1953) shows that
`roth_number_nat N = o(N)` and the construction by Behrend gives a lower bound of the form
`N * exp(-C sqrt(log(N))) ≤ roth_number_nat N`.
A significant refinement of Roth's theorem by Bloom and Sisask announced in 2020 gives
`roth_number_nat N = O(N / (log N)^(1+c))` for an absolute constant `c`. -/
def roth_number_nat : ℕ →o ℕ :=
⟨λ n, add_roth_number (range n), add_roth_number.mono.comp range_mono⟩
lemma roth_number_nat_def (n : ℕ) : roth_number_nat n = add_roth_number (range n) := rfl
lemma roth_number_nat_le (N : ℕ) : roth_number_nat N ≤ N :=
(add_roth_number_le _).trans (card_range _).le
lemma roth_number_nat_spec (n : ℕ) :
∃ t ⊆ range n, t.card = roth_number_nat n ∧ add_salem_spencer (t : set ℕ) :=
add_roth_number_spec _
/-- A verbose specialization of `add_salem_spencer.le_add_roth_number`, sometimes convenient in
practice. -/
lemma add_salem_spencer.le_roth_number_nat (s : finset ℕ) (hs : add_salem_spencer (s : set ℕ))
(hsn : ∀ x ∈ s, x < n) (hsk : s.card = k) :
k ≤ roth_number_nat n :=
hsk.ge.trans $ hs.le_add_roth_number $ λ x hx, mem_range.2 $ hsn x hx
/-- The Roth number is a subadditive function. Note that by Fekete's lemma this shows that
the limit `roth_number_nat N / N` exists, but Roth's theorem gives the stronger result that this
limit is actually `0`. -/
lemma roth_number_nat_add_le (M N : ℕ) :
roth_number_nat (M + N) ≤ roth_number_nat M + roth_number_nat N :=
begin
simp_rw roth_number_nat_def,
rw [range_add_eq_union, ←add_roth_number_map_add_left (range N) M],
exact add_roth_number_union_le _ _,
end
@[simp] lemma roth_number_nat_zero : roth_number_nat 0 = 0 := rfl
lemma add_roth_number_Ico (a b : ℕ) : add_roth_number (Ico a b) = roth_number_nat (b - a) :=
begin
obtain h | h := le_total b a,
{ rw [tsub_eq_zero_of_le h, Ico_eq_empty_of_le h, roth_number_nat_zero, add_roth_number_empty] },
convert add_roth_number_map_add_left _ a,
rw [range_eq_Ico, map_eq_image],
convert (image_add_left_Ico 0 (b - a) _).symm,
exact (add_tsub_cancel_of_le h).symm,
end
open asymptotics filter
lemma roth_number_nat_is_O_with_id :
is_O_with 1 at_top (λ N, (roth_number_nat N : ℝ)) (λ N, (N : ℝ)) :=
is_O_with_of_le _ $ by simpa only [real.norm_coe_nat, nat.cast_le] using roth_number_nat_le
/-- The Roth number has the trivial bound `roth_number_nat N = O(N)`. -/
lemma roth_number_nat_is_O_id : (λ N, (roth_number_nat N : ℝ)) =O[at_top] (λ N, (N : ℝ)) :=
roth_number_nat_is_O_with_id.is_O
end roth_number_nat
|
(* Title: ZF/Resid/Redex.thy
Author: Ole Rasmussen, University of Cambridge
*)
theory Redex imports ZF begin
consts
redexes :: i
datatype
"redexes" = Var ("n \<in> nat")
| Fun ("t \<in> redexes")
| App ("b \<in> bool","f \<in> redexes", "a \<in> redexes")
consts
Ssub :: "i"
Scomp :: "i"
Sreg :: "i"
abbreviation
Ssub_rel (infixl \<open>\<Longleftarrow>\<close> 70) where
"a \<Longleftarrow> b == <a,b> \<in> Ssub"
abbreviation
Scomp_rel (infixl \<open>\<sim>\<close> 70) where
"a \<sim> b == <a,b> \<in> Scomp"
abbreviation
"regular(a) == a \<in> Sreg"
consts union_aux :: "i=>i"
primrec (*explicit lambda is required because both arguments of "\<squnion>" vary*)
"union_aux(Var(n)) =
(\<lambda>t \<in> redexes. redexes_case(%j. Var(n), %x. 0, %b x y.0, t))"
"union_aux(Fun(u)) =
(\<lambda>t \<in> redexes. redexes_case(%j. 0, %y. Fun(union_aux(u)`y),
%b y z. 0, t))"
"union_aux(App(b,f,a)) =
(\<lambda>t \<in> redexes.
redexes_case(%j. 0, %y. 0,
%c z u. App(b or c, union_aux(f)`z, union_aux(a)`u), t))"
definition
union (infixl \<open>\<squnion>\<close> 70) where
"u \<squnion> v == union_aux(u)`v"
inductive
domains "Ssub" \<subseteq> "redexes*redexes"
intros
Sub_Var: "n \<in> nat ==> Var(n) \<Longleftarrow> Var(n)"
Sub_Fun: "[|u \<Longleftarrow> v|]==> Fun(u) \<Longleftarrow> Fun(v)"
Sub_App1: "[|u1 \<Longleftarrow> v1; u2 \<Longleftarrow> v2; b \<in> bool|]==>
App(0,u1,u2) \<Longleftarrow> App(b,v1,v2)"
Sub_App2: "[|u1 \<Longleftarrow> v1; u2 \<Longleftarrow> v2|]==> App(1,u1,u2) \<Longleftarrow> App(1,v1,v2)"
type_intros redexes.intros bool_typechecks
inductive
domains "Scomp" \<subseteq> "redexes*redexes"
intros
Comp_Var: "n \<in> nat ==> Var(n) \<sim> Var(n)"
Comp_Fun: "[|u \<sim> v|]==> Fun(u) \<sim> Fun(v)"
Comp_App: "[|u1 \<sim> v1; u2 \<sim> v2; b1 \<in> bool; b2 \<in> bool|]
==> App(b1,u1,u2) \<sim> App(b2,v1,v2)"
type_intros redexes.intros bool_typechecks
inductive
domains "Sreg" \<subseteq> redexes
intros
Reg_Var: "n \<in> nat ==> regular(Var(n))"
Reg_Fun: "[|regular(u)|]==> regular(Fun(u))"
Reg_App1: "[|regular(Fun(u)); regular(v) |]==>regular(App(1,Fun(u),v))"
Reg_App2: "[|regular(u); regular(v) |]==>regular(App(0,u,v))"
type_intros redexes.intros bool_typechecks
declare redexes.intros [simp]
(* ------------------------------------------------------------------------- *)
(* Specialisation of comp-rules *)
(* ------------------------------------------------------------------------- *)
lemmas compD1 [simp] = Scomp.dom_subset [THEN subsetD, THEN SigmaD1]
lemmas compD2 [simp] = Scomp.dom_subset [THEN subsetD, THEN SigmaD2]
lemmas regD [simp] = Sreg.dom_subset [THEN subsetD]
(* ------------------------------------------------------------------------- *)
(* Equality rules for union *)
(* ------------------------------------------------------------------------- *)
lemma union_Var [simp]: "n \<in> nat ==> Var(n) \<squnion> Var(n)=Var(n)"
by (simp add: union_def)
lemma union_Fun [simp]: "v \<in> redexes ==> Fun(u) \<squnion> Fun(v) = Fun(u \<squnion> v)"
by (simp add: union_def)
lemma union_App [simp]:
"[|b2 \<in> bool; u2 \<in> redexes; v2 \<in> redexes|]
==> App(b1,u1,v1) \<squnion> App(b2,u2,v2)=App(b1 or b2,u1 \<squnion> u2,v1 \<squnion> v2)"
by (simp add: union_def)
lemma or_1_right [simp]: "a or 1 = 1"
by (simp add: or_def cond_def)
lemma or_0_right [simp]: "a \<in> bool \<Longrightarrow> a or 0 = a"
by (simp add: or_def cond_def bool_def, auto)
declare Ssub.intros [simp]
declare bool_typechecks [simp]
declare Sreg.intros [simp]
declare Scomp.intros [simp]
declare Scomp.intros [intro]
inductive_cases [elim!]:
"regular(App(b,f,a))"
"regular(Fun(b))"
"regular(Var(b))"
"Fun(u) \<sim> Fun(t)"
"u \<sim> Fun(t)"
"u \<sim> Var(n)"
"u \<sim> App(b,t,a)"
"Fun(t) \<sim> v"
"App(b,f,a) \<sim> v"
"Var(n) \<sim> u"
(* ------------------------------------------------------------------------- *)
(* comp proofs *)
(* ------------------------------------------------------------------------- *)
lemma comp_refl [simp]: "u \<in> redexes ==> u \<sim> u"
by (erule redexes.induct, blast+)
lemma comp_sym: "u \<sim> v ==> v \<sim> u"
by (erule Scomp.induct, blast+)
lemma comp_sym_iff: "u \<sim> v \<longleftrightarrow> v \<sim> u"
by (blast intro: comp_sym)
lemma comp_trans [rule_format]: "u \<sim> v ==> \<forall>w. v \<sim> w\<longrightarrow>u \<sim> w"
by (erule Scomp.induct, blast+)
(* ------------------------------------------------------------------------- *)
(* union proofs *)
(* ------------------------------------------------------------------------- *)
lemma union_l: "u \<sim> v \<Longrightarrow> u \<Longleftarrow> (u \<squnion> v)"
apply (erule Scomp.induct)
apply (erule_tac [3] boolE, simp_all)
done
lemma union_r: "u \<sim> v \<Longrightarrow> v \<Longleftarrow> (u \<squnion> v)"
apply (erule Scomp.induct)
apply (erule_tac [3] c = b2 in boolE, simp_all)
done
lemma union_sym: "u \<sim> v \<Longrightarrow> u \<squnion> v = v \<squnion> u"
by (erule Scomp.induct, simp_all add: or_commute)
(* ------------------------------------------------------------------------- *)
(* regular proofs *)
(* ------------------------------------------------------------------------- *)
lemma union_preserve_regular [rule_format]:
"u \<sim> v \<Longrightarrow> regular(u) \<longrightarrow> regular(v) \<longrightarrow> regular(u \<squnion> v)"
by (erule Scomp.induct, auto)
end
|
import Decidable.Equality
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : a -> Vect k a -> Vect (S k) a
%name Vect xs, ys, zs
exactLength : {m : _} ->
(len : Nat) -> (input : Vect m a) -> Maybe (Vect len a)
exactLength {m} len input = case decEq m len of
Yes Refl => Just input
No contra => Nothing
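-- Usage sketch (illustrative, not from the original source):
--   exactLength 3 (1 :: 2 :: 3 :: Nil)  -- Just (1 :: 2 :: 3 :: Nil)
--   exactLength 2 (1 :: 2 :: 3 :: Nil)  -- Nothing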
|
State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D : Type uD
inst✝⁹ : NormedAddCommGroup D
inst✝⁸ : NormedSpace 𝕜 D
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.21033
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s s₁ t u : Set E
f f₁ : E → F
g : F → G
x x₀ : E
c : F
b : E × F → G
m✝ n : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
m : ℕ
x✝ : ↑m ≤ n
⊢ Differentiable 𝕜 (iteratedFDeriv 𝕜 m fun x => 0) State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D : Type uD
inst✝⁹ : NormedAddCommGroup D
inst✝⁸ : NormedSpace 𝕜 D
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.21033
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s s₁ t u : Set E
f f₁ : E → F
g : F → G
x x₀ : E
c : F
b : E × F → G
m✝ n : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
m : ℕ
x✝ : ↑m ≤ n
⊢ Differentiable 𝕜 0 Tactic: rw [iteratedFDeriv_zero_fun] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D : Type uD
inst✝⁹ : NormedAddCommGroup D
inst✝⁸ : NormedSpace 𝕜 D
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.21033
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s s₁ t u : Set E
f f₁ : E → F
g : F → G
x x₀ : E
c : F
b : E × F → G
m✝ n : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
m : ℕ
x✝ : ↑m ≤ n
⊢ Differentiable 𝕜 0 State After: no goals Tactic: exact differentiable_const (0 : E[×m]→L[𝕜] F) |
[STATEMENT]
lemma d_OUT_mono: "(\<And>y. f (x, y) \<le> g (x, y)) \<Longrightarrow> d_OUT f x \<le> d_OUT g x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y. f (x, y) \<le> g (x, y)) \<Longrightarrow> d_OUT f x \<le> d_OUT g x
[PROOF STEP]
by(auto simp add: d_OUT_def le_fun_def intro: nn_integral_mono) |
[STATEMENT]
lemma linaform: "is_aform (linaform p) vs = is_aform p vs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_aform (linaform p) vs = is_aform p vs
[PROOF STEP]
by (induct p rule: linaform.induct) (auto simp add: linum) |
State Before: α : Type u_1
inst✝¹ : CommRing α
inst✝ : LinearOrder α
p : α
hp : Prime p
⊢ Prime (Abs.abs p) State After: case inl
α : Type u_1
inst✝¹ : CommRing α
inst✝ : LinearOrder α
p : α
hp : Prime p
h : Abs.abs p = p
⊢ Prime p
case inr
α : Type u_1
inst✝¹ : CommRing α
inst✝ : LinearOrder α
p : α
hp : Prime p
h : Abs.abs p = -p
⊢ Prime (-p) Tactic: obtain h | h := abs_choice p <;> rw [h] State Before: case inl
α : Type u_1
inst✝¹ : CommRing α
inst✝ : LinearOrder α
p : α
hp : Prime p
h : Abs.abs p = p
⊢ Prime p State After: no goals Tactic: exact hp State Before: case inr
α : Type u_1
inst✝¹ : CommRing α
inst✝ : LinearOrder α
p : α
hp : Prime p
h : Abs.abs p = -p
⊢ Prime (-p) State After: no goals Tactic: exact hp.neg |
lemma integrable_restrict_UNIV: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::{banach, second_countable_topology}" assumes S: "S \<in> sets lebesgue" shows "integrable lebesgue (\<lambda>x. if x \<in> S then f x else 0) \<longleftrightarrow> integrable (lebesgue_on S) f" |
[STATEMENT]
lemma vD_update_val [dest]:
"\<And>dip rt dip' dsn dsk hops nhip pre.
dip \<in> vD(update rt dip' (dsn, dsk, val, hops, nhip, pre)) \<Longrightarrow> (dip\<in>vD(rt) \<or> dip=dip')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>dip rt dip' dsn dsk hops nhip pre. dip \<in> vD (update rt dip' (dsn, dsk, val, hops, nhip, pre)) \<Longrightarrow> dip \<in> vD rt \<or> dip = dip'
[PROOF STEP]
unfolding update_def vD_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>dip rt dip' dsn dsk hops nhip pre. dip \<in> {dip. flag (case rt dip' of None \<Rightarrow> rt(dip' \<mapsto> (dsn, dsk, val, hops, nhip, pre)) | Some s \<Rightarrow> if \<pi>\<^sub>2 s < \<pi>\<^sub>2 (dsn, dsk, val, hops, nhip, pre) then rt(dip' \<mapsto> addpre (dsn, dsk, val, hops, nhip, pre) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>2 s = \<pi>\<^sub>2 (dsn, dsk, val, hops, nhip, pre) \<and> (\<pi>\<^sub>5 (dsn, dsk, val, hops, nhip, pre) < \<pi>\<^sub>5 s \<or> \<pi>\<^sub>4 s = Aodv_Basic.inv) then rt(dip' \<mapsto> addpre (dsn, dsk, val, hops, nhip, pre) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>3 (dsn, dsk, val, hops, nhip, pre) = unk then rt(dip' \<mapsto> (\<pi>\<^sub>2 s, snd (addpre (dsn, dsk, val, hops, nhip, pre) (\<pi>\<^sub>7 s)))) else rt(dip' \<mapsto> addpre s (\<pi>\<^sub>7 (dsn, dsk, val, hops, nhip, pre)))) dip = Some val} \<Longrightarrow> dip \<in> {dip. flag rt dip = Some val} \<or> dip = dip'
[PROOF STEP]
by (clarsimp split: option.split_asm if_split_asm) |
use LeafClass
implicit none
type(Leaf_) :: leaf
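! Load a leaf geometry from a text file and export it as a VTK file for
! visualization (assumed semantics of the LeafClass API, inferred from the
! calls below).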
call leaf%create(filename="Tutorial/playon_obj/grape_leaf.txt")
call leaf%vtk("grape")
end |
theory T99
imports Main
begin
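(* Note (added for readability): the hypotheses axiomatise a lattice carrying
   a multiplication with residuals "over" and "undr"; nitpick searches for a
   finite counterexample (carrier of size 4) to distributivity of "over"
   across join in its first argument. *)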
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
[STATEMENT]
lemma derangement_enum_aux_no_overlap: "zs \<in> set (derangement_enum_aux xs ys) \<Longrightarrow> no_overlap xs zs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zs \<in> set (derangement_enum_aux xs ys) \<Longrightarrow> no_overlap xs zs
[PROOF STEP]
by(induct xs arbitrary: zs ys) auto |
module Prelude.Bound
public export
interface Ord b => MinBound b where
||| The lower bound for the type
minBound : b
public export
interface Ord b => MaxBound b where
||| The upper bound for the type
maxBound : b
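-- Example instances (an illustrative sketch, not part of the original module):
-- MinBound Bool where
--   minBound = False
-- MaxBound Bool where
--   maxBound = True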
|
Formal statement is: lemma chain: "a \<in> s \<Longrightarrow> b \<in> s \<Longrightarrow> a \<le> b \<or> b \<le> a" Informal statement is: If $a$ and $b$ are elements of a chain, then either $a \leq b$ or $b \leq a$. |
lemma space_empty_eq_bot: "space a = {} \<longleftrightarrow> a = bot" |
[STATEMENT]
lemma polytope_sing: "polytope {a}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. polytope {a}
[PROOF STEP]
using polytope_def
[PROOF STATE]
proof (prove)
using this:
polytope ?S \<equiv> \<exists>v. finite v \<and> ?S = convex hull v
goal (1 subgoal):
1. polytope {a}
[PROOF STEP]
by force |
module Data.OneOf
import public Data.List.Elem
import public Data.HList
%default total
public export
data OneOf : List Type -> Type where
Here : x -> OneOf (x :: xs)
There : OneOf xs -> OneOf (x :: xs)
public export
make : Elem a as => a -> OneOf as
make x @{Here} = Here x
make x @{There _} = There (make x)
public export
TypeAt : (as : List Type) -> OneOf as -> Type
TypeAt (x :: _) (Here _) = x
TypeAt (_ :: xs) (There x) = TypeAt xs x
public export
Eliminators : (as : List Type) -> (r : Type) -> Type
Eliminators xs r = HList (map (\x => x -> r) xs)
public export
get : (o : OneOf as) -> TypeAt as o
get (Here x) = x
get (There x) = get x
public export
match : OneOf as -> Eliminators as r -> r
match (Here x) (f :: _) = f x
match (There x) (_ :: fs) = match x fs
public export
extend : OneOf as -> OneOf (as ++ bs)
extend (Here x) = Here x
extend (There x) = There (extend x)
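-- Usage sketch (illustrative, not from the original source):
--   ex : OneOf [Int, String]
--   ex = make "hi"            -- injected via the `Elem String [Int, String]` proof
--   r : String
--   r = match ex [show, id]   -- one eliminator per summand; r = "hi"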
|
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johan Commelin, Bhavik Mehta
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.natural_isomorphism
import Mathlib.PostPort
universes v₁ v₂ v₃ u₁ u₂ u₃ l
namespace Mathlib
/-!
# Comma categories
A comma category is a construction in category theory, which builds a category out of two functors
with a common codomain. Specifically, for functors `L : A ⥤ T` and `R : B ⥤ T`, an object in
`comma L R` is a morphism `hom : L.obj left ⟶ R.obj right` for some objects `left : A` and
`right : B`, and a morphism in `comma L R` between `hom : L.obj left ⟶ R.obj right` and
`hom' : L.obj left' ⟶ R.obj right'` is a commutative square
```
L.obj left ⟶ L.obj left'
| |
hom | | hom'
↓ ↓
R.obj right ⟶ R.obj right',
```
where the top and bottom morphisms come from morphisms `left ⟶ left'` and `right ⟶ right'`,
respectively.
## Main definitions
* `comma L R`: the comma category of the functors `L` and `R`.
* `over X`: the over category of the object `X` (developed in `over.lean`).
* `under X`: the under category of the object `X` (also developed in `over.lean`).
* `arrow T`: the arrow category of the category `T` (developed in `arrow.lean`).
## References
* <https://ncatlab.org/nlab/show/comma+category>
## Tags
comma, slice, coslice, over, under, arrow
-/
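/- Note (added for readability): `over X`, `under X` and `arrow T` mentioned
above are all special cases of this construction; for instance `arrow T` is
the comma category of the identity functor on `T` with itself, as in the
`inhabited` instance below. -/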
namespace category_theory
/-- The objects of the comma category are triples of an object `left : A`, an object
`right : B` and a morphism `hom : L.obj left ⟶ R.obj right`. -/
structure comma {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T)
where
left : autoParam A
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.obviously")
(Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "obviously") [])
right : autoParam B
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.obviously")
(Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "obviously") [])
hom : functor.obj L left ⟶ functor.obj R right
-- Satisfying the inhabited linter
protected instance comma.inhabited {T : Type u₃} [category T] [Inhabited T] : Inhabited (comma 𝟭 𝟭) :=
{ default := comma.mk 𝟙 }
/-- A morphism between two objects in the comma category is a commutative square connecting the
morphisms coming from the two objects using morphisms in the image of the functors `L` and `R`.
-/
structure comma_morphism {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} (X : comma L R) (Y : comma L R)
where
left : autoParam (comma.left X ⟶ comma.left Y)
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.obviously")
(Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "obviously") [])
right : autoParam (comma.right X ⟶ comma.right Y)
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.obviously")
(Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "obviously") [])
w' : autoParam (functor.map L left ≫ comma.hom Y = comma.hom X ≫ functor.map R right)
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.obviously")
(Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "obviously") [])
-- Satisfying the inhabited linter
protected instance comma_morphism.inhabited {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} [Inhabited (comma L R)] : Inhabited (comma_morphism Inhabited.default Inhabited.default) :=
{ default := comma_morphism.mk }
@[simp] theorem comma_morphism.w {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} {Y : comma L R} (c : comma_morphism X Y) : functor.map L (comma_morphism.left c) ≫ comma.hom Y = comma.hom X ≫ functor.map R (comma_morphism.right c) := sorry
@[simp] theorem comma_morphism.w_assoc {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} {Y : comma L R} (c : comma_morphism X Y) {X' : T} (f' : functor.obj R (comma.right Y) ⟶ X') : functor.map L (comma_morphism.left c) ≫ comma.hom Y ≫ f' = comma.hom X ≫ functor.map R (comma_morphism.right c) ≫ f' := sorry
protected instance comma_category {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} : category (comma L R) :=
category.mk
namespace comma
@[simp] theorem id_left {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} : comma_morphism.left 𝟙 = 𝟙 :=
rfl
@[simp] theorem id_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} : comma_morphism.right 𝟙 = 𝟙 :=
rfl
@[simp] theorem comp_left {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} {Y : comma L R} {Z : comma L R} {f : X ⟶ Y} {g : Y ⟶ Z} : comma_morphism.left (f ≫ g) = comma_morphism.left f ≫ comma_morphism.left g :=
rfl
@[simp] theorem comp_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L : A ⥤ T} {R : B ⥤ T} {X : comma L R} {Y : comma L R} {Z : comma L R} {f : X ⟶ Y} {g : Y ⟶ Z} : comma_morphism.right (f ≫ g) = comma_morphism.right f ≫ comma_morphism.right g :=
rfl
/-- The functor sending an object `X` in the comma category to `X.left`. -/
def fst {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T) : comma L R ⥤ A :=
functor.mk (fun (X : comma L R) => left X) fun (_x _x_1 : comma L R) (f : _x ⟶ _x_1) => comma_morphism.left f
/-- The functor sending an object `X` in the comma category to `X.right`. -/
@[simp] theorem snd_map {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T) (_x : comma L R) : ∀ (_x_1 : comma L R) (f : _x ⟶ _x_1), functor.map (snd L R) f = comma_morphism.right f :=
fun (_x_1 : comma L R) (f : _x ⟶ _x_1) => Eq.refl (functor.map (snd L R) f)
/-- We can interpret the commutative square constituting a morphism in the comma category as a
natural transformation between the functors `fst ⋙ L` and `snd ⋙ R` from the comma category
to `T`, where the components are given by the morphism that constitutes an object of the comma
category. -/
@[simp] theorem nat_trans_app {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T) (X : comma L R) : nat_trans.app (nat_trans L R) X = hom X :=
Eq.refl (nat_trans.app (nat_trans L R) X)
/--
Construct an isomorphism in the comma category given isomorphisms of the objects whose forward
directions give a commutative square.
-/
@[simp] theorem iso_mk_inv_left {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] {L₁ : A ⥤ T} {R₁ : B ⥤ T} {X : comma L₁ R₁} {Y : comma L₁ R₁} (l : left X ≅ left Y) (r : right X ≅ right Y) (h : functor.map L₁ (iso.hom l) ≫ hom Y = hom X ≫ functor.map R₁ (iso.hom r)) : comma_morphism.left (iso.inv (iso_mk l r h)) = iso.inv l :=
Eq.refl (comma_morphism.left (iso.inv (iso_mk l r h)))
/-- A natural transformation `L₁ ⟶ L₂` induces a functor `comma L₂ R ⥤ comma L₁ R`. -/
@[simp] theorem map_left_obj_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (R : B ⥤ T) {L₁ : A ⥤ T} {L₂ : A ⥤ T} (l : L₁ ⟶ L₂) (X : comma L₂ R) : right (functor.obj (map_left R l) X) = right X :=
Eq.refl (right (functor.obj (map_left R l) X))
/-- The functor `comma L R ⥤ comma L R` induced by the identity natural transformation on `L` is
naturally isomorphic to the identity functor. -/
@[simp] theorem map_left_id_inv_app_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T) (X : comma L R) : comma_morphism.right (nat_trans.app (iso.inv (map_left_id L R)) X) = 𝟙 :=
Eq.refl (comma_morphism.right (nat_trans.app (iso.inv (map_left_id L R)) X))
/-- The functor `comma L₁ R ⥤ comma L₃ R` induced by the composition of two natural transformations
`l : L₁ ⟶ L₂` and `l' : L₂ ⟶ L₃` is naturally isomorphic to the composition of the two functors
induced by these natural transformations. -/
@[simp] theorem map_left_comp_hom_app_left {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (R : B ⥤ T) {L₁ : A ⥤ T} {L₂ : A ⥤ T} {L₃ : A ⥤ T} (l : L₁ ⟶ L₂) (l' : L₂ ⟶ L₃) (X : comma L₃ R) : comma_morphism.left (nat_trans.app (iso.hom (map_left_comp R l l')) X) = 𝟙 :=
Eq.refl (comma_morphism.left (nat_trans.app (iso.hom (map_left_comp R l l')) X))
/-- A natural transformation `R₁ ⟶ R₂` induces a functor `comma L R₁ ⥤ comma L R₂`. -/
def map_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) {R₁ : B ⥤ T} {R₂ : B ⥤ T} (r : R₁ ⟶ R₂) : comma L R₁ ⥤ comma L R₂ :=
functor.mk (fun (X : comma L R₁) => mk (hom X ≫ nat_trans.app r (right X)))
fun (X Y : comma L R₁) (f : X ⟶ Y) => comma_morphism.mk
/-- The functor `comma L R ⥤ comma L R` induced by the identity natural transformation on `R` is
naturally isomorphic to the identity functor. -/
@[simp] theorem map_right_id_inv_app_left {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) (R : B ⥤ T) (X : comma L R) : comma_morphism.left (nat_trans.app (iso.inv (map_right_id L R)) X) = 𝟙 :=
Eq.refl (comma_morphism.left (nat_trans.app (iso.inv (map_right_id L R)) X))
/-- The functor `comma L R₁ ⥤ comma L R₃` induced by the composition of the natural transformations
`r : R₁ ⟶ R₂` and `r' : R₂ ⟶ R₃` is naturally isomorphic to the composition of the functors
induced by these natural transformations. -/
@[simp] theorem map_right_comp_inv_app_right {A : Type u₁} [category A] {B : Type u₂} [category B] {T : Type u₃} [category T] (L : A ⥤ T) {R₁ : B ⥤ T} {R₂ : B ⥤ T} {R₃ : B ⥤ T} (r : R₁ ⟶ R₂) (r' : R₂ ⟶ R₃) (X : comma L R₁) : comma_morphism.right (nat_trans.app (iso.inv (map_right_comp L r r')) X) = 𝟙 :=
Eq.refl (comma_morphism.right (nat_trans.app (iso.inv (map_right_comp L r r')) X))
|
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q✝ : α✝ → Prop
α β : Sort u_4
p : α → Prop
q : β → Prop
a : { x // p x }
b : { y // q y }
h : α = β
h' : HEq p q
⊢ HEq a b ↔ HEq ↑a ↑b
[PROOFSTEP]
subst h
[GOAL]
α✝ : Sort u_1
β : Sort u_2
γ : Sort u_3
p✝ q✝ : α✝ → Prop
α : Sort u_4
p : α → Prop
a : { x // p x }
q : α → Prop
b : { y // q y }
h' : HEq p q
⊢ HEq a b ↔ HEq ↑a ↑b
[PROOFSTEP]
subst h'
[GOAL]
α✝ : Sort u_1
β : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_4
p : α → Prop
a b : { y // p y }
⊢ HEq a b ↔ HEq ↑a ↑b
[PROOFSTEP]
rw [heq_iff_eq, heq_iff_eq, ext_iff]
[GOAL]
α : Sort u_1
β : Sort u_2
γ : Sort u_3
p q : α → Prop
a : Subtype p
b : α
⊢ (∃ h, { val := b, property := h } = a) ↔ b = ↑a
[PROOFSTEP]
simp only [@eq_comm _ b, exists_eq_subtype_mk_iff, @eq_comm _ _ a]
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_5
β : α → Type u_4
f : (x : α) → β x
p : α → Prop
x : Subtype p
⊢ restrict p f x = f ↑x
[PROOFSTEP]
rfl
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_5
β : α → Type u_4
ne : ∀ (a : α), Nonempty (β a)
p : α → Prop
⊢ Surjective fun f => restrict p f
[PROOFSTEP]
letI := Classical.decPred p
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_5
β : α → Type u_4
ne : ∀ (a : α), Nonempty (β a)
p : α → Prop
this : DecidablePred p := Classical.decPred p
⊢ Surjective fun f => restrict p f
[PROOFSTEP]
refine' fun f ↦ ⟨fun x ↦ if h : p x then f ⟨x, h⟩ else Nonempty.some (ne x), funext <| _⟩
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_5
β : α → Type u_4
ne : ∀ (a : α), Nonempty (β a)
p : α → Prop
this : DecidablePred p := Classical.decPred p
f : (x : Subtype p) → β ↑x
⊢ ∀ (x : Subtype p),
(fun f => restrict p f)
(fun x => if h : p x then f { val := x, property := h } else Nonempty.some (_ : Nonempty (β x))) x =
f x
[PROOFSTEP]
rintro ⟨x, hx⟩
[GOAL]
case mk
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_5
β : α → Type u_4
ne : ∀ (a : α), Nonempty (β a)
p : α → Prop
this : DecidablePred p := Classical.decPred p
f : (x : Subtype p) → β ↑x
x : α
hx : p x
⊢ (fun f => restrict p f)
(fun x => if h : p x then f { val := x, property := h } else Nonempty.some (_ : Nonempty (β x)))
{ val := x, property := hx } =
f { val := x, property := hx }
[PROOFSTEP]
exact dif_pos hx
[GOAL]
α✝ : Sort u_1
β✝ : Sort u_2
γ : Sort u_3
p✝ q : α✝ → Prop
α : Sort u_4
β : Sort u_5
f : α → β
p : β → Prop
h : ∀ (a : α), p (f a)
hf : Injective f
x y : α
hxy : coind f h x = coind f h y
⊢ f x = f y
[PROOFSTEP]
apply congr_arg Subtype.val hxy
|
lemma binary_in_sigma_sets: "binary a b i \<in> sigma_sets sp A" if "a \<in> sigma_sets sp A" and "b \<in> sigma_sets sp A" |
[STATEMENT]
lemma lossless_G: "lossless_spmf G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lossless_spmf G
[PROOF STEP]
by(simp add: G_def) |
theory ETCS_Terminal
imports ETCS_Cartesian
begin
section \<open>Axiom 3: Terminal Objects\<close>
axiomatization
terminal_func :: "cset \<Rightarrow> cfunc" ("\<beta>\<^bsub>_\<^esub>" 100) and
one :: "cset"
where
terminal_func_type[type_rule]: "\<beta>\<^bsub>X\<^esub> : X \<rightarrow> one" and
terminal_func_unique: "h : X \<rightarrow> one \<Longrightarrow> h = \<beta>\<^bsub>X\<^esub>" and
one_separator: "f : X \<rightarrow> Y \<Longrightarrow> g : X \<rightarrow> Y \<Longrightarrow> (\<And> x. x : one \<rightarrow> X \<Longrightarrow> f \<circ>\<^sub>c x = g \<circ>\<^sub>c x) \<Longrightarrow> f = g"
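(* Note (added for readability): one_separator is a well-pointedness axiom:
   morphisms out of X are determined by their action on global elements
   x : one \<rightarrow> X. *)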
lemma terminal_func_comp:
"x : X \<rightarrow> Y \<Longrightarrow> \<beta>\<^bsub>Y\<^esub> \<circ>\<^sub>c x = \<beta>\<^bsub>X\<^esub>"
by (simp add: comp_type terminal_func_type terminal_func_unique)
(* Definition 2.1.16 *)
abbreviation member :: "cfunc \<Rightarrow> cset \<Rightarrow> bool" (infix "\<in>\<^sub>c" 50) where
"x \<in>\<^sub>c X \<equiv> (x : one \<rightarrow> X)"
definition nonempty :: "cset \<Rightarrow> bool" where
"nonempty X \<equiv> (\<exists>x. x \<in>\<^sub>c X)"
definition terminal_object :: "cset \<Rightarrow> bool" where
"terminal_object(X) \<longleftrightarrow> (\<forall> Y. \<exists>! f. f : Y \<rightarrow> X)"
lemma diag_on_elements:
assumes "x \<in>\<^sub>c X"
shows "diagonal(X) \<circ>\<^sub>c x = \<langle>x,x\<rangle>"
using assms cfunc_prod_comp cfunc_type_def diagonal_def id_left_unit id_type by auto
lemma one_separator_contrapos:
assumes "f : X \<rightarrow> Y" "g : X \<rightarrow> Y"
shows "f \<noteq> g \<Longrightarrow> \<exists> x. x : one \<rightarrow> X \<and> f \<circ>\<^sub>c x \<noteq> g \<circ>\<^sub>c x"
proof -
have "(\<forall> x. x : one \<rightarrow> X \<longrightarrow> f \<circ>\<^sub>c x = g \<circ>\<^sub>c x) \<longrightarrow> f = g"
using assms(1) assms(2) one_separator by blast
then show "f \<noteq> g \<Longrightarrow> \<exists>x. x \<in>\<^sub>c X \<and> f \<circ>\<^sub>c x \<noteq> g \<circ>\<^sub>c x"
by blast
qed
lemma one_terminal_object: "terminal_object(one)"
unfolding terminal_object_def
proof auto
fix Y
have "\<beta>\<^bsub>Y\<^esub> : Y \<rightarrow> one"
using terminal_func_type by simp
then show "\<exists>f. (f : Y \<rightarrow> one)"
by auto
next
fix Y f y
assume "f : Y \<rightarrow> one" "y : Y \<rightarrow> one"
then have "f = \<beta>\<^bsub>Y\<^esub> \<and> y = \<beta>\<^bsub>Y\<^esub>"
using terminal_func_unique by auto
then show "f = y"
by simp
qed
(* Exercise 2.1.15 *)
lemma terminal_objects_isomorphic:
assumes "terminal_object X" "terminal_object Y"
shows "X \<cong> Y"
unfolding is_isomorphic_def
proof -
obtain f where f_type: "f : X \<rightarrow> Y" and f_unique: "\<forall>g. g : X \<rightarrow> Y \<longrightarrow> f = g"
using assms(2) terminal_object_def by force
obtain g where g_type: "g : Y \<rightarrow> X" and g_unique: "\<forall>f. f : Y \<rightarrow> X \<longrightarrow> g = f"
using assms(1) terminal_object_def by force
have g_f_is_id: "g \<circ>\<^sub>c f = id X"
using assms(1) comp_type f_type g_type id_type terminal_object_def by blast
have f_g_is_id: "f \<circ>\<^sub>c g = id Y"
using assms(2) comp_type f_type g_type id_type terminal_object_def by blast
have f_isomorphism: "isomorphism f"
unfolding isomorphism_def
using cfunc_type_def f_type g_type g_f_is_id f_g_is_id
by (rule_tac x=g in exI, auto)
show "\<exists>f. f : X \<rightarrow> Y \<and> isomorphism f"
using f_isomorphism f_type by auto
qed
(* Exercise 2.1.18 *)
lemma element_monomorphism:
"x \<in>\<^sub>c X \<Longrightarrow> monomorphism x"
unfolding monomorphism_def
by (metis cfunc_type_def domain_comp terminal_func_unique)
(* Generalization of Exercise 2.1.18 *)
lemma terminal_el__monomorphism:
assumes "x : T \<rightarrow> X"
assumes "terminal_object(T)"
shows "monomorphism x"
unfolding monomorphism_def
by (metis assms cfunc_type_def domain_comp terminal_object_def)
lemma one_unique_element:
"\<exists>! x. x \<in>\<^sub>c one"
proof (rule_tac a="id one" in ex1I)
show "id\<^sub>c one \<in>\<^sub>c one"
by (simp add: id_type)
next
fix x
assume "x \<in>\<^sub>c one"
then show "x = id\<^sub>c one"
by (metis id_type terminal_func_unique)
qed
lemma one_cross_one_unique_element:
"\<exists>! x. x \<in>\<^sub>c one \<times>\<^sub>c one"
proof (rule_tac a="diagonal one" in ex1I)
show "diagonal one \<in>\<^sub>c one \<times>\<^sub>c one"
by (simp add: cfunc_prod_type diagonal_def id_type)
next
fix x
assume x_type: "x \<in>\<^sub>c one \<times>\<^sub>c one"
have left_eq: "left_cart_proj one one \<circ>\<^sub>c x = id one"
using x_type one_unique_element by (typecheck_cfuncs, blast)
have right_eq: "right_cart_proj one one \<circ>\<^sub>c x = id one"
using x_type one_unique_element by (typecheck_cfuncs, blast)
then show "x = diagonal one"
unfolding diagonal_def using cfunc_prod_unique id_type left_eq x_type by blast
qed
(* Proposition 2.1.19 *)
lemma single_elem_iso_one:
"(\<exists>! x. x \<in>\<^sub>c X) \<longleftrightarrow> X \<cong> one"
proof
assume X_iso_one: "X \<cong> one"
then have "one \<cong> X"
by (simp add: isomorphic_is_symmetric)
then obtain f where f_type: "f : one \<rightarrow> X" and f_iso: "isomorphism f"
using is_isomorphic_def by blast
show "\<exists>!x. x \<in>\<^sub>c X"
proof (rule_tac a=f in ex1I, auto simp add: f_type)
fix x
assume x_type: "x \<in>\<^sub>c X"
then have \<beta>x_eq_\<beta>f: "\<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c x = \<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c f"
using f_type terminal_func_comp by auto
have "isomorphism (\<beta>\<^bsub>X\<^esub>)"
using X_iso_one is_isomorphic_def terminal_func_unique by blast
then have "monomorphism (\<beta>\<^bsub>X\<^esub>)"
by (simp add: iso_imp_epi_and_monic)
then show "x = f"
unfolding monomorphism_def using \<beta>x_eq_\<beta>f x_type cfunc_type_def f_type terminal_func_type by auto
qed
next
assume "\<exists>!x. x \<in>\<^sub>c X"
then obtain x where x_type: "x : one \<rightarrow> X" and x_unique: "\<forall> y. y : one \<rightarrow> X \<longrightarrow> x = y"
by blast
have "terminal_object X"
unfolding terminal_object_def
proof
fix Y
show "\<exists>!f. f : Y \<rightarrow> X"
proof (rule_tac a="x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>" in ex1I)
show "x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub> : Y \<rightarrow> X"
using comp_type terminal_func_type x_type by blast
next
fix xa
assume xa_type: "xa : Y \<rightarrow> X"
show "xa = x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>"
proof (rule ccontr)
assume "xa \<noteq> x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>"
then obtain y where elems_neq: "xa \<circ>\<^sub>c y \<noteq> (x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>) \<circ>\<^sub>c y" and y_type: "y : one \<rightarrow> Y"
using one_separator_contrapos[where f=xa, where g="x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>", where X=Y, where Y=X]
using comp_type terminal_func_type x_type xa_type by blast
have elem1: "xa \<circ>\<^sub>c y \<in>\<^sub>c X"
using comp_type xa_type y_type by auto
have elem2: "(x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>) \<circ>\<^sub>c y \<in>\<^sub>c X"
using comp_type terminal_func_type x_type y_type by blast
show False
using elem1 elem2 elems_neq x_unique by blast
qed
qed
qed
then show "X \<cong> one"
by (simp add: one_terminal_object terminal_objects_isomorphic)
qed
(* Converse to Exercise 2.1.15: Part 1 *)
lemma iso_to1_is_term:
assumes "X \<cong> one"
shows "terminal_object X"
unfolding terminal_object_def
proof
fix Y
obtain x where x_type: "x : one \<rightarrow> X" and x_unique: "\<forall> y. y : one \<rightarrow> X \<longrightarrow> x = y"
using assms single_elem_iso_one by fastforce
show "\<exists>!f. f : Y \<rightarrow> X"
proof (rule_tac a="x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>" in ex1I)
show "x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub> : Y \<rightarrow> X"
using comp_type terminal_func_type x_type by blast
next
fix xa
assume xa_type: "xa : Y \<rightarrow> X"
show "xa = x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>"
proof (rule ccontr)
assume "xa \<noteq> x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>"
then obtain y where elems_neq: "xa \<circ>\<^sub>c y \<noteq> (x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>) \<circ>\<^sub>c y" and y_type: "y : one \<rightarrow> Y"
using one_separator_contrapos[where f=xa, where g="x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>", where X=Y, where Y=X]
using comp_type terminal_func_type x_type xa_type by blast
have elem1: "xa \<circ>\<^sub>c y \<in>\<^sub>c X"
using comp_type xa_type y_type by auto
have elem2: "(x \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>) \<circ>\<^sub>c y \<in>\<^sub>c X"
using comp_type terminal_func_type x_type y_type by blast
show False
using elem1 elem2 elems_neq x_unique by blast
qed
qed
qed
(* Converse to Exercise 2.1.15: Part 2 *)
lemma iso_to_term_is_term:
assumes "X \<cong> Y"
assumes "terminal_object Y"
shows "terminal_object X"
by (meson assms iso_to1_is_term isomorphic_is_transitive one_terminal_object terminal_objects_isomorphic)
(* Proposition 2.1.20 *)
lemma X_is_cart_prod1:
"is_cart_prod X (id X) (\<beta>\<^bsub>X\<^esub>) X one"
unfolding is_cart_prod_def
proof auto
show "id\<^sub>c X : X \<rightarrow> X"
by (simp add: id_type)
next
show "\<beta>\<^bsub>X\<^esub> : X \<rightarrow> one"
by (simp add: terminal_func_type)
next
fix f g Y
assume f_type: "f : Y \<rightarrow> X" and g_type: "g : Y \<rightarrow> one"
then show "\<exists>h. h : Y \<rightarrow> X \<and>
id\<^sub>c X \<circ>\<^sub>c h = f \<and> \<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c h = g \<and> (\<forall>h2. h2 : Y \<rightarrow> X \<and> id\<^sub>c X \<circ>\<^sub>c h2 = f \<and> \<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c h2 = g \<longrightarrow> h2 = h)"
proof (rule_tac x=f in exI, auto)
show "id X \<circ>\<^sub>c f = f"
using cfunc_type_def f_type id_left_unit by auto
show "\<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c f = g"
by (metis comp_type f_type g_type terminal_func_type terminal_func_unique)
show "\<And>h2. h2 : Y \<rightarrow> X \<Longrightarrow> h2 = id\<^sub>c X \<circ>\<^sub>c h2"
using cfunc_type_def id_left_unit by auto
qed
qed
lemma X_is_cart_prod2:
"is_cart_prod X (\<beta>\<^bsub>X\<^esub>) (id X) one X"
unfolding is_cart_prod_def
proof auto
show "id\<^sub>c X : X \<rightarrow> X"
by (simp add: id_type)
next
show "\<beta>\<^bsub>X\<^esub> : X \<rightarrow> one"
by (simp add: terminal_func_type)
next
fix f g Z
assume f_type: "f : Z \<rightarrow> one" and g_type: "g : Z \<rightarrow> X"
then show "\<exists>h. h : Z \<rightarrow> X \<and>
\<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c h = f \<and> id\<^sub>c X \<circ>\<^sub>c h = g \<and> (\<forall>h2. h2 : Z \<rightarrow> X \<and> \<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c h2 = f \<and> id\<^sub>c X \<circ>\<^sub>c h2 = g \<longrightarrow> h2 = h)"
proof (rule_tac x=g in exI, auto)
show "id\<^sub>c X \<circ>\<^sub>c g = g"
using cfunc_type_def g_type id_left_unit by auto
show "\<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c g = f"
by (metis comp_type f_type g_type terminal_func_type terminal_func_unique)
show "\<And>h2. h2 : Z \<rightarrow> X \<Longrightarrow> h2 = id\<^sub>c X \<circ>\<^sub>c h2"
using cfunc_type_def id_left_unit by auto
qed
qed
lemma A_x_one_iso_A:
"X \<times>\<^sub>c one \<cong> X"
by (metis X_is_cart_prod1 canonical_cart_prod_is_cart_prod cart_prods_isomorphic fst_conv is_isomorphic_def snd_conv)
lemma one_x_A_iso_A:
"one \<times>\<^sub>c X \<cong> X"
by (meson A_x_one_iso_A isomorphic_is_transitive product_commutes)
(* Concrete examples of the above isomorphisms *)
lemma left_cart_proj_one_left_inverse:
"\<langle>id X,\<beta>\<^bsub>X\<^esub>\<rangle> \<circ>\<^sub>c left_cart_proj X one = id (X \<times>\<^sub>c one)"
by (typecheck_cfuncs, smt (z3) cfunc_prod_comp cfunc_prod_unique id_left_unit2 id_right_unit2 right_cart_proj_type terminal_func_comp terminal_func_unique)
lemma left_cart_proj_one_right_inverse:
"left_cart_proj X one \<circ>\<^sub>c \<langle>id X,\<beta>\<^bsub>X\<^esub>\<rangle> = id X"
using left_cart_proj_cfunc_prod by (typecheck_cfuncs, blast)
lemma right_cart_proj_one_left_inverse:
"\<langle>\<beta>\<^bsub>X\<^esub>,id X\<rangle> \<circ>\<^sub>c right_cart_proj one X = id (one \<times>\<^sub>c X)"
by (typecheck_cfuncs, smt (z3) cart_prod_decomp cfunc_prod_comp id_left_unit2 id_right_unit2 right_cart_proj_cfunc_prod terminal_func_comp terminal_func_unique)
lemma right_cart_proj_one_right_inverse:
"right_cart_proj one X \<circ>\<^sub>c \<langle>\<beta>\<^bsub>X\<^esub>,id X\<rangle> = id X"
using right_cart_proj_cfunc_prod by (typecheck_cfuncs, blast)
lemma cfunc_cross_prod_right_terminal_decomp:
assumes "f : X \<rightarrow> Y" "x : one \<rightarrow> Z"
shows "f \<times>\<^sub>f x = \<langle>f, x \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle> \<circ>\<^sub>c left_cart_proj X one"
using assms by (typecheck_cfuncs, smt (z3) cfunc_cross_prod_def cfunc_prod_comp cfunc_type_def
comp_associative2 right_cart_proj_type terminal_func_comp terminal_func_unique)
(* Proposition 2.1.21 *)
lemma cart_prod_elem_eq:
assumes "a \<in>\<^sub>c X \<times>\<^sub>c Y" "b \<in>\<^sub>c X \<times>\<^sub>c Y"
shows "a = b \<longleftrightarrow>
(left_cart_proj X Y \<circ>\<^sub>c a = left_cart_proj X Y \<circ>\<^sub>c b
\<and> right_cart_proj X Y \<circ>\<^sub>c a = right_cart_proj X Y \<circ>\<^sub>c b)"
by (metis (full_types) assms cfunc_prod_unique comp_type left_cart_proj_type right_cart_proj_type)
(* Note 2.1.22 *)
lemma element_pair_eq:
assumes "x \<in>\<^sub>c X" "x' \<in>\<^sub>c X" "y \<in>\<^sub>c Y" "y' \<in>\<^sub>c Y"
shows "\<langle>x, y\<rangle> = \<langle>x', y'\<rangle> \<longleftrightarrow> x = x' \<and> y = y'"
by (metis assms left_cart_proj_cfunc_prod right_cart_proj_cfunc_prod)
(* Proposition 2.1.23 *)
lemma nonempty_right_imp_left_proj_epimorphism:
"nonempty Y \<Longrightarrow> epimorphism (left_cart_proj X Y)"
proof -
assume "nonempty Y"
then obtain y where y_in_Y: "y : one \<rightarrow> Y"
using nonempty_def by blast
then have id_eq: "(left_cart_proj X Y) \<circ>\<^sub>c \<langle>id X, y \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle> = id X"
using comp_type id_type left_cart_proj_cfunc_prod terminal_func_type by blast
then show "epimorphism (left_cart_proj X Y)"
unfolding epimorphism_def
proof auto
fix g h
assume domain_g: "domain g = codomain (left_cart_proj X Y)"
assume domain_h: "domain h = codomain (left_cart_proj X Y)"
assume "g \<circ>\<^sub>c left_cart_proj X Y = h \<circ>\<^sub>c left_cart_proj X Y"
then have "g \<circ>\<^sub>c left_cart_proj X Y \<circ>\<^sub>c \<langle>id X, y \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle> = h \<circ>\<^sub>c left_cart_proj X Y \<circ>\<^sub>c \<langle>id X, y \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle>"
using y_in_Y by (typecheck_cfuncs, simp add: cfunc_type_def comp_associative domain_g domain_h)
then show "g = h"
by (metis cfunc_type_def domain_g domain_h id_eq id_right_unit left_cart_proj_type)
qed
qed
(* Companion to Proposition 2.1.23 *)
lemma nonempty_left_imp_right_proj_epimorphism:
"nonempty X \<Longrightarrow> epimorphism (right_cart_proj X Y)"
proof -
assume "nonempty X"
then obtain y where y_in_Y: "y: one \<rightarrow> X"
using nonempty_def by blast
then have id_eq: "(right_cart_proj X Y) \<circ>\<^sub>c \<langle>y \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>, id Y\<rangle> = id Y"
using comp_type id_type right_cart_proj_cfunc_prod terminal_func_type by blast
then show "epimorphism (right_cart_proj X Y)"
unfolding epimorphism_def
proof auto
fix g h
assume domain_g: "domain g = codomain (right_cart_proj X Y)"
assume domain_h: "domain h = codomain (right_cart_proj X Y)"
assume "g \<circ>\<^sub>c right_cart_proj X Y = h \<circ>\<^sub>c right_cart_proj X Y"
then have "g \<circ>\<^sub>c right_cart_proj X Y \<circ>\<^sub>c \<langle>y \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>, id Y\<rangle> = h \<circ>\<^sub>c right_cart_proj X Y \<circ>\<^sub>c \<langle>y \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>, id Y\<rangle>"
using y_in_Y by (typecheck_cfuncs, simp add: cfunc_type_def comp_associative domain_g domain_h)
then show "g = h"
by (metis cfunc_type_def domain_g domain_h id_eq id_right_unit right_cart_proj_type)
qed
qed
lemma prod_with_empty_is_empty1:
assumes "\<not>(nonempty A)"
shows "\<not>(nonempty (A \<times>\<^sub>c B))"
by (meson assms comp_type left_cart_proj_type nonempty_def)
lemma prod_with_empty_is_empty2:
assumes "\<not>(nonempty B)"
shows "\<not>(nonempty (A \<times>\<^sub>c B))"
using assms cart_prod_decomp nonempty_def by blast
(* Definition 2.1.24 *)
definition injective :: "cfunc \<Rightarrow> bool" where
"injective f \<longleftrightarrow> (\<forall> x y. (x \<in>\<^sub>c domain f \<and> y \<in>\<^sub>c domain f \<and> f \<circ>\<^sub>c x = f \<circ>\<^sub>c y) \<longrightarrow> x = y)"
(* Exercise 2.1.26 *)
lemma monomorphism_imp_injective:
"monomorphism f \<Longrightarrow> injective f"
by (simp add: cfunc_type_def injective_def monomorphism_def)
(* Proposition 2.1.27 *)
lemma injective_imp_monomorphism:
assumes "f \<in> ETCS_func"
shows "injective f \<Longrightarrow> monomorphism f"
unfolding monomorphism_def injective_def
proof safe
fix g h
assume f_inj: "\<forall>x y. x \<in>\<^sub>c domain f \<and> y \<in>\<^sub>c domain f \<and> f \<circ>\<^sub>c x = f \<circ>\<^sub>c y \<longrightarrow> x = y"
assume cd_g_eq_d_f: "codomain g = domain f"
assume cd_h_eq_d_f: "codomain h = domain f"
assume fg_eq_fh: "f \<circ>\<^sub>c g = f \<circ>\<^sub>c h"
obtain X Y where f_type: "f : X \<rightarrow> Y"
using assms cfunc_type_def by blast
obtain A where g_type: "g : A \<rightarrow> X" and h_type: "h : A \<rightarrow> X"
by (metis cd_g_eq_d_f cd_h_eq_d_f cfunc_type_def domain_comp f_type fg_eq_fh)
have "(\<forall>x. x \<in>\<^sub>c A \<longrightarrow> g \<circ>\<^sub>c x = h \<circ>\<^sub>c x)"
proof auto
fix x
assume x_in_A: "x \<in>\<^sub>c A"
have "f \<circ>\<^sub>c (g \<circ>\<^sub>c x) = f \<circ>\<^sub>c (h \<circ>\<^sub>c x)"
using g_type h_type x_in_A f_type comp_associative2 fg_eq_fh by (typecheck_cfuncs, auto)
then show "g \<circ>\<^sub>c x = h \<circ>\<^sub>c x"
using cd_h_eq_d_f cfunc_type_def comp_type f_inj g_type h_type x_in_A by presburger
qed
then show "g = h"
using g_type h_type one_separator by auto
qed
(* Definition 2.1.28 *)
definition surjective :: "cfunc \<Rightarrow> bool" where
"surjective f \<longleftrightarrow> (\<forall>y. y \<in>\<^sub>c codomain f \<longrightarrow> (\<exists>x. x \<in>\<^sub>c domain f \<and> f \<circ>\<^sub>c x = y))"
lemma surjective_def2:
assumes "f : X \<rightarrow> Y"
shows "surjective f \<longleftrightarrow> (\<forall>y. y \<in>\<^sub>c Y \<longrightarrow> (\<exists>x. x \<in>\<^sub>c X \<and> f \<circ>\<^sub>c x = y))"
using assms unfolding surjective_def cfunc_type_def by auto
(* Exercise 2.1.30 *)
lemma surjective_is_epimorphism:
"surjective f \<Longrightarrow> epimorphism f"
unfolding surjective_def epimorphism_def
proof (cases "nonempty (codomain f)", auto)
fix g h
assume f_surj: "\<forall>y. y \<in>\<^sub>c codomain f \<longrightarrow> (\<exists>x. x \<in>\<^sub>c domain f \<and> f \<circ>\<^sub>c x = y)"
assume d_g_eq_cd_f: "domain g = codomain f"
assume d_h_eq_cd_f: "domain h = codomain f"
assume gf_eq_hf: "g \<circ>\<^sub>c f = h \<circ>\<^sub>c f"
assume nonempty: "nonempty (codomain f)"
obtain X Y where f_type: "f : X \<rightarrow> Y"
using nonempty cfunc_type_def f_surj nonempty_def by auto
obtain A where g_type: "g : Y \<rightarrow> A" and h_type: "h : Y \<rightarrow> A"
by (metis cfunc_type_def codomain_comp d_g_eq_cd_f d_h_eq_cd_f f_type gf_eq_hf)
show "g = h"
proof (rule ccontr)
assume "g \<noteq> h"
then obtain y where y_in_X: "y \<in>\<^sub>c Y" and gy_neq_hy: "g \<circ>\<^sub>c y \<noteq> h \<circ>\<^sub>c y"
using g_type h_type one_separator by blast
then obtain x where "x \<in>\<^sub>c X" and "f \<circ>\<^sub>c x = y"
using cfunc_type_def f_surj f_type by auto
then have "g \<circ>\<^sub>c f \<noteq> h \<circ>\<^sub>c f"
using comp_associative2 f_type g_type gy_neq_hy h_type by auto
then show False
using gf_eq_hf by auto
qed
next
fix g h
assume empty: "\<not> nonempty (codomain f)"
assume "domain g = codomain f" "domain h = codomain f"
then show "g \<circ>\<^sub>c f = h \<circ>\<^sub>c f \<Longrightarrow> g = h"
by (metis empty cfunc_type_def codomain_comp nonempty_def one_separator)
qed
lemma cart_prod_extract_left:
assumes "f : one \<rightarrow> X" "g : one \<rightarrow> Y"
shows "\<langle>f, g\<rangle> = \<langle>id X, g \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle> \<circ>\<^sub>c f"
proof -
have "\<langle>f, g\<rangle> = \<langle>id X \<circ>\<^sub>c f, g \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub> \<circ>\<^sub>c f\<rangle>"
using assms by (typecheck_cfuncs, metis id_left_unit2 id_right_unit2 id_type one_unique_element)
also have "... = \<langle>id X, g \<circ>\<^sub>c \<beta>\<^bsub>X\<^esub>\<rangle> \<circ>\<^sub>c f"
using assms by (typecheck_cfuncs, simp add: cfunc_prod_comp comp_associative2)
then show ?thesis
using calculation by auto
qed
lemma cart_prod_extract_right:
assumes "f : one \<rightarrow> X" "g : one \<rightarrow> Y"
shows "\<langle>f, g\<rangle> = \<langle>f \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>, id Y\<rangle> \<circ>\<^sub>c g"
proof -
have "\<langle>f, g\<rangle> = \<langle>f \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub> \<circ>\<^sub>c g, id Y \<circ>\<^sub>c g\<rangle>"
using assms by (typecheck_cfuncs, metis id_left_unit2 id_right_unit2 id_type one_unique_element)
also have "... = \<langle>f \<circ>\<^sub>c \<beta>\<^bsub>Y\<^esub>, id Y\<rangle> \<circ>\<^sub>c g"
using assms by (typecheck_cfuncs, simp add: cfunc_prod_comp comp_associative2)
then show ?thesis
using calculation by auto
qed
subsection \<open>More Results on Cartesian Products\<close>
lemma cfunc_cross_prod_surj:
assumes type_assms: "f : A \<rightarrow> C" "g : B \<rightarrow> D"
assumes f_surj: "surjective f" and g_surj: "surjective g"
shows "surjective (f \<times>\<^sub>f g)"
unfolding surjective_def
proof(auto)
fix y
assume y_type: "y \<in>\<^sub>c codomain (f \<times>\<^sub>f g)"
have fg_type: "f \<times>\<^sub>f g: (A \<times>\<^sub>c B) \<rightarrow> (C \<times>\<^sub>c D)"
using assms by typecheck_cfuncs
then have "y \<in>\<^sub>c (C \<times>\<^sub>c D)"
using cfunc_type_def y_type by auto
then have "\<exists> c d. c \<in>\<^sub>c C \<and> d \<in>\<^sub>c D \<and> y = \<langle>c,d\<rangle>"
using cart_prod_decomp by blast
then obtain c d where y_def: "c \<in>\<^sub>c C \<and> d \<in>\<^sub>c D \<and> y = \<langle>c,d\<rangle>"
by blast
then have "\<exists> a b. a \<in>\<^sub>c A \<and> b \<in>\<^sub>c B \<and> f \<circ>\<^sub>c a = c \<and> g \<circ>\<^sub>c b = d"
by (metis cfunc_type_def f_surj g_surj surjective_def type_assms)
then obtain a b where ab_def: "a \<in>\<^sub>c A \<and> b \<in>\<^sub>c B \<and> f \<circ>\<^sub>c a = c \<and> g \<circ>\<^sub>c b = d"
by blast
then obtain x where x_def: "x = \<langle>a,b\<rangle>"
by auto
have x_type: "x \<in>\<^sub>c domain (f \<times>\<^sub>f g)"
using ab_def cfunc_prod_type cfunc_type_def fg_type x_def by auto
have "(f \<times>\<^sub>f g) \<circ>\<^sub>c x = y"
using ab_def cfunc_cross_prod_comp_cfunc_prod type_assms(1) type_assms(2) x_def y_def by blast
then show "\<exists>x. x \<in>\<^sub>c domain (f \<times>\<^sub>f g) \<and> (f \<times>\<^sub>f g) \<circ>\<^sub>c x = y"
using x_type by blast
qed
lemma cfunc_cross_prod_surj_converse:
assumes type_assms: "f : A \<rightarrow> C" "g : B \<rightarrow> D"
assumes "surjective (f \<times>\<^sub>f g)"
shows "(surjective (f)) \<and> (surjective (g))"
unfolding surjective_def
proof(auto)
fix y
assume y_type: "y \<in>\<^sub>c codomain f"
then have y_type2: "y \<in>\<^sub>c C"
using cfunc_type_def type_assms(1) by auto
oops
lemma cfunc_cross_prod_mono_converse:
assumes type_assms: "f : X \<rightarrow> Y" "g : Z \<rightarrow> W"
assumes fg_inject: "injective (f \<times>\<^sub>f g)"
assumes nonempty: "nonempty(X)" "nonempty(Z)"
shows "injective f \<and> injective g"
unfolding injective_def
proof (auto)
fix x y
assume x_type: "x \<in>\<^sub>c domain f"
assume y_type: "y \<in>\<^sub>c domain f"
assume equals: "f \<circ>\<^sub>c x = f \<circ>\<^sub>c y"
have fg_type: "(f \<times>\<^sub>f g) : (X \<times>\<^sub>c Z) \<rightarrow> (Y \<times>\<^sub>c W)"
by (simp add: cfunc_cross_prod_type type_assms)
have x_type2: "x \<in>\<^sub>c X"
using cfunc_type_def type_assms(1) x_type by auto
have y_type2: "y \<in>\<^sub>c X"
using cfunc_type_def type_assms(1) y_type by auto
show "x = y"
proof -
obtain b where b_def: "b \<in>\<^sub>c Z"
using nonempty(2) nonempty_def by blast
have xb_type: "\<langle>x,b\<rangle> \<in>\<^sub>c X \<times>\<^sub>c Z"
by (simp add: b_def cfunc_prod_type x_type2)
have yb_type: "\<langle>y,b\<rangle> \<in>\<^sub>c X \<times>\<^sub>c Z"
by (simp add: b_def cfunc_prod_type y_type2)
have "(f \<times>\<^sub>f g) \<circ>\<^sub>c \<langle>x,b\<rangle> = \<langle>f \<circ>\<^sub>c x,g \<circ>\<^sub>c b\<rangle>"
using b_def cfunc_cross_prod_comp_cfunc_prod type_assms(1) type_assms(2) x_type2 by blast
also have "... = \<langle>f \<circ>\<^sub>c y,g \<circ>\<^sub>c b\<rangle>"
by (simp add: equals)
also have "... = (f \<times>\<^sub>f g) \<circ>\<^sub>c \<langle>y,b\<rangle>"
using b_def cfunc_cross_prod_comp_cfunc_prod type_assms(1) type_assms(2) y_type2 by auto
then have "\<langle>x,b\<rangle> = \<langle>y,b\<rangle>"
by (metis calculation cfunc_type_def fg_inject fg_type injective_def xb_type yb_type)
then show "x = y"
using b_def element_pair_eq x_type2 y_type2 by auto
qed
next
fix x y
assume x_type: "x \<in>\<^sub>c domain g"
assume y_type: "y \<in>\<^sub>c domain g"
assume equals: "g \<circ>\<^sub>c x = g \<circ>\<^sub>c y"
have fg_type: "(f \<times>\<^sub>f g) : (X \<times>\<^sub>c Z) \<rightarrow> (Y \<times>\<^sub>c W)"
by (simp add: cfunc_cross_prod_type type_assms)
have x_type2: "x \<in>\<^sub>c Z"
using cfunc_type_def type_assms(2) x_type by auto
have y_type2: "y \<in>\<^sub>c Z"
using cfunc_type_def type_assms(2) y_type by auto
show "x = y"
proof -
obtain b where b_def: "b \<in>\<^sub>c X"
using nonempty(1) nonempty_def by blast
have xb_type: "\<langle>b,x\<rangle> \<in>\<^sub>c X \<times>\<^sub>c Z"
by (simp add: b_def cfunc_prod_type x_type2)
have yb_type: "\<langle>b,y\<rangle> \<in>\<^sub>c X \<times>\<^sub>c Z"
by (simp add: b_def cfunc_prod_type y_type2)
have "(f \<times>\<^sub>f g) \<circ>\<^sub>c \<langle>b,x\<rangle> = \<langle>f \<circ>\<^sub>c b,g \<circ>\<^sub>c x\<rangle>"
using b_def cfunc_cross_prod_comp_cfunc_prod type_assms(1) type_assms(2) x_type2 by blast
also have "... = \<langle>f \<circ>\<^sub>c b,g \<circ>\<^sub>c x\<rangle>"
by (simp add: equals)
also have "... = (f \<times>\<^sub>f g) \<circ>\<^sub>c \<langle>b,y\<rangle>"
using b_def cfunc_cross_prod_comp_cfunc_prod equals type_assms(1) type_assms(2) y_type2 by auto
then have "\<langle>b,x\<rangle> = \<langle>b,y\<rangle>"
by (metis \<open>(f \<times>\<^sub>f g) \<circ>\<^sub>c \<langle>b,x\<rangle> = \<langle>f \<circ>\<^sub>c b,g \<circ>\<^sub>c x\<rangle>\<close> cfunc_type_def fg_inject fg_type injective_def xb_type yb_type)
then show "x = y"
using b_def element_pair_eq x_type2 y_type2 by auto
qed
qed
(* The next lemma shows that the nonemptiness assumptions above are always
required: unless both domains are nonempty, f \<times>\<^sub>f g is injective no matter
what, so its injectivity tells us nothing about whether f or g are injective. *)
lemma the_nonempty_assumption_above_is_always_required:
assumes "f : X \<rightarrow> Y" "g : Z \<rightarrow> W"
assumes "\<not>(nonempty(X)) \<or> \<not>(nonempty(Z))"
shows "injective (f \<times>\<^sub>f g)"
unfolding injective_def
proof(cases "nonempty(X)", auto)
fix x y
assume nonempty: "nonempty X"
assume x_type: "x \<in>\<^sub>c domain (f \<times>\<^sub>f g)"
assume "y \<in>\<^sub>c domain (f \<times>\<^sub>f g)"
then have "\<not>(nonempty(Z))"
using nonempty assms(3) by blast
have fg_type: "(f \<times>\<^sub>f g) : (X \<times>\<^sub>c Z) \<rightarrow> (Y \<times>\<^sub>c W)"
by (typecheck_cfuncs, simp add: assms(1) assms(2))
then have "x \<in>\<^sub>c (X \<times>\<^sub>c Z)"
using x_type cfunc_type_def by auto
then have "\<exists>z. z\<in>\<^sub>c Z"
using cart_prod_decomp by blast
then have False
using assms(3) nonempty nonempty_def by blast
then show "x=y"
by auto
next
fix x y
assume X_is_empty: "\<not> nonempty X"
assume x_type: "x \<in>\<^sub>c domain (f \<times>\<^sub>f g)"
assume "y \<in>\<^sub>c domain (f \<times>\<^sub>f g)"
have fg_type: "(f \<times>\<^sub>f g) : (X \<times>\<^sub>c Z) \<rightarrow> (Y \<times>\<^sub>c W)"
by (typecheck_cfuncs, simp add: assms(1) assms(2))
then have "x \<in>\<^sub>c (X \<times>\<^sub>c Z)"
using x_type cfunc_type_def by auto
then have "\<exists>z. z\<in>\<^sub>c X"
using cart_prod_decomp by blast
then have False
using assms(3) X_is_empty nonempty_def by blast
then show "x=y"
by auto
qed
lemma nonempty_cfunc_cross_prod_decomp:
assumes f_type: "f : A \<times>\<^sub>c B \<rightarrow> C \<times>\<^sub>c D"
assumes A_nonempty: "a \<in>\<^sub>c A" and B_nonempty: "b \<in>\<^sub>c B"
assumes "\<And> a b1 b2. a \<in>\<^sub>c A \<Longrightarrow> b1 \<in>\<^sub>c B \<Longrightarrow> b2 \<in>\<^sub>c B \<Longrightarrow>
left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a, b1\<rangle> = left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a, b2\<rangle>"
assumes "\<And> a1 a2 b. a1 \<in>\<^sub>c A \<Longrightarrow> a2 \<in>\<^sub>c A \<Longrightarrow> b \<in>\<^sub>c B \<Longrightarrow>
right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a1, b\<rangle> = right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a2, b\<rangle>"
shows "\<exists>! g. \<exists>! h. g : A \<rightarrow> C \<and> h : B \<rightarrow> D \<and> f = g \<times>\<^sub>f h"
proof (rule_tac a="left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id A, b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>" in ex1I)
show "\<exists>!h. left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle> : A \<rightarrow> C \<and>
h : B \<rightarrow> D \<and> f = (left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>) \<times>\<^sub>f h"
proof (rule_tac a="right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle> a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>, id B\<rangle>" in ex1I, auto)
show "left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle> : A \<rightarrow> C"
using assms by typecheck_cfuncs
show "right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>,id\<^sub>c B\<rangle> : B \<rightarrow> D"
using assms by typecheck_cfuncs
show "f = (left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>) \<times>\<^sub>f right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>,id\<^sub>c B\<rangle>"
proof (subst cart_prod_eq[where Z="A \<times>\<^sub>c B", where X=C, where Y=D], auto)
show "f : A \<times>\<^sub>c B \<rightarrow> C \<times>\<^sub>c D"
using f_type by auto
show "(left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>) \<times>\<^sub>f right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>,id\<^sub>c B\<rangle>
: A \<times>\<^sub>c B \<rightarrow> C \<times>\<^sub>c D"
using assms by typecheck_cfuncs
show "left_cart_proj C D \<circ>\<^sub>c f =
left_cart_proj C D \<circ>\<^sub>c
(left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>)
\<times>\<^sub>f right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>,id\<^sub>c B\<rangle>"
(is "left_cart_proj C D \<circ>\<^sub>c f = left_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right)")
proof -
have "left_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right) = ?left \<circ>\<^sub>c left_cart_proj A B"
using assms left_cart_proj_cfunc_cross_prod by (typecheck_cfuncs, blast)
also have "... = left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>left_cart_proj A B,b \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>\<rangle>"
using assms by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2 id_left_unit2 terminal_func_comp)
also have "... = left_cart_proj C D \<circ>\<^sub>c f"
proof (rule one_separator[where X="A \<times>\<^sub>c B", where Y=C])
show "left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>left_cart_proj A B,b \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>\<rangle> : A \<times>\<^sub>c B \<rightarrow> C"
using assms by typecheck_cfuncs
show "left_cart_proj C D \<circ>\<^sub>c f : A \<times>\<^sub>c B \<rightarrow> C"
using assms by typecheck_cfuncs
next
fix x
assume x_type: "x \<in>\<^sub>c A \<times>\<^sub>c B"
then obtain xa xb where xa_xb_types: "xa \<in>\<^sub>c A" "xb \<in>\<^sub>c B" and x_def: "x = \<langle>xa, xb\<rangle>"
using cart_prod_decomp by blast
have "(left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>left_cart_proj A B,b \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>\<rangle>) \<circ>\<^sub>c x
= left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>left_cart_proj A B \<circ>\<^sub>c \<langle>xa, xb\<rangle>, b \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub> \<circ>\<^sub>c \<langle>xa, xb\<rangle>\<rangle>"
unfolding x_def using assms xa_xb_types
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2)
also have "... = left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>xa, b\<rangle>"
using assms xa_xb_types
by (typecheck_cfuncs, metis id_right_unit2 id_type left_cart_proj_cfunc_prod one_unique_element)
also have "... = left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c x"
unfolding x_def by (simp add: B_nonempty assms(4) xa_xb_types)
also have "... = (left_cart_proj C D \<circ>\<^sub>c f) \<circ>\<^sub>c x"
using assms x_type by (typecheck_cfuncs, metis comp_associative2)
then show "(left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>left_cart_proj A B,b \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>\<rangle>) \<circ>\<^sub>c x = (left_cart_proj C D \<circ>\<^sub>c f) \<circ>\<^sub>c x"
using calculation by auto
qed
then show "left_cart_proj C D \<circ>\<^sub>c f = left_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right)"
using calculation by auto
qed
show "right_cart_proj C D \<circ>\<^sub>c f =
right_cart_proj C D \<circ>\<^sub>c
(left_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>id\<^sub>c A,b \<circ>\<^sub>c \<beta>\<^bsub>A\<^esub>\<rangle>)
\<times>\<^sub>f right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>B\<^esub>,id\<^sub>c B\<rangle>"
(is "right_cart_proj C D \<circ>\<^sub>c f = right_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right)")
proof -
have "right_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right) = ?right \<circ>\<^sub>c right_cart_proj A B"
using assms right_cart_proj_cfunc_cross_prod by (typecheck_cfuncs, blast)
also have "... = right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>, right_cart_proj A B\<rangle>"
using assms by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2 id_left_unit2 terminal_func_comp)
also have "... = right_cart_proj C D \<circ>\<^sub>c f"
proof (rule one_separator[where X="A \<times>\<^sub>c B", where Y=D])
show "right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>,right_cart_proj A B\<rangle> : A \<times>\<^sub>c B \<rightarrow> D"
using assms by typecheck_cfuncs
show "right_cart_proj C D \<circ>\<^sub>c f : A \<times>\<^sub>c B \<rightarrow> D"
using assms by typecheck_cfuncs
next
fix x
assume x_type: "x \<in>\<^sub>c A \<times>\<^sub>c B"
then obtain xa xb where xa_xb_types: "xa \<in>\<^sub>c A" "xb \<in>\<^sub>c B" and x_def: "x = \<langle>xa, xb\<rangle>"
using cart_prod_decomp by blast
have "(right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>,right_cart_proj A B\<rangle>) \<circ>\<^sub>c x
= right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub> \<circ>\<^sub>c \<langle>xa, xb\<rangle>, right_cart_proj A B \<circ>\<^sub>c \<langle>xa, xb\<rangle>\<rangle>"
unfolding x_def using assms xa_xb_types
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2)
also have "... = right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a, xb\<rangle>"
using assms xa_xb_types
by (typecheck_cfuncs, metis id_right_unit2 id_type right_cart_proj_cfunc_prod one_unique_element)
also have "... = right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c x"
unfolding x_def by (simp add: A_nonempty assms(5) xa_xb_types)
also have "... = (right_cart_proj C D \<circ>\<^sub>c f) \<circ>\<^sub>c x"
using assms x_type by (typecheck_cfuncs, metis comp_associative2)
then show "(right_cart_proj C D \<circ>\<^sub>c f \<circ>\<^sub>c \<langle>a \<circ>\<^sub>c \<beta>\<^bsub>A \<times>\<^sub>c B\<^esub>,right_cart_proj A B\<rangle>) \<circ>\<^sub>c x = (right_cart_proj C D \<circ>\<^sub>c f) \<circ>\<^sub>c x"
using calculation by auto
qed
then show "right_cart_proj C D \<circ>\<^sub>c f = right_cart_proj C D \<circ>\<^sub>c (?left \<times>\<^sub>f ?right)"
using calculation by auto
qed
qed
next
fix x
oops
end |
Hey!!! Great tune! I liked it and voted for it!
Also check out our latest track "Show Goes on", it's a banger! And feel free to vote and follow us!
Thank you very much for giving feedback on my music!
Could you please vote for my track "dead's are alive"?
Supported! I may play this at the club next weekend! Can you vote back on my future house track "Anomaly"?
OMG! It's one of the most amazing and attractive tracks I have ever heard; I appreciate the hard work behind this music. Your track deserves track of the month, and of the week as well. Voted and supported your track. |
-- @@stderr --
dtrace: failed to compile script test/unittest/decls/err.D_DECL_VOIDATTR.ShortVoidDecl.d: [D_DECL_VOIDATTR] line 18: invalid type declaration: attributes may not be used with void type
|
[STATEMENT]
lemma infinite_part2: "infinite V \<Longrightarrow> infinite (part2 V)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. infinite V \<Longrightarrow> infinite (part2 V)
[PROOF STEP]
using part12
[PROOF STATE]
proof (prove)
using this:
infinite ?V \<Longrightarrow> part12_pred ?V (part12 ?V)
goal (1 subgoal):
1. infinite V \<Longrightarrow> infinite (part2 V)
[PROOF STEP]
unfolding part2_def part12_pred_def
[PROOF STATE]
proof (prove)
using this:
infinite ?V \<Longrightarrow> ?V = fst (part12 ?V) \<union> snd (part12 ?V) \<and> fst (part12 ?V) \<inter> snd (part12 ?V) = {} \<and> infinite (fst (part12 ?V)) \<and> infinite (snd (part12 ?V))
goal (1 subgoal):
1. infinite V \<Longrightarrow> infinite ((snd \<circ> part12) V)
[PROOF STEP]
by auto |
function [alt, lat] = geodet1 (rmag, dec)
% geodetic latitude and altitude
% series solution
% input
% rmag = geocentric radius (kilometers)
% dec = geocentric declination (radians)
% (+north, -south; -pi/2 <= dec <= +pi/2)
% output
% alt = geodetic altitude (kilometers)
% lat = geodetic latitude (radians)
% (+north, -south; -pi/2 <= lat <= +pi/2)
% global constants
% req = equatorial radius (kilometers)
% flat = flattening factor (non-dimensional)
% reference: NASA TN D-7522
% Orbital Mechanics with Matlab
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global req flat
n = req / rmag;
o = flat * flat;
a = 2 * dec;
p = sin(a);
q = cos(a);
a = 4 * dec;
r = sin(a);
s = cos(a);
% geodetic latitude (radians)
lat = dec + flat * n * p + o * n * r * (n - 0.25);
% geodetic altitude (kilometers)
alt = rmag + req * (flat * 0.5 * (1.0 - q) + o * (0.25 * n - 0.0625) * (1.0 - s) - 1.0);
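% example usage (a sketch with hypothetical values; req and flat must be
% initialized elsewhere, e.g. with WGS-84-like constants):
%
%   global req flat
%   req = 6378.1363;         % equatorial radius (kilometers)
%   flat = 1.0 / 298.257;    % flattening factor (non-dimensional)
%   [alt, lat] = geodet1(6778.14, 0.35);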
|
Phone Number: (530)5750018
Office: Tangles Studio Davis 515 G Street
Website: http://www.tanglesstudiodavis.com
Online Booking: http://tanglesstudio.fullslate.com
John opened Tangles Studio in Davis on March 1, 2013, purposefully downsizing to a quiet and intimate atmosphere conducive to conversation and relaxation. The studio is warm and friendly, inviting you to come in and share a cup of coffee or glass of wine.
|
-- Andreas, 2016-05-13 Issue 1973 reported by Nisse
-- Problems with parameters to overloaded projections
-- {-# OPTIONS -v tc.proj.amb:100 #-}
-- {-# OPTIONS -v tc.deftype:100 #-}
record R₁ : Set₁ where
field
f : Set
open R₁ public
postulate
F : ∀ {a} → Set a → Set a
module M (_ : Set₁) where
record R₂ a (G : Set a → Set a) (A : Set a) : Set a where
field
f : G A
open R₂ public
open module N = M Set using (f)
works : ∀ a (A : Set a) → N.R₂ a F A → F A
works a A x = N.R₂.f x
-- WAS:
-- a F !=< F A of type Set
-- when checking that the expression f x has type F A
ill-formed-term-in-error-message : ∀ a (A : Set a) → N.R₂ a F A → F A
ill-formed-term-in-error-message a A x = f x
-- What Agda did here is to copy parameters from the reduced record type
-- M.R₂ Set a F A
-- to the unreduced projection
-- N.R₂.f
-- The number of projections (3) was queried from N.R₂.f, but the projections
-- were taken from M.R₂ Set a F A.
-- Now, take the original version of the projection,
-- M.R₂.f
-- which accepts 4 parameters, and these are the ones provided by M.R₂ Set a F A.
-- WAS:
-- An internal error has occurred. Please report this as a bug.
-- Location of the error: src/full/Agda/TypeChecking/Substitute.hs:93
internal-error : (A : Set) → N.R₂ _ F A → F A
internal-error A x = f x
-- should work now.
|
section \<open>Monotonicity theorem\<close>
theory CHERI_Monotonicity
imports
"Sail-Morello.Morello_lemmas"
CHERI_Instantiation
CHERI_Cap_Properties
CHERI_Mem_Properties
CHERI_Fetch_Properties
CHERI_Invariant
"Sail-T-CHERI.Trace_Assumptions"
"Sail-T-CHERI.Properties"
begin
locale Morello_Trace_Automaton = Morello_Fixed_Address_Translation + fixes t :: "register_value trace"
locale Morello_Instr_Trace_Automaton = Morello_Trace_Automaton + fixes instr :: instr
locale Morello_Instr_Trace_Write_Cap_Automaton =
Morello_Instr_Trace_Automaton + Morello_Instr_Write_Cap_Automaton
where ex_traces = "instr_raises_ex instr t"
and invoked_caps = "trace_invokes_caps t"
and invoked_regs = "trace_invokes_regs t"
and invoked_indirect_caps = "invokes_indirect_caps instr t"
and invoked_indirect_regs = "trace_invokes_indirect_regs t"
and load_auths = "trace_load_auths t"
and load_caps_permitted = "uses_mem_caps instr t"
and is_indirect_branch = "trace_is_indirect_branch t"
and no_system_reg_access = "\<not>trace_has_system_reg_access t"
and is_in_c64 = "trace_is_in_c64 t"
and translate_address = "\<lambda>addr _ _. translate_address addr"
begin
abbreviation "instr_trace_assms \<equiv> trace_assms initial t \<and> \<not>trace_has_system_reg_access t"
lemma instr_exp_assms_instr_semI:
assumes "hasTrace t (instr_sem instr)"
shows "instr_exp_assms (instr_sem instr)"
using hasTrace_determ_instrs_eqs[OF assms determ_instrs_instr_sem]
unfolding instr_exp_assms_def invocation_instr_exp_assms_def load_instr_exp_assms_def
by auto
end
locale Morello_Instr_Trace_Mem_Automaton =
Morello_Instr_Trace_Automaton + Morello_Instr_Mem_Automaton
where ex_traces = "instr_raises_ex instr t"
and invoked_caps = "trace_invokes_caps t"
and invoked_regs = "trace_invokes_regs t"
and invoked_indirect_caps = "invokes_indirect_caps instr t"
and invoked_indirect_regs = "trace_invokes_indirect_regs t"
and load_auths = "trace_load_auths t"
and load_caps_permitted = "uses_mem_caps instr t"
and is_indirect_branch = "trace_is_indirect_branch t"
and no_system_reg_access = "\<not>trace_has_system_reg_access t"
and is_in_c64 = "trace_is_in_c64 t"
locale Morello_Fetch_Trace_Write_Cap_Automaton =
Morello_Trace_Automaton + Morello_Fetch_Write_Cap_Automaton
where ex_traces = "fetch_raises_ex t"
and invoked_caps = "{}"
and invoked_regs = "{}"
and invoked_indirect_caps = "{}"
and invoked_indirect_regs = "{}"
and load_auths = "{}"
and load_caps_permitted = "True"
and is_indirect_branch = "False"
and no_system_reg_access = "\<not>trace_has_system_reg_access t"
and is_in_c64 = "trace_is_in_c64 t"
and translate_address = "\<lambda>addr _ _. translate_address addr"
begin
abbreviation "fetch_trace_assms \<equiv> trace_assms initial t \<and> \<not>trace_has_system_reg_access t"
end
locale Morello_Fetch_Trace_Mem_Automaton =
Morello_Trace_Automaton + Morello_Fetch_Mem_Automaton
where ex_traces = "fetch_raises_ex t"
and invoked_caps = "{}"
and invoked_regs = "{}"
and invoked_indirect_caps = "{}"
and invoked_indirect_regs = "{}"
and load_auths = "{}"
and load_caps_permitted = "True"
and is_indirect_branch = "False"
and no_system_reg_access = "\<not>trace_has_system_reg_access t"
and is_in_c64 = "trace_is_in_c64 t"
context Morello_Fixed_Address_Translation
begin
abbreviation "s_read_from reg s \<equiv> read_from reg (regstate s)"
abbreviation "pcc_not_sealed s \<equiv> (let pcc = s_read_from PCC_ref s in CapIsTagSet pcc \<longrightarrow> \<not>CapIsSealed pcc)"
abbreviation "pcc_tagged s \<equiv> CapIsTagSet (s_read_from PCC_ref s)"
abbreviation "non_debug_state s \<equiv> ((ucast (s_read_from EDSCR_ref s) :: 6 word) = 2) \<and> (s_read_from DBGEN_ref s = LOW)"
abbreviation "cache_line_size_64 s \<equiv> ((ucast (s_read_from DCZID_EL0_ref s) :: 4 word) = 4)"
definition "fetch_state_assms s \<equiv> pcc_not_sealed s \<and> non_debug_state s \<and> cache_line_size_64 s"
definition "instr_state_assms _ s \<equiv> fetch_state_assms s \<and> pcc_tagged s"
text \<open>TODO: Show that the trace assumptions (apart from the translation and UNKNOWN cap ones) are
implied by the state assumptions and reduce the following to the remaining trace assumptions.\<close>
abbreviation "instr_trace_assms instr t \<equiv> Morello_Instr_Trace_Write_Cap_Automaton.instr_trace_assms translate_address is_translation_event translation_assms UNKNOWN_caps t \<and> wellformed_trace t"
abbreviation "fetch_trace_assms t \<equiv> Morello_Fetch_Trace_Write_Cap_Automaton.fetch_trace_assms translate_address is_translation_event translation_assms UNKNOWN_caps t \<and> wellformed_trace t"
abbreviation "s_translate_address addr acctype s \<equiv> translate_address addr"
sublocale CHERI_ISA_State CC ISA cap_invariant UNKNOWN_caps fetch_trace_assms fetch_state_assms instr_trace_assms instr_state_assms get_regval set_regval s_translate_address
proof
fix t :: "register_value trace" and instr :: instr and n :: nat
interpret Write_Cap: Morello_Instr_Trace_Write_Cap_Automaton where instr = instr and t = t
..
assume t: "hasTrace t (instr_sem_ISA instr)"
and inv: "instr_available_caps_invariant instr t n"
and ia: "instr_trace_assms instr t"
and n: "n \<le> length t"
from t have iea: "Write_Cap.instr_exp_assms (instr_sem instr)"
by (intro Write_Cap.instr_exp_assms_instr_semI) simp
from ia have no_asr: "\<not>trace_has_system_reg_access t"
by simp
have *: "Write_Cap.traces_enabled (instr_sem instr) Write_Cap.initial"
using iea[unfolded Write_Cap.instr_exp_assms_instr_sem_iff] no_asr
unfolding instr_sem_def
by (intro Write_Cap.traces_enabledI) auto
interpret Mem: Morello_Instr_Trace_Mem_Automaton where instr = instr and t = t
..
have **: "Mem.traces_enabled (instr_sem instr) Mem.initial"
using iea[unfolded Write_Cap.instr_exp_assms_instr_sem_iff] no_asr
unfolding instr_sem_def
by (intro Mem.traces_enabledI) auto
show "instr_cheri_axioms instr t n"
using * ** t inv ia n
unfolding cheri_axioms_def ISA_simps
by (intro conjI; elim Write_Cap.traces_enabled_reg_axioms Mem.traces_enabled_mem_axioms)
(auto simp: instr_raises_ex_def Write_Cap.trace_raises_isa_exception_def
elim: is_isa_exception.elims intro: Write_Cap.holds_along_trace_take)
next
fix t :: "register_value trace" and n :: nat
interpret Write_Cap: Morello_Fetch_Trace_Write_Cap_Automaton where t = t
..
assume t: "hasTrace t (isa.instr_fetch ISA)"
and inv: "fetch_available_caps_invariant t n"
and ia: "fetch_trace_assms t"
and n: "n \<le> length t"
from ia have no_asr: "\<not>trace_has_system_reg_access t"
by simp
have *: "Write_Cap.traces_enabled (instr_fetch) Write_Cap.initial"
using no_asr
unfolding instr_fetch_def bind_assoc
by (intro Write_Cap.traces_enabledI Write_Cap.accessible_regs_no_writes_run_subset) auto
interpret Mem: Morello_Fetch_Trace_Mem_Automaton where t = t
..
have **: "Mem.traces_enabled (instr_fetch) Mem.initial"
unfolding instr_fetch_def bind_assoc
by (intro Mem.traces_enabledI Mem.accessible_regs_no_writes_run_subset) auto
show "fetch_cheri_axioms t n"
using * ** t inv ia n
unfolding cheri_axioms_def ISA_simps
by (intro conjI; elim Write_Cap.traces_enabled_reg_axioms Mem.traces_enabled_mem_axioms)
(auto simp: fetch_raises_ex_def Write_Cap.trace_raises_isa_exception_def
elim: is_isa_exception.elims intro: Write_Cap.holds_along_trace_take)
qed auto
abbreviation "unknown_caps_of_trace t \<equiv> {c. E_choose ''UNKNOWN_Capability'' (Regval_bitvector_129_dec c) \<in> set t}"
abbreviation "unknown_caps_reachable t s \<equiv> unknown_caps_of_trace t \<subseteq> UNKNOWN_caps \<and> UNKNOWN_caps \<subseteq> reachable_caps s"
lemma fetch_state_assms_iff_invs:
"fetch_state_assms s \<longleftrightarrow> cheri_invariant s"
unfolding fetch_state_assms_def
by (auto simp: register_defs cheri_invariant_defs reg_inv_def)
theorem morello_monotonicity:
assumes "hasTrace t (fetch_execute_loop ISA n)"
and "s_run_trace t s = Some s'"
and "\<forall>c\<in>reachable_caps s. is_tagged_method CC c \<longrightarrow> cap_invariant c"
and "\<not>instrs_raise_ex ISA n t"
and "instrs_invoke_caps ISA n t \<union> instrs_invoke_indirect_caps ISA n t \<subseteq> reachable_caps s"
and "\<not>system_access_reachable s"
and "translation_assms_trace t"
and "unknown_caps_reachable t s"
and "pcc_not_sealed s"
and "non_debug_state s"
and "cache_line_size_64 s" \<comment> \<open>Fixed in Morello, but configurable in ASL\<close>
and "instrs_trace_assms n t" \<comment> \<open>TODO: Show that the above assumptions imply this\<close>
shows "reachable_caps s' \<subseteq> reachable_caps s"
proof (rule reachable_caps_instrs_trace_intradomain_monotonicity[OF assms(1)])
show "instrs_preserve_state_assms s"
proof (unfold instrs_preserve_state_assms_plus_def, intro allI impI, elim conjE)
fix ti instr si si'
assume ti: "Run (instr_sem_ISA instr) ti ()" "s_run_trace ti si = Some si'"
and si: "instr_state_assms instr si"
have "runs_preserve_invariant (liftS (instr_sem instr)) fetch_state_assms"
unfolding fetch_state_assms_iff_invs instr_sem_def liftState_simp comp_def
by (preserves_invariantI)
then show "fetch_state_assms si'"
using Run_runTraceS_Value_liftState[OF ti] si
by (auto simp: instr_state_assms_def elim: PrePostE_elim)
qed
show "fetch_preserves_state_assms s"
proof (unfold fetch_preserves_state_assms_plus_def, intro allI impI, elim conjE)
fix tf instr sf sf'
assume tf: "Run (isa.instr_fetch ISA) tf instr" "s_run_trace tf sf = Some sf'"
and sf: "fetch_state_assms sf"
have "runs_preserve_invariant (liftS instr_fetch) fetch_state_assms"
unfolding fetch_state_assms_iff_invs instr_fetch_def liftState_simp comp_def
by (preserves_invariantI)
moreover have "runs_establish_invariant (liftS instr_fetch) pcc_tagged"
by (rule instr_fetch_establishes_PCC_tagged[THEN PrePostE_weaken_post])
(auto simp: register_defs PCC_tagged_def reg_inv_def)
ultimately show "instr_state_assms instr sf'"
using Run_runTraceS_Value_liftState[OF tf] sf
by (auto simp: instr_state_assms_def elim: PrePostE_elim)
qed
show "s_invariant_holds (addr_trans_invariant False s) t s"
\<comment> \<open>Holds trivially because of the @{locale Morello_Fixed_Address_Translation} setup; the real proof
obligations about the actual address translation will show up when instantiating that locale.\<close>
by (rule take_s_invariant_holdsI[OF \<open>s_run_trace t s = Some s'\<close>])
(auto simp: addr_trans_invariant_plus_def)
qed (use assms in \<open>auto simp: fetch_state_assms_def\<close>)
end
end
|
{-
This second-order signature was created from the following second-order syntax description:
syntax UTLC | Λ
type
* : 0-ary
term
app : * * -> * | _$_ l20
lam : *.* -> * | ƛ_ r10
theory
(ƛβ) b : *.* a : * |> app (lam (x.b[x]), a) = b[a]
(ƛη) f : * |> lam (x.app (f, x)) = f
(lβ) b : *.* a : * |> letd (a, x. b) = b[a]
-}
module UTLC.Signature where
open import SOAS.Context
open import SOAS.Common
open import SOAS.Syntax.Signature *T public
open import SOAS.Syntax.Build *T public
-- Operator symbols
data Λₒ : Set where
appₒ lamₒ : Λₒ
-- Term signature
Λ:Sig : Signature Λₒ
Λ:Sig = sig λ
{ appₒ → (⊢₀ *) , (⊢₀ *) ⟼₂ *
; lamₒ → (* ⊢₁ *) ⟼₁ *
}
open Signature Λ:Sig public
|
Formal statement is: lemma AE_I': "N \<in> null_sets M \<Longrightarrow> {x\<in>space M. \<not> P x} \<subseteq> N \<Longrightarrow> (AE x in M. P x)" Informal statement is: If $N$ is a null set and $\{x \in X : \lnot P(x)\} \subseteq N$, then $P$ holds almost everywhere. |
-- @@stderr --
dtrace: failed to compile script test/unittest/providers/err.D_PDESC_INVAL.wrongdec4.d: [D_PDESC_INVAL] line 26: invalid probe description "profile::::tick-1sec": Overspecified probe description
|
-- The contraposition principle
-- ============================
import tactic
variables (P Q : Prop)
open_locale classical
-- ----------------------------------------------------
-- Exercise. Prove the contraposition principle
-- (P → Q) ↔ (¬Q → ¬P)
-- ----------------------------------------------------
-- Proof 1
example :
(P → Q) ↔ (¬Q → ¬P) :=
begin
split,
{ intros hPQ hnQ hP,
apply hnQ,
apply hPQ,
exact hP,},
{ intros hQP hP,
by_contradiction hnQ,
apply absurd hP,
apply hQP,
exact hnQ, },
end
-- Proof 2
example :
(P → Q) ↔ (¬Q → ¬P) :=
begin
split,
{ intros hPQ hnQ hP,
exact hnQ (hPQ hP),},
{ intros hQP hP,
by_contradiction hnQ,
exact absurd hP (hQP hnQ), },
end
-- Proof 3
example :
(P → Q) ↔ (¬Q → ¬P) :=
begin
split,
{ exact λ hPQ hnQ hP, hnQ (hPQ hP), },
{ exact λ hQP hP, by_contradiction (λ hnQ , absurd hP (hQP hnQ)), },
end
-- Proof 4
example :
(P → Q) ↔ (¬Q → ¬P) :=
⟨λ hPQ hnQ hP, hnQ (hPQ hP),
λ hQP hP, by_contradiction (λ hnQ , absurd hP (hQP hnQ))⟩
-- Proof 5
example :
(P → Q) ↔ (¬Q → ¬P) :=
begin
split,
{ intros h1 h2 h3,
apply h2,
exact h1 h3, },
{ intro h4,
contrapose,
exact h4, },
end
-- Proof 6
example :
(P → Q) ↔ (¬Q → ¬P) :=
iff.intro
( assume hPQ : P → Q,
assume hnQ : ¬Q,
assume hP : P,
have hQ : Q,
from hPQ hP,
show false,
from hnQ hQ )
( assume hQP : (¬Q → ¬P),
assume hP : P,
show Q, from
by_contradiction
( assume hnQ : ¬Q,
have hnP : ¬P,
from hQP hnQ,
show false,
from hnP hP))
-- Proof 7
example :
(P → Q) ↔ (¬Q → ¬P) :=
iff.intro
(λ hPQ hnQ hP, hnQ (hPQ hP))
(λ hQP hP, by_contradiction (λ hnQ, (hQP hnQ) hP))
-- Proof 8
example :
(P → Q) ↔ (¬Q → ¬P) :=
-- by library_search
not_imp_not.symm
-- Proof 9
example :
(P → Q) ↔ (¬Q → ¬P) :=
-- by hint
by tauto
-- Proof 10
example :
(P → Q) ↔ (¬Q → ¬P) :=
by finish
|
State Before: α : Type u_2
β : Type u_1
γ : Type ?u.832965
δ : Type ?u.832968
m : MeasurableSpace α
μ ν : Measure α
inst✝² : MeasurableSpace δ
inst✝¹ : NormedAddCommGroup β
inst✝ : NormedAddCommGroup γ
c : β
⊢ (Integrable fun x => c) ↔ c = 0 ∨ ↑↑μ univ < ⊤ State After: α : Type u_2
β : Type u_1
γ : Type ?u.832965
δ : Type ?u.832968
m : MeasurableSpace α
μ ν : Measure α
inst✝² : MeasurableSpace δ
inst✝¹ : NormedAddCommGroup β
inst✝ : NormedAddCommGroup γ
c : β
this : AEStronglyMeasurable (fun x => c) μ
⊢ (Integrable fun x => c) ↔ c = 0 ∨ ↑↑μ univ < ⊤ Tactic: have : AEStronglyMeasurable (fun _ : α => c) μ := aestronglyMeasurable_const State Before: α : Type u_2
β : Type u_1
γ : Type ?u.832965
δ : Type ?u.832968
m : MeasurableSpace α
μ ν : Measure α
inst✝² : MeasurableSpace δ
inst✝¹ : NormedAddCommGroup β
inst✝ : NormedAddCommGroup γ
c : β
this : AEStronglyMeasurable (fun x => c) μ
⊢ (Integrable fun x => c) ↔ c = 0 ∨ ↑↑μ univ < ⊤ State After: no goals Tactic: rw [Integrable, and_iff_right this, hasFiniteIntegral_const_iff] |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: lda_exc *)
AA := -0.655868:
BB := 4.888270:
CC := 3.177037:
DD := 0.897889:
phi := z -> 1/2*(opz_pow_n(z,2/3) + opz_pow_n(-z,2/3)):
f_rc04 := (rs, zeta) -> phi(zeta)^3 * (AA*arctan(BB + CC*rs) + DD)/rs:
f := (rs, zeta) -> f_rc04(rs, zeta):
|
-- {-# OPTIONS -v tc.polarity:10 -v tc.conv.elim:25 #-}
module Issue755 where
open import Common.Prelude renaming (Nat to ℕ)
open import Common.Equality
abstract
foo : Bool → ℕ → ℕ
foo true x = 0
foo false x = 0
-- should work
works : ∀{b} → foo b 0 ≡ foo b 1 → foo b 0 ≡ foo b 1
works refl = refl
-- should fail
test : ∀{b} → foo b 0 ≡ foo b 1 → foo b 0 ≡ foo b 1
test refl = refl
-- 0 != 1 of type ℕ
-- when checking that the pattern refl has type foo 0 ≡ foo 1
|
theory T102
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace β
inst✝¹ : AddZeroClass β
inst✝ : ContinuousAdd β
l : Filter α
f g : α → β
hf : ZeroAtFilter l f
hg : ZeroAtFilter l g
⊢ ZeroAtFilter l (f + g)
[PROOFSTEP]
simpa using hf.add hg
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace β
inst✝¹ : AddGroup β
inst✝ : ContinuousNeg β
l : Filter α
f : α → β
hf : ZeroAtFilter l f
⊢ ZeroAtFilter l (-f)
[PROOFSTEP]
simpa using hf.neg
[GOAL]
α : Type u_1
β : Type u_2
𝕜 : Type u_3
inst✝⁵ : TopologicalSpace 𝕜
inst✝⁴ : TopologicalSpace β
inst✝³ : Zero 𝕜
inst✝² : Zero β
inst✝¹ : SMulWithZero 𝕜 β
inst✝ : ContinuousSMul 𝕜 β
l : Filter α
f : α → β
c : 𝕜
hf : ZeroAtFilter l f
⊢ ZeroAtFilter l (c • f)
[PROOFSTEP]
simpa using hf.const_smul c
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedAddCommGroup β
l : Filter α
f : α → β
hf : ZeroAtFilter l f
⊢ BoundedAtFilter l f
[PROOFSTEP]
rw [ZeroAtFilter, ← Asymptotics.isLittleO_const_iff (one_ne_zero' ℝ)] at hf
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedAddCommGroup β
l : Filter α
f : α → β
hf : f =o[l] fun _x => 1
⊢ BoundedAtFilter l f
[PROOFSTEP]
exact hf.isBigO
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedAddCommGroup β
l : Filter α
f g : α → β
hf : BoundedAtFilter l f
hg : BoundedAtFilter l g
⊢ BoundedAtFilter l (f + g)
[PROOFSTEP]
simpa using hf.add hg
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
f g : α → β
hf : BoundedAtFilter l f
hg : BoundedAtFilter l g
⊢ BoundedAtFilter l (f * g)
[PROOFSTEP]
refine' (hf.mul hg).trans _
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
f g : α → β
hf : BoundedAtFilter l f
hg : BoundedAtFilter l g
⊢ (fun x => OfNat.ofNat 1 x * OfNat.ofNat 1 x) =O[l] 1
[PROOFSTEP]
convert Asymptotics.isBigO_refl (E := ℝ) _ l
[GOAL]
case h.e'_8.h
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
f g : α → β
hf : BoundedAtFilter l f
hg : BoundedAtFilter l g
x✝ : α
⊢ OfNat.ofNat 1 x✝ = OfNat.ofNat 1 x✝ * OfNat.ofNat 1 x✝
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
⊢ Subalgebra β (α → β)
[PROOFSTEP]
refine' Submodule.toSubalgebra (boundedFilterSubmodule l) _ fun f g hf hg ↦ _
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
⊢ 1 ∈ boundedFilterSubmodule l
[PROOFSTEP]
exact const_boundedAtFilter l (1 : β)
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
inst✝ : NormedField β
l : Filter α
f g : α → β
hf : f ∈ boundedFilterSubmodule l
hg : g ∈ boundedFilterSubmodule l
⊢ f * g ∈ boundedFilterSubmodule l
[PROOFSTEP]
simpa only [Pi.one_apply, mul_one, norm_mul] using hf.mul hg
|
# Approximate Inference
For many interesting models (e.g. neural networks) the evidence
$$
p(\mathcal{D}|\mathcal{M}_i) = \int p(\mathcal{D}|\mathcal{M}_i, \theta) p(\theta| \mathcal{M}_i) d\theta
$$
is intractable, *i.e.* either the integral has no closed form or the dimensionality is so large that numerical integration is not feasible
> If the evidence is intractable then the posterior is also intractable
In these cases we resort to approximations
- **Stochastic approximation:** For example Markov Chain Monte Carlo (MCMC). MCMC is computationally demanding (for complex models) but produces asymptotically exact samples from the intractable distribution
- **Deterministic approximation:** For example Variational Inference (VI). VI is more efficient than MCMC, but it is not asymptotically exact. Instead of samples we get a direct approximation of the intractable distribution
The main topic of this lecture is deterministic approximations
## The Laplace Approximation
In Bayesian statistics, the Laplace approximation refers to the application of [Laplace](https://en.wikipedia.org/wiki/Pierre-Simon_Laplace)'s method to approximate an intractable integral (evidence) using a Gaussian distribution
In particular, Laplace's method is a technique to solve integrals of the form
$$
f = \int e^{g(\theta)} d\theta,
$$
by defining the auxiliary function as
$$
\begin{split}
g(\theta) &= \log p(\mathcal{D}| \theta) p(\theta) \\
&= \log p(\mathcal{D}| \theta) + \log p(\theta) \\
&= \sum_{i=1}^N \log p(x_i |\theta) + \log p(\theta)
\end{split}
$$
then $f$ equals the evidence.
The "approximation" consists of performing a second order Taylor expansion of $g(\theta)$ around $\theta= \hat \theta_{\text{map}}$, i.e. the MAP solution. The result of this is
$$
g(\theta) \approx g(\hat \theta_{\text{map}}) - \frac{1}{2} (\theta - \hat \theta_{\text{map}})^T \Lambda (\theta - \hat \theta_{\text{map}})
$$
where
$$
\Lambda = -\frac{d^2 g}{d\theta^2} (\hat \theta_{\text{map}}),
$$
is the negative Hessian evaluated at $\hat \theta_{\text{map}}$
:::{note}
By definition the first derivative of $g(\theta)$ evaluated at $\hat \theta_{\text{map}}$ is zero
:::
If we plug the Gaussian approximation back into the evidence we can now solve the integral as
$$
\begin{split}
p(\mathcal{D}) &\approx e^{g(\hat \theta_{\text{map}})} \int e^{-\frac{1}{2} (\theta - \hat \theta_{\text{map}})^T \Lambda (\theta - \hat \theta_{\text{map}})} d\theta \\
&= e^{g(\hat \theta_{\text{map}})} (2\pi)^{K/2} |\Lambda|^{-1/2}
\end{split}
$$
where $K$ is the dimensionality of $\theta$. With this the posterior is
$$
\begin{split}
p(\theta| \mathcal{D}) &= \frac{p(\mathcal{D}|\theta) p(\theta) }{p(\mathcal{D})} = \frac{e^{ g(\theta)}}{\int e^{ g(\theta)} d\theta} = \\
&\approx \frac{1}{(2\pi)^{K/2} |\Lambda|^{-1/2}} e^{- \frac{1}{2} (\theta - \hat \theta_{\text{map}})^T \Lambda (\theta - \hat \theta_{\text{map}})}
\end{split}
$$
:::{important}
Laplace's method approximates the posterior by a **Multivariate Gaussian** centered on the MAP.
:::
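To make the recipe concrete, here is a minimal numerical sketch (a hypothetical example: the target is a Beta$(5, 3)$ posterior, chosen because its MAP is known in closed form; the names `neg_g`, `theta_map` and `Lambda` below are our own, not a library convention):

```python
# A minimal numerical sketch of Laplace's method (hypothetical example:
# the target is a Beta(5, 3) posterior, whose MAP is known in closed form).
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.stats import beta, norm

a, b = 5, 3  # parameters of the target posterior

def neg_g(theta):
    # -g(theta): the negative log posterior, up to the constant log p(D)
    return -beta.logpdf(theta, a, b)

# MAP estimate: minimize the negative log posterior
theta_map = minimize_scalar(neg_g, bounds=(1e-6, 1 - 1e-6), method="bounded").x

# Lambda = -g''(theta_map), here via a central finite difference
eps = 1e-5
Lambda = (neg_g(theta_map + eps) - 2 * neg_g(theta_map) + neg_g(theta_map - eps)) / eps**2

# Laplace approximation: q(theta) = N(theta | theta_map, Lambda^{-1})
q = norm(loc=theta_map, scale=np.sqrt(1.0 / Lambda))
print(theta_map, (a - 1) / (a + b - 2))             # numerical vs. analytical MAP
print(q.pdf(theta_map), beta.pdf(theta_map, a, b))  # densities at the mode
```

The two printed MAP values agree; the two densities at the mode are close but not identical, because the Gaussian carries its own normalization rather than the true posterior's.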
:::{warning}
As the following figure shows, Laplace's method might not be the "best" gaussian fit to our distribution
:::
*(Figure description: a non-Gaussian distribution is shown in yellow; the red line is a Gaussian centered on the mode (Laplace approximation), while the green line is a Gaussian with minimum reverse KL divergence.)*
**Requirement of the Laplace approximation**
For Laplace's approximation the MAP solution and the negative Hessian evaluated at the MAP solution are needed
In the first place $g(\theta)$ has to be continuous and differentiable on $\theta$, and the negative Hessian has to be positive definite
**A closer look at the evidence in the Laplace approximation**
Using Laplace approximation the log evidence can be decomposed as
$$
\begin{align}
\log p(\mathcal{D}|\mathcal{M}_i) &\approx g(\hat \theta_{\text{map}}) + \frac{K}{2} \log(2\pi) - \frac{1}{2} \log | \Lambda | \nonumber \\
&=\log p(\mathcal{D}|\mathcal{M}_i, \hat \theta_{\text{map}}) + \log p(\hat \theta_{\text{map}}| \mathcal{M}_i) + \frac{K}{2} \log(2\pi) - \frac{1}{2} \log | \Lambda | \nonumber
\end{align}
$$
*i.e.* the log evidence is approximated by the best likelihood fit plus the **Occam's factor**
The Occam's factor depends on the
- log pdf of $\theta$: Prior
- number of parameters $K$: Complexity
- second derivative around the MAP: Model uncertainty
**Relationship between the evidence in Laplace approximation and the BIC**
In the regime of a very large number of samples ($N$), it can be shown that Laplace's approximation is dominated by
$$
\log p(\mathcal{D}|\mathcal{M}_i) \approx \log p(\mathcal{D}|\mathcal{M}_i, \hat \theta_{\text{mle}}) - \frac{K}{2} \log N,
$$
where $\theta_{\text{mle}}$ is the maximum likelihood solution.
The expression above is equivalent to the negative of the [Bayesian Information Criterion (BIC)](https://en.wikipedia.org/wiki/Bayesian_information_criterion)
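For example, comparing a 3-parameter model $\mathcal{M}_1$ against a 10-parameter model $\mathcal{M}_2$ on $N = 10^4$ samples (with hypothetical best-fit log likelihoods of $-1000$ and $-995$):

$$
\log p(\mathcal{D}|\mathcal{M}_1) \approx -1000 - \frac{3}{2} \log 10^4 \approx -1013.8,
\qquad
\log p(\mathcal{D}|\mathcal{M}_2) \approx -995 - \frac{10}{2} \log 10^4 \approx -1041.1,
$$

so the simpler model is preferred even though it fits the data slightly worse.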
:::{seealso}
- Chapters 27 and 28 of [D. Mackay's book](http://www.inference.org.uk/itprnn/book.pdf)
- Section 28.2 of [D. Barber's book](http://web4.cs.ucl.ac.uk/staff/D.Barber/pmwiki/pmwiki.php?n=Brml.Online)
:::
## Variational Inference
In this section we review a more general method for deterministic approximation. Remember that we are interested in the (intractable) posterior
$$
p(\theta|\mathcal{D}) = \frac{p(\mathcal{D}|\theta) p(\theta)}{p(\mathcal{D})}
$$
Variational Inference (VI) is a family of methods in which a simpler (tractable) posterior distribution is proposed to "replace" $p(\theta|\mathcal{D})$. This simpler posterior is denoted as $q_\nu(\theta)$, which represents a family of distributions parameterized by $\nu$
> **Optimization problem:** The objective is to find $\nu$ that makes $q$ most similar to $p$
We can formalize this as a KL divergence minimization problem
$$
\hat \nu = \text{arg}\min_\nu D_{\text{KL}}[q_\nu(\theta) || p(\theta|\mathcal{D})] = \int q_\nu(\theta) \log \frac{q_\nu(\theta)}{p(\theta|\mathcal{D})} d\theta,
$$
but this expression depends on the intractable posterior. To continue, we use Bayes' theorem to move the evidence out of the integral
$$
D_{\text{KL}}[q_\nu(\theta) || p(\theta|\mathcal{D})] = \log p(\mathcal{D}) + \int q_\nu(\theta) \log \frac{q_\nu(\theta)}{p(\mathcal{D}, \theta)} d\theta
$$
As $p(\mathcal{D})$ does not depend on $\nu$ we can focus on the right hand term. The optimization problem is typically written as
$$
\hat \nu = \text{arg}\max_\nu \mathcal{L}(\nu) =\int q_\nu(\theta) \log \frac{p(\mathcal{D}, \theta)}{q_\nu(\theta)}d\theta
$$
where $ \mathcal{L}(\nu)$ is called the **Evidence Lower BOund** (ELBO).
The name comes from the fact that
$$
\log p(\mathcal{D}) \geq \mathcal{L}(\nu) = \int q_\nu(\theta) \log \frac{p(\mathcal{D}, \theta)}{q_\nu(\theta)}d\theta,
$$
which is a result of the non-negativity of the KL divergence.
> Ideally we choose a "simple-enough" parametric family $q$ so that the ELBO is tractable
:::{note}
The ELBO can only be tight if $p$ is within the family of $q$
:::
:::{seealso}
[Calculus of variations](https://en.wikipedia.org/wiki/Calculus_of_variations): Derivatives of functionals (function of functions)
:::
### More attention on the ELBO
The ELBO can also be decomposed as
$$
\begin{align}
\mathcal{L}(\nu) &= \int q_\nu(\theta) \log \frac{p(\mathcal{D}|\theta) p (\theta)}{q_\nu(\theta)}d\theta \nonumber \\
&= \int q_\nu(\theta) \log p(\mathcal{D}|\theta) d\theta - \int q_\nu(\theta) \log \frac{q_\nu(\theta)}{ p (\theta)} d\theta \nonumber \\
&= \mathbb{E}_{\theta \sim q_\nu(\theta)} \left[\log p(\mathcal{D}|\theta)\right] - D_{KL}[q_\nu(\theta) || p(\theta)] \nonumber
\end{align}
$$
From which we can recognize that maximizing the ELBO is equivalent to:
- Maximizing the log likelihood for parameters sampled from the approximate posterior: Generative model produces realistic data samples
- Minimizing the KL divergence between the approximate posterior and prior: Regularization for the approximate posterior
**Another way to "obtain" the ELBO**
We can get the ELBO using [Jensen's inequality](https://en.wikipedia.org/wiki/Jensen%27s_inequality) on the log evidence
$$
\begin{align}
\log p(\mathcal{D}) &= \log \mathbb{E}_{\theta\sim p(\theta)} \left[p(\mathcal{D}|\theta)\right]\nonumber \\
&= \log \mathbb{E}_{\theta\sim q_\nu(\theta)} \left[p(\mathcal{D}|\theta)\frac{p(\theta)}{q_\nu(\theta)}\right]\nonumber \\
&\geq \mathbb{E}_{\theta\sim q_\nu(\theta)} \left[\log \frac{p(\mathcal{D},\theta)}{q_\nu(\theta)}\right] =\int q_\nu(\theta) \log \frac{p(\mathcal{D},\theta)}{q_\nu(\theta)}d\theta \nonumber
\end{align}
$$
### A simple posterior: Fully-factorized posterior
A broadly-used idea to make posteriors tractable is to assume that there is no correlation between factors
$$
q_\nu(\theta) = \prod_{i=1}^K q_{\nu_i}(\theta_i),
$$
this is known as the **Mean-field** VI
Replacing this factorized posterior in the ELBO
$$
\begin{align}
\mathcal{L}_\text{MF}(\nu) &= \int \prod_{i=1}^K q_{\nu_i}(\theta_i) \left ( \log p(\mathcal{D}, \theta) - \sum_{i=1}^K \log q_{\nu_i}(\theta_i) \right) d\theta \nonumber \\
&= \int q_{\nu_i}(\theta_i) \left [ \int \prod_{j\neq i} q_{\nu_j}(\theta_j) \log p(\mathcal{D}, \theta) d\theta_j \right ] d\theta_i - \sum_{i=1}^K \int q_{\nu_i}(\theta_i) \log q_{\nu_i}(\theta_i) d\theta_i \nonumber \\
\end{align}
$$
Assuming that we keep all $\theta$ except $\theta_i$ fixed, we can update $\theta_i$ iteratively using

$$
\mathcal{L}_\text{MF}(\nu_i) = \int q_{\nu_i}(\theta_i) \mathbb{E}_{\prod_{j\neq i} q_{\nu_j}} \left[ \log p(\mathcal{D},\theta) \right ] d\theta_i - \int q_{\nu_i}(\theta_i) \log q_{\nu_i}(\theta_i) d\theta_i + \text{Constant}
$$

:::{note}
(In this case) Maximizing the ELBO is equivalent to minimizing the KL divergence between $q_{\nu_i}(\theta_i)$ and a distribution proportional to $\exp \left( \mathbb{E}_{\prod_{j\neq i} q_{\nu_j}} \left[ \log p(\mathcal{D}, \theta) \right] \right)$
:::

The optimal $q$ in this case is given by

$$
q_{\nu_i}(\theta_i) \propto \exp \left ( \mathbb{E}_{\prod_{j\neq i} q_{\nu_j}} \left[ \log p(\mathcal{D}, \theta) \right ] \right )
$$
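As a classic worked illustration (a sketch of the standard bivariate Gaussian case): if the target is $p(\theta) = \mathcal{N}(\theta | \mu, \Lambda^{-1})$ with $\theta = (\theta_1, \theta_2)$ and precision matrix $\Lambda$, the update above yields

$$
q_1(\theta_1) = \mathcal{N}\left(\theta_1 \mid \mu_1 - \Lambda_{11}^{-1}\Lambda_{12}\,(\mathbb{E}_{q_2}[\theta_2] - \mu_2),\; \Lambda_{11}^{-1}\right),
$$

and symmetrically for $q_2(\theta_2)$. Iterating the two updates converges to a factorized Gaussian that matches the means of $p$ but, in general, underestimates its variances.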
:::{seealso}
- Chapters 33 of [D. Mackay's book](http://www.inference.org.uk/itprnn/book.pdf)
- Section 28.4 of [D. Barber's book](http://web4.cs.ucl.ac.uk/staff/D.Barber/pmwiki/pmwiki.php?n=Brml.Online)
- David Blei, ["Variational Inference: A review for statisticians"](https://arxiv.org/abs/1601.00670), ["Foundations and innovations"](https://www.youtube.com/watch?v=DaqNNLidswA)
- Tamara Broderick, ["Variational Bayes and beyond: Bayesian inference for big data"](http://www.tamarabroderick.com/tutorial_2018_icml.html)
:::
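To close, here is a minimal sketch of VI in practice (all names are our own illustration, not a particular library's API): we fit a Gaussian $q_\nu$ to a fixed unnormalized target by maximizing a Monte Carlo estimate of the ELBO over a grid of variational parameters $\nu = (\mu, \sigma)$. The grid search only keeps the example short; in practice one would use stochastic gradients.

```python
# A minimal sketch of variational inference (hypothetical example): fit a
# Gaussian q_nu to an unnormalized target by maximizing a Monte Carlo
# estimate of the ELBO over a grid of variational parameters nu = (mu, sigma).
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)

def log_joint(theta):
    # unnormalized log p(D, theta): some fixed non-Gaussian target
    return -0.5 * theta**2 - 2.0 * np.abs(theta - 1.0)

def elbo(mu, sigma, n_samples=2000):
    theta = rng.normal(mu, sigma, size=n_samples)  # theta ~ q_nu
    # ELBO estimate: E_q[log p(D, theta) - log q(theta)]
    return np.mean(log_joint(theta) - norm.logpdf(theta, mu, sigma))

best = max(
    (elbo(mu, sigma), mu, sigma)
    for mu in np.linspace(-2.0, 3.0, 51)
    for sigma in np.linspace(0.1, 2.0, 20)
)
print("best ELBO %.3f at mu=%.2f, sigma=%.2f" % best)
```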
|
/-
Copyright (c) Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import data.complex.module
import data.complex.is_R_or_C
/-!
# Normed space structure on `ℂ`.
This file gathers basic facts on complex numbers of an analytic nature.
## Main results
This file registers `ℂ` as a normed field, expresses basic properties of the norm, and gives
tools on the real vector space structure of `ℂ`. Notably, in the namespace `complex`,
it defines functions:
* `re_clm`
* `im_clm`
* `of_real_clm`
* `conj_cle`
They are bundled versions of the real part, the imaginary part, the embedding of `ℝ` in `ℂ`, and
the complex conjugate as continuous `ℝ`-linear maps. The last two are also bundled as linear
isometries in `of_real_li` and `conj_lie`.
We also register the fact that `ℂ` is an `is_R_or_C` field.
-/
noncomputable theory
namespace complex
open_locale complex_conjugate
instance : has_norm ℂ := ⟨abs⟩
instance : normed_group ℂ :=
normed_group.of_core ℂ
{ norm_eq_zero_iff := λ z, abs_eq_zero,
triangle := abs_add,
norm_neg := abs_neg }
instance : normed_field ℂ :=
{ norm := abs,
dist_eq := λ _ _, rfl,
norm_mul' := abs_mul,
.. complex.field }
instance : nondiscrete_normed_field ℂ :=
{ non_trivial := ⟨2, by simp [norm]; norm_num⟩ }
instance {R : Type*} [normed_field R] [normed_algebra R ℝ] : normed_algebra R ℂ :=
{ norm_algebra_map_eq := λ x, (abs_of_real $ algebra_map R ℝ x).trans (norm_algebra_map_eq ℝ x),
to_algebra := complex.algebra }
/-- The module structure from `module.complex_to_real` is a normed space. -/
@[priority 900] -- see Note [lower instance priority]
instance _root_.normed_space.complex_to_real {E : Type*} [normed_group E] [normed_space ℂ E] :
normed_space ℝ E :=
normed_space.restrict_scalars ℝ ℂ E
@[simp] lemma norm_eq_abs (z : ℂ) : ∥z∥ = abs z := rfl
lemma dist_eq (z w : ℂ) : dist z w = abs (z - w) := rfl
@[simp] lemma norm_real (r : ℝ) : ∥(r : ℂ)∥ = ∥r∥ := abs_of_real _
@[simp] lemma norm_rat (r : ℚ) : ∥(r : ℂ)∥ = |(r : ℝ)| :=
suffices ∥((r : ℝ) : ℂ)∥ = |r|, by simpa,
by rw [norm_real, real.norm_eq_abs]
@[simp] lemma norm_nat (n : ℕ) : ∥(n : ℂ)∥ = n := abs_of_nat _
@[simp] lemma norm_int {n : ℤ} : ∥(n : ℂ)∥ = |n| :=
suffices ∥((n : ℝ) : ℂ)∥ = |n|, by simpa,
by rw [norm_real, real.norm_eq_abs]
lemma norm_int_of_nonneg {n : ℤ} (hn : 0 ≤ n) : ∥(n : ℂ)∥ = n :=
by rw [norm_int, _root_.abs_of_nonneg]; exact int.cast_nonneg.2 hn
@[continuity] lemma continuous_abs : continuous abs := continuous_norm
@[continuity] lemma continuous_norm_sq : continuous norm_sq :=
by simpa [← norm_sq_eq_abs] using continuous_abs.pow 2
/-- The `abs` function on `ℂ` is proper. -/
lemma tendsto_abs_cocompact_at_top : filter.tendsto abs (filter.cocompact ℂ) filter.at_top :=
tendsto_norm_cocompact_at_top
/-- The `norm_sq` function on `ℂ` is proper. -/
lemma tendsto_norm_sq_cocompact_at_top :
filter.tendsto norm_sq (filter.cocompact ℂ) filter.at_top :=
by simpa [mul_self_abs] using
tendsto_abs_cocompact_at_top.at_top_mul_at_top tendsto_abs_cocompact_at_top
open continuous_linear_map
/-- Continuous linear map version of the real part function, from `ℂ` to `ℝ`. -/
def re_clm : ℂ →L[ℝ] ℝ := re_lm.mk_continuous 1 (λ x, by simp [real.norm_eq_abs, abs_re_le_abs])
@[continuity] lemma continuous_re : continuous re := re_clm.continuous
@[simp] lemma re_clm_coe : (coe (re_clm) : ℂ →ₗ[ℝ] ℝ) = re_lm := rfl
@[simp] lemma re_clm_apply (z : ℂ) : (re_clm : ℂ → ℝ) z = z.re := rfl
@[simp] lemma re_clm_norm : ∥re_clm∥ = 1 :=
le_antisymm (linear_map.mk_continuous_norm_le _ zero_le_one _) $
calc 1 = ∥re_clm 1∥ : by simp
... ≤ ∥re_clm∥ : unit_le_op_norm _ _ (by simp)
/-- Continuous linear map version of the imaginary part function, from `ℂ` to `ℝ`. -/
def im_clm : ℂ →L[ℝ] ℝ := im_lm.mk_continuous 1 (λ x, by simp [real.norm_eq_abs, abs_im_le_abs])
@[continuity] lemma continuous_im : continuous im := im_clm.continuous
@[simp] lemma im_clm_coe : (coe (im_clm) : ℂ →ₗ[ℝ] ℝ) = im_lm := rfl
@[simp] lemma im_clm_apply (z : ℂ) : (im_clm : ℂ → ℝ) z = z.im := rfl
@[simp] lemma im_clm_norm : ∥im_clm∥ = 1 :=
le_antisymm (linear_map.mk_continuous_norm_le _ zero_le_one _) $
calc 1 = ∥im_clm I∥ : by simp
... ≤ ∥im_clm∥ : unit_le_op_norm _ _ (by simp)
lemma restrict_scalars_one_smul_right' {E : Type*} [normed_group E] [normed_space ℂ E] (x : E) :
continuous_linear_map.restrict_scalars ℝ ((1 : ℂ →L[ℂ] ℂ).smul_right x : ℂ →L[ℂ] E) =
re_clm.smul_right x + I • im_clm.smul_right x :=
by { ext ⟨a, b⟩, simp [mk_eq_add_mul_I, add_smul, mul_smul, smul_comm I] }
lemma restrict_scalars_one_smul_right (x : ℂ) :
continuous_linear_map.restrict_scalars ℝ ((1 : ℂ →L[ℂ] ℂ).smul_right x : ℂ →L[ℂ] ℂ) = x • 1 :=
by { ext1 z, dsimp, apply mul_comm }
/-- The complex-conjugation function from `ℂ` to itself is an isometric linear equivalence. -/
def conj_lie : ℂ ≃ₗᵢ[ℝ] ℂ := ⟨conj_ae.to_linear_equiv, abs_conj⟩
@[simp] lemma conj_lie_apply (z : ℂ) : conj_lie z = conj z := rfl
lemma isometry_conj : isometry (conj : ℂ → ℂ) := conj_lie.isometry
@[continuity] lemma continuous_conj : continuous (conj : ℂ → ℂ) := conj_lie.continuous
/-- Continuous linear equiv version of the conj function, from `ℂ` to `ℂ`. -/
def conj_cle : ℂ ≃L[ℝ] ℂ := conj_lie
@[simp] lemma conj_cle_coe : conj_cle.to_linear_equiv = conj_ae.to_linear_equiv := rfl
@[simp] lemma conj_cle_apply (z : ℂ) : conj_cle z = conj z := rfl
@[simp] lemma conj_cle_norm : ∥(conj_cle : ℂ →L[ℝ] ℂ)∥ = 1 :=
conj_lie.to_linear_isometry.norm_to_continuous_linear_map
/-- Linear isometry version of the canonical embedding of `ℝ` in `ℂ`. -/
def of_real_li : ℝ →ₗᵢ[ℝ] ℂ := ⟨of_real_am.to_linear_map, norm_real⟩
lemma isometry_of_real : isometry (coe : ℝ → ℂ) := of_real_li.isometry
@[continuity] lemma continuous_of_real : continuous (coe : ℝ → ℂ) := of_real_li.continuous
/-- Continuous linear map version of the canonical embedding of `ℝ` in `ℂ`. -/
def of_real_clm : ℝ →L[ℝ] ℂ := of_real_li.to_continuous_linear_map
@[simp] lemma of_real_clm_coe : (of_real_clm : ℝ →ₗ[ℝ] ℂ) = of_real_am.to_linear_map := rfl
@[simp] lemma of_real_clm_apply (x : ℝ) : of_real_clm x = x := rfl
@[simp] lemma of_real_clm_norm : ∥of_real_clm∥ = 1 := of_real_li.norm_to_continuous_linear_map
noncomputable instance : is_R_or_C ℂ :=
{ re := ⟨complex.re, complex.zero_re, complex.add_re⟩,
im := ⟨complex.im, complex.zero_im, complex.add_im⟩,
I := complex.I,
I_re_ax := by simp only [add_monoid_hom.coe_mk, complex.I_re],
I_mul_I_ax := by simp only [complex.I_mul_I, eq_self_iff_true, or_true],
re_add_im_ax := λ z, by simp only [add_monoid_hom.coe_mk, complex.re_add_im,
complex.coe_algebra_map, complex.of_real_eq_coe],
of_real_re_ax := λ r, by simp only [add_monoid_hom.coe_mk, complex.of_real_re,
complex.coe_algebra_map, complex.of_real_eq_coe],
of_real_im_ax := λ r, by simp only [add_monoid_hom.coe_mk, complex.of_real_im,
complex.coe_algebra_map, complex.of_real_eq_coe],
mul_re_ax := λ z w, by simp only [complex.mul_re, add_monoid_hom.coe_mk],
mul_im_ax := λ z w, by simp only [add_monoid_hom.coe_mk, complex.mul_im],
conj_re_ax := λ z, rfl,
conj_im_ax := λ z, rfl,
conj_I_ax := by simp only [complex.conj_I, ring_hom.coe_mk],
norm_sq_eq_def_ax := λ z, by simp only [←complex.norm_sq_eq_abs, ←complex.norm_sq_apply,
add_monoid_hom.coe_mk, complex.norm_eq_abs],
mul_im_I_ax := λ z, by simp only [mul_one, add_monoid_hom.coe_mk, complex.I_im],
inv_def_ax := λ z, by simp only [complex.inv_def, complex.norm_sq_eq_abs, complex.coe_algebra_map,
complex.of_real_eq_coe, complex.norm_eq_abs],
div_I_ax := complex.div_I }
section
variables {α β γ : Type*}
[add_comm_monoid α] [topological_space α] [add_comm_monoid γ] [topological_space γ]
/-- The natural `add_equiv` from `ℂ` to `ℝ × ℝ`. -/
def equiv_real_prod_add_hom : ℂ ≃+ ℝ × ℝ :=
{ map_add' := by simp, .. equiv_real_prod }
/-- The natural `linear_equiv` from `ℂ` to `ℝ × ℝ`. -/
def equiv_real_prod_add_hom_lm : ℂ ≃ₗ[ℝ] ℝ × ℝ :=
{ map_smul' := by simp [equiv_real_prod_add_hom], .. equiv_real_prod_add_hom }
/-- The natural `continuous_linear_equiv` from `ℂ` to `ℝ × ℝ`. -/
def equiv_real_prodₗ : ℂ ≃L[ℝ] ℝ × ℝ :=
equiv_real_prod_add_hom_lm.to_continuous_linear_equiv
end
lemma has_sum_iff {α} (f : α → ℂ) (c : ℂ) :
has_sum f c ↔ has_sum (λ x, (f x).re) c.re ∧ has_sum (λ x, (f x).im) c.im :=
begin
refine ⟨λ h, ⟨h.mapL re_clm, h.mapL im_clm⟩, _⟩,
rintro ⟨h₁, h₂⟩,
convert (h₁.prod_mk h₂).mapL equiv_real_prodₗ.symm.to_continuous_linear_map,
{ ext x; refl },
{ cases c, refl }
end
end complex
namespace is_R_or_C
local notation `reC` := @is_R_or_C.re ℂ _
local notation `imC` := @is_R_or_C.im ℂ _
local notation `IC` := @is_R_or_C.I ℂ _
local notation `absC` := @is_R_or_C.abs ℂ _
local notation `norm_sqC` := @is_R_or_C.norm_sq ℂ _
@[simp] lemma re_to_complex {x : ℂ} : reC x = x.re := rfl
@[simp] lemma im_to_complex {x : ℂ} : imC x = x.im := rfl
@[simp] lemma I_to_complex : IC = complex.I := rfl
@[simp] lemma norm_sq_to_complex {x : ℂ} : norm_sqC x = complex.norm_sq x :=
by simp [is_R_or_C.norm_sq, complex.norm_sq]
@[simp] lemma abs_to_complex {x : ℂ} : absC x = complex.abs x :=
by simp [is_R_or_C.abs, complex.abs]
end is_R_or_C
|
[STATEMENT]
lemma apx_min:
"S = \<Union> U \<Longrightarrow> U \<subseteq> \<R> \<Longrightarrow> S = [M]\<^bsub>v,n\<^esub> \<Longrightarrow> \<forall> i\<le>n. \<forall> j\<le>n. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>
\<Longrightarrow> normalized M \<Longrightarrow> Z \<subseteq> S \<Longrightarrow> Approx\<^sub>\<beta> Z \<subseteq> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>S = \<Union> U; U \<subseteq> \<R>; S = [M]\<^bsub>v,n\<^esub>; dbm_int M n; normalized M; Z \<subseteq> S\<rbrakk> \<Longrightarrow> Approx\<^sub>\<beta> Z \<subseteq> S
[PROOF STEP]
unfolding apx_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>S = \<Union> U; U \<subseteq> \<R>; S = [M]\<^bsub>v,n\<^esub>; dbm_int M n; normalized M; Z \<subseteq> S\<rbrakk> \<Longrightarrow> \<Inter> {S. \<exists>U M. S = \<Union> U \<and> U \<subseteq> \<R> \<and> Z \<subseteq> S \<and> (S = [M]\<^bsub>v,n\<^esub> \<and> dbm_int M n) \<and> normalized M} \<subseteq> S
[PROOF STEP]
by blast |
State Before: F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
⊢ b ≤ ⌊a + ↑n⌋₊ ↔ b ≤ ⌊a⌋₊ + n State After: F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n Tactic: rw [le_floor_iff (add_nonneg ha n.cast_nonneg)] State Before: F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n State After: case inl
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
hb : n ≤ b
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n
case inr
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
hb : b ≤ n
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n Tactic: obtain hb | hb := le_total n b State Before: case inl
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
hb : n ≤ b
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n State After: case inl.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n d : ℕ
hb : n ≤ n + d
⊢ ↑(n + d) ≤ a + ↑n ↔ n + d ≤ ⌊a⌋₊ + n Tactic: obtain ⟨d, rfl⟩ := exists_add_of_le hb State Before: case inl.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n d : ℕ
hb : n ≤ n + d
⊢ ↑(n + d) ≤ a + ↑n ↔ n + d ≤ ⌊a⌋₊ + n State After: no goals Tactic: rw [Nat.cast_add, add_comm n, add_comm (n : α), add_le_add_iff_right, add_le_add_iff_right,
le_floor_iff ha] State Before: case inr
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n✝ : ℕ
ha : 0 ≤ a
n b : ℕ
hb : b ≤ n
⊢ ↑b ≤ a + ↑n ↔ b ≤ ⌊a⌋₊ + n State After: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ a + ↑(b + d) ↔ b ≤ ⌊a⌋₊ + (b + d) Tactic: obtain ⟨d, rfl⟩ := exists_add_of_le hb State Before: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ a + ↑(b + d) ↔ b ≤ ⌊a⌋₊ + (b + d) State After: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ ↑b + (a + ↑d) ↔ b ≤ b + (⌊a⌋₊ + d) Tactic: rw [Nat.cast_add, add_left_comm _ b, add_left_comm _ (b : α)] State Before: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ ↑b + (a + ↑d) ↔ b ≤ b + (⌊a⌋₊ + d) State After: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ ↑b + (a + ↑d) Tactic: refine' iff_of_true _ le_self_add State Before: case inr.intro
F : Type ?u.78159
α : Type u_1
β : Type ?u.78165
inst✝¹ : LinearOrderedSemiring α
inst✝ : FloorSemiring α
a : α
n : ℕ
ha : 0 ≤ a
b d : ℕ
hb : b ≤ b + d
⊢ ↑b ≤ ↑b + (a + ↑d) State After: no goals Tactic: exact le_add_of_nonneg_right <| ha.trans <| le_add_of_nonneg_right d.cast_nonneg |
import algebra.field
import gtm106.weierstrass_equation.basic
import gtm106.weierstrass_equation.point
import myhelper.mypoly.basic
import tactic
namespace weierstrass_equation
/--
intersection of `y^2 + a1*x*y + a3*y = x^3 + a2*x^2 + a4*x + a6`
and `y = A*x + B`
-/
@[ext]
structure intersection_with_line (K : Type*) [field K] :=
mk :: (E : weierstrass_equation K)
(A B : K)
namespace intersection_with_line
/--
intersection of `y^2 + a1*x*y + a3*y = x^3 + a2*x^2 + a4*x + a6`
and `y - y0 = A*(x - x0)`
-/
def from_point {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A : K)
: intersection_with_line K
:= ⟨ E, A, P.y - A * P.x ⟩
@[simp]
lemma from_point.E {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A : K)
: (from_point E P A).E = E := rfl
@[simp]
lemma from_point.A {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A : K)
: (from_point E P A).A = A := rfl
section
parameters {K : Type*} [field K] (L : intersection_with_line K)
def a : K := L.E.a2 - (L.A + L.E.a1) * L.A
def b : K := L.E.a4 - (2 * L.A + L.E.a1) * L.B - L.A * L.E.a3
def c : K := L.E.a6 - (L.B + L.E.a3) * L.B
def poly : monic_cubic_poly K := ⟨ L.a, L.b, L.c ⟩
def point (x : K) : affine_plane_point K := ⟨ x, L.A * x + L.B ⟩
lemma from_point' (x : K)
: from_point L.E (L.point x) L.A = L :=
begin
simp [from_point, point, intersection_with_line.ext_iff],
end
lemma eval_at' (x : K)
: L.poly.eval_at x = - L.E.eval_at_affine_point (L.point x) :=
sub_eq_zero.1 begin
simp only [monic_cubic_poly.eval_at, poly, a, b, c, eval_at_affine_point, point],
ring,
end
lemma eval_dx_at' (x : K)
: L.poly.eval_dx_at x = - L.E.eval_dx_at_affine_point (L.point x) - L.A * L.E.eval_dy_at_affine_point (L.point x) :=
sub_eq_zero.1 begin
simp only [monic_cubic_poly.eval_dx_at, poly, a, b, c,
eval_dx_at_affine_point, eval_dy_at_affine_point, point],
ring,
end
lemma is_on' (x : K)
: L.poly.is_root x ↔ L.E.affine_point_on_curve (L.point x) :=
begin
simp [monic_cubic_poly.is_root, affine_point_on_curve, eval_at'],
end
lemma is_on_2' (x : K)
: L.poly.is_multiple_root x
↔ L.E.affine_point_on_curve (L.point x)
∧ - L.E.eval_dx_at_affine_point (L.point x) = L.A * L.E.eval_dy_at_affine_point (L.point x) :=
begin
simp [monic_cubic_poly.is_multiple_root, affine_point_on_curve,
eval_at', eval_dx_at', sub_eq_zero],
end
end
lemma point_this {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A : K)
: (from_point E P A).point P.x = P :=
begin
simp [from_point, point, affine_plane_point.ext_iff],
end
lemma is_on_this {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A : K)
: (from_point E P A).poly.is_root P.x ↔ E.affine_point_on_curve P :=
begin
rw [is_on', point_this],
simp [from_point],
end
lemma from_point'' {K : Type*} [field K]
(E : weierstrass_equation K)
(P : affine_plane_point K)
(A x : K)
: from_point E ((from_point E P A).point x) A = from_point E P A :=
begin
have := from_point' (from_point E P A) x,
simp at this,
exact this,
end
end intersection_with_line
end weierstrass_equation
|
(* Author: Tobias Nipkow *)
section \<open>Pairing Heap According to Oksaki (Modified)\<close>
theory Pairing_Heap_List2
imports
"HOL-Library.Multiset"
"HOL-Data_Structures.Priority_Queue_Specs"
begin
subsection \<open>Definitions\<close>
text \<open>This version of pairing heaps is a modified version
of the one by Okasaki \<^cite>\<open>"Okasaki"\<close> that avoids structural invariants.\<close>
datatype 'a hp = Hp 'a (hps: "'a hp list")
type_synonym 'a heap = "'a hp option"
hide_const (open) insert
fun get_min :: "'a heap \<Rightarrow> 'a" where
"get_min (Some(Hp x _)) = x"
fun link :: "('a::linorder) hp \<Rightarrow> 'a hp \<Rightarrow> 'a hp" where
"link (Hp x lx) (Hp y ly) =
(if x < y then Hp x (Hp y ly # lx) else Hp y (Hp x lx # ly))"
fun merge :: "('a::linorder) heap \<Rightarrow> 'a heap \<Rightarrow> 'a heap" where
"merge h None = h" |
"merge None h = h" |
"merge (Some h1) (Some h2) = Some(link h1 h2)"
lemma merge_None[simp]: "merge None h = h"
by(cases h)auto
fun insert :: "('a::linorder) \<Rightarrow> 'a heap \<Rightarrow> 'a heap" where
"insert x None = Some(Hp x [])" |
"insert x (Some h) = Some(link (Hp x []) h)"
fun pass\<^sub>1 :: "('a::linorder) hp list \<Rightarrow> 'a hp list" where
"pass\<^sub>1 [] = []"
| "pass\<^sub>1 [h] = [h]"
| "pass\<^sub>1 (h1#h2#hs) = link h1 h2 # pass\<^sub>1 hs"
fun pass\<^sub>2 :: "('a::linorder) hp list \<Rightarrow> 'a heap" where
"pass\<^sub>2 [] = None"
| "pass\<^sub>2 (h#hs) = Some(case pass\<^sub>2 hs of None \<Rightarrow> h | Some h' \<Rightarrow> link h h')"
fun merge_pairs :: "('a::linorder) hp list \<Rightarrow> 'a heap" where
"merge_pairs [] = None"
| "merge_pairs [h] = Some h"
| "merge_pairs (h1 # h2 # hs) =
Some(let h12 = link h1 h2 in case merge_pairs hs of None \<Rightarrow> h12 | Some h \<Rightarrow> link h12 h)"
fun del_min :: "('a::linorder) heap \<Rightarrow> 'a heap" where
"del_min None = None"
| "del_min (Some(Hp x hs)) = pass\<^sub>2 (pass\<^sub>1 hs)"
subsection \<open>Correctness Proofs\<close>
text \<open>An optimization:\<close>
lemma pass12_merge_pairs: "pass\<^sub>2 (pass\<^sub>1 hs) = merge_pairs hs"
by (induction hs rule: merge_pairs.induct) (auto split: option.split)
declare pass12_merge_pairs[code_unfold]
subsubsection \<open>Invariants\<close>
fun php :: "('a::linorder) hp \<Rightarrow> bool" where
"php (Hp x hs) = (\<forall>h \<in> set hs. (\<forall>y \<in> set_hp h. x \<le> y) \<and> php h)"
definition invar :: "('a::linorder) heap \<Rightarrow> bool" where
"invar ho = (case ho of None \<Rightarrow> True | Some h \<Rightarrow> php h)"
lemma php_link: "php h1 \<Longrightarrow> php h2 \<Longrightarrow> php (link h1 h2)"
by (induction h1 h2 rule: link.induct) fastforce+
lemma invar_merge:
"\<lbrakk> invar h1; invar h2 \<rbrakk> \<Longrightarrow> invar (merge h1 h2)"
by (auto simp: php_link invar_def split: option.splits)
lemma invar_insert: "invar h \<Longrightarrow> invar (insert x h)"
by (auto simp: php_link invar_def split: option.splits)
lemma invar_pass1: "\<forall>h \<in> set hs. php h \<Longrightarrow> \<forall>h \<in> set (pass\<^sub>1 hs). php h"
by(induction hs rule: pass\<^sub>1.induct) (auto simp: php_link)
lemma invar_pass2: "\<forall>h \<in> set hs. php h \<Longrightarrow> invar (pass\<^sub>2 hs)"
by (induction hs)(auto simp: php_link invar_def split: option.splits)
lemma invar_Some: "invar (Some h) = php h"
by(simp add: invar_def)
lemma invar_del_min: "invar h \<Longrightarrow> invar (del_min h)"
by(induction h rule: del_min.induct)
(auto simp: invar_Some intro!: invar_pass1 invar_pass2)
subsubsection \<open>Functional Correctness\<close>
fun mset_hp :: "'a hp \<Rightarrow>'a multiset" where
"mset_hp (Hp x hs) = {#x#} + sum_mset(mset(map mset_hp hs))"
definition mset_heap :: "'a heap \<Rightarrow>'a multiset" where
"mset_heap ho = (case ho of None \<Rightarrow> {#} | Some h \<Rightarrow> mset_hp h)"
lemma set_mset_mset_hp: "set_mset (mset_hp h) = set_hp h"
by(induction h) auto
lemma mset_hp_empty[simp]: "mset_hp hp \<noteq> {#}"
by (cases hp) auto
lemma mset_heap_Some: "mset_heap(Some hp) = mset_hp hp"
by(simp add: mset_heap_def)
lemma mset_heap_empty: "mset_heap h = {#} \<longleftrightarrow> h = None"
by (cases h) (auto simp add: mset_heap_def)
lemma get_min_in:
"h \<noteq> None \<Longrightarrow> get_min h \<in> set_hp(the h)"
by(induction rule: get_min.induct)(auto)
lemma get_min_min: "\<lbrakk> h \<noteq> None; invar h; x \<in> set_hp(the h) \<rbrakk> \<Longrightarrow> get_min h \<le> x"
by(induction h rule: get_min.induct)(auto simp: invar_def)
lemma mset_link: "mset_hp (link h1 h2) = mset_hp h1 + mset_hp h2"
by(induction h1 h2 rule: link.induct)(auto simp: add_ac)
lemma mset_merge: "mset_heap (merge h1 h2) = mset_heap h1 + mset_heap h2"
by (induction h1 h2 rule: merge.induct)
(auto simp add: mset_heap_def mset_link ac_simps)
lemma mset_insert: "mset_heap (insert a h) = {#a#} + mset_heap h"
by(cases h) (auto simp add: mset_link mset_heap_def insert_def)
lemma mset_merge_pairs: "mset_heap (merge_pairs hs) = sum_mset(image_mset mset_hp (mset hs))"
by(induction hs rule: merge_pairs.induct)
(auto simp: mset_merge mset_link mset_heap_def Let_def split: option.split)
lemma mset_del_min: "h \<noteq> None \<Longrightarrow>
mset_heap (del_min h) = mset_heap h - {#get_min h#}"
by(induction h rule: del_min.induct)
(auto simp: mset_heap_Some pass12_merge_pairs mset_merge_pairs)
text \<open>Last step: prove all axioms of the priority queue specification:\<close>
interpretation pairing: Priority_Queue_Merge
where empty = None and is_empty = "\<lambda>h. h = None"
and merge = merge and insert = insert
and del_min = del_min and get_min = get_min
and invar = invar and mset = mset_heap
proof(standard, goal_cases)
case 1 show ?case by(simp add: mset_heap_def)
next
case (2 q) thus ?case by(auto simp add: mset_heap_def split: option.split)
next
case 3 show ?case by(simp add: mset_insert mset_merge)
next
case 4 thus ?case by(simp add: mset_del_min mset_heap_empty)
next
case (5 q) thus ?case using get_min_in[of q]
by(auto simp add: eq_Min_iff get_min_min mset_heap_empty mset_heap_Some set_mset_mset_hp)
next
case 6 thus ?case by (simp add: invar_def)
next
case 7 thus ?case by(rule invar_insert)
next
case 8 thus ?case by (simp add: invar_del_min)
next
case 9 thus ?case by (simp add: mset_merge)
next
case 10 thus ?case by (simp add: invar_merge)
qed
end
|
Formal statement is: lemma smallomega_iff_smallo: "g \<in> \<omega>[F](f) \<longleftrightarrow> f \<in> o[F](g)" Informal statement is: $f \in \omega[F](g)$ if and only if $g \in o[F](f)$. |
-- @@stderr --
dtrace: failed to compile script test/unittest/printf/err.D_PRINTF_DYN_PROTO.nowidth.d: [D_PRINTF_DYN_PROTO] line 18: printf( ) prototype mismatch: conversion #2 (%d) is missing a corresponding "*" argument
|
import mynat.mul
-- this is one of *three* routes to
-- canonically_ordered_comm_semiring
namespace mynat
def le (a b : mynat) := ∃ (c : mynat), b = a + c
-- Another choice is to define it recursively:
-- | le 0 _
-- | le (succ a) (succ b) = le a b
-- notation
instance : has_le mynat := ⟨mynat.le⟩
@[leakage] theorem le_def' : mynat.le = (≤) := rfl
theorem le_iff_exists_add (a b : mynat) : a ≤ b ↔ ∃ (c : mynat), b = a + c := iff.rfl
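-- A quick sketch of using the definition directly: the witness `c = 1`
-- proves `a ≤ a + 1`, since `a + 1 = a + 1` holds by `rfl`.
example (a : mynat) : a ≤ a + 1 := ⟨1, rfl⟩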
end mynat |
import .basic
import ..myset.basic
namespace hidden
namespace myring
structure is_ideal {α : Type} [myring α] (I : myset α) : Prop :=
intro ::
(contains_zero: (0: α) ∈ I)
(add_closure (a b : α) : a ∈ I → b ∈ I → a + b ∈ I)
(mul_closure (r x : α) : x ∈ I → r * x ∈ I)
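-- These axioms describe a left ideal: closure under negation is not
-- assumed but derived below (`neg_closure`) by multiplying by `-1`.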
variables {α : Type} [myring α] (I J : myset α)
namespace is_ideal
theorem neg_closure {I: myset α} (hI: is_ideal I) (a: α) (ha: a ∈ I):
-a ∈ I :=
begin
have := hI.mul_closure (-1: α) a ha,
rw ←neg_eq_mul_neg_one at this,
from this,
end
end is_ideal
theorem ideal_intersection (hI : is_ideal I) (hJ : is_ideal J) : is_ideal (I ∩ J) :=
begin
split, {
split, {
from hI.contains_zero,
}, {
from hJ.contains_zero,
},
}, {
intros a b,
assume haIJ hbIJ,
split,
apply hI.add_closure _ _ haIJ.left hbIJ.left,
apply hJ.add_closure _ _ haIJ.right hbIJ.right,
}, {
intros r x,
assume h,
split,
apply hI.mul_closure _ _ h.left,
apply hJ.mul_closure _ _ h.right,
},
end
variables {β : Type} [myring β]
def ker (f : α → β) : myset α := { a | f a = 0 }
def im (f : α → β) : myset β := { b | ∃ a, f a = b }
structure is_homomorphism (f : α → β) : Prop :=
intro :: -- necessary?
(respects_add (a b : α) : f (a + b) = f a + f b)
(respects_mul (a b : α) : f (a * b) = f a * f b)
(respects_one : f 1 = 1)
namespace is_homomorphism
variables {f : α → β} (hf : is_homomorphism f)
include hf
theorem respects_zero : f 0 = 0 :=
begin
apply add_cancel_left (f 0),
rw [←hf.respects_add, add_zero, add_zero],
end
theorem respects_neg (a : α) : f (-a) = -f a :=
begin
rw [←neg_unique, ←respects_add hf, neg_add, respects_zero hf],
end
theorem kernel_ideal : is_ideal (ker f) :=
begin
split, {
from hf.respects_zero,
}, {
intros a b,
assume ha hb,
change f (a + b) = 0,
change f a = 0 at ha,
change f b = 0 at hb,
rw [hf.respects_add, ha, hb, add_zero],
}, {
intros r x,
assume hx,
change f (r * x) = 0,
change f x = 0 at hx,
rw [hf.respects_mul, hx, mul_zero],
},
end
end is_homomorphism
end myring
end hidden |
If $f$ and $g$ are analytic at $z$, then the $n$th derivative of $f - g$ at $z$ is equal to the $n$th derivative of $f$ at $z$ minus the $n$th derivative of $g$ at $z$. |
(* Title: HOL/Auth/n_german_lemma_inv__46_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_inv__46_on_rules imports n_german_lemma_on_inv__46
begin
section{*All lemmas on causal relation between inv__46*}
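text{*The proof below is purely mechanical: it enumerates every protocol
rule and discharges each case with the corresponding causal lemma via
metis.*}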
lemma lemma_inv__46_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__46 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqES i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvE i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)\<or>
(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqEIVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqES i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqESVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvEVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__46) done
}
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__46) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
-- ------------------------------------------------------- [ Specification.idr ]
-- Module : Specification.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| A language to build abstract interface specifications.
module Cordial.EDSL.Specification
import public Data.Vect
import public Data.List.Quantifiers
import public Data.Ranged
import public Text.Markup.Edda
import public Language.EDSL.SingleStateVar
import public Cordial.Model.Specification
import public Cordial.EDSL.Specification.Lang
import public Cordial.EDSL.Specification.API
%default total
%access public export
-- --------------------------------------------------------------------- [ EOF ]
|
Formal statement is: lemma mem_interior: "x \<in> interior S \<longleftrightarrow> (\<exists>e>0. ball x e \<subseteq> S)" Informal statement is: A point $x$ is in the interior of a set $S$ if and only if there exists an open ball around $x$ that is contained in $S$. |
-- Copyright 2017, the blau.io contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
module API.Web.DOM.EventListener
import API.Web.DOM.Event
%access public export
%default total
||| An EventListener can be used to observe a specific event.
|||
||| The original specification can be found at
||| https://dom.spec.whatwg.org/#concept-event-listener
record EventListener where
constructor New
type : String
||| The *callback* arguments sets the **callback** that will be invoked when
||| the event get dispatched.
callback : Event -> IO ()
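||| A hedged usage sketch (not part of the original API; the name
||| `clickLogger` is purely illustrative): a listener that logs a fixed
||| message whenever its event fires.
clickLogger : EventListener
clickLogger = New "click" (\_ => putStrLn "clicked")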
|
[STATEMENT]
lemma create_name_igba_correct: "create_name_igba \<phi> \<le> SPEC (\<lambda>G.
igba G \<and> finite (g_V G) \<and> (\<forall>\<xi>. igba.accept G \<xi> \<longleftrightarrow> \<xi> \<Turnstile>\<^sub>r \<phi>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. create_name_igba \<phi> \<le> SPEC (\<lambda>G. igba G \<and> finite (g_V G) \<and> (\<forall>\<xi>. igba.accept G \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>))
[PROOF STEP]
unfolding create_name_igba_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. create_name_gba \<phi> \<bind> (\<lambda>A. gba_to_idx A \<bind> (\<lambda>A'. stat_set_data_nres (card (g_V A)) (card (g_V0 A')) (igbg_num_acc A') \<bind> (\<lambda>_. RETURN A'))) \<le> SPEC (\<lambda>G. igba G \<and> finite (g_V G) \<and> (\<forall>\<xi>. igba.accept G \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>))
[PROOF STEP]
apply (refine_rcg
order_trans[OF create_name_gba_correct]
order_trans[OF gba.gba_to_idx_ext_correct]
refine_vcg)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x. gba x \<and> finite (g_V x) \<and> (\<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>) \<Longrightarrow> gba x
2. \<And>x A. \<lbrakk>gba x \<and> finite (g_V x) \<and> (\<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>); A \<in> gbg_F x\<rbrakk> \<Longrightarrow> finite A
3. \<And>x xa. \<lbrakk>gba x \<and> finite (g_V x) \<and> (\<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>); igba.accept xa = gba.accept x \<and> g_V xa = g_V x \<and> g_E xa = g_E x \<and> g_V0 xa = g_V0 x \<and> igba_rec.more xa = () \<and> igba xa\<rbrakk> \<Longrightarrow> igba xa
4. \<And>x xa. \<lbrakk>gba x \<and> finite (g_V x) \<and> (\<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>); igba.accept xa = gba.accept x \<and> g_V xa = g_V x \<and> g_E xa = g_E x \<and> g_V0 xa = g_V0 x \<and> igba_rec.more xa = () \<and> igba xa\<rbrakk> \<Longrightarrow> finite (g_V xa)
5. \<And>x xa \<xi>. \<lbrakk>gba x \<and> finite (g_V x) \<and> (\<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>); igba.accept xa = gba.accept x \<and> g_V xa = g_V x \<and> g_E xa = g_E x \<and> g_V0 xa = g_V0 x \<and> igba_rec.more xa = () \<and> igba xa\<rbrakk> \<Longrightarrow> igba.accept xa \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>
[PROOF STEP]
apply clarsimp_all
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
fix G :: "(nat, 'a set) gba_rec"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
fix A :: "nat set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
assume 1: "gba G"
[PROOF STATE]
proof (state)
this:
gba G
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
assume 2: "finite (g_V G)" "A \<in> gbg_F G"
[PROOF STATE]
proof (state)
this:
finite (g_V G)
A \<in> gbg_F G
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
interpret gba G
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gba G
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
gba G
goal (1 subgoal):
1. gba G
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x A. \<lbrakk>A \<in> gbg_F x; gba x; finite (g_V x); \<forall>\<xi>. gba.accept x \<xi> = \<xi> \<Turnstile>\<^sub>r \<phi>\<rbrakk> \<Longrightarrow> finite A
[PROOF STEP]
show "finite A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite A
[PROOF STEP]
using finite_V_Fe 2
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>finite V; ?A \<in> F\<rbrakk> \<Longrightarrow> finite ?A
finite V
A \<in> F
goal (1 subgoal):
1. finite A
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
finite A
goal:
No subgoals!
[PROOF STEP]
qed |
[GOAL]
B : Type u
inst✝ : Bicategory B
a b c : B
f : a ⟶ b
g h : b ⟶ c
η : g = h
⊢ f ◁ eqToHom η = eqToHom (_ : f ≫ g = f ≫ h)
[PROOFSTEP]
cases η
[GOAL]
case refl
B : Type u
inst✝ : Bicategory B
a b c : B
f : a ⟶ b
g : b ⟶ c
⊢ f ◁ eqToHom (_ : g = g) = eqToHom (_ : f ≫ g = f ≫ g)
[PROOFSTEP]
simp only [whiskerLeft_id, eqToHom_refl]
[GOAL]
B : Type u
inst✝ : Bicategory B
a b c : B
f g : a ⟶ b
η : f = g
h : b ⟶ c
⊢ eqToHom η ▷ h = eqToHom (_ : f ≫ h = g ≫ h)
[PROOFSTEP]
cases η
[GOAL]
case refl
B : Type u
inst✝ : Bicategory B
a b c : B
f : a ⟶ b
h : b ⟶ c
⊢ eqToHom (_ : f = f) ▷ h = eqToHom (_ : f ≫ h = f ≫ h)
[PROOFSTEP]
simp only [id_whiskerRight, eqToHom_refl]
|
theory T140
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z)))
"
nitpick[card nat=7,timeout=86400]
oops
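(* nitpick only searches for a finite counterexample (here over a
   seven-element carrier); oops then abandons the goal, so nothing is
   proved either way. *)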
end |
import order.boolean_algebra
import order.zorn
import tactic.ext
import .extensionality
open_locale classical
universe u
namespace order.boolean_algebra
variables {α : Type*} [boolean_algebra α] {x y : α}
@[ext] structure filter (α : Type*) [boolean_algebra α] :=
(pred : set α)
(pred_top : pred ⊤)
(pred_inf : ∀ (x y : α), pred x → pred y → pred (x ⊓ y))
(pred_monotone : ∀ (x y : α), x ≤ y → pred x → pred y)
namespace filter
instance : partial_order (filter α) :=
{ le := λ f g, f.pred ⊆ g.pred,
le_refl := λ f, set.subset.rfl,
le_trans := λ f g h, set.subset.trans,
le_antisymm := λ f g h₁ h₂, filter.ext _ _ (set.subset.antisymm h₁ h₂) }
instance : has_inf (filter α) :=
⟨λ f g,
{ pred := f.pred ∩ g.pred,
pred_top := by split; apply filter.pred_top,
pred_inf := λ x y ⟨h₁, h₂⟩ ⟨h₃, h₄⟩, by split; apply filter.pred_inf; assumption,
pred_monotone := λ x y h₁ ⟨h₂, h₃⟩, by split; apply filter.pred_monotone; assumption }⟩
instance : semilattice_inf (filter α) :=
{ inf_le_left := λ f g, set.inter_subset_left _ _,
inf_le_right := λ f g, set.inter_subset_right _ _,
le_inf := λ f g h, set.subset_inter,
.. filter.has_inf, .. filter.partial_order }
def principal (x : α) : filter α :=
{ pred := λ y, x ≤ y,
pred_top := le_top,
pred_inf := λ y z, le_inf,
pred_monotone := λ y z, function.swap le_trans }
instance : order_top (filter α) :=
{ top := principal ⊥,
le_top := λ f x (h : f.pred x), bot_le,
.. filter.partial_order }
instance : order_bot (filter α) :=
{ bot := principal ⊤,
bot_le := λ f x (h : ⊤ ≤ x), (top_unique h).substr f.pred_top,
.. filter.partial_order }
def insert (x : α) (f : filter α) : filter α :=
{ pred := λ y, ∃ z, (f.pred z) ∧ (x ⊓ z ≤ y),
pred_top := ⟨⊤, f.pred_top, inf_le_right⟩,
pred_inf := λ y z ⟨y', h_fy', h_xy'⟩ ⟨z', h_fz', h_xz'⟩,
⟨y' ⊓ z',
f.pred_inf _ _ h_fy' h_fz',
le_inf
(calc x ⊓ (y' ⊓ z') ≤ x ⊓ y' : inf_le_inf_left _ inf_le_left
... ≤ y : h_xy')
(calc x ⊓ (y' ⊓ z') ≤ x ⊓ z' : inf_le_inf_left _ inf_le_right
... ≤ z : h_xz')⟩,
pred_monotone := λ y z h_yz ⟨y', h_fy', h_xy'⟩, ⟨y', h_fy', le_trans h_xy' h_yz⟩ }
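-- `insert x f` is the filter generated by `f` together with `x`: its
-- members are exactly the elements lying above `x ⊓ z` for some `z` in
-- `f`. The next two lemmas record that it contains `x` and extends `f`.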
lemma mem_insert (x : α) (f : filter α) : (insert x f).pred x :=
⟨⊤, f.pred_top, inf_le_left⟩
lemma le_insert (x : α) (f : filter α) : f ≤ (insert x f) :=
λ y h, ⟨y, h, inf_le_right⟩
end filter
structure ultrafilter (α : Type*) [boolean_algebra α] extends filter α :=
(or_compl : ∀ (x : α), pred x ∨ pred xᶜ)
(not_and_compl : ∀ (x : α), ¬(pred x ∧ pred xᶜ))
variables {u : ultrafilter α}
namespace ultrafilter
@[priority 0] instance : has_mem (ultrafilter α) α := ⟨λ u x, u.pred x⟩
lemma mem_monotone : x ≤ y → u ∈ x → u ∈ y :=
u.pred_monotone x y
lemma mem_top : u ∈ (⊤ : α) :=
u.pred_top
lemma not_mem_bot : u ∉ (⊥ : α) :=
λ h, u.not_and_compl ⊥ ⟨h, compl_bot.substr mem_top⟩
lemma mem_sup_left : u ∈ x → u ∈ (x ⊔ y) :=
mem_monotone le_sup_left
lemma mem_sup_right : u ∈ y → u ∈ (x ⊔ y) :=
mem_monotone le_sup_right
lemma or_of_mem_sup : u ∈ (x ⊔ y) → (u ∈ x) ∨ (u ∈ y) :=
λ h, or.imp_right
(λ hc, mem_monotone
(calc
(x ⊔ y) ⊓ xᶜ = y ⊓ xᶜ : by rw [inf_sup_right, inf_compl_eq_bot, bot_sup_eq]
... ≤ y : inf_le_left)
(u.pred_inf _ _ h hc))
(u.or_compl x)
lemma inf_mem_left : u ∈ (x ⊓ y) → u ∈ x :=
λ h, mem_monotone inf_le_left h
lemma inf_mem_right : u ∈ (x ⊓ y) → u ∈ y :=
λ h, mem_monotone inf_le_right h
lemma mem_inf : u ∈ x → u ∈ y → u ∈ (x ⊓ y) :=
u.pred_inf x y
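-- The key existence result: if `¬(x ≤ y)`, some ultrafilter contains `x`
-- but not `y`. The proof applies Zorn's lemma to the filters that contain
-- `x` and avoid `y`, seeded with the principal filter at `x`; maximality
-- of the resulting filter `m` then forces `z ∈ m` or `zᶜ ∈ m` for every
-- `z`.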
lemma exists_of_not_le : ¬(x ≤ y) → ∃ u, (u ∈ x) ∧ (u ∉ y) :=
λ h_not_le,
let filters := {f : filter α // f.pred x ∧ ¬(f.pred y)},
le : filters → filters → Prop :=
λ f g, f.val ≤ g.val,
bot : filters :=
⟨filter.principal x, le_refl x, h_not_le⟩,
bot_le {f : filters} : le bot f :=
λ z (h_le : x ≤ z), f.val.pred_monotone x z h_le f.prop.left,
bound c (hc : zorn.chain le c) : filters :=
let c' := insert bot c,
hc' := zorn.chain_insert hc (λ f _ _, or.inl bot_le),
h_bot : c' bot := set.mem_insert bot c in
⟨{ pred := λ z, ∃ f, (c' f) ∧ (f.val.pred z),
pred_top := ⟨bot, h_bot, (le_top : x ≤ ⊤)⟩,
pred_inf := λ z w ⟨f, h_cf, h_fz⟩ ⟨g, h_cg, h_gw⟩,
or.elim
(zorn.chain.total hc' h_cf h_cg)
(λ h_fg, ⟨g, h_cg, g.val.pred_inf _ _ (h_fg h_fz) h_gw⟩)
(λ h_gf, ⟨f, h_cf, f.val.pred_inf _ _ h_fz (h_gf h_gw)⟩),
pred_monotone := λ z w h_zw ⟨f, h_cf, h_fz⟩,
⟨f, h_cf, f.val.pred_monotone _ _ h_zw h_fz⟩ },
⟨bot, h_bot, bot.prop.left⟩,
(λ ⟨f, h_cf, h_fy⟩, f.prop.right h_fy)⟩,
⟨⟨m, h_mx, h_my⟩, is_max⟩ := zorn.exists_maximal_of_chains_bounded
(λ c hc, ⟨bound c hc, λ f h_cf z h_fz, ⟨f, set.mem_insert_of_mem _ h_cf, h_fz⟩⟩)
(λ f g h, set.subset.trans) in
⟨{ or_compl := λ z,
if h_z : (filter.insert z m).pred y
then if h_zc : (filter.insert zᶜ m).pred y
then let ⟨w, h_mw, h_zw⟩ := h_z,
⟨wc, h_mwc, h_zwc⟩ := h_zc in
false.elim (h_my (m.pred_monotone _ _
(calc
w ⊓ wc = (z ⊔ zᶜ) ⊓ (w ⊓ wc) : by simp
... = (z ⊓ (w ⊓ wc)) ⊔ (zᶜ ⊓ (w ⊓ wc)) : inf_sup_right
... ≤ (z ⊓ w) ⊔ (zᶜ ⊓ wc) : sup_le_sup
(inf_le_inf_left _ inf_le_left)
(inf_le_inf_left _ inf_le_right)
... ≤ y : sup_le h_zw h_zwc)
(m.pred_inf _ _ h_mw h_mwc)))
else or.inr
((is_max
⟨filter.insert zᶜ m, filter.le_insert zᶜ m h_mx, h_zc⟩
(filter.le_insert zᶜ m))
(filter.mem_insert zᶜ m))
else or.inl
((is_max
⟨filter.insert z m, filter.le_insert z m h_mx, h_z⟩
(filter.le_insert z m))
(filter.mem_insert z m)),
not_and_compl := λ z ⟨h_z, h_zc⟩,
h_my (m.pred_monotone _ _ (by simp) (m.pred_inf _ _ h_z h_zc)),
.. m },
h_mx,
h_my⟩
end ultrafilter
namespace extensionality
open ultrafilter
lemma rw_eq : (x = y) ↔ (x ≤ y) ∧ (y ≤ x) :=
le_antisymm_iff
lemma rw_le : (x ≤ y) ↔ ∀ u, u ∈ x → u ∈ y :=
⟨λ h u, mem_monotone h,
λ h_forall, not_not.mp (λ h_not_le,
let ⟨u, hx, hy⟩ := exists_of_not_le h_not_le in hy (h_forall u hx))⟩
lemma rw_bot : u ∈ (⊥ : α) ↔ false :=
iff_false_intro not_mem_bot
lemma rw_top : u ∈ (⊤ : α) ↔ true :=
iff_true_intro mem_top
lemma rw_sup : u ∈ (x ⊔ y) ↔ (u ∈ x) ∨ (u ∈ y) :=
⟨or_of_mem_sup,
or.rec mem_sup_left mem_sup_right⟩
lemma rw_inf : u ∈ (x ⊓ y) ↔ (u ∈ x) ∧ (u ∈ y) :=
⟨λ h, ⟨inf_mem_left h, inf_mem_right h⟩,
λ h, mem_inf h.left h.right⟩
lemma rw_compl : u ∈ xᶜ ↔ ¬(u ∈ x) :=
⟨(λ hc h, u.not_and_compl x ⟨h, hc⟩),
(or.resolve_left (u.or_compl x))⟩
lemma rw_sdiff : u ∈ x \ y ↔ u ∈ x ∧ u ∉ y := begin
rewrite sdiff_eq,
rewrite <- rw_compl,
rw rw_inf,
end
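-- Taken together, these rewrite lemmas say that `u ∈ ·` behaves like a
-- Boolean-algebra homomorphism into `Prop`, so identities between
-- elements can be checked "pointwise" over ultrafilters: a Stone-duality
-- style extensionality principle.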
instance boolalg_base_ext_lemmas (α : Type*) [boolean_algebra α]
: (boolalg_ext_lemmas α (ultrafilter α)) :=
{
simpl_eq := by apply rw_eq,
simpl_lt := by apply lt_iff_le_not_le,
ext_bot := by apply rw_bot,
ext_sdiff := by apply rw_sdiff,
ext_le := by apply rw_le,
ext_meet := by apply rw_sup,
ext_join := by apply rw_inf,
}
instance boolalg_base_ext_lemmas_compl (α : Type*) [boolean_algebra α] :
(boolalg_ext_lemmas_compl α (ultrafilter α)) :=
{
ext_compl := by apply rw_compl,
}
instance boolalg_base_ext_lemmas_top (α : Type*) [boolean_algebra α] :
(boolalg_ext_lemmas_top α (ultrafilter α)) :=
{
ext_top := by apply rw_top,
}
end extensionality
end order.boolean_algebra
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Jeremy Avigad
-/
import data.finset.basic
import tactic.by_contra
/-!
# Cardinality of a finite set
This defines the cardinality of a `finset` and provides induction principles for finsets.
## Main declarations
* `finset.card`: `s.card : ℕ` returns the cardinality of `s : finset α`.
### Induction principles
* `finset.strong_induction`: Strong induction
* `finset.strong_induction_on`
* `finset.strong_downward_induction`
* `finset.strong_downward_induction_on`
* `finset.case_strong_induction_on`
## TODO
Should we add a noncomputable version?
-/
open function multiset nat
variables {α β : Type*}
namespace finset
variables {s t : finset α} {a b : α}
/-- `s.card` is the number of elements of `s`, aka its cardinality. -/
def card (s : finset α) : ℕ := s.1.card
lemma card_def (s : finset α) : s.card = s.1.card := rfl
@[simp] lemma card_mk {m nodup} : (⟨m, nodup⟩ : finset α).card = m.card := rfl
@[simp] lemma card_empty : card (∅ : finset α) = 0 := rfl
lemma card_le_of_subset : s ⊆ t → s.card ≤ t.card := multiset.card_le_of_le ∘ val_le_iff.mpr
@[mono] lemma card_mono : monotone (@card α) := by apply card_le_of_subset
@[simp] lemma card_eq_zero : s.card = 0 ↔ s = ∅ := card_eq_zero.trans val_eq_zero
lemma card_pos : 0 < s.card ↔ s.nonempty :=
pos_iff_ne_zero.trans $ (not_congr card_eq_zero).trans nonempty_iff_ne_empty.symm
alias card_pos ↔ _ nonempty.card_pos
lemma card_ne_zero_of_mem (h : a ∈ s) : s.card ≠ 0 := (not_congr card_eq_zero).2 $ ne_empty_of_mem h
@[simp] lemma card_singleton (a : α) : card ({a} : finset α) = 1 := card_singleton _
lemma card_singleton_inter [decidable_eq α] : ({a} ∩ s).card ≤ 1 :=
begin
cases (finset.decidable_mem a s),
{ simp [finset.singleton_inter_of_not_mem h] },
{ simp [finset.singleton_inter_of_mem h] }
end
@[simp] lemma card_cons (h : a ∉ s) : (s.cons a h).card = s.card + 1 := card_cons _ _
section insert_erase
variables [decidable_eq α]
@[simp] lemma card_insert_of_not_mem (h : a ∉ s) : (insert a s).card = s.card + 1 :=
by rw [←cons_eq_insert _ _ h, card_cons]
lemma card_insert_of_mem (h : a ∈ s) : card (insert a s) = s.card := by rw insert_eq_of_mem h
lemma card_insert_le (a : α) (s : finset α) : card (insert a s) ≤ s.card + 1 :=
by by_cases a ∈ s; [{rw insert_eq_of_mem h, exact nat.le_succ _ }, rw card_insert_of_not_mem h]
/-- If `a ∈ s` is known, see also `finset.card_insert_of_mem` and `finset.card_insert_of_not_mem`.
-/
lemma card_insert_eq_ite : card (insert a s) = if a ∈ s then s.card else s.card + 1 :=
begin
by_cases h : a ∈ s,
{ rw [card_insert_of_mem h, if_pos h] },
{ rw [card_insert_of_not_mem h, if_neg h] }
end
@[simp] lemma card_doubleton (h : a ≠ b) : ({a, b} : finset α).card = 2 :=
by rw [card_insert_of_not_mem (not_mem_singleton.2 h), card_singleton]
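/- A concrete check (a sketch; it relies on `dec_trivial` evaluating the
decidable equality): duplicate elements do not inflate the cardinality. -/
example : ({1, 2, 2} : finset ℕ).card = 2 := dec_trivial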
@[simp] lemma card_erase_of_mem : a ∈ s → (s.erase a).card = s.card - 1 := card_erase_of_mem
@[simp] lemma card_erase_add_one : a ∈ s → (s.erase a).card + 1 = s.card := card_erase_add_one
lemma card_erase_lt_of_mem : a ∈ s → (s.erase a).card < s.card := card_erase_lt_of_mem
lemma card_erase_le : (s.erase a).card ≤ s.card := card_erase_le
lemma pred_card_le_card_erase : s.card - 1 ≤ (s.erase a).card :=
begin
by_cases h : a ∈ s,
{ exact (card_erase_of_mem h).ge },
{ rw erase_eq_of_not_mem h,
exact nat.sub_le _ _ }
end
/-- If `a ∈ s` is known, see also `finset.card_erase_of_mem` and `finset.erase_eq_of_not_mem`. -/
lemma card_erase_eq_ite : (s.erase a).card = if a ∈ s then s.card - 1 else s.card :=
card_erase_eq_ite
end insert_erase
@[simp] lemma card_range (n : ℕ) : (range n).card = n := card_range n
@[simp] lemma card_attach : s.attach.card = s.card := multiset.card_attach
end finset
section to_list_multiset
variables [decidable_eq α] (m : multiset α) (l : list α)
lemma multiset.card_to_finset : m.to_finset.card = m.dedup.card := rfl
lemma multiset.to_finset_card_le : m.to_finset.card ≤ m.card := card_le_of_le $ dedup_le _
lemma multiset.to_finset_card_of_nodup {m : multiset α} (h : m.nodup) : m.to_finset.card = m.card :=
congr_arg card $ multiset.dedup_eq_self.mpr h
lemma list.card_to_finset : l.to_finset.card = l.dedup.length := rfl
lemma list.to_finset_card_le : l.to_finset.card ≤ l.length := multiset.to_finset_card_le ⟦l⟧
lemma list.to_finset_card_of_nodup {l : list α} (h : l.nodup) : l.to_finset.card = l.length :=
multiset.to_finset_card_of_nodup h
end to_list_multiset
namespace finset
variables {s t : finset α} {f : α → β} {n : ℕ}
@[simp] lemma length_to_list (s : finset α) : s.to_list.length = s.card :=
by { rw [to_list, ←multiset.coe_card, multiset.coe_to_list], refl }
lemma card_image_le [decidable_eq β] : (s.image f).card ≤ s.card :=
by simpa only [card_map] using (s.1.map f).to_finset_card_le
lemma card_image_of_inj_on [decidable_eq β] (H : set.inj_on f s) : (s.image f).card = s.card :=
by simp only [card, image_val_of_inj_on H, card_map]
lemma inj_on_of_card_image_eq [decidable_eq β] (H : (s.image f).card = s.card) : set.inj_on f s :=
begin
change (s.1.map f).dedup.card = s.1.card at H,
have : (s.1.map f).dedup = s.1.map f,
{ refine multiset.eq_of_le_of_card_le (multiset.dedup_le _) _,
rw H,
simp only [multiset.card_map] },
rw multiset.dedup_eq_self at this,
exact inj_on_of_nodup_map this,
end
lemma card_image_iff [decidable_eq β] : (s.image f).card = s.card ↔ set.inj_on f s :=
⟨inj_on_of_card_image_eq, card_image_of_inj_on⟩
lemma card_image_of_injective [decidable_eq β] (s : finset α) (H : injective f) :
(s.image f).card = s.card :=
card_image_of_inj_on $ λ x _ y _ h, H h
lemma fiber_card_ne_zero_iff_mem_image (s : finset α) (f : α → β) [decidable_eq β] (y : β) :
(s.filter (λ x, f x = y)).card ≠ 0 ↔ y ∈ s.image f :=
by { rw [←pos_iff_ne_zero, card_pos, fiber_nonempty_iff_mem_image] }
@[simp] lemma card_map (f : α ↪ β) : (s.map f).card = s.card := multiset.card_map _ _
@[simp] lemma card_subtype (p : α → Prop) [decidable_pred p] (s : finset α) :
(s.subtype p).card = (s.filter p).card :=
by simp [finset.subtype]
lemma card_filter_le (s : finset α) (p : α → Prop) [decidable_pred p] :
(s.filter p).card ≤ s.card :=
card_le_of_subset $ filter_subset _ _
lemma eq_of_subset_of_card_le {s t : finset α} (h : s ⊆ t) (h₂ : t.card ≤ s.card) : s = t :=
eq_of_veq $ multiset.eq_of_le_of_card_le (val_le_iff.mpr h) h₂
lemma map_eq_of_subset {f : α ↪ α} (hs : s.map f ⊆ s) : s.map f = s :=
eq_of_subset_of_card_le hs (card_map _).ge
lemma filter_card_eq {p : α → Prop} [decidable_pred p] (h : (s.filter p).card = s.card) (x : α)
(hx : x ∈ s) :
p x :=
begin
rw [←eq_of_subset_of_card_le (s.filter_subset p) h.ge, mem_filter] at hx,
exact hx.2,
end
lemma card_lt_card (h : s ⊂ t) : s.card < t.card := card_lt_of_lt $ val_lt_iff.2 h
lemma card_eq_of_bijective (f : ∀ i, i < n → α) (hf : ∀ a ∈ s, ∃ i, ∃ h : i < n, f i h = a)
(hf' : ∀ i (h : i < n), f i h ∈ s) (f_inj : ∀ i j (hi : i < n)
(hj : j < n), f i hi = f j hj → i = j) :
s.card = n :=
begin
classical,
have : ∀ (a : α), a ∈ s ↔ ∃ i (hi : i ∈ range n), f i (mem_range.1 hi) = a,
from λ a, ⟨λ ha, let ⟨i, hi, eq⟩ := hf a ha in ⟨i, mem_range.2 hi, eq⟩,
λ ⟨i, hi, eq⟩, eq ▸ hf' i (mem_range.1 hi)⟩,
have : s = ((range n).attach.image $ λi, f i.1 (mem_range.1 i.2)),
by simpa only [ext_iff, mem_image, exists_prop, subtype.exists, mem_attach, true_and],
calc s.card = card ((range n).attach.image $ λ i, f i.1 (mem_range.1 i.2)) :
by rw this
... = card ((range n).attach) :
card_image_of_injective _ $ λ ⟨i, hi⟩ ⟨j, hj⟩ eq,
subtype.eq $ f_inj i j (mem_range.1 hi) (mem_range.1 hj) eq
... = card (range n) : card_attach
... = n : card_range n
end
lemma card_congr {t : finset β} (f : Π a ∈ s, β) (h₁ : ∀ a ha, f a ha ∈ t)
(h₂ : ∀ a b ha hb, f a ha = f b hb → a = b) (h₃ : ∀ b ∈ t, ∃ a ha, f a ha = b) :
s.card = t.card :=
by classical;
calc s.card = s.attach.card : card_attach.symm
... = (s.attach.image (λ (a : {a // a ∈ s}), f a.1 a.2)).card
: eq.symm (card_image_of_injective _ $ λ a b h, subtype.eq $ h₂ _ _ _ _ h)
... = t.card : congr_arg card (finset.ext $ λ b,
⟨λ h, let ⟨a, ha₁, ha₂⟩ := mem_image.1 h in ha₂ ▸ h₁ _ _,
λ h, let ⟨a, ha₁, ha₂⟩ := h₃ b h in mem_image.2 ⟨⟨a, ha₁⟩, by simp [ha₂]⟩⟩)
lemma card_le_card_of_inj_on {t : finset β} (f : α → β) (hf : ∀ a ∈ s, f a ∈ t)
(f_inj : ∀ a₁ ∈ s, ∀ a₂ ∈ s, f a₁ = f a₂ → a₁ = a₂) :
s.card ≤ t.card :=
by classical;
calc s.card = (s.image f).card : (card_image_of_inj_on f_inj).symm
... ≤ t.card : card_le_of_subset $ image_subset_iff.2 hf
/-- If there are more pigeons than pigeonholes, then there are two pigeons in the same pigeonhole.
-/
lemma exists_ne_map_eq_of_card_lt_of_maps_to {t : finset β} (hc : t.card < s.card)
{f : α → β} (hf : ∀ a ∈ s, f a ∈ t) :
∃ (x ∈ s) (y ∈ s), x ≠ y ∧ f x = f y :=
begin
classical,
by_contra' hz,
refine hc.not_le (card_le_card_of_inj_on f hf _),
intros x hx y hy, contrapose, exact hz x hx y hy,
end
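/- For instance, any function from a set of size `3` into a set of size
`2` must identify two distinct points: apply the lemma with
`t.card = 2 < 3 = s.card`. -/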
lemma le_card_of_inj_on_range (f : ℕ → α) (hf : ∀ i < n, f i ∈ s)
(f_inj : ∀ (i < n) (j < n), f i = f j → i = j) :
n ≤ s.card :=
calc n = card (range n) : (card_range n).symm
... ≤ s.card : card_le_card_of_inj_on f (by simpa only [mem_range]) (by simpa only [mem_range])
lemma surj_on_of_inj_on_of_card_le {t : finset β} (f : Π a ∈ s, β) (hf : ∀ a ha, f a ha ∈ t)
(hinj : ∀ a₁ a₂ ha₁ ha₂, f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂) (hst : t.card ≤ s.card) :
∀ b ∈ t, ∃ a ha, b = f a ha :=
begin
classical,
intros b hb,
have h : (s.attach.image $ λ (a : {a // a ∈ s}), f a a.prop).card = s.card,
{ exact @card_attach _ s ▸ card_image_of_injective _
(λ ⟨a₁, ha₁⟩ ⟨a₂, ha₂⟩ h, subtype.eq $ hinj _ _ _ _ h) },
have h' : image (λ a : {a // a ∈ s}, f a a.prop) s.attach = t,
{ exact eq_of_subset_of_card_le (λ b h, let ⟨a, ha₁, ha₂⟩ := mem_image.1 h in
ha₂ ▸ hf _ _) (by simp [hst, h]) },
rw ←h' at hb,
obtain ⟨a, ha₁, ha₂⟩ := mem_image.1 hb,
exact ⟨a, a.2, ha₂.symm⟩,
end
lemma inj_on_of_surj_on_of_card_le {t : finset β} (f : Π a ∈ s, β) (hf : ∀ a ha, f a ha ∈ t)
(hsurj : ∀ b ∈ t, ∃ a ha, b = f a ha) (hst : s.card ≤ t.card) ⦃a₁ a₂⦄ (ha₁ : a₁ ∈ s)
(ha₂ : a₂ ∈ s) (ha₁a₂: f a₁ ha₁ = f a₂ ha₂) :
a₁ = a₂ :=
by haveI : inhabited {x // x ∈ s} := ⟨⟨a₁, ha₁⟩⟩; exact
let f' : {x // x ∈ s} → {x // x ∈ t} := λ x, ⟨f x.1 x.2, hf x.1 x.2⟩ in
let g : {x // x ∈ t} → {x // x ∈ s} :=
@surj_inv _ _ f'
(λ x, let ⟨y, hy₁, hy₂⟩ := hsurj x.1 x.2 in ⟨⟨y, hy₁⟩, subtype.eq hy₂.symm⟩) in
have hg : injective g, from injective_surj_inv _,
have hsg : surjective g, from λ x,
let ⟨y, hy⟩ := surj_on_of_inj_on_of_card_le (λ (x : {x // x ∈ t}) (hx : x ∈ t.attach), g x)
(λ x _, show (g x) ∈ s.attach, from mem_attach _ _)
(λ x y _ _ hxy, hg hxy) (by simpa) x (mem_attach _ _) in
⟨y, hy.snd.symm⟩,
have hif : injective f',
from (left_inverse_of_surjective_of_right_inverse hsg
(right_inverse_surj_inv _)).injective,
subtype.ext_iff_val.1 (@hif ⟨a₁, ha₁⟩ ⟨a₂, ha₂⟩ (subtype.eq ha₁a₂))
@[simp] lemma card_disj_union (s t : finset α) (h) : (s.disj_union t h).card = s.card + t.card :=
multiset.card_add _ _
/-! ### Lattice structure -/
section lattice
variables [decidable_eq α]
lemma card_union_add_card_inter (s t : finset α) : (s ∪ t).card + (s ∩ t).card = s.card + t.card :=
finset.induction_on t (by simp) $ λ a r har, by by_cases a ∈ s; simp *; cc
lemma card_union_le (s t : finset α) : (s ∪ t).card ≤ s.card + t.card :=
card_union_add_card_inter s t ▸ nat.le_add_right _ _
lemma card_union_eq (h : disjoint s t) : (s ∪ t).card = s.card + t.card :=
by rw [←disj_union_eq_union s t $ disjoint_left.mp h, card_disj_union _ _ _]
@[simp] lemma card_disjoint_union (h : disjoint s t) : card (s ∪ t) = s.card + t.card :=
card_union_eq h
lemma card_sdiff (h : s ⊆ t) : card (t \ s) = t.card - s.card :=
suffices card (t \ s) = card ((t \ s) ∪ s) - s.card, by rwa sdiff_union_of_subset h at this,
by rw [card_disjoint_union sdiff_disjoint, add_tsub_cancel_right]
lemma card_sdiff_add_card_eq_card {s t : finset α} (h : s ⊆ t) : card (t \ s) + card s = card t :=
((nat.sub_eq_iff_eq_add (card_le_of_subset h)).mp (card_sdiff h).symm).symm
lemma le_card_sdiff (s t : finset α) : t.card - s.card ≤ card (t \ s) :=
calc card t - card s
≤ card t - card (s ∩ t) : tsub_le_tsub_left (card_le_of_subset (inter_subset_left s t)) _
... = card (t \ (s ∩ t)) : (card_sdiff (inter_subset_right s t)).symm
... ≤ card (t \ s) : by rw sdiff_inter_self_right t s
lemma card_sdiff_add_card : (s \ t).card + t.card = (s ∪ t).card :=
by rw [←card_disjoint_union sdiff_disjoint, sdiff_union_self_eq_union]
end lattice
lemma filter_card_add_filter_neg_card_eq_card (p : α → Prop) [decidable_pred p] :
(s.filter p).card + (s.filter (not ∘ p)).card = s.card :=
by { classical, simp [←card_union_eq, filter_union_filter_neg_eq, disjoint_filter] }
/-- Given a set `A` and a set `B` inside it, we can shrink `A` to any appropriate size, and keep `B`
inside it. -/
lemma exists_intermediate_set {A B : finset α} (i : ℕ) (h₁ : i + card B ≤ card A) (h₂ : B ⊆ A) :
∃ (C : finset α), B ⊆ C ∧ C ⊆ A ∧ card C = i + card B :=
begin
classical,
rcases nat.le.dest h₁ with ⟨k, _⟩,
clear h₁,
induction k with k ih generalizing A,
{ exact ⟨A, h₂, subset.refl _, h.symm⟩ },
have : (A \ B).nonempty,
{ rw [←card_pos, card_sdiff h₂, ←h, nat.add_right_comm,
add_tsub_cancel_right, nat.add_succ],
apply nat.succ_pos },
rcases this with ⟨a, ha⟩,
have z : i + card B + k = card (erase A a),
{ rw [card_erase_of_mem (mem_sdiff.1 ha).1, ←h],
refl },
rcases ih _ z with ⟨B', hB', B'subA', cards⟩,
{ exact ⟨B', hB', trans B'subA' (erase_subset _ _), cards⟩ },
{ rintro t th,
apply mem_erase_of_ne_of_mem _ (h₂ th),
rintro rfl,
exact not_mem_sdiff_of_mem_right th ha }
end
/-- We can shrink `A` to any smaller size. -/
lemma exists_smaller_set (A : finset α) (i : ℕ) (h₁ : i ≤ card A) :
∃ (B : finset α), B ⊆ A ∧ card B = i :=
let ⟨B, _, x₁, x₂⟩ := exists_intermediate_set i (by simpa) (empty_subset A) in ⟨B, x₁, x₂⟩
lemma exists_subset_or_subset_of_two_mul_lt_card [decidable_eq α] {X Y : finset α} {n : ℕ}
(hXY : 2 * n < (X ∪ Y).card) :
∃ C : finset α, n < C.card ∧ (C ⊆ X ∨ C ⊆ Y) :=
begin
have h₁ : (X ∩ (Y \ X)).card = 0 := finset.card_eq_zero.mpr (finset.inter_sdiff_self X Y),
have h₂ : (X ∪ Y).card = X.card + (Y \ X).card,
{ rw [←card_union_add_card_inter X (Y \ X), finset.union_sdiff_self_eq_union, h₁, add_zero] },
rw [h₂, two_mul] at hXY,
rcases lt_or_lt_of_add_lt_add hXY with h|h,
{ exact ⟨X, h, or.inl (finset.subset.refl X)⟩ },
{ exact ⟨Y \ X, h, or.inr (finset.sdiff_subset Y X)⟩ }
end
/-! ### Explicit description of a finset from its card -/
lemma card_eq_one : s.card = 1 ↔ ∃ a, s = {a} :=
by cases s; simp only [multiset.card_eq_one, finset.card, ←val_inj, singleton_val]
lemma exists_eq_insert_iff [decidable_eq α] {s t : finset α} :
(∃ a ∉ s, insert a s = t) ↔ s ⊆ t ∧ s.card + 1 = t.card :=
begin
split,
{ rintro ⟨a, ha, rfl⟩,
exact ⟨subset_insert _ _, (card_insert_of_not_mem ha).symm⟩ },
{ rintro ⟨hst, h⟩,
obtain ⟨a, ha⟩ : ∃ a, t \ s = {a},
{ exact card_eq_one.1 (by rw [card_sdiff hst, ←h, add_tsub_cancel_left]) },
refine ⟨a, λ hs, (_ : a ∉ {a}) $ mem_singleton_self _,
by rw [insert_eq, ←ha, sdiff_union_of_subset hst]⟩,
rw ←ha,
exact not_mem_sdiff_of_mem_right hs }
end
lemma card_le_one : s.card ≤ 1 ↔ ∀ (a ∈ s) (b ∈ s), a = b :=
begin
obtain rfl | ⟨x, hx⟩ := s.eq_empty_or_nonempty,
{ simp },
refine (nat.succ_le_of_lt (card_pos.2 ⟨x, hx⟩)).le_iff_eq.trans (card_eq_one.trans ⟨_, _⟩),
{ rintro ⟨y, rfl⟩,
simp },
{ exact λ h, ⟨x, eq_singleton_iff_unique_mem.2 ⟨hx, λ y hy, h _ hy _ hx⟩⟩ }
end
lemma card_le_one_iff : s.card ≤ 1 ↔ ∀ {a b}, a ∈ s → b ∈ s → a = b := by { rw card_le_one, tauto }
lemma card_le_one_iff_subset_singleton [nonempty α] : s.card ≤ 1 ↔ ∃ (x : α), s ⊆ {x} :=
begin
refine ⟨λ H, _, _⟩,
{ obtain rfl | ⟨x, hx⟩ := s.eq_empty_or_nonempty,
{ exact ⟨classical.arbitrary α, empty_subset _⟩ },
{ exact ⟨x, λ y hy, by rw [card_le_one.1 H y hy x hx, mem_singleton]⟩ } },
{ rintro ⟨x, hx⟩,
rw ←card_singleton x,
exact card_le_of_subset hx }
end
/-- A `finset` of a subsingleton type has cardinality at most one. -/
lemma card_le_one_of_subsingleton [subsingleton α] (s : finset α) : s.card ≤ 1 :=
finset.card_le_one_iff.2 $ λ _ _ _ _, subsingleton.elim _ _
lemma one_lt_card : 1 < s.card ↔ ∃ (a ∈ s) (b ∈ s), a ≠ b :=
by { rw ←not_iff_not, push_neg, exact card_le_one }
lemma one_lt_card_iff : 1 < s.card ↔ ∃ a b, a ∈ s ∧ b ∈ s ∧ a ≠ b :=
by { rw one_lt_card, simp only [exists_prop, exists_and_distrib_left] }
lemma two_lt_card_iff : 2 < s.card ↔ ∃ a b c, a ∈ s ∧ b ∈ s ∧ c ∈ s ∧ a ≠ b ∧ a ≠ c ∧ b ≠ c :=
begin
classical,
refine ⟨λ h, _, _⟩,
{ obtain ⟨c, hc⟩ := card_pos.mp (zero_lt_two.trans h),
have : 1 < (s.erase c).card := by rwa [←add_lt_add_iff_right 1, card_erase_add_one hc],
obtain ⟨a, b, ha, hb, hab⟩ := one_lt_card_iff.mp this,
exact ⟨a, b, c, mem_of_mem_erase ha, mem_of_mem_erase hb, hc,
hab, ne_of_mem_erase ha, ne_of_mem_erase hb⟩ },
{ rintros ⟨a, b, c, ha, hb, hc, hab, hac, hbc⟩,
rw [←card_erase_add_one hc, ←card_erase_add_one (mem_erase_of_ne_of_mem hbc hb),
←card_erase_add_one (mem_erase_of_ne_of_mem hab (mem_erase_of_ne_of_mem hac ha))],
apply nat.le_add_left },
end
lemma two_lt_card : 2 < s.card ↔ ∃ (a ∈ s) (b ∈ s) (c ∈ s), a ≠ b ∧ a ≠ c ∧ b ≠ c :=
by simp_rw [two_lt_card_iff, exists_prop, exists_and_distrib_left]
lemma exists_ne_of_one_lt_card (hs : 1 < s.card) (a : α) : ∃ b, b ∈ s ∧ b ≠ a :=
begin
obtain ⟨x, hx, y, hy, hxy⟩ := finset.one_lt_card.mp hs,
by_cases ha : y = a,
{ exact ⟨x, hx, ne_of_ne_of_eq hxy ha⟩ },
{ exact ⟨y, hy, ha⟩ }
end
lemma card_eq_succ [decidable_eq α] : s.card = n + 1 ↔ ∃ a t, a ∉ t ∧ insert a t = s ∧ t.card = n :=
⟨λ h,
let ⟨a, has⟩ := card_pos.mp (h.symm ▸ nat.zero_lt_succ _ : 0 < s.card) in
⟨a, s.erase a, s.not_mem_erase a, insert_erase has,
by simp only [h, card_erase_of_mem has, add_tsub_cancel_right]⟩,
λ ⟨a, t, hat, s_eq, n_eq⟩, s_eq ▸ n_eq ▸ card_insert_of_not_mem hat⟩
lemma card_eq_two [decidable_eq α] : s.card = 2 ↔ ∃ x y, x ≠ y ∧ s = {x, y} :=
begin
split,
{ rw card_eq_succ,
simp_rw [card_eq_one],
rintro ⟨a, _, hab, rfl, b, rfl⟩,
exact ⟨a, b, not_mem_singleton.1 hab, rfl⟩ },
{ rintro ⟨x, y, h, rfl⟩,
exact card_doubleton h }
end
lemma card_eq_three [decidable_eq α] :
s.card = 3 ↔ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z} :=
begin
split,
{ rw card_eq_succ,
simp_rw [card_eq_two],
rintro ⟨a, _, abc, rfl, b, c, bc, rfl⟩,
rw [mem_insert, mem_singleton, not_or_distrib] at abc,
exact ⟨a, b, c, abc.1, abc.2, bc, rfl⟩ },
{ rintro ⟨x, y, z, xy, xz, yz, rfl⟩,
simp only [xy, xz, yz, mem_insert, card_insert_of_not_mem, not_false_iff, mem_singleton,
or_self, card_singleton] }
end
/-! ### Inductions -/
/-- Suppose that, given objects defined on all strict subsets of any finset `s`, one knows how to
define an object on `s`. Then one can inductively define an object on all finsets, starting from
the empty set and iterating. This can be used either to define data, or to prove properties. -/
def strong_induction {p : finset α → Sort*} (H : ∀ s, (∀ t ⊂ s, p t) → p s) :
∀ (s : finset α), p s
| s := H s (λ t h, have t.card < s.card, from card_lt_card h, strong_induction t)
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf card⟩]}
lemma strong_induction_eq {p : finset α → Sort*} (H : ∀ s, (∀ t ⊂ s, p t) → p s) (s : finset α) :
strong_induction H s = H s (λ t h, strong_induction H t) :=
by rw strong_induction
/-- Analogue of `strong_induction` with order of arguments swapped. -/
@[elab_as_eliminator] def strong_induction_on {p : finset α → Sort*} (s : finset α) :
(∀ s, (∀ t ⊂ s, p t) → p s) → p s :=
λ H, strong_induction H s
lemma strong_induction_on_eq {p : finset α → Sort*} (s : finset α) (H : ∀ s, (∀ t ⊂ s, p t) → p s) :
s.strong_induction_on H = H s (λ t h, t.strong_induction_on H) :=
by { dunfold strong_induction_on, rw strong_induction }
@[elab_as_eliminator] lemma case_strong_induction_on [decidable_eq α] {p : finset α → Prop}
(s : finset α) (h₀ : p ∅) (h₁ : ∀ a s, a ∉ s → (∀ t ⊆ s, p t) → p (insert a s)) :
p s :=
finset.strong_induction_on s $ λ s,
finset.induction_on s (λ _, h₀) $ λ a s n _ ih, h₁ a s n $
λ t ss, ih _ (lt_of_le_of_lt ss (ssubset_insert n) : t < _)
/-- Suppose that, given that `p t` can be defined on all supersets of `s` of cardinality less than
`n`, one knows how to define `p s`. Then one can inductively define `p s` for all finsets `s` of
cardinality less than `n`, starting from finsets of card `n` and iterating. This
can be used either to define data, or to prove properties. -/
def strong_downward_induction {p : finset α → Sort*} {n : ℕ} (H : ∀ t₁, (∀ {t₂ : finset α},
t₂.card ≤ n → t₁ ⊂ t₂ → p t₂) → t₁.card ≤ n → p t₁) :
∀ (s : finset α), s.card ≤ n → p s
| s := H s (λ t ht h, have n - t.card < n - s.card,
from (tsub_lt_tsub_iff_left_of_le ht).2 (finset.card_lt_card h),
strong_downward_induction t ht)
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf (λ (t : finset α), n - t.card)⟩]}
lemma strong_downward_induction_eq {p : finset α → Sort*}
(H : ∀ t₁, (∀ {t₂ : finset α}, t₂.card ≤ n → t₁ ⊂ t₂ → p t₂) → t₁.card ≤ n → p t₁)
(s : finset α) :
strong_downward_induction H s = H s (λ t ht hst, strong_downward_induction H t ht) :=
by rw strong_downward_induction
/-- Analogue of `strong_downward_induction` with order of arguments swapped. -/
@[elab_as_eliminator] def strong_downward_induction_on {p : finset α → Sort*} (s : finset α)
(H : ∀ t₁, (∀ {t₂ : finset α}, t₂.card ≤ n → t₁ ⊂ t₂ → p t₂) → t₁.card ≤ n → p t₁) :
s.card ≤ n → p s :=
strong_downward_induction H s
lemma strong_downward_induction_on_eq {p : finset α → Sort*} (s : finset α) (H : ∀ t₁,
(∀ {t₂ : finset α}, t₂.card ≤ n → t₁ ⊂ t₂ → p t₂) → t₁.card ≤ n → p t₁) :
s.strong_downward_induction_on H = H s (λ t ht h, t.strong_downward_induction_on H ht) :=
by { dunfold strong_downward_induction_on, rw strong_downward_induction }
lemma lt_wf {α} : well_founded (@has_lt.lt (finset α) _) :=
have H : subrelation (@has_lt.lt (finset α) _)
(inv_image ( < ) card),
from λ x y hxy, card_lt_card hxy,
subrelation.wf H $ inv_image.wf _ $ nat.lt_wf
end finset
|
module HRAL
-- Based on https://www.cambridge.org/core/services/aop-cambridge-core/content/view/CC82B2E79DC5CCAD57E0AC5DF0D43DEC/S0956796820000064a.pdf/div-class-title-heterogeneous-binary-random-access-lists-div.pdf
data Tree : Type -> Nat -> Type where
Leaf : a -> Tree a Z
Node : Tree a n -> Tree a n -> Tree a (S n)
data Path : Nat -> Type where
Here : Path Z
Left : Path n -> Path (S n)
Right : Path n -> Path (S n)
namespace Tree
lookup : Tree a n -> Path n -> a
lookup (Leaf x) Here = x
lookup (Node t1 t2) (Left p) = lookup t1 p
lookup (Node t1 t2) (Right p) = lookup t2 p
data Bin : Type where
End : Bin
One : Bin -> Bin
Zero : Bin -> Bin
bsucc : Bin -> Bin
bsucc End = One End
bsucc (One b) = Zero (bsucc b)
bsucc (Zero b) = One b
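-- `bsucc` is binary increment on little-endian bit strings, e.g.
-- bsucc (One (One End)) = Zero (Zero (One End)), i.e. 3 + 1 = 4.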
data RAL : Type -> Nat -> Bin -> Type where
Nil : RAL a n End
Cons1 : Tree a n -> RAL a (S n) b -> RAL a n (One b)
Cons0 : RAL a (S n) b -> RAL a n (Zero b)
data Pos : Nat -> Bin -> Type where
PosHere : Path n -> Pos n (One b)
There0 : Pos (S n) b -> Pos n (Zero b)
There1 : Pos (S n) b -> Pos n (One b)
namespace RAL
lookup : RAL a n b -> Pos n b -> a
lookup [] (PosHere _) impossible
lookup [] (There0 _) impossible
lookup [] (There1 _) impossible
lookup (Cons1 t r) (PosHere p) = Tree.lookup t p
lookup (Cons1 t r) (There1 p) = lookup r p
lookup (Cons0 r) (There0 p) = lookup r p
consTree : Tree a n -> RAL a n b -> RAL a n (bsucc b)
consTree t [] = Cons1 t Nil
consTree t (Cons1 t' r) = Cons0 (consTree (Node t t') r)
consTree t (Cons0 r) = Cons1 t r
cons : a -> RAL a Z b -> RAL a Z (bsucc b)
cons x r = consTree (Leaf x) r
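-- A small sketch (the name `exampleRAL` is illustrative only): three
-- conses starting from Nil produce the binary shape `One (One End)`,
-- i.e. 3 written least-significant-bit first.
exampleRAL : RAL Nat Z (One (One End))
exampleRAL = RAL.cons 1 (RAL.cons 2 (RAL.cons 3 Nil))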
data HTree : Tree Type n -> Type where
HLeaf : u -> HTree (Leaf u)
HNode : HTree us -> HTree vs -> HTree (Node us vs)
data HPath : Tree Type n -> Type -> Type where
HHere : HPath (Leaf u) u
HLeft : HPath us u -> HPath (Node us vs) u
HRight : HPath vs u -> HPath (Node us vs) u
namespace HTree
lookup : HTree us -> HPath us u -> u
lookup (HLeaf u) HHere = u
lookup (HNode t1 t2) (HLeft p) = lookup t1 p
lookup (HNode t1 t2) (HRight p) = lookup t2 p
data HRAL : RAL Type n b -> Type where
HNil : HRAL Nil
HCons1 : HTree t -> HRAL r -> HRAL (Cons1 t r)
HCons0 : HRAL r -> HRAL (Cons0 r)
data HPos : RAL Type n b -> Type -> Type where
HPosHere : HPath t u -> HPos (Cons1 t r) u
HThere0 : HPos r u -> HPos (Cons0 r) u
HThere1 : HPos r u -> HPos (Cons1 t r) u
namespace HRAL
lookup : HRAL r -> HPos r u -> u
lookup HNil (HPosHere _) impossible
lookup HNil (HThere0 _) impossible
lookup HNil (HThere1 _) impossible
lookup (HCons1 t r) (HPosHere p) = HTree.lookup t p
lookup (HCons1 t r) (HThere1 p) = lookup r p
lookup (HCons0 r) (HThere0 p) = lookup r p
consTree : HTree t -> HRAL r -> HRAL (RAL.consTree t r)
consTree t HNil = HCons1 t HNil
consTree t (HCons1 t' r) = HCons0 (consTree (HNode t t') r)
consTree t (HCons0 r) = HCons1 t r
cons : {u : Type} -> u -> HRAL r -> HRAL (RAL.cons u r)
cons x r = consTree (HLeaf x) r
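-- A heterogeneous sketch (the name `hexample` is illustrative only): one
-- Nat and one String in the same structure, with the element types
-- recorded in the index.
hexample : HRAL (RAL.cons Nat (RAL.cons String Nil))
hexample = HRAL.cons 42 (HRAL.cons "hi" HNil)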
|
Formal statement is: lemma convergent_eq_Cauchy: fixes S :: "nat \<Rightarrow> 'a::complete_space" shows "(\<exists>l. (S \<longlongrightarrow> l) sequentially) \<longleftrightarrow> Cauchy S" Informal statement is: A sequence $S$ converges if and only if it is Cauchy. |