lemma connected_iff_connected_component: "connected S \<longleftrightarrow> (\<forall>x \<in> S. \<forall>y \<in> S. connected_component S x y)"
import MyNat.le
import MyNat.addition_world
import MyNat.advanced_addition_world
import MyNat.advanced_proposition_world
open MyNat
lemma one_add_le_self (x : ℕ) : x ≤ 1 + x := by
exists 1
rewrite [add_comm]
rfl
lemma le_refl (x : ℕ) : x ≤ x := by
exists 0
theorem le_succ (a b : ℕ) : a ≤ b → a ≤ succ b := by
intro h
cases h with
| intro c h' =>
exists (succ c)
rewrite [h', add_succ]
rfl
lemma zero_le (a : ℕ) : 0 ≤ a := by
induction a with
| zero => exact le_refl zero
| succ a' h => exact le_succ zero a' h
theorem le_trans (a b c : ℕ)
(hab : a ≤ b) (hbc : b ≤ c) : a ≤ c := by
cases hab with
| intro d hab' =>
cases hbc with
| intro e hbc' =>
exists (d + e)
rewrite [←add_assoc, ←hab']
exact hbc'
theorem le_antisymm (a b : ℕ)
(hab : a ≤ b) (hba : b ≤ a) : a = b := by
cases hab with
| intro c hab =>
cases hba with
| intro c' hba =>
rewrite [hab, add_assoc] at hba
have h1 := eq_zero_of_add_right_eq_self _ _ (Eq.symm hba)
have h2 := add_left_eq_zero _ _ h1
rewrite [h2, zero_equal_numeral, add_zero] at h1
rewrite [h1, add_zero] at hab
exact (Eq.symm hab)
theorem le_zero (a : ℕ) (h : a ≤ 0) : a = 0 := by
cases h with
| intro _ h =>
exact add_right_eq_zero _ _ (Eq.symm h)
theorem le_total (a b : ℕ) : a ≤ b ∨ b ≤ a := by
induction b with
| zero => exact (Or.inr (zero_le a))
| succ b ih =>
exact (
Or.elim
ih
(fun a_le_b => Or.inl (le_succ _ _ a_le_b))
(fun b_le_a => by
rewrite [le_iff_exists_add] at b_le_a
cases b_le_a with
| intro c h =>
cases c with
| zero =>
rewrite [add_zero] at h
rewrite [h, succ_eq_add_one, add_comm]
exact (Or.inl (one_add_le_self b))
| succ c =>
apply Or.inr
exists c
rewrite [add_succ, add_comm, ←add_succ, ←add_comm] at h
trivial
)
)
lemma le_succ_self (a : ℕ) : a ≤ succ a := by
rewrite [←add_one_eq_succ, add_comm]
exact one_add_le_self a
theorem add_le_add_right (a b : ℕ) :
a ≤ b → ∀ t, (a + t) ≤ (b + t) := by
intro h
intro t
cases h with
| intro c h =>
exists c
rewrite [h, add_right_comm]
rfl
theorem le_of_succ_le_succ (a b : ℕ) :
succ a ≤ succ b → a ≤ b := by
intro h
cases h with
| intro c h =>
exists c
rewrite [succ_add] at h
exact succ_inj b (a + c) h
theorem not_succ_le_self (a : ℕ) : ¬ (succ a ≤ a) := by
rewrite [succ_eq_add_one, ←add_zero a, add_assoc, zero_add]
intro h
cases h with
| intro c h =>
rewrite [add_assoc] at h
let f := add_left_cancel a zero (1 + c) h
rewrite [add_comm, ←succ_eq_add_one] at f
exact zero_ne_succ c f
theorem add_le_add_left (a b t : ℕ)
(h : a ≤ b) : t + a ≤ t + b := by
cases h with
| intro c h =>
exists c
rewrite [h, add_assoc]
rfl
theory Test
imports
"../Ordinal/Model/OrdRec_Model"
"../Functions/Model/Function_Model"
"../OPair/Model/OPair_Model_Base"
"../Exception/Exception_Model"
ZFC_in_HOL_Bootstrap
begin
ML_file \<open>../ModelKit/Tools/model_implementation.ML\<close>
instance V :: Tagging ..
lemma if_cases :
assumes "Q \<Longrightarrow> P x" "\<not> Q \<Longrightarrow> P y"
shows "P (if Q then x else y)"
using assms by auto
lemma if_cases2 :
assumes "P y" "Q \<Longrightarrow> P x"
shows "P (if Q then x else y)"
using if_cases assms by auto
lemma int_typ_eq :
"x : P \<triangle> Q \<longleftrightarrow> x : P \<and> x : Q"
unfolding inter_ty_def has_ty_def by auto
lemma disjointI :
assumes "x : P \<Longrightarrow> x : Q \<Longrightarrow> False"
shows "(x : P \<triangle> Q) = (x : \<bottom>)"
using assms
unfolding has_ty_def inter_ty_def empty_typ_def by auto
lemma setmem_any : "(b :: V) : SetMem"
using set_setmem[where 'a = V]
unfolding has_ty_def Set_V_def by auto
lemmas pair_inject_any =
pair_inject[OF pair_pair[OF setmem_any setmem_any]
pair_pair[OF setmem_any setmem_any]]
lemma four_neq :
"succ (succ (succ (succ 0))) \<noteq> 0"
"succ (succ (succ (succ 0))) \<noteq> succ 0"
"succ (succ (succ (succ 0))) \<noteq> succ (succ 0)"
"succ (succ (succ (succ 0))) \<noteq> succ (succ (succ 0))"
by (tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context>
[neq_thm (4,0), neq_thm (4,1), neq_thm (4,2), neq_thm (4,3)] 1)\<close>)
instantiation
V :: \<open>ModelBase\<close>
begin
definition mexc' :: V where
"mexc' = <succ (succ (succ (succ 0))), \<emptyset>>"
lemma fst_mexc : "\<tau> mexc' = succ (succ (succ (succ 0)))"
unfolding mexc'_def
using OPair.fst_eq[OF setmem_pair[OF setmem_any setmem_any]] by auto
local_setup
\<open> model_implementation \<^typ>\<open>V\<close> [ordrec_model, pair_model, func_model, exc_model]
[\<^term>\<open>\<lambda>b :: V. b = mexc'\<close>,\<^term>\<open>\<lambda>b :: V. b = mexc'\<close>, \<^term>\<open>\<lambda>b :: V. b = mexc'\<close>,
\<^term>\<open>\<lambda>b :: V. b = mexc'\<close>, \<^term>\<open>\<lambda>b :: V. b = mexc'\<close>]
[\<^term>\<open>Set :: V \<Rightarrow> bool\<close>, \<^term>\<open>Ord :: V \<Rightarrow> bool\<close>, \<^term>\<open>Pair :: V \<Rightarrow> bool\<close>,
\<^term>\<open>Function :: V \<Rightarrow> bool\<close>, \<^term>\<open>Set :: V \<Rightarrow> bool\<close>]\<close>
instance proof
show "Variants : (\<Pi> (i :: V) : Tag. nonLimit \<rightarrow> Set \<rightarrow> SetOf (\<alpha> i))"
proof (rule depfunI, rule funI, rule funI)
fix i :: V and j :: V and x :: V
assume "i : Tag" "j : nonLimit" and x :"x : Set"
hence "i : Ord" "i < \<omega>" and j : "j : Ord"
using tagD nonlimit_ord by auto
show "Variants i j x : SetOf (\<alpha> i)"
unfolding Variants_V_def
proof (rule if_cases2[OF if_cases2[OF if_cases2[OF if_cases2[OF if_cases2]]]])
assume "i = 0"
thus "caseof_ord \<emptyset> (\<P> x) \<emptyset> j : SetOf (\<alpha> i)"
using gzf_model_case_typ[OF j x] \<alpha>_V_0 by auto
next
assume "i = succ 0"
thus "caseof_ord {0} {j} {j} j : SetOf (\<alpha> i)"
using ord_model_case_typ[OF j] \<alpha>_V_1 by auto
next
assume "i = succ (succ 0)"
thus "caseof_ord \<emptyset> (x \<times> x) \<emptyset> j : SetOf (\<alpha> i)"
using pair_model_case_typ[OF j x] \<alpha>_V_2 by auto
next
assume "i = succ (succ (succ 0))"
thus "caseof_ord \<emptyset> (x \<midarrow>p\<rightarrow> x) \<emptyset> j : SetOf (\<alpha> i)"
using func_model_case_typ[OF j x] \<alpha>_V_3 by auto
next
assume "i = succ (succ (succ (succ 0)))"
thus "caseof_ord {\<emptyset>} \<emptyset> \<emptyset> j : SetOf (\<alpha> i)"
using exc_model_case_typ[OF j] \<alpha>_V_4 by auto
next
show "\<emptyset> : SetOf (\<alpha> i)"
using emp_setof by auto
qed
qed
show "Variants : (\<Pi> (i :: V) :Tag. Limit \<rightarrow> Function \<rightarrow> SetOf (\<alpha> i))"
proof (rule depfunI, rule funI, rule funI)
fix i :: V and u :: V and f :: V
assume "i : Tag" and u : "u : Limit" and f :"f : Function"
show "Variants i u f : SetOf (\<alpha> i)"
unfolding Variants_V_def case_ord_lim[OF u]
using sng_setof[OF ord_setmem[OF limit_ord[OF u]] limit_ord[OF u]]
\<alpha>_V_1 emp_setof by auto
qed
qed
end
instantiation
V :: ModelBase'
begin
definition mdefault_V :: V
where "mdefault_V \<equiv> <succ (succ (succ (succ 0))),\<emptyset>>"
instance proof
let ?four = "succ (succ (succ (succ 0)))"
show "(mdefault :: V) : M"
unfolding mdefault_V_def
proof (rule mI[OF zero_ord], rule tier0I)
show "?four : Tag"
by (tactic \<open>Proof_Context.fact_tac \<^context> [tag_thm 4] 1\<close>)
show "(\<emptyset> :: V) \<in> Variants ?four 0 \<emptyset>"
unfolding Variants_V_4_zero
sng_iff[OF set_setmem[OF emp_set]]
by auto
qed
qed
end
instantiation
V :: \<open>{GZF_Model, Ordinal_Model, OrdRec_Model,
OPair_Model, Function_Model, Exc_Model}\<close>
begin
local_setup \<open>defn_tags_defaults \<^typ>\<open>V\<close>
[(set_model, \<^term>\<open>mdefault :: V\<close>), (ord_model, \<^term>\<open>mdefault :: V\<close>),
(ordrec_model, \<^term>\<open>mdefault :: V\<close>),
(pair_model, \<^term>\<open>mdefault :: V\<close>), (func_model, \<^term>\<open>mdefault :: V\<close>),
(exc_model, \<^term>\<open>mdefault :: V\<close>)]\<close>
instance
proof
show
"(GZF_Model_mdefault :: V) : M"
"(Ordinal_Model_mdefault :: V) : M"
"(OrdRec_Model_mdefault :: V) : M"
"(OPair_Model_mdefault :: V) : M"
"(Function_Model_mdefault :: V) : M"
"(Exc_Model_mdefault :: V) : M"
unfolding
GZF_Model_mdefault_V_def
Ordinal_Model_mdefault_V_def
OrdRec_Model_mdefault_V_def
OPair_Model_mdefault_V_def
Function_Model_mdefault_V_def
Exc_Model_mdefault_V_def
using
mdefault_m by auto
show
"(GZF_Model_class.set :: V) : Tag"
"(ord :: V) : Tag"
"(opair :: V) : Tag"
"(func :: V) : Tag"
"(exc_tag :: V) : Tag"
unfolding
set_V_def ord_V_def opair_V_def
func_V_def exc_tag_V_def
by (tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context>
[tag_thm 0, tag_thm 1, tag_thm 2, tag_thm 3, tag_thm 4] 1)\<close>)
show
"\<alpha> (GZF_Model_class.set :: V) = Set"
"\<alpha> (ord :: V) = Ord"
"\<alpha> (opair :: V) = Pair"
"\<alpha> (func :: V) = Function"
"\<alpha> (exc_tag :: V) = Set"
unfolding
\<alpha>_V_0 \<alpha>_V_1 \<alpha>_V_2 \<alpha>_V_3 \<alpha>_V_4
set_V_def ord_V_def opair_V_def
func_V_def exc_tag_V_def
by auto
show
"Variants (GZF_Model_class.set :: V) 0 = (\<lambda>x. \<emptyset>)"
"\<And>j. j : Ord \<Longrightarrow> Variants (GZF_Model_class.set :: V) (succ j) = Pow"
"\<And>u. u : Limit \<Longrightarrow> Variants (GZF_Model_class.set :: V) u = (\<lambda>_. \<emptyset>)"
unfolding set_V_def
using Variants_V_0_zero Variants_V_0_succ Variants_V_0_lim by auto
show
"Variants ord 0 = (\<lambda>x::V. {0})"
"\<And>j::V. j : Ord \<Longrightarrow> Variants ord (succ j) = (\<lambda>x::V. {succ j})"
"\<And>u::V. u : Limit \<Longrightarrow> Variants ord u = (\<lambda>f::V. {u})"
unfolding ord_V_def
using Variants_V_1_zero Variants_V_1_succ Variants_V_1_lim by auto
show
"Variants opair 0 = (\<lambda>x::V. \<emptyset>)"
"\<And>j::V. j : Ord \<Longrightarrow> Variants opair (succ j) = (\<lambda>y::V. y \<times> y)"
"\<And>u::V. u : Limit \<Longrightarrow> Variants opair u = (\<lambda>_::V. \<emptyset>)"
unfolding opair_V_def
using Variants_V_2_zero Variants_V_2_succ Variants_V_2_lim by auto
show
"Variants func 0 = (\<lambda>x::V. \<emptyset>)"
"\<And>j::V. j : Ord \<Longrightarrow> Variants func (succ j) = (\<lambda>x::V. x \<midarrow>p\<rightarrow> x)"
"\<And>u::V. u : Limit \<Longrightarrow> Variants func u = (\<lambda>_::V. \<emptyset>)"
unfolding func_V_def
using Variants_V_3_zero Variants_V_3_succ Variants_V_3_lim by auto
show
"Variants exc_tag 0 = (\<lambda>x::V. {\<emptyset>})"
"\<And>j::V. j : Ord \<Longrightarrow> Variants exc_tag (succ j) = (\<lambda>y::V. \<emptyset>)"
"\<And>u::V. u : Limit \<Longrightarrow> Variants exc_tag u = (\<lambda>_::V. \<emptyset>)"
unfolding exc_tag_V_def
using Variants_V_4_zero Variants_V_4_succ Variants_V_4_lim by auto
show "\<And>x' :: V. \<not> Excluded GZF_Model_class.set \<langle>GZF_Model_class.set,x'\<rangle>"
using Excluded_V_0 four_neq(1) pair_inject_any
unfolding set_V_def mexc'_def pair_V_def by (metis)
show "\<And>j'::V. \<not> Excluded GZF_Model_class.set \<langle>ord,j'\<rangle>"
using Excluded_V_0 four_neq(2) pair_inject_any
unfolding ord_V_def set_V_def mexc'_def pair_V_def by metis
show "\<And>p'::V. \<not> Excluded opair \<langle>opair,p'\<rangle>"
using Excluded_V_2 four_neq(3) pair_inject_any
unfolding opair_V_def mexc'_def pair_V_def by metis
show "\<And>f'::V. \<not> Excluded func \<langle>func,f'\<rangle>"
using Excluded_V_3 four_neq(4) pair_inject_any
unfolding func_V_def mexc'_def pair_V_def by metis
show "Excluded (GZF_Model_class.set :: V) << Excluded func"
unfolding set_V_def func_V_def
unfolding Excluded_V_0 Excluded_V_3
by (rule subtypI)
qed
end
typedef d1 =
"Set.Collect (\<lambda>x :: V. x : M)" using mdefault_m by auto
setup_lifting type_definition_d1
interpretation ConnectionBase Abs_d1 Rep_d1 pcr_d1 mdefault
by (unfold ConnectionBase_def, rule, rule type_definition_d1,
unfold pcr_d1_def cr_d1_def, auto, rule mdefault_m)
instantiation d1 :: GZF begin
local_setup \<open>lift_mconsts @{thms mGZF_resp} \<^typ>\<open>d1\<close> mGZF\<close>
instance
by (intro_classes, unfold funty_eq depfunty_eq, transfer_all,
tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context> @{thms mGZF_axioms} 1)\<close>)
end
instantiation d1 :: Ordinal begin
local_setup \<open>lift_mconsts @{thms mOrdinal_resp} \<^typ>\<open>d1\<close> mOrdinal\<close>
instance
by (intro_classes, unfold funty_eq depfunty_eq, transfer_all,
tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context> @{thms mOrdinal_axioms} 1)\<close>)
end
instantiation d1 :: OrdRec begin
local_setup \<open>lift_mconsts @{thms mOrdRec_resp} \<^typ>\<open>d1\<close> mOrdRec\<close>
instance
by (intro_classes, unfold funty_eq depfunty_eq, transfer_all,
tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context> @{thms mOrdRec_axioms} 1)\<close>)
end
instantiation d1 :: OPair begin
local_setup \<open>lift_mconsts @{thms mOPair_rsp} \<^typ>\<open>d1\<close> mOPair\<close>
instance
by (intro_classes, unfold funty_eq depfunty_eq, transfer_all,
tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context> @{thms mOPair_axioms} 1)\<close>)
end
instantiation d1 :: Function begin
local_setup \<open>lift_mconsts @{thms mFunction_rsp} \<^typ>\<open>d1\<close> mFunction\<close>
instance
by (intro_classes, unfold funty_eq depfunty_eq, transfer_all,
tactic \<open>REPEAT_DETERM (Proof_Context.fact_tac \<^context> @{thms mFunction_axioms} 1)\<close>)
end
instantiation d1 :: Exc begin
local_setup \<open>lift_mconsts @{thms mExc_rsp} \<^typ>\<open>d1\<close> mExc\<close>
instance
by (intro_classes, transfer, rule mexc_ax)
end
lemma top_transfer [transfer_rule] :
"rel_fun pcr_d1 iff (\<top>) (\<top>)"
unfolding Any_def by auto
lemma bot_transfer [transfer_rule] :
"rel_fun pcr_d1 iff (\<bottom>) (\<bottom>)"
unfolding empty_typ_def by auto
lemma uni_typ_eq :
"x : P \<mho> Q \<longleftrightarrow> x : P \<or> x : Q"
unfolding union_ty_def has_ty_def by auto
declare [[show_types = true, show_sorts = true, goals_limit = 100]]
instantiation d1 :: ZFplus begin
instance proof (intro_classes, unfold pred_ty_eq subtyp_eq, transfer_all)
show "m\<forall>x. \<not> x : mSetOf mSet \<longrightarrow> m\<Union> x = GZF_Model_mdefault"
unfolding mUnion_def by auto
show "m\<forall>x. \<not> x : mSet \<longrightarrow> m\<P> x = GZF_Model_mdefault"
unfolding mPow_def by auto
show "m\<forall>x. \<not> x : mSet \<longrightarrow> mSucc x = GZF_Model_mdefault"
unfolding mSucc_def by auto
show "m\<forall>x. \<forall>xa. \<not> x : mSet \<or> \<not> xa : mReplPred x \<longrightarrow>
m\<R> x xa = GZF_Model_mdefault"
unfolding mRepl_def by auto
show "m\<forall>x. \<not> x : mOrd \<longrightarrow> msucc x = Ordinal_Model_mdefault"
unfolding msucc_def by auto
show "m\<forall>x xa. \<not> x : mPairMem \<or> \<not> xa : mPairMem \<longrightarrow>
mpair x xa = (OPair_Model_mdefault :: V)"
proof -
have "\<not> (OPair_Model_mdefault :: V) : mPair"
unfolding mPair_def
proof (rule, drule intE, auto elim: partE)
assume "(OPair_Model_mdefault :: V) :\<^enum> opair"
then obtain p :: V where "OPair_Model_mdefault = <opair,p>"
using partE by metis
thus "False"
unfolding OPair_Model_mdefault_V_def mdefault_V_def
pair_V_def opair_V_def
using pair_inject_any(1) four_neq(3) by auto
qed
thus ?thesis
unfolding mpair_def by auto
qed
show "m\<forall>x. \<forall>xa. \<not> x : mSet \<or> \<not> xa : mFunPred x \<longrightarrow>
mk_mfun x xa = Function_Model_mdefault"
unfolding mk_mfun_def by auto
show "m\<forall>x. \<not> x : mFunc \<longrightarrow> mdom x = Function_Model_mdefault"
unfolding mdom_def by auto
show "m\<forall>x. \<not> x : mFunc \<longrightarrow> mran x = Function_Model_mdefault"
unfolding mran_def by auto
show "m\<forall>x xa. \<not> x : mSet \<or> \<not> xa : mSet \<longrightarrow>
mfspace x xa = Function_Model_mdefault"
unfolding mfspace_def by auto
show
"(GZF_Model_mdefault :: V) = mexc"
"(Ordinal_Model_mdefault :: V) = mexc"
"(OPair_Model_mdefault :: V) = mexc"
"(Function_Model_mdefault :: V) = mexc"
"(mexc :: V) = mexc"
unfolding
GZF_Model_mdefault_V_def
Ordinal_Model_mdefault_V_def
OPair_Model_mdefault_V_def
Function_Model_mdefault_V_def
mdefault_V_def mexc_def exc_tag_V_def
by auto
show "m\<forall>x :: V. (x : mSet \<mho> (mOrd \<mho> (mPair \<mho> (mFunc \<mho> mExc)))) = (x : \<top>)"
proof (rule, rule, rule anyI, unfold uni_typ_eq)
fix x :: V assume "x : M"
hence x : "x : (\<Sigma> i : Tag. \<alpha> i)"
using m_depsum by auto
obtain i :: V and x' :: V where
i : "i : Tag" and x': "x' : \<alpha> i" and
x_eq : "x = <i, x'>" and x_pair : "x : Pair"
unfolding pair_V_def
by (metis \<open>(x::V) : M\<close> mtagE pair_V_def)
hence "i = GZF_Model_class.set \<or> i = ord \<or> i = opair
\<or> i = func \<or> i = exc_tag"
unfolding
\<alpha>_V_def set_V_def ord_V_def opair_V_def
func_V_def exc_tag_V_def
using empty_typD[where 'a = V]
by fastforce
thus
"x : mSet \<or> x : mOrd \<or> x : mPair \<or> x : mFunc \<or> x : mExc"
unfolding mSet_def mOrd_def mPair_def mFunc_def mExc_def
using intI[OF \<open>x : M\<close> partI[OF x_eq x_pair]]
by auto
qed
show "m\<forall>x::V. (x : mSet \<triangle> mOrd) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mSet" "x : mOrd"
hence
"\<tau> x = GZF_Model_class.set" "\<tau> x = ord"
unfolding mSet_def mOrd_def int_typ_eq
using partD by auto
thus "False"
unfolding set_V_def ord_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (1,0)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mSet \<triangle> mPair) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mSet" "x : mPair"
hence
"\<tau> x = GZF_Model_class.set" "\<tau> x = opair"
unfolding mSet_def mPair_def int_typ_eq
using partD by auto
thus "False"
unfolding set_V_def opair_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (2,0)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mSet \<triangle> mFunc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mSet" "x : mFunc"
hence
"\<tau> x = GZF_Model_class.set" "\<tau> x = func"
unfolding mSet_def mFunc_def int_typ_eq
using partD by auto
thus "False"
unfolding set_V_def func_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (3,0)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mSet \<triangle> mExc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mSet" "x : mExc"
hence
"\<tau> x = GZF_Model_class.set" "\<tau> x = exc_tag"
unfolding mSet_def mExc_def int_typ_eq
using partD by auto
thus "False"
unfolding set_V_def exc_tag_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (4,0)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mOrd \<triangle> mPair) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mOrd" "x : mPair"
hence
"\<tau> x = ord" "\<tau> x = opair"
unfolding mOrd_def mPair_def int_typ_eq
using partD by auto
thus "False"
unfolding ord_V_def opair_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (2,1)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mOrd \<triangle> mFunc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mOrd" "x : mFunc"
hence
"\<tau> x = ord" "\<tau> x = func"
unfolding mOrd_def mFunc_def int_typ_eq
using partD by auto
thus "False"
unfolding ord_V_def func_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (3,1)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mOrd \<triangle> mExc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mOrd" "x : mExc"
hence
"\<tau> x = ord" "\<tau> x = exc_tag"
unfolding mOrd_def mExc_def int_typ_eq
using partD by auto
thus "False"
unfolding ord_V_def exc_tag_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (4,1)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mPair \<triangle> mFunc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mPair" "x : mFunc"
hence
"\<tau> x = opair" "\<tau> x = func"
unfolding mPair_def mFunc_def int_typ_eq
using partD by auto
thus "False"
unfolding opair_V_def func_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (3,2)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mPair \<triangle> mExc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mPair" "x : mExc"
hence
"\<tau> x = opair" "\<tau> x = exc_tag"
unfolding mPair_def mExc_def int_typ_eq
using partD by auto
thus "False"
unfolding opair_V_def exc_tag_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (4,2)] 1\<close>, auto)
qed
show "m\<forall>x::V. (x : mFunc \<triangle> mExc) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : mFunc" "x : mExc"
hence
"\<tau> x = func" "\<tau> x = exc_tag"
unfolding mFunc_def mExc_def int_typ_eq
using partD by auto
thus "False"
unfolding func_V_def exc_tag_V_def
by (tactic \<open>Method.insert_tac \<^context> [neq_thm (4,3)] 1\<close>, auto)
qed
show "m\<forall>x::V. x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc)) \<longrightarrow> x : mSetMem"
proof (rule, rule)
fix x :: V assume "x : M"
and "x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc))"
hence "x :\<^enum> GZF_Model_class.set \<or> x :\<^enum> ord \<or> x :\<^enum> opair \<or> x :\<^enum> func"
unfolding uni_typ_eq int_typ_eq
mSet_def mPair_def mOrd_def mFunc_def
by auto
hence "\<tau> x \<noteq> \<tau> mexc'"
unfolding set_V_def ord_V_def opair_V_def func_V_def
using fst_mexc partD(2) four_neq by metis
hence "x \<noteq> mexc'" by auto
hence "\<not> Excluded GZF_Model_class.set x"
unfolding Excluded_V_def set_V_def by auto
thus "x : mSetMem"
using not_excluded_msmem[OF \<open>x : M\<close>] by auto
qed
show "m\<forall>x::V. x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc)) \<longrightarrow> x : mPairMem"
proof (rule, rule)
fix x :: V assume "x : M"
and "x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc))"
hence "x :\<^enum> GZF_Model_class.set \<or> x :\<^enum> ord \<or> x :\<^enum> opair \<or> x :\<^enum> func"
unfolding uni_typ_eq int_typ_eq
mSet_def mPair_def mOrd_def mFunc_def
by auto
hence "\<tau> x \<noteq> \<tau> mexc'"
unfolding set_V_def ord_V_def opair_V_def func_V_def
using fst_mexc partD(2) four_neq by metis
hence "x \<noteq> mexc'" by auto
hence "\<not> Excluded opair x"
unfolding Excluded_V_def opair_V_def by auto
thus "x : mPairMem"
using mE[OF \<open>x : M\<close>] mpmemI[OF _ exsetI[OF tier_set]] by metis
qed
show "m\<forall>x::V. x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc)) \<longrightarrow> x : mFunMem"
proof (rule, rule)
fix x :: V assume "x : M"
and "x : mSet \<mho> (mOrd \<mho> (mPair \<mho> mFunc))"
hence "x :\<^enum> GZF_Model_class.set \<or> x :\<^enum> ord \<or> x :\<^enum> opair \<or> x :\<^enum> func"
unfolding uni_typ_eq int_typ_eq
mSet_def mPair_def mOrd_def mFunc_def
by auto
hence "\<tau> x \<noteq> \<tau> mexc'"
unfolding set_V_def ord_V_def opair_V_def func_V_def
using fst_mexc partD(2) four_neq by metis
hence "x \<noteq> mexc'" by auto
hence "\<not> Excluded func x"
unfolding Excluded_V_def func_V_def by auto
thus "x : mFunMem"
using ex_mfmem[OF \<open>x : M\<close>] setmem_any
unfolding FunMem_V_def by auto
qed
show "m\<forall>x::V. x : mSet \<mho> (mOrd \<mho> (mPair \<mho> (mFunc \<mho> mExc))) \<longrightarrow> x : \<top>"
by auto
show "m\<forall>x::V. (x : mExc \<triangle> mSetMem) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : M" "x : mExc" "x : mSetMem"
hence "x = mexc'" "\<not> Excluded GZF_Model_class.set x"
using mexc_ax[where 'a = V] msmem_not_excluded
unfolding mexc_def mexc'_def exc_tag_V_def has_ty_def mall_def tall_def
by auto
thus "False"
unfolding set_V_def Excluded_V_0 by auto
qed
show "m\<forall>x::V. (x : mExc \<triangle> mPairMem) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : M" "x : mExc" "x : mPairMem"
hence "x = mexc'" "\<not> Excluded opair x"
using mexc_ax[where 'a = V] mpmem_ex
unfolding mexc_def mexc'_def exc_tag_V_def has_ty_def mall_def tall_def
by auto
thus "False"
unfolding opair_V_def Excluded_V_2 by auto
qed
show "m\<forall>x::V. (x : mExc \<triangle> mFunMem) = (x : \<bottom>)"
proof (rule, rule disjointI)
fix x :: V assume
"x : M" "x : mExc" "x : mFunMem"
hence "x = mexc'" "\<not> Excluded func x"
using mexc_ax[where 'a = V] mfmem_ex
unfolding mexc_def mexc'_def exc_tag_V_def has_ty_def mall_def tall_def
by auto
thus "False"
unfolding func_V_def Excluded_V_3 by auto
qed
show "m\<forall>x::V. (x : \<bottom> \<triangle> \<top>) = (x : \<bottom>)"
by (rule, rule disjointI, auto dest: empty_typD)
qed
end
end
PROGRAM array4_test
CALL claw_test ( )
END PROGRAM array4_test
SUBROUTINE claw_test ( )
INTEGER , ALLOCATABLE :: vec1 ( : , : )
INTEGER , ALLOCATABLE :: vec2 ( : , : )
INTEGER :: claw_induction_0
INTEGER :: claw_induction_1
ALLOCATE ( vec1 ( 10 , 20 ) )
ALLOCATE ( vec2 ( 10 , 20 ) )
vec1 ( : , : ) = 0
vec2 ( : , : ) = 100
DO claw_induction_0 = 1 , size ( vec1 , 1 )
DO claw_induction_1 = 1 , size ( vec1 , 2 )
vec1 ( claw_induction_0 , claw_induction_1 ) = vec2 ( claw_induction_0 ,&
claw_induction_1 ) + 10
vec2 ( claw_induction_0 , claw_induction_1 ) = vec2 ( claw_induction_0 ,&
claw_induction_1 ) + 10
END DO
END DO
PRINT * , sum ( vec1 )
PRINT * , sum ( vec2 )
DEALLOCATE ( vec1 )
DEALLOCATE ( vec2 )
END SUBROUTINE claw_test
# # Example 3: Pulse Parametrization
#md # !!! tip
#md # This example is also available as a Jupyter notebook:
#md # [`state_to_state_parametrizations.ipynb`](@__NBVIEWER_ROOT_URL__/examples/state_to_state_parametrizations.ipynb).
#md # ``\gdef\op#1{\hat{#1}}``
#md # ``\gdef\init{\text{init}}``
#md # ``\gdef\tgt{\text{tgt}}``
#nb # $
#nb # \newcommand{tr}[0]{\operatorname{tr}}
#nb # \newcommand{diag}[0]{\operatorname{diag}}
#nb # \newcommand{abs}[0]{\operatorname{abs}}
#nb # \newcommand{pop}[0]{\operatorname{pop}}
#nb # \newcommand{aux}[0]{\text{aux}}
#nb # \newcommand{opt}[0]{\text{opt}}
#nb # \newcommand{tgt}[0]{\text{tgt}}
#nb # \newcommand{init}[0]{\text{init}}
#nb # \newcommand{lab}[0]{\text{lab}}
#nb # \newcommand{rwa}[0]{\text{rwa}}
#nb # \newcommand{bra}[1]{\langle#1\vert}
#nb # \newcommand{ket}[1]{\vert#1\rangle}
#nb # \newcommand{Bra}[1]{\left\langle#1\right\vert}
#nb # \newcommand{Ket}[1]{\left\vert#1\right\rangle}
#nb # \newcommand{Braket}[2]{\left\langle #1\vphantom{#2}\mid{#2}\vphantom{#1}\right\rangle}
#nb # \newcommand{op}[1]{\hat{#1}}
#nb # \newcommand{Op}[1]{\hat{#1}}
#nb # \newcommand{dd}[0]{\,\text{d}}
#nb # \newcommand{Liouville}[0]{\mathcal{L}}
#nb # \newcommand{DynMap}[0]{\mathcal{E}}
#nb # \newcommand{identity}[0]{\mathbf{1}}
#nb # \newcommand{Norm}[1]{\lVert#1\rVert}
#nb # \newcommand{Abs}[1]{\left\vert#1\right\vert}
#nb # \newcommand{avg}[1]{\langle#1\rangle}
#nb # \newcommand{Avg}[1]{\left\langle#1\right\rangle}
#nb # \newcommand{AbsSq}[1]{\left\vert#1\right\vert^2}
#nb # \newcommand{Re}[0]{\operatorname{Re}}
#nb # \newcommand{Im}[0]{\operatorname{Im}}
#nb # $
# This example illustrates the parametrization of control pulses as a
# form of amplitude constraint.
using DrWatson
@quickactivate "KrotovTests"
#-
using QuantumControl
using QuantumControl.Shapes: flattop
using Krotov:
SquareParametrization,
TanhParametrization,
TanhSqParametrization,
LogisticParametrization,
LogisticSqParametrization
using LinearAlgebra
#-
#jl using Test; println("")
using Plots
Plots.default(
linewidth = 3,
size = (550, 300),
legend = :top,
foreground_color_legend = nothing,
background_color_legend = RGBA(1, 1, 1, 0.8),
)
#-
# ## Parametrizations
# ## Symmetric Bounded Controls
#-
include(joinpath(@__DIR__, "plots", "symmetric_parametrization_comparison.jl")) # hide
fig = plot_symmetric_parametrization_comparison() # hide
#jl display(fig)
# ## Positive (Bounded) Controls
#-
include(joinpath(@__DIR__, "plots", "positive_parametrization_comparison.jl")) # hide
fig = plot_positive_parametrization_comparison() # hide
#jl display(fig)
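# All of the parametrizations compared above follow the same idea: the
# optimization works on an unconstrained parameter u(t), and the physical
# pulse is recovered as ϵ(t) = P(u(t)), where P is chosen such that ϵ(t)
# automatically satisfies the desired amplitude constraint. The stand-in
# functions below sketch this idea (the exact formulas used by the `Krotov`
# parametrizations may differ): a square parametrization keeps the pulse
# non-negative, while a tanh-square parametrization additionally bounds it
# from above by some ϵ_max.
square_pulse(u) = u^2                            # ϵ(t) ≥ 0
tanh_sq_pulse(u; ϵ_max=3.0) = ϵ_max * tanh(u)^2  # 0 ≤ ϵ(t) < ϵ_max
#jl @test square_pulse(-2.0) ≥ 0.0
#jl @test 0.0 ≤ tanh_sq_pulse(10.0) < 3.0
#-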
# ## Two-level Hamiltonian
# We consider the Hamiltonian $\op{H}_{0} = - \frac{\omega}{2} \op{\sigma}_{z}$, representing
# a simple qubit with energy level splitting $\omega$ in the basis
# $\{\ket{0},\ket{1}\}$. The control field $\epsilon(t)$ is assumed to couple via
# the Hamiltonian $\op{H}_{1}(t) = \epsilon(t) \op{\sigma}_{x}$ to the qubit,
# i.e., the control field effectively drives transitions between both qubit
# states.
#
# We will use
ϵ(t) = 0.2 * flattop(t, T=5, t_rise=0.3, func=:blackman);
#-
"""Two-level-system Hamiltonian."""
function hamiltonian(Ω=1.0, ϵ=ϵ)
σ̂_z = ComplexF64[
1 0
0 -1
]
σ̂_x = ComplexF64[
0 1
1 0
]
Ĥ₀ = -0.5 * Ω * σ̂_z
Ĥ₁ = σ̂_x
return (Ĥ₀, (Ĥ₁, ϵ))
end;
#-
H = hamiltonian();
#jl @test length(H) == 2
# The control field here switches on from zero at $t=0$ to its maximum amplitude
# 0.2 within the time period 0.3 (the switch-on shape is half a [Blackman pulse](https://en.wikipedia.org/wiki/Window_function#Blackman_window)).
# It switches off again in the time period 0.3 before the
# final time $T=5$. We use a time grid with 500 time steps between 0 and $T$:
tlist = collect(range(0, 5, length=500));
#-
function plot_control(pulse::Vector, tlist)
plot(tlist, pulse, xlabel="time", ylabel="amplitude", legend=false)
end
plot_control(ϵ::T, tlist) where {T<:Function} = plot_control([ϵ(t) for t in tlist], tlist)
plot_control(H[2][2], tlist)
# ## Optimization target
# The `krotov` package requires the goal of the optimization to be described by a
# list of `Objective` instances. In this example, there is only a single
# objective: the state-to-state transfer from initial state $\ket{\Psi_{\init}} =
# \ket{0}$ to the target state $\ket{\Psi_{\tgt}} = \ket{1}$, under the dynamics
# of the Hamiltonian $\op{H}(t)$:
function ket(label)
result = Dict("0" => Vector{ComplexF64}([1, 0]), "1" => Vector{ComplexF64}([0, 1]),)
return result[string(label)]
end;
#-
#jl @test dot(ket(0), ket(1)) ≈ 0
#-
objectives = [Objective(initial_state=ket(0), generator=H, target_state=ket(1))]
#-
#jl @test length(objectives) == 1
#-
# ## Square-parametrization for positive pulses
problem = ControlProblem(
objectives=objectives,
pulse_options=IdDict(
ϵ => Dict(
:lambda_a => 5,
:update_shape => t -> flattop(t, T=5, t_rise=0.3, func=:blackman),
:parametrization => SquareParametrization(),
)
),
tlist=tlist,
iter_stop=50,
J_T=QuantumControl.Functionals.J_T_ss,
check_convergence=res -> begin
((res.J_T < 1e-3) && (res.converged = true) && (res.message = "J_T < 10⁻³"))
end
);
#-
opt_result_positive, file = @optimize_or_load(
datadir(),
problem;
method=:krotov,
filename="parametrization#opt_result_positive.jld2"
);
#-
opt_result_positive
# We can plot the optimized field:
#-
#!jl plot_control(opt_result_positive.optimized_controls[1], tlist)
#-
#-
#jl @test minimum(opt_result_positive.optimized_controls[1]) ≥ 0.0
#jl @test minimum(opt_result_positive.optimized_controls[1]) < 1e-16
#jl @test maximum(opt_result_positive.optimized_controls[1]) > 0.0
#-
# ## Tanh-Square-Parametrization for positive amplitude-constrained pulses
problem_tanhsq = ControlProblem(
objectives=objectives,
pulse_options=IdDict(
ϵ => Dict(
:lambda_a => 10,
:update_shape => t -> flattop(t, T=5, t_rise=0.3, func=:blackman),
:parametrization => TanhSqParametrization(3),
)
),
tlist=tlist,
iter_stop=50,
J_T=QuantumControl.Functionals.J_T_ss,
check_convergence=res -> begin
((res.J_T < 1e-3) && (res.converged = true) && (res.message = "J_T < 10⁻³"))
end
);
#-
opt_result_tanhsq, file = @optimize_or_load(
datadir(),
problem_tanhsq;
method=:krotov,
filename="parametrization#opt_result_tanhsq.jld2"
);
#-
opt_result_tanhsq
# We can plot the optimized field:
#-
#!jl plot_control(opt_result_tanhsq.optimized_controls[1], tlist)
#-
#-
#jl @test minimum(opt_result_tanhsq.optimized_controls[1]) ≥ 0.0
#jl @test minimum(opt_result_tanhsq.optimized_controls[1]) < 1e-16
#jl @test maximum(opt_result_tanhsq.optimized_controls[1]) > 0.0
#jl @test maximum(opt_result_tanhsq.optimized_controls[1]) < 3.0
#-
# ## Logistic-Square-Parametrization for positive amplitude-constrained pulses
problem_logisticsq = ControlProblem(
objectives=objectives,
pulse_options=IdDict(
ϵ => Dict(
:lambda_a => 1,
:update_shape => t -> flattop(t, T=5, t_rise=0.3, func=:blackman),
:parametrization => LogisticSqParametrization(3, k=1.0),
)
),
tlist=tlist,
iter_stop=50,
J_T=QuantumControl.Functionals.J_T_ss,
check_convergence=res -> begin
((res.J_T < 1e-3) && (res.converged = true) && (res.message = "J_T < 10⁻³"))
end
);
#-
opt_result_logisticsq, file = @optimize_or_load(
datadir(),
problem_logisticsq;
method=:krotov,
filename="parametrization#opt_result_logisticsq.jld2"
);
# We can plot the optimized field:
#-
#!jl plot_control(opt_result_logisticsq.optimized_controls[1], tlist)
#-
#-
#jl @test minimum(opt_result_logisticsq.optimized_controls[1]) ≥ 0.0
#jl @test minimum(opt_result_logisticsq.optimized_controls[1]) < 1e-16
#jl @test maximum(opt_result_logisticsq.optimized_controls[1]) > 0.0
#jl @test maximum(opt_result_logisticsq.optimized_controls[1]) < 3.0
#-
# ## Tanh-parametrization for amplitude-constrained pulses
problem_tanh = ControlProblem(
objectives=objectives,
pulse_options=IdDict(
ϵ => Dict(
:lambda_a => 1,
:update_shape => t -> flattop(t, T=5, t_rise=0.3, func=:blackman),
:parametrization => TanhParametrization(-0.5, 0.5),
)
),
tlist=tlist,
iter_stop=50,
J_T=QuantumControl.Functionals.J_T_ss,
check_convergence=res -> begin
((res.J_T < 1e-3) && (res.converged = true) && (res.message = "J_T < 10⁻³"))
end
);
#-
opt_result_tanh, file = @optimize_or_load(
datadir(),
problem_tanh;
method=:krotov,
filename="parametrization#opt_result_tanh.jld2"
);
#-
#!jl plot_control(opt_result_tanh.optimized_controls[1], tlist)
#-
#jl @test minimum(opt_result_tanh.optimized_controls[1]) > -0.5
#jl @test maximum(opt_result_tanh.optimized_controls[1]) < 0.5
#-
theory Lens_Statespace_Example
imports Lenses
begin
statespace myss =
x :: int
y :: int
statespace myss' =
a :: string
statespace myss2 = myss + myss' +
z :: string
context myss2
begin
lemma "x \<bowtie> y"
by (simp)
end
statespace myss3 = myss2 +
v :: string
text \<open> We can instantiate one of the statespaces with a concrete alphabet type as shown below. \<close>
alphabet myss_c =
x :: int
y :: int
a :: string
z :: string
v :: string
lemma "myss3 x y a z v"
by (simp add: myss'_def myss2_axioms_def myss2_def myss3.intro myss3_axioms.intro myss_def)
end
lemma higher_deriv_linear [simp]: "(deriv ^^ n) (\<lambda>w. c*w) = (\<lambda>z. if n = 0 then c*z else if n = 1 then c else 0)"
(* Title: Tree Automata
Author: Peter Lammich <peter dot lammich at uni-muenster.de>
Maintainer: Peter Lammich <peter dot lammich at uni-muenster.de>
*)
header "Abstract Tree Automata Algorithms"
theory AbsAlgo
imports
Ta
"../Collections/Examples/ICF/Exploration"
"../Collections/ICF/CollectionsV1"
begin
no_notation fun_rel_syn (infixr "\<rightarrow>" 60)
text_raw {*\label{sec:absalgo}*}
text {* This theory defines tree automata algorithms on an abstract level,
that is, using non-executable datatypes and constructs like sets,
set-collecting operations, etc.
These algorithms are then refined to executable algorithms in
Section~\ref{sec:taimpl}.
*}
subsection {* Word Problem *}
text {*
First, a recursive version of the @{const accs}-predicate is defined.
*}
fun r_match :: "'a set list \<Rightarrow> 'a list \<Rightarrow> bool" where
"r_match [] [] \<longleftrightarrow> True" |
"r_match (A#AS) (a#as) \<longleftrightarrow> a\<in>A \<and> r_match AS as" |
"r_match _ _ \<longleftrightarrow> False"
-- {* @{const r_match} accepts two lists if they have the same length and
the elements of the second list are contained in the respective
elements of the first list: *}
lemma r_match_alt:
"r_match L l \<longleftrightarrow> length L = length l \<and> (\<forall>i<length l. l!i \<in> L!i)"
apply (induct L l rule: r_match.induct)
apply auto
apply (case_tac i)
apply auto
done
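text {* For example, @{const r_match} holds for the list of sets @{text "[{1,2},{3}]"}
and the list @{text "[1,3]"}; this follows directly from the defining equations: *}
lemma "r_match [{1::nat,2},{3}] [1,3]" by simp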
-- "Whether a rule matches the given state, label and list of sets of states"
fun r_matchc where
"r_matchc q l Qs (qr \<rightarrow> lr qsr) \<longleftrightarrow> q=qr \<and> l=lr \<and> r_match Qs qsr"
-- {* recursive version of @{const accs}-predicate *}
fun faccs :: "('Q,'L) ta_rule set \<Rightarrow> 'L tree \<Rightarrow> 'Q set" where
"faccs \<delta> (NODE f ts) = (
let Qs = map (faccs \<delta>) (ts) in
{q. \<exists>r\<in>\<delta>. r_matchc q f Qs r }
)"
lemma faccs_correct_aux:
"q\<in>faccs \<delta> n = accs \<delta> n q" (is ?T1)
"(map (faccs \<delta>) ts = map (\<lambda>t. { q . accs \<delta> t q}) ts)" (is ?T2)
proof -
have "(\<forall>q. q\<in>faccs \<delta> n = accs \<delta> n q)
\<and> (map (faccs \<delta>) ts = map (\<lambda>t. { q . accs \<delta> t q}) ts)"
proof (induct rule: compat_tree_tree_list.induct)
case (NODE f ts)
thus ?case
apply (intro allI iffI)
apply simp
apply (erule bexE)
apply (case_tac x)
apply simp
apply (rule accs.intros)
apply assumption
apply (unfold r_match_alt)
apply simp
apply fastforce
apply simp
apply (erule accs.cases)
apply auto
apply (rule_tac x="qa \<rightarrow> f qs" in bexI)
apply simp
apply (unfold r_match_alt)
apply auto
done
qed auto
thus ?T1 ?T2 by auto
qed
theorem faccs_correct1: "q\<in>faccs \<delta> n \<Longrightarrow> accs \<delta> n q"
by (simp add: faccs_correct_aux)
theorem faccs_correct2: "accs \<delta> n q \<Longrightarrow> q\<in>faccs \<delta> n"
by (simp add: faccs_correct_aux)
theorems faccs_correct = faccs_correct1 faccs_correct2
lemma faccs_alt: "faccs \<delta> t = {q. accs \<delta> t q}" by (auto intro: faccs_correct)
subsection {* Backward Reduction and Emptiness Check *}
subsubsection "Auxiliary Definitions"
-- {*
Step function, that maps a set of states to those states
that are reachable via one backward step.
*}
inductive_set bacc_step :: "('Q,'L) ta_rule set \<Rightarrow> 'Q set \<Rightarrow> 'Q set"
for \<delta> Q
where
"\<lbrakk> r\<in>\<delta>; set (rhsq r) \<subseteq> Q \<rbrakk> \<Longrightarrow> lhs r \<in> bacc_step \<delta> Q"
-- {*
If a set is closed under adding all states that are reachable from the set
by one backward step, then this set contains all backward accessible states.
*}
lemma b_accs_as_closed:
assumes A: "bacc_step \<delta> Q \<subseteq> Q"
shows "b_accessible \<delta> \<subseteq> Q"
proof (rule subsetI)
fix q
assume "q\<in>b_accessible \<delta>"
thus "q\<in>Q"
proof (induct rule: b_accessible.induct)
fix q f qs
assume BC: "(q\<rightarrow>f qs)\<in>\<delta>"
"!!x. x\<in>set qs \<Longrightarrow> x\<in>b_accessible \<delta>"
"!!x. x\<in>set qs \<Longrightarrow> x\<in>Q"
from bacc_step.intros[OF BC(1)] BC(3) have "q\<in>bacc_step \<delta> Q" by auto
with A show "q\<in>Q" by blast
qed
qed
subsubsection "Algorithms"
text {*
First, the basic workset algorithm is specified.
Then, it is refined to maintain a counter for each rule
that counts the number of undiscovered states on the rule's RHS.
For both levels of abstraction, a version that computes the
backwards reduction and a version that checks for emptiness are specified.
Additionally, a version of the algorithm that computes a witness
for non-emptiness is provided.
Levels of abstraction:
\begin{itemize}
\item[@{text \<alpha>}] On this level, the state consists of a set of
discovered states and a workset.
\item[@{text \<alpha>'}] On this level, the state consists of a set of
discovered states, a workset and a map from rules to the number of
undiscovered RHS states. This map makes it more efficient to discover
which rules have to be considered next.
\end{itemize}
*}
text_raw {* \paragraph {@{text \<alpha>} - Level:} *}
-- "A state contains the set of discovered states and a workset"
type_synonym ('Q,'L) br_state = "'Q set \<times> 'Q set"
-- {* Set of states that are non-empty (accept a tree) after adding the
state $q$ to the set of discovered states *}
definition br_dsq
:: "('Q,'L) ta_rule set \<Rightarrow> 'Q \<Rightarrow> ('Q,'L) br_state \<Rightarrow> 'Q set"
where
"br_dsq \<delta> q == \<lambda>(Q,W). { lhs r | r. r\<in>\<delta> \<and> set (rhsq r) \<subseteq> (Q-(W-{q})) }"
-- {*
Description of a step: One state is removed from the workset, and all
new states that become non-empty due to this state are added to, both,
the workset and the set of discovered states *}
inductive_set br_step
:: "('Q,'L) ta_rule set \<Rightarrow> (('Q,'L) br_state \<times> ('Q,'L) br_state) set"
for \<delta> where
"\<lbrakk>
q\<in>W;
Q' = Q \<union> br_dsq \<delta> q (Q,W);
W' = W - {q} \<union> (br_dsq \<delta> q (Q,W) - Q)
\<rbrakk> \<Longrightarrow> ((Q,W),(Q',W'))\<in>br_step \<delta>"
-- "Termination condition for backwards reduction: The workset is empty"
definition br_cond :: "('Q,'L) br_state set"
where "br_cond == {(Q,W). W\<noteq>{}}"
-- "Termination condition for emptiness check:
The workset is empty or a non-empty initial state has been discovered"
definition bre_cond :: "'Q set \<Rightarrow> ('Q,'L) br_state set"
where "bre_cond Qi == {(Q,W). W\<noteq>{} \<and> (Qi\<inter>Q={})}"
-- "Set of all states that occur on the lhs of a constant-rule"
definition br_iq :: "('Q,'L) ta_rule set \<Rightarrow> 'Q set"
where "br_iq \<delta> == { lhs r | r. r\<in>\<delta> \<and> rhsq r = [] }"
-- "Initial state for the iteration"
definition br_initial :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br_state"
where "br_initial \<delta> == (br_iq \<delta>, br_iq \<delta>)"
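text {*
As a small illustration, consider the rule set consisting of the constant rule
q1 \<rightarrow> a [], together with q2 \<rightarrow> f [q1,q1] and q3 \<rightarrow> g [q2].
Here @{const br_iq} yields {q1}, so the iteration starts in the state
({q1},{q1}). Processing q1 discovers q2, leading to ({q1,q2},{q2});
processing q2 discovers q3, leading to ({q1,q2,q3},{q3}); and processing q3
discovers nothing new, so the workset becomes empty and the iteration
terminates with the set of discovered states {q1,q2,q3}, which is exactly
the set of backward accessible states of this rule set.
*}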
-- {*
Invariant for the iteration:
\begin{itemize}
\item States on the workset have been discovered
\item Only accessible states have been discovered
\item If a state is non-empty due to a rule whose
rhs-states have been discovered and processed
(i.e. are in $Q-W$), then the lhs state of the
rule has also been discovered.
\item The set of discovered states is finite
\end{itemize}
*}
definition br_invar :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br_state set"
where "br_invar \<delta> == {(Q,W).
W\<subseteq>Q \<and>
Q \<subseteq> b_accessible \<delta> \<and>
bacc_step \<delta> (Q - W) \<subseteq> Q \<and>
finite Q}"
definition "br_algo \<delta> == \<lparr>
wa_cond = br_cond,
wa_step = br_step \<delta>,
wa_initial = {br_initial \<delta>},
wa_invar = br_invar \<delta>
\<rparr>"
definition "bre_algo Qi \<delta> == \<lparr>
wa_cond = bre_cond Qi,
wa_step = br_step \<delta>,
wa_initial = {br_initial \<delta>},
wa_invar = br_invar \<delta>
\<rparr>"
-- {* Termination: Either a new state is added, or the workset decreases. *}
definition "br_termrel \<delta> ==
({(Q',Q). Q \<subset> Q' \<and> Q' \<subseteq> b_accessible \<delta>}) <*lex*> finite_psubset"
lemma bre_cond_imp_br_cond[intro, simp]: "bre_cond Qi \<subseteq> br_cond"
by (auto simp add: br_cond_def bre_cond_def)
-- "Only accessible states are discovered"
lemma br_dsq_ss:
assumes A: "(Q,W)\<in>br_invar \<delta>" "W \<noteq> {}" "q\<in>W"
shows "br_dsq \<delta> q (Q,W) \<subseteq> b_accessible \<delta>"
proof (rule subsetI)
fix q'
assume B: "q'\<in>br_dsq \<delta> q (Q,W)"
then obtain r where
R: "q' = lhs r" "r\<in>\<delta>" and
S: "set (rhsq r) \<subseteq> (Q-(W-{q}))"
by (unfold br_dsq_def) auto
note S
also have "(Q-(W-{q})) \<subseteq> b_accessible \<delta>" using A(1,3)
by (auto simp add: br_invar_def)
finally show "q'\<in>b_accessible \<delta>" using R
by (cases r)
(auto intro: b_accessible.intros)
qed
lemma br_step_in_termrel:
assumes A: "\<Sigma>\<in>br_cond" "\<Sigma>\<in>br_invar \<delta>" "(\<Sigma>,\<Sigma>')\<in>br_step \<delta>"
shows "(\<Sigma>', \<Sigma>)\<in>br_termrel \<delta>"
proof -
obtain Q W Q' W' where
[simp]: "\<Sigma>=(Q,W)" "\<Sigma>'=(Q',W')"
by (cases \<Sigma>, cases \<Sigma>', auto)
obtain q where
QIW: "q\<in>W" and
ASSFMT[simp]: "Q' = Q \<union> br_dsq \<delta> q (Q, W)"
"W' = W - {q} \<union> (br_dsq \<delta> q (Q, W) - Q)"
by (auto intro: br_step.cases[OF A(3)[simplified]])
from A(2) have [simp]: "finite Q"
by (auto simp add: br_invar_def)
from A(2)[unfolded br_invar_def] have [simp]: "finite W"
by (auto simp add: finite_subset)
from A(1) have WNE: "W\<noteq>{}" by (unfold br_cond_def) auto
note DSQSS = br_dsq_ss[OF A(2)[simplified] WNE QIW]
{
assume "br_dsq \<delta> q (Q,W) - Q = {}"
hence ?thesis using QIW
by (simp add: br_termrel_def set_simps)
} moreover {
assume "br_dsq \<delta> q (Q,W) - Q \<noteq> {}"
hence "Q \<subset> Q'" by auto
moreover from DSQSS A(2)[unfolded br_invar_def] have
"Q' \<subseteq> b_accessible \<delta>"
by auto
ultimately have ?thesis
by (simp add: br_termrel_def)
} ultimately show ?thesis by blast
qed
lemma br_invar_initial[simp]: "finite \<delta> \<Longrightarrow> (br_initial \<delta>)\<in>br_invar \<delta>"
apply (auto simp add: br_initial_def br_invar_def br_iq_def)
apply (case_tac r)
apply (fastforce intro: b_accessible.intros)
apply (fastforce elim!: bacc_step.cases)
done
lemma br_invar_step:
assumes [simp]: "finite \<delta>"
assumes A: "\<Sigma>\<in>br_cond" "\<Sigma>\<in>br_invar \<delta>" "(\<Sigma>,\<Sigma>')\<in>br_step \<delta>"
shows "\<Sigma>'\<in>br_invar \<delta>"
proof -
obtain Q W Q' W' where SF[simp]: "\<Sigma>=(Q,W)" "\<Sigma>'=(Q',W')"
by (cases \<Sigma>, cases \<Sigma>', auto)
obtain q where
QIW: "q\<in>W" and
ASSFMT[simp]: "Q' = Q \<union> br_dsq \<delta> q (Q, W)"
"W' = W - {q} \<union> (br_dsq \<delta> q (Q, W) - Q)"
by (auto intro: br_step.cases[OF A(3)[simplified]])
from A(1) have WNE: "W\<noteq>{}" by (unfold br_cond_def) auto
have DSQSS: "br_dsq \<delta> q (Q,W) \<subseteq> b_accessible \<delta>"
using br_dsq_ss[OF A(2)[simplified] WNE QIW] .
show ?thesis
apply (simp add: br_invar_def del: ASSFMT)
proof (intro conjI)
from A(2) have "W \<subseteq> Q" by (simp add: br_invar_def)
thus "W' \<subseteq> Q'" by auto
next
from A(2) have "Q \<subseteq> b_accessible \<delta>" by (simp add: br_invar_def)
with DSQSS show "Q' \<subseteq> b_accessible \<delta>" by auto
next
show "bacc_step \<delta> (Q' - W') \<subseteq> Q'"
apply (rule subsetI)
apply (erule bacc_step.cases)
apply (auto simp add: br_dsq_def)
done
next
show "finite Q'" using A(2) by (simp add: br_invar_def br_dsq_def)
qed
qed
lemma br_invar_final:
"\<forall>\<Sigma>. \<Sigma>\<in>wa_invar (br_algo \<delta>) \<and> \<Sigma>\<notin>wa_cond (br_algo \<delta>)
\<longrightarrow> fst \<Sigma> = b_accessible \<delta>"
apply (simp add: br_invar_def br_cond_def br_algo_def)
apply (auto intro: set_rev_mp[OF _ b_accs_as_closed])
done
(* shows "\<lbrakk>(Q,W)\<in>br_invar \<delta>; (Q,W)\<notin>br_cond\<rbrakk> \<Longrightarrow> Q = b_accessible \<delta>"
apply (simp add: br_invar_def br_cond_def)
apply (auto intro: set_rev_mp[OF _ b_accs_as_closed])
done*)
theorem br_while_algo:
assumes FIN[simp]: "finite \<delta>"
shows "while_algo (br_algo \<delta>)"
apply (unfold_locales)
apply (simp_all add: br_algo_def br_invar_step br_invar_initial
br_step_in_termrel)
apply (rule_tac r="br_termrel \<delta>" in wf_subset)
apply (auto intro: br_step_in_termrel)
done
lemma bre_invar_final:
"\<forall>\<Sigma>. \<Sigma>\<in>wa_invar (bre_algo Qi \<delta>) \<and> \<Sigma>\<notin>wa_cond (bre_algo Qi \<delta>)
\<longrightarrow> ((Qi\<inter>fst \<Sigma>={}) \<longleftrightarrow> (Qi \<inter> b_accessible \<delta> = {}))"
apply (simp add: br_invar_def bre_cond_def bre_algo_def)
apply safe
apply (auto dest!: b_accs_as_closed)
done
theorem bre_while_algo:
assumes FIN[simp]: "finite \<delta>"
shows "while_algo (bre_algo Qi \<delta>)"
apply (unfold_locales)
apply (unfold bre_algo_def)
apply (auto simp add: br_invar_initial br_step_in_termrel
intro: br_invar_step
dest: set_rev_mp[OF _ bre_cond_imp_br_cond])
apply (rule_tac r="br_termrel \<delta>" in wf_subset)
apply (auto intro: br_step_in_termrel
dest: set_rev_mp[OF _ bre_cond_imp_br_cond])
done
text_raw {* \paragraph{@{text \<alpha>'} - Level} *}
text {*
Here, an optimization is added:
For each rule, the algorithm now maintains a counter that counts the number
of undiscovered states on the rules RHS. Whenever a new state is discovered,
this counter is decremented for all rules where the state occurs on the RHS.
The LHS states of rules whose counter falls to 0 are added to the
workset. The idea is that decrementing the counter is more efficient than
checking whether all states on the rule's RHS have been discovered.
A similar algorithm is sketched in \cite{tata2007} (Exercise~1.18).
*}
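text {*
In the small example above, the rules q2 \<rightarrow> f [q1,q1] and q3 \<rightarrow> g [q2]
initially carry the counter 1, while the constant rule for q1 carries the
counter 0. Processing q1 decrements the counter of the q2-rule to 0, so q2 is
added to the workset; processing q2 then decrements the counter of the
q3-rule to 0, which adds q3.
*}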
type_synonym ('Q,'L) br'_state = "'Q set \<times> 'Q set \<times> (('Q,'L) ta_rule \<rightharpoonup> nat)"
-- {* Abstraction to @{text \<alpha>}-level *}
definition br'_\<alpha> :: "('Q,'L) br'_state \<Rightarrow> ('Q,'L) br_state"
where "br'_\<alpha> = (\<lambda>(Q,W,rcm). (Q,W))"
definition br'_invar_add :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br'_state set"
where "br'_invar_add \<delta> == {(Q,W,rcm).
(\<forall>r\<in>\<delta>. rcm r = Some (card (set (rhsq r) - (Q - W)))) \<and>
{lhs r | r. r\<in>\<delta> \<and> the (rcm r) = 0} \<subseteq> Q
}"
definition br'_invar :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br'_state set"
where "br'_invar \<delta> == br'_invar_add \<delta> \<inter> {\<Sigma>. br'_\<alpha> \<Sigma> \<in> br_invar \<delta>}"
inductive_set br'_step
:: "('Q,'L) ta_rule set \<Rightarrow> (('Q,'L) br'_state \<times> ('Q,'L) br'_state) set"
for \<delta> where
"\<lbrakk> q\<in>W;
Q' = Q \<union> { lhs r | r. r\<in>\<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> 1 };
W' = (W-{q})
\<union> ({ lhs r | r. r\<in>\<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> 1 }
- Q);
!!r. r\<in>\<delta> \<Longrightarrow> rcm' r = ( if q \<in> set (rhsq r) then
Some (the (rcm r) - 1)
else rcm r
)
\<rbrakk> \<Longrightarrow> ((Q,W,rcm),(Q',W',rcm')) \<in> br'_step \<delta>"
definition br'_cond :: "('Q,'L) br'_state set"
where "br'_cond == {(Q,W,rcm). W\<noteq>{}}"
definition bre'_cond :: "'Q set \<Rightarrow> ('Q,'L) br'_state set"
where "bre'_cond Qi == {(Q,W,rcm). W\<noteq>{} \<and> (Qi\<inter>Q={})}"
inductive_set br'_initial :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br'_state set"
for \<delta> where
"\<lbrakk> !!r. r\<in>\<delta> \<Longrightarrow> rcm r = Some (card (set (rhsq r))) \<rbrakk>
\<Longrightarrow> (br_iq \<delta>, br_iq \<delta>, rcm)\<in>br'_initial \<delta>"
definition "br'_algo \<delta> == \<lparr>
wa_cond=br'_cond,
wa_step = br'_step \<delta>,
wa_initial = br'_initial \<delta>,
wa_invar = br'_invar \<delta>
\<rparr>"
definition "bre'_algo Qi \<delta> == \<lparr>
wa_cond=bre'_cond Qi,
wa_step = br'_step \<delta>,
wa_initial = br'_initial \<delta>,
wa_invar = br'_invar \<delta>
\<rparr>"
lemma br'_step_invar:
assumes finite[simp]: "finite \<delta>"
assumes INV: "\<Sigma>\<in>br'_invar_add \<delta>" "br'_\<alpha> \<Sigma> \<in> br_invar \<delta>"
assumes STEP: "(\<Sigma>,\<Sigma>') \<in> br'_step \<delta>"
shows "\<Sigma>'\<in>br'_invar_add \<delta>"
proof -
obtain Q W rcm where [simp]: "\<Sigma>=(Q,W,rcm)"
by (cases \<Sigma>) auto
obtain Q' W' rcm' where [simp]: "\<Sigma>'=(Q',W',rcm')"
by (cases \<Sigma>') auto
from STEP obtain q where
STEPF:
"q\<in>W"
"Q' = Q \<union> { lhs r | r. r\<in>\<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> 1 }"
"W' = (W-{q})
\<union> ({ lhs r | r. r\<in>\<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> 1 }
- Q)"
"!!r. r\<in>\<delta> \<Longrightarrow> rcm' r = ( if q \<in> set (rhsq r) then
Some (the (rcm r) - 1)
else rcm r
)"
by (auto elim: br'_step.cases)
from INV[unfolded br'_invar_def br_invar_def br'_invar_add_def br'_\<alpha>_def,
simplified]
have INV:
"(\<forall>r\<in>\<delta>. rcm r = Some (card (set (rhsq r) - (Q - W))))"
"{lhs r |r. r \<in> \<delta> \<and> the (rcm r) = 0} \<subseteq> Q"
"W \<subseteq> Q"
"Q \<subseteq> b_accessible \<delta>"
"bacc_step \<delta> (Q - W) \<subseteq> Q"
"finite Q"
by auto
{
fix r
assume A: "r\<in>\<delta>"
with INV(1) have RCMR: "rcm r = Some (card (set (rhsq r) - (Q - W)))"
by auto
have "rcm' r = Some (card (set (rhsq r) - (Q' - W')))"
proof (cases "q\<in>set (rhsq r)")
case False
with A STEPF(4) have "rcm' r = rcm r" by auto
moreover from STEPF INV(3) False have
"set (rhsq r) - (Q-W) = set (rhsq r) - (Q'-W')"
by auto
ultimately show ?thesis
by (simp add: RCMR)
next
case True
with A STEPF(4) RCMR have
"rcm' r = Some ((card (set (rhsq r) - (Q - W))) - 1)"
by simp
moreover from STEPF INV(3) True have
"set (rhsq r) - (Q-W) = insert q (set (rhsq r) - (Q'-W'))"
"q\<notin>(set (rhsq r) - (Q'-W'))"
by auto
ultimately show ?thesis
by (simp add: RCMR card_insert_disjoint')
qed
} moreover {
fix r
assume A: "r\<in>\<delta>" "the (rcm' r) = 0"
have "lhs r \<in> Q'" proof (cases "q\<in>set (rhsq r)")
case True
with A(1) STEPF(4) have "rcm' r = Some (the (rcm r) - 1)" by auto
with A(2) have "the (rcm r) - 1 = 0" by auto
hence "the (rcm r) \<le> 1" by auto
with STEPF(2) A(1) True show ?thesis by auto
next
case False
with A(1) STEPF(4) have "rcm' r = rcm r" by auto
with A(2) have "the (rcm r) = 0" by auto
with A(1) INV(2) have "lhs r \<in> Q" by auto
with STEPF(2) show ?thesis by auto
qed
} ultimately show ?thesis
by (auto simp add: br'_invar_add_def)
qed
lemma br'_invar_initial:
"br'_initial \<delta> \<subseteq> br'_invar_add \<delta>"
apply safe
apply (erule br'_initial.cases)
apply (unfold br'_invar_add_def)
apply (auto simp add: br_iq_def)
done
lemma br'_rcm_aux':
"\<lbrakk> (Q,W,rcm)\<in>br'_invar \<delta>; q\<in>W \<rbrakk>
\<Longrightarrow> {r \<in> \<delta>. q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0}
= {r\<in>\<delta>. q\<in>set (rhsq r) \<and> set (rhsq r) \<subseteq> (Q - (W-{q}))}"
proof (intro subsetI equalityI)
case (goal1 r)
hence B: "r\<in>\<delta>" "q\<in>set (rhsq r)" "the (rcm r) \<le> Suc 0" by auto
from B(1,3) goal1(1)[unfolded br'_invar_def br'_invar_add_def] have
CARD: "card (set (rhsq r) - (Q - W)) \<le> Suc 0"
by auto
from goal1(1)[unfolded br'_invar_def br_invar_def br'_\<alpha>_def] have WSQ: "W\<subseteq>Q"
by auto
have "set (rhsq r) - (Q - W) = {q}"
proof -
from B(2) goal1(2) have R1: "q\<in>set (rhsq r) - (Q - W)" by auto
moreover
{
fix x
assume A: "x\<noteq>q" "x\<in>set (rhsq r) - (Q - W)"
with R1 have "{x,q} \<subseteq> set (rhsq r) - (Q - W)" by auto
hence "card {x,q} \<le> card (set (rhsq r) - (Q - W))"
by (auto simp add: card_mono)
with CARD A(1) have False by auto
}
ultimately show ?thesis by auto
qed
with goal1(2) WSQ have "set (rhsq r) \<subseteq> Q - (W - {q})" by auto
thus ?case using B(1,2) by auto
next
case (goal2 r)
hence B: "r\<in>\<delta>" "q\<in>set (rhsq r)" "set (rhsq r) \<subseteq> Q - (W - {q})" by auto
with goal2(1)[unfolded br'_invar_def br'_invar_add_def
br'_\<alpha>_def br_invar_def]
have
IC: "W\<subseteq>Q" "the (rcm r) = card (set (rhsq r) - (Q - W))"
by auto
have "set (rhsq r) - (Q - W) \<subseteq> {q}" using B(2,3) IC(1) by auto
from card_mono[OF _ this] have "the (rcm r) \<le> Suc 0" by (simp add: IC(2))
with B(1,2) show ?case by auto
qed
lemma br'_rcm_aux:
assumes A: "(Q,W,rcm)\<in>br'_invar \<delta>" "q\<in>W"
shows "{lhs r |r. r \<in> \<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0}
= {lhs r | r. r\<in>\<delta> \<and> q\<in>set (rhsq r) \<and> set (rhsq r) \<subseteq> (Q - (W-{q}))}"
proof -
have "{lhs r |r. r \<in> \<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0}
= lhs ` {r \<in> \<delta>. q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0}"
by auto
also from br'_rcm_aux'[OF A] have
"\<dots> = lhs ` {r \<in> \<delta>. q \<in> set (rhsq r) \<and> set (rhsq r) \<subseteq> Q - (W - {q})}"
by simp
also have
"\<dots> = {lhs r | r. r\<in>\<delta> \<and> q\<in>set (rhsq r) \<and> set (rhsq r) \<subseteq> (Q - (W-{q}))}"
by auto
finally show ?thesis .
qed
lemma br'_invar_QcD:
"(Q,W,rcm) \<in> br'_invar \<delta> \<Longrightarrow> {lhs r | r. r\<in>\<delta> \<and> set (rhsq r) \<subseteq> (Q-W)} \<subseteq> Q"
proof (safe)
fix r
assume A: "(Q,W,rcm)\<in>br'_invar \<delta>" "r\<in>\<delta>" "set (rhsq r) \<subseteq> Q - W"
from A(1)[unfolded br'_invar_def br'_invar_add_def br'_\<alpha>_def br_invar_def,
simplified]
have
IC: "W \<subseteq> Q"
"finite Q"
"(\<forall>r\<in>\<delta>. rcm r = Some (card (set (rhsq r) - (Q - W))))"
"{lhs r |r. r \<in> \<delta> \<and> the (rcm r) = 0} \<subseteq> Q" by auto
from IC(3) A(2,3) have "the (rcm r) = 0" by auto
with IC(4) A(2) show "lhs r \<in> Q" by auto
qed
lemma br'_rcm_aux2:
"\<lbrakk> (Q,W,rcm)\<in>br'_invar \<delta>; q\<in>W \<rbrakk>
\<Longrightarrow> Q \<union> br_dsq \<delta> q (Q,W)
= Q \<union> {lhs r |r. r \<in> \<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0}"
apply (simp only: br'_rcm_aux)
apply (unfold br_dsq_def)
apply simp
apply (frule br'_invar_QcD)
apply auto
done
lemma br'_rcm_aux3:
"\<lbrakk> (Q,W,rcm)\<in>br'_invar \<delta>; q\<in>W \<rbrakk>
\<Longrightarrow> br_dsq \<delta> q (Q,W) - Q
= {lhs r |r. r \<in> \<delta> \<and> q \<in> set (rhsq r) \<and> the (rcm r) \<le> Suc 0} - Q"
apply (simp only: br'_rcm_aux)
apply (unfold br_dsq_def)
apply simp
apply (frule br'_invar_QcD)
apply auto
done
lemma br'_step_abs:
"\<lbrakk>
\<Sigma>\<in>br'_invar \<delta>;
(\<Sigma>,\<Sigma>') \<in> br'_step \<delta>
\<rbrakk> \<Longrightarrow> (br'_\<alpha> \<Sigma>, br'_\<alpha> \<Sigma>')\<in>br_step \<delta>"
apply (cases \<Sigma>, cases \<Sigma>', simp)
apply (erule br'_step.cases)
apply (simp add: br'_\<alpha>_def)
apply (rule_tac q=q in br_step.intros)
apply simp
apply (simp only: br'_rcm_aux2)
apply (simp only: br'_rcm_aux3)
done
lemma br'_initial_abs: "br'_\<alpha>`(br'_initial \<delta>) = {br_initial \<delta>}"
apply (force simp add: br_initial_def br'_\<alpha>_def
elim: br'_initial.cases
intro: br'_initial.intros)
done
lemma br'_cond_abs: "\<Sigma>\<in>br'_cond \<longleftrightarrow> (br'_\<alpha> \<Sigma>) \<in> br_cond"
by (cases \<Sigma>)
(simp add: br'_cond_def br_cond_def br'_\<alpha>_def image_Collect
br'_algo_def br_algo_def)
lemma bre'_cond_abs: "\<Sigma>\<in>bre'_cond Qi \<longleftrightarrow> (br'_\<alpha> \<Sigma>)\<in>bre_cond Qi"
by (cases \<Sigma>) (simp add: bre'_cond_def bre_cond_def br'_\<alpha>_def image_Collect
bre'_algo_def bre_algo_def)
lemma br'_invar_abs: "br'_\<alpha>`br'_invar \<delta> \<subseteq> br_invar \<delta>"
by (auto simp add: br'_invar_def)
theorem br'_pref_br: "wa_precise_refine (br'_algo \<delta>) (br_algo \<delta>) br'_\<alpha>"
apply unfold_locales
apply (simp_all add: br'_algo_def br_algo_def)
apply (simp_all add: br'_cond_abs br'_step_abs br'_invar_abs br'_initial_abs)
done
interpretation br'_pref: wa_precise_refine "br'_algo \<delta>" "br_algo \<delta>" "br'_\<alpha>"
using br'_pref_br .
theorem br'_while_algo:
"finite \<delta> \<Longrightarrow> while_algo (br'_algo \<delta>)"
apply (rule br'_pref.wa_intro)
apply (simp add: br_while_algo)
apply (simp_all add: br'_algo_def br_algo_def)
apply (simp add: br'_invar_def)
apply (erule (3) br'_step_invar)
apply (simp add: br'_invar_initial)
done
lemma fst_br'_\<alpha>: "fst (br'_\<alpha> s) = fst s" by (cases s) (simp add: br'_\<alpha>_def)
theorems br'_invar_final =
br'_pref.transfer_correctness[OF br_invar_final, unfolded fst_br'_\<alpha>]
theorem bre'_pref_br: "wa_precise_refine (bre'_algo Qi \<delta>) (bre_algo Qi \<delta>) br'_\<alpha>"
apply unfold_locales
apply (simp_all add: bre'_algo_def bre_algo_def)
apply (simp_all add: bre'_cond_abs br'_step_abs br'_invar_abs br'_initial_abs)
done
interpretation bre'_pref:
wa_precise_refine "bre'_algo Qi \<delta>" "bre_algo Qi \<delta>" "br'_\<alpha>"
using bre'_pref_br .
theorem bre'_while_algo:
"finite \<delta> \<Longrightarrow> while_algo (bre'_algo Qi \<delta>)"
apply (rule bre'_pref.wa_intro)
apply (simp add: bre_while_algo)
apply (simp_all add: bre'_algo_def bre_algo_def)
apply (simp add: br'_invar_def)
apply (erule (3) br'_step_invar)
apply (simp add: br'_invar_initial)
done
theorems bre'_invar_final =
bre'_pref.transfer_correctness[OF bre_invar_final, unfolded fst_br'_\<alpha>]
text_raw {* \paragraph{Implementing a Step} *}
text {*
In this paragraph, it is shown how to implement a step of the br'-algorithm
by iteration over the rules that have the discovered state on their RHS.
*}
definition br'_inner_step
:: "('Q,'L) ta_rule \<Rightarrow> ('Q,'L) br'_state \<Rightarrow> ('Q,'L) br'_state"
where
"br'_inner_step == \<lambda>r (Q,W,rcm). let c=the (rcm r) in (
if c\<le>1 then insert (lhs r) Q else Q,
if c\<le>1 \<and> (lhs r) \<notin> Q then insert (lhs r) W else W,
rcm ( r \<mapsto> (c-(1::nat)))
)
"
definition br'_inner_invar
:: "('Q,'L) ta_rule set \<Rightarrow> 'Q \<Rightarrow> ('Q,'L) br'_state
\<Rightarrow> ('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) br'_state \<Rightarrow> bool"
where
"br'_inner_invar rules q == \<lambda>(Q,W,rcm) it (Q',W',rcm').
Q' = Q \<union> { lhs r | r. r\<in>rules-it \<and> the (rcm r) \<le> 1 } \<and>
W' = (W-{q}) \<union> ({ lhs r | r. r\<in>rules-it \<and> the (rcm r) \<le> 1 } - Q) \<and>
(\<forall>r. rcm' r = (if r\<in>rules-it then Some (the (rcm r) - 1) else rcm r))
"
lemma br'_inner_invar_imp_final:
"\<lbrakk> q\<in>W; br'_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm) {} \<Sigma>' \<rbrakk>
\<Longrightarrow> ((Q,W,rcm),\<Sigma>') \<in> br'_step \<delta>"
apply (unfold br'_inner_invar_def)
apply auto
apply (rule br'_step.intros)
apply assumption
apply auto
done
lemma br'_inner_invar_step:
"\<lbrakk> q\<in>W; br'_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm) it \<Sigma>';
r\<in>it; it\<subseteq>{r\<in>\<delta>. q\<in>set (rhsq r)}
\<rbrakk> \<Longrightarrow> br'_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm)
(it-{r}) (br'_inner_step r \<Sigma>')
"
apply (cases \<Sigma>', simp)
apply (unfold br'_inner_invar_def br'_inner_step_def Let_def)
apply auto
done
lemma br'_inner_invar_initial:
"\<lbrakk> q\<in>W \<rbrakk> \<Longrightarrow> br'_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm)
{r\<in>\<delta>. q\<in>set (rhsq r)} (Q,W-{q},rcm)"
apply (simp add: br'_inner_invar_def)
apply auto
done
lemma br'_inner_step_proof:
fixes \<alpha>s :: "'\<Sigma> \<Rightarrow> ('Q,'L) br'_state"
fixes cstep :: "('Q,'L) ta_rule \<Rightarrow> '\<Sigma> \<Rightarrow> '\<Sigma>"
fixes \<Sigma>h :: "'\<Sigma>"
fixes cinvar :: "('Q,'L) ta_rule set \<Rightarrow> '\<Sigma> \<Rightarrow> bool"
assumes iterable_set: "set_iteratei \<alpha> invar iteratei"
assumes invar_initial: "cinvar {r\<in>\<delta>. q\<in>set (rhsq r)} \<Sigma>h"
assumes invar_step:
"!!it r \<Sigma>. \<lbrakk> r\<in>it; it \<subseteq> {r\<in>\<delta>. q\<in>set (rhsq r)}; cinvar it \<Sigma> \<rbrakk>
\<Longrightarrow> cinvar (it-{r}) (cstep r \<Sigma>)"
assumes step_desc:
"!!it r \<Sigma>. \<lbrakk> r\<in>it; it\<subseteq>{r\<in>\<delta>. q\<in>set (rhsq r)}; cinvar it \<Sigma> \<rbrakk>
\<Longrightarrow> \<alpha>s (cstep r \<Sigma>) = br'_inner_step r (\<alpha>s \<Sigma>)"
assumes it_set_desc: "invar it_set" "\<alpha> it_set = {r\<in>\<delta>. q\<in>set (rhsq r)}"
assumes QIW[simp]: "q\<in>W"
assumes \<Sigma>_desc[simp]: "\<alpha>s \<Sigma> = (Q,W,rcm)"
assumes \<Sigma>h_desc[simp]: "\<alpha>s \<Sigma>h = (Q,W-{q},rcm)"
shows "(\<alpha>s \<Sigma>, \<alpha>s (iteratei it_set (\<lambda>_. True) cstep \<Sigma>h))\<in>br'_step \<delta>"
proof -
interpret set_iteratei \<alpha> invar iteratei by fact
show ?thesis
apply (rule_tac
I="\<lambda>it \<Sigma>. cinvar it \<Sigma>
\<and> br'_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm)
it (\<alpha>s \<Sigma>)"
in iterate_rule_P)
apply (simp_all
add: it_set_desc invar_initial br'_inner_invar_initial invar_step
step_desc br'_inner_invar_step)
apply (rule br'_inner_invar_imp_final)
apply (rule QIW)
apply simp
done
qed
text_raw {* \paragraph{Computing Witnesses} *}
text {*
The algorithm is now refined further, such that it stores, for each discovered
state, a witness for non-emptiness, i.e. a tree that is accepted with the
discovered state.
*}
-- "A map from states to trees has the witness-property, if it maps states to
trees that are accepted with that state:"
definition "witness_prop \<delta> m == \<forall>q t. m q = Some t \<longrightarrow> accs \<delta> t q"
-- "Construct a witness for the LHS of a rule, provided that the map contains
witnesses for all states on the RHS:"
definition construct_witness
:: "('Q \<rightharpoonup> 'L tree) \<Rightarrow> ('Q,'L) ta_rule \<Rightarrow> 'L tree"
where
"construct_witness Q r == NODE (rhsl r) (List.map (\<lambda>q. the (Q q)) (rhsq r))"
lemma witness_propD: "\<lbrakk>witness_prop \<delta> m; m q = Some t\<rbrakk> \<Longrightarrow> accs \<delta> t q"
by (auto simp add: witness_prop_def)
lemma construct_witness_correct:
"\<lbrakk> witness_prop \<delta> Q; r\<in>\<delta>; set (rhsq r) \<subseteq> dom Q \<rbrakk>
\<Longrightarrow> accs \<delta> (construct_witness Q r) (lhs r)"
apply (unfold construct_witness_def witness_prop_def)
apply (cases r)
apply simp
apply (erule accs.intros)
apply (auto dest: nth_mem)
done
lemma construct_witness_eq:
"\<lbrakk> Q |` set (rhsq r) = Q' |` set (rhsq r) \<rbrakk> \<Longrightarrow>
construct_witness Q r = construct_witness Q' r"
apply (unfold construct_witness_def)
apply auto
apply (subgoal_tac "Q x = Q' x")
apply (simp)
apply (drule_tac x=x in fun_cong)
apply auto
done
text {*
The set of discovered states is refined by a map from discovered states to
their witnesses:
*}
type_synonym ('Q,'L) brw_state = "('Q\<rightharpoonup>'L tree) \<times> 'Q set \<times> (('Q,'L) ta_rule \<rightharpoonup> nat)"
definition brw_\<alpha> :: "('Q,'L) brw_state \<Rightarrow> ('Q,'L) br'_state"
where "brw_\<alpha> = (\<lambda>(Q,W,rcm). (dom Q,W,rcm))"
definition brw_invar_add :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) brw_state set"
where "brw_invar_add \<delta> == {(Q,W,rcm). witness_prop \<delta> Q}"
definition "brw_invar \<delta> == brw_invar_add \<delta> \<inter> {s. brw_\<alpha> s \<in> br'_invar \<delta>}"
(* TODO:
This step description does not allow full flexibility, because
we may want to construct new witnesses from other witnesses constructed
in the same step!
However, if we set t = construct_witness Q' r, we might run into cyclicity
problems, where a cycle of witnesses witnesses itself. As such cyclic
witnesses would have to be infinite, they cannot exist.
On the other hand, if we use a BFS search strategy, the current step
description computes witnesses of minimal depth.
The argument is:
Initially, all witnesses of depth 1 (which are certainly minimal) are
discovered. A witness of depth n has children of depth < n.
The states that are initially on the workset are exactly those with
witnesses of depth 1. Thus, after they have been processed, all states
with witnesses of depth 2 have been discovered. This argument can be
iterated inductively.
*)
inductive_set brw_step
:: "('Q,'L) ta_rule set \<Rightarrow> (('Q,'L) brw_state \<times> ('Q,'L) brw_state) set"
for \<delta> where
"\<lbrakk>
q\<in>W;
dsqr = { r\<in>\<delta>. q \<in> set (rhsq r) \<and> the (rcm r) \<le> 1 };
dom Q' = dom Q \<union> lhs`dsqr;
!!q t. Q' q = Some t \<Longrightarrow> Q q = Some t
\<or> (\<exists>r\<in>dsqr. q=lhs r \<and> t=construct_witness Q r);
W' = (W-{q}) \<union> (lhs`dsqr - dom Q);
!!r. r\<in>\<delta> \<Longrightarrow> rcm' r = ( if q \<in> set (rhsq r) then
Some (the (rcm r) - 1)
else rcm r
)
\<rbrakk> \<Longrightarrow> ((Q,W,rcm),(Q',W',rcm')) \<in> brw_step \<delta>"
definition brw_cond :: "'Q set \<Rightarrow> ('Q,'L) brw_state set"
where "brw_cond Qi == {(Q,W,rcm). W\<noteq>{} \<and> (Qi\<inter>dom Q={})}"
inductive_set brw_iq :: "('Q,'L) ta_rule set \<Rightarrow> ('Q \<rightharpoonup> 'L tree) set"
for \<delta> where
"\<lbrakk>
\<forall>q t. Q q = Some t \<longrightarrow> (\<exists>r\<in>\<delta>. rhsq r = [] \<and> q = lhs r
\<and> t = NODE (rhsl r) []);
\<forall>r\<in>\<delta>. rhsq r = [] \<longrightarrow> Q (lhs r) \<noteq> None
\<rbrakk> \<Longrightarrow> Q \<in> brw_iq \<delta>"
inductive_set brw_initial :: "('Q,'L) ta_rule set \<Rightarrow> ('Q,'L) brw_state set"
for \<delta> where
"\<lbrakk> !!r. r\<in>\<delta> \<Longrightarrow> rcm r = Some (card (set (rhsq r))); Q\<in>brw_iq \<delta> \<rbrakk>
\<Longrightarrow> (Q, br_iq \<delta>, rcm)\<in>brw_initial \<delta>"
definition "brw_algo Qi \<delta> == \<lparr>
wa_cond=brw_cond Qi,
wa_step = brw_step \<delta>,
wa_initial = brw_initial \<delta>,
wa_invar = brw_invar \<delta>
\<rparr>"
lemma brw_cond_abs: "\<Sigma>\<in>brw_cond Qi \<longleftrightarrow> (brw_\<alpha> \<Sigma>)\<in>bre'_cond Qi"
apply (cases \<Sigma>)
apply (simp add: brw_cond_def bre'_cond_def brw_\<alpha>_def)
done
lemma brw_initial_abs: "\<Sigma>\<in>brw_initial \<delta> \<Longrightarrow> brw_\<alpha> \<Sigma> \<in> br'_initial \<delta>"
apply (cases \<Sigma>, simp)
apply (erule brw_initial.cases)
apply (erule brw_iq.cases)
apply (auto simp add: brw_\<alpha>_def)
apply (subgoal_tac "dom Qa = br_iq \<delta>")
apply simp
apply (rule br'_initial.intros)
apply auto [1]
apply (force simp add: br_iq_def)
done
lemma brw_invar_initial: "brw_initial \<delta> \<subseteq> brw_invar_add \<delta>"
apply safe
apply (unfold brw_invar_add_def)
apply (auto simp add: witness_prop_def)
apply (erule brw_initial.cases)
apply (erule brw_iq.cases)
apply auto
proof -
case (goal1 q t rcm Q)
from goal1(3)[rule_format, OF goal1(1)] obtain r where
[simp]: "r\<in>\<delta>" "rhsq r = []" "q=lhs r" "t=NODE (rhsl r) []"
by blast
have RF[simplified]: "r=((lhs r) \<rightarrow> (rhsl r) (rhsq r))" by (cases r) simp
show ?case
apply (simp)
apply (rule accs.intros)
apply (subst RF[symmetric])
apply auto
done
qed
lemma brw_step_abs:
"\<lbrakk> (\<Sigma>,\<Sigma>')\<in>brw_step \<delta> \<rbrakk> \<Longrightarrow> (brw_\<alpha> \<Sigma>, brw_\<alpha> \<Sigma>')\<in>br'_step \<delta>"
apply (cases \<Sigma>, cases \<Sigma>', simp)
apply (erule brw_step.cases)
apply (simp add: brw_\<alpha>_def)
apply hypsubst
apply (rule br'_step.intros)
apply assumption
apply auto
done
lemma brw_step_invar:
assumes FIN[simp]: "finite \<delta>"
assumes INV: "\<Sigma>\<in>brw_invar_add \<delta>" and BR'INV: "brw_\<alpha> \<Sigma> \<in> br'_invar \<delta>"
assumes STEP: "(\<Sigma>,\<Sigma>') \<in> brw_step \<delta>"
shows "\<Sigma>'\<in>brw_invar_add \<delta>"
proof -
obtain Q W rcm Q' W' rcm' where
[simp]: "\<Sigma>=(Q,W,rcm)" "\<Sigma>'=(Q',W',rcm')"
by (cases \<Sigma>, cases \<Sigma>') force
from INV have WP: "witness_prop \<delta> Q"
by (simp_all add: brw_invar_add_def)
obtain qw dsqr where SPROPS:
"dsqr = {r \<in> \<delta>. qw \<in> set (rhsq r) \<and> the (rcm r) \<le> 1}"
"qw\<in>W"
"dom Q' = dom Q \<union> lhs ` dsqr"
"!!q t. Q' q = Some t \<Longrightarrow> Q q = Some t
\<or> (\<exists>r\<in>dsqr. q=lhs r \<and> t=construct_witness Q r)"
by (auto intro: brw_step.cases[OF STEP[simplified]])
from br'_rcm_aux'[OF BR'INV[unfolded brw_\<alpha>_def, simplified] SPROPS(2)] have
DSQR_ALT: "dsqr = {r \<in> \<delta>. qw \<in> set (rhsq r)
\<and> set (rhsq r) \<subseteq> dom Q - (W - {qw})}"
by (simp add: SPROPS(1))
have "witness_prop \<delta> Q'"
proof (unfold witness_prop_def, safe)
fix q t
assume A: "Q' q = Some t"
from SPROPS(4)[OF A] have
"Q q = Some t \<or> (\<exists>r\<in>dsqr. q = lhs r \<and> t = construct_witness Q r)" .
moreover {
assume C: "Q q = Some t"
from witness_propD[OF WP, OF C] have "accs \<delta> t q" .
} moreover {
fix r
assume "r\<in>dsqr" and [simp]: "q=lhs r" "t=construct_witness Q r"
from `r\<in>dsqr` have 1: "r\<in>\<delta>" "set (rhsq r) \<subseteq> dom Q"
by (auto simp add: DSQR_ALT)
from construct_witness_correct[OF WP 1] have "accs \<delta> t q" by simp
} ultimately show "accs \<delta> t q" by blast
qed
thus ?thesis by (simp add: brw_invar_add_def)
qed
theorem brw_pref_bre': "wa_precise_refine (brw_algo Qi \<delta>) (bre'_algo Qi \<delta>) brw_\<alpha>"
apply (unfold_locales)
apply (simp_all add: brw_algo_def bre'_algo_def)
apply (auto simp add: brw_cond_abs brw_step_abs brw_initial_abs brw_invar_def)
done
interpretation brw_pref:
wa_precise_refine "brw_algo Qi \<delta>" "bre'_algo Qi \<delta>" "brw_\<alpha>"
using brw_pref_bre' .
theorem brw_while_algo: "finite \<delta> \<Longrightarrow> while_algo (brw_algo Qi \<delta>)"
apply (rule brw_pref.wa_intro)
apply (simp add: bre'_while_algo)
apply (simp_all add: brw_algo_def bre'_algo_def)
apply (simp add: brw_invar_def)
apply (auto intro: brw_step_invar simp add: brw_invar_initial)
done
lemma fst_brw_\<alpha>: "fst (brw_\<alpha> s) = dom (fst s)"
by (cases s) (simp add: brw_\<alpha>_def)
theorem brw_invar_final:
"\<forall>sc. sc \<in> wa_invar (brw_algo Qi \<delta>) \<and> sc \<notin> wa_cond (brw_algo Qi \<delta>)
\<longrightarrow> (Qi \<inter> dom (fst sc) = {}) = (Qi \<inter> b_accessible \<delta> = {})
\<and> (witness_prop \<delta> (fst sc))"
apply (intro conjI allI impI)
using brw_pref.transfer_correctness[OF bre'_invar_final, unfolded fst_brw_\<alpha>]
apply blast
apply (auto simp add: brw_algo_def brw_invar_def brw_invar_add_def)
done
text_raw {* \paragraph{Implementing a Step} *}
inductive_set brw_inner_step
:: "('Q,'L) ta_rule \<Rightarrow> (('Q,'L) brw_state \<times> ('Q,'L) brw_state) set"
for r where
"\<lbrakk> c = the (rcm r); \<Sigma> = (Q,W,rcm); \<Sigma>'=(Q',W',rcm');
if c\<le>1 \<and> (lhs r) \<notin> dom Q then
Q' = Q(lhs r \<mapsto> construct_witness Q r)
else Q' = Q;
if c\<le>1 \<and> (lhs r) \<notin> dom Q then
W' = insert (lhs r) W
else W' = W;
rcm' = rcm ( r \<mapsto> (c-(1::nat)))
\<rbrakk> \<Longrightarrow> (\<Sigma>,\<Sigma>')\<in>brw_inner_step r"
definition brw_inner_invar
:: "('Q,'L) ta_rule set \<Rightarrow> 'Q \<Rightarrow> ('Q,'L) brw_state \<Rightarrow> ('Q,'L) ta_rule set
\<Rightarrow> ('Q,'L) brw_state \<Rightarrow> bool"
where
"brw_inner_invar rules q == \<lambda>(Q,W,rcm) it (Q',W',rcm').
(br'_inner_invar rules q (brw_\<alpha> (Q,W,rcm)) it (brw_\<alpha> (Q',W',rcm')) \<and>
(Q'|`dom Q = Q) \<and>
(let dsqr = { r\<in>rules - it. the (rcm r) \<le> 1 } in
(\<forall>q t. Q' q = Some t \<longrightarrow> (Q q = Some t
\<or> (Q q = None \<and> (\<exists>r\<in>dsqr. q=lhs r \<and> t=construct_witness Q r))
)
)))
"
lemma brw_inner_step_abs:
"(\<Sigma>,\<Sigma>')\<in>brw_inner_step r \<Longrightarrow> br'_inner_step r (brw_\<alpha> \<Sigma>) = brw_\<alpha> \<Sigma>'"
apply (erule brw_inner_step.cases)
apply (unfold br'_inner_step_def brw_\<alpha>_def Let_def)
apply auto
done
lemma brw_inner_invar_imp_final:
"\<lbrakk> q\<in>W; brw_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm) {} \<Sigma>' \<rbrakk>
\<Longrightarrow> ((Q,W,rcm),\<Sigma>') \<in> brw_step \<delta>"
apply (unfold brw_inner_invar_def br'_inner_invar_def brw_\<alpha>_def)
apply (auto simp add: Let_def)
apply (rule brw_step.intros)
apply assumption
apply (rule refl)
apply auto
done
lemma brw_inner_invar_step:
assumes INVI: "(Q,W,rcm)\<in>brw_invar \<delta>"
assumes A: "q\<in>W" "r\<in>it" "it\<subseteq>{r\<in>\<delta>. q\<in>set (rhsq r)}"
assumes INVH: "brw_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm) it \<Sigma>h"
assumes STEP: "(\<Sigma>h,\<Sigma>')\<in>brw_inner_step r"
shows "brw_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm) (it-{r}) \<Sigma>'"
proof -
from INVI have BR'_INV: "(dom Q,W,rcm)\<in>br'_invar \<delta>"
by (simp add: brw_invar_def brw_\<alpha>_def)
obtain c Qh Wh rcmh Q' W' rcm' where
SIGMAF[simp]: "\<Sigma>h=(Qh,Wh,rcmh)" "\<Sigma>'=(Q',W',rcm')" and
CF[simp]: "c = the (rcmh r)" and
SF: "if c\<le>1 \<and> (lhs r) \<notin> dom Qh then
Q' = Qh(lhs r \<mapsto> (construct_witness Qh r))
else Q' = Qh"
"if c\<le>1 \<and> (lhs r) \<notin> dom Qh then
W' = insert (lhs r) Wh
else W' = Wh"
"rcm' = rcmh ( r \<mapsto> (c-(1::nat)))"
by (blast intro: brw_inner_step.cases[OF STEP])
let ?rules = "{r\<in>\<delta>. q\<in>set (rhsq r)}"
let ?dsqr = "\<lambda>it. { r\<in>?rules - it. the (rcm r) \<le> 1 }"
from INVH have INVHF:
"br'_inner_invar ?rules q (dom Q, W-{q}, rcm) (it) (dom Qh,Wh,rcmh)"
"Qh|`dom Q = Q"
"(\<forall>q t. Qh q = Some t \<longrightarrow> (Q q = Some t
\<or> (Q q = None \<and> (\<exists>r\<in>?dsqr it. q=lhs r \<and> t=construct_witness Q r))
)
)"
by (auto simp add: brw_inner_invar_def Let_def brw_\<alpha>_def)
from INVHF(1)[unfolded br'_inner_invar_def] have INV'HF:
"dom Qh = dom Q \<union> lhs`?dsqr it"
"(\<forall>r. rcmh r = (if r \<in> ?rules - it then
Some (the (rcm r) - 1)
else rcm r))"
by auto
from brw_inner_step_abs[OF STEP]
br'_inner_invar_step[OF A(1) INVHF(1) A(2,3)] have
G1: "br'_inner_invar ?rules q (dom Q, W-{q}, rcm) (it-{r}) (dom Q',W',rcm')"
by (simp add: brw_\<alpha>_def)
moreover have
"(\<forall>q t. Q' q = Some t \<longrightarrow> (Q q = Some t
\<or> ( Q q = None
\<and> (\<exists>r\<in>?dsqr (it-{r}). q=lhs r \<and> t=construct_witness Q r)
)
)
)" (is ?G1)
"Q'|`dom Q = Q" (is ?G2)
proof -
{
assume C: "\<not> c\<le>1 \<or> lhs r \<in> dom Qh"
with SF have "Q'=Qh" by auto
with INVHF(2,3) have ?G1 ?G2 by auto
} moreover {
assume C: "c\<le>1" "lhs r\<notin> dom Qh"
with SF have Q'F: "Q'=Qh(lhs r \<mapsto> (construct_witness Qh r))" by auto
from C(2) INVHF(2) INV'HF(1) have G2: ?G2 by (auto simp add: Q'F)
from C(1) INV'HF A have
RI: "r\<in>?dsqr (it-{r})" and
DSS: "dom Q \<subseteq> dom Qh"
by (auto)
from br'_rcm_aux'[OF BR'_INV A(1)] RI have
RDQ: "set (rhsq r) \<subseteq> dom Q"
by auto
with INVHF(2) have "Qh |` set (rhsq r) = Q |` set (rhsq r)"
by (blast intro: restrict_map_subset_eq)
hence [simp]: "construct_witness Qh r = construct_witness Q r"
by (blast dest: construct_witness_eq)
from DSS C(2) have [simp]: "Q (lhs r) = None" "Qh (lhs r) = None" by auto
have G1: ?G1 proof (intro allI impI)
case goal1
{
assume [simp]: "q=lhs r"
from goal1 Q'F have [simp]: "t = (construct_witness Qh r)" by simp
from RI have ?case by auto
} moreover {
assume "q\<noteq>lhs r"
with Q'F goal1 have "Qh q = Some t" by auto
with INVHF(3) have ?case by auto
} ultimately show ?case by blast
qed
note G1 G2
} ultimately show ?G1 ?G2 by blast+
qed
ultimately show ?thesis
by (unfold brw_inner_invar_def Let_def brw_\<alpha>_def) auto
qed
lemma brw_inner_invar_initial:
"\<lbrakk>q\<in>W\<rbrakk> \<Longrightarrow> brw_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q (Q,W-{q},rcm)
{r\<in>\<delta>. q\<in>set (rhsq r)} (Q,W-{q},rcm)"
by (simp add: brw_inner_invar_def br'_inner_invar_initial brw_\<alpha>_def)
theorem brw_inner_step_proof:
fixes \<alpha>s :: "'\<Sigma> \<Rightarrow> ('Q,'L) brw_state"
fixes cstep :: "('Q,'L) ta_rule \<Rightarrow> '\<Sigma> \<Rightarrow> '\<Sigma>"
fixes \<Sigma>h :: "'\<Sigma>"
fixes cinvar :: "('Q,'L) ta_rule set \<Rightarrow> '\<Sigma> \<Rightarrow> bool"
assumes set_iterate: "set_iteratei \<alpha> invar iteratei"
assumes invar_start: "(\<alpha>s \<Sigma>)\<in>brw_invar \<delta>"
assumes invar_initial: "cinvar {r\<in>\<delta>. q\<in>set (rhsq r)} \<Sigma>h"
assumes invar_step:
"!!it r \<Sigma>. \<lbrakk> r\<in>it; it \<subseteq> {r\<in>\<delta>. q\<in>set (rhsq r)}; cinvar it \<Sigma> \<rbrakk>
\<Longrightarrow> cinvar (it-{r}) (cstep r \<Sigma>)"
assumes step_desc:
"!!it r \<Sigma>. \<lbrakk> r\<in>it; it\<subseteq>{r\<in>\<delta>. q\<in>set (rhsq r)}; cinvar it \<Sigma> \<rbrakk>
\<Longrightarrow> (\<alpha>s \<Sigma>, \<alpha>s (cstep r \<Sigma>)) \<in> brw_inner_step r"
assumes it_set_desc: "invar it_set" "\<alpha> it_set = {r\<in>\<delta>. q\<in>set (rhsq r)}"
assumes QIW[simp]: "q\<in>W"
assumes \<Sigma>_desc[simp]: "\<alpha>s \<Sigma> = (Q,W,rcm)"
assumes \<Sigma>h_desc[simp]: "\<alpha>s \<Sigma>h = (Q,W-{q},rcm)"
shows "(\<alpha>s \<Sigma>, \<alpha>s (iteratei it_set (\<lambda>_. True) cstep \<Sigma>h))\<in>brw_step \<delta>"
proof -
interpret set_iteratei \<alpha> invar iteratei by fact
show ?thesis
apply (rule_tac
I="\<lambda>it \<Sigma>. cinvar it \<Sigma> \<and> brw_inner_invar {r\<in>\<delta>. q\<in>set (rhsq r)} q
(Q,W-{q},rcm) it (\<alpha>s \<Sigma>)"
in iterate_rule_P)
apply (auto
simp add: it_set_desc invar_initial brw_inner_invar_initial invar_step
step_desc brw_inner_invar_step[OF invar_start[simplified]]
brw_inner_invar_imp_final[OF QIW])
done
qed
subsection {* Product Automaton *}
text {*
The forward-reduced product automaton can be described as a state-space
exploration problem.
In this section, the DFS-algorithm for state-space exploration
(cf. Theory~@{theory "Exploration"} in the Isabelle Collections Framework) is refined to compute the product automaton.
*}
type_synonym ('Q1,'Q2,'L) frp_state =
"('Q1\<times>'Q2) set \<times> ('Q1\<times>'Q2) list \<times> (('Q1\<times>'Q2),'L) ta_rule set"
definition frp_\<alpha> :: "('Q1,'Q2,'L) frp_state \<Rightarrow> ('Q1\<times>'Q2) dfs_state"
where "frp_\<alpha> S == let (Q,W,\<delta>)=S in (Q, W)"
definition "frp_invar_add \<delta>1 \<delta>2 ==
{ (Q,W,\<delta>d). \<delta>d = { r. r\<in>\<delta>_prod \<delta>1 \<delta>2 \<and> lhs r \<in> Q - set W} }"
definition frp_invar
:: "('Q1, 'L) tree_automaton_rec \<Rightarrow> ('Q2, 'L) tree_automaton_rec
\<Rightarrow> ('Q1,'Q2,'L) frp_state set"
where "frp_invar T1 T2 ==
frp_invar_add (ta_rules T1) (ta_rules T2)
\<inter> { s. frp_\<alpha> s \<in> dfs_invar (ta_initial T1 \<times> ta_initial T2)
(f_succ (\<delta>_prod (ta_rules T1) (ta_rules T2))) }"
inductive_set frp_step
:: "('Q1,'L) ta_rule set \<Rightarrow> ('Q2,'L) ta_rule set
\<Rightarrow> (('Q1,'Q2,'L) frp_state \<times> ('Q1,'Q2,'L) frp_state) set"
for \<delta>1 \<delta>2 where
"\<lbrakk> W=(q1,q2)#Wtl;
distinct Wn;
set Wn = f_succ (\<delta>_prod \<delta>1 \<delta>2) `` {(q1,q2)} - Q;
W'=Wn@Wtl;
Q'=Q \<union> f_succ (\<delta>_prod \<delta>1 \<delta>2) `` {(q1,q2)};
\<delta>d'=\<delta>d \<union> {r\<in>\<delta>_prod \<delta>1 \<delta>2. lhs r = (q1,q2) }
\<rbrakk> \<Longrightarrow> ((Q,W,\<delta>d),(Q',W',\<delta>d'))\<in>frp_step \<delta>1 \<delta>2"
inductive_set frp_initial :: "'Q1 set \<Rightarrow> 'Q2 set \<Rightarrow> ('Q1,'Q2,'L) frp_state set"
for Q10 Q20 where
"\<lbrakk> distinct W; set W = Q10\<times>Q20 \<rbrakk> \<Longrightarrow> (Q10\<times>Q20,W,{}) \<in> frp_initial Q10 Q20"
definition frp_cond :: "('Q1,'Q2,'L) frp_state set" where
"frp_cond == {(Q,W,\<delta>d). W\<noteq>[]}"
definition "frp_algo T1 T2 == \<lparr>
wa_cond = frp_cond,
wa_step = frp_step (ta_rules T1) (ta_rules T2),
wa_initial = frp_initial (ta_initial T1) (ta_initial T2),
wa_invar = frp_invar T1 T2
\<rparr>"
-- "The algorithm refines the DFS-algorithm"
theorem frp_pref_dfs:
"wa_precise_refine (frp_algo T1 T2)
(dfs_algo (ta_initial T1 \<times> ta_initial T2)
(f_succ (\<delta>_prod (ta_rules T1) (ta_rules T2))))
frp_\<alpha>"
apply unfold_locales
apply (auto simp add: frp_algo_def frp_\<alpha>_def frp_cond_def dfs_algo_def
dfs_cond_def frp_invar_def
elim!: frp_step.cases frp_initial.cases
intro: dfs_step.intros dfs_initial.intros
)
done
interpretation frp_ref: wa_precise_refine "(frp_algo T1 T2)"
"(dfs_algo (ta_initial T1 \<times> ta_initial T2)
(f_succ (\<delta>_prod (ta_rules T1) (ta_rules T2))))"
"frp_\<alpha>" using frp_pref_dfs .
-- "The algorithm is a well-defined while-algorithm"
theorem frp_while_algo:
assumes TA: "tree_automaton T1"
"tree_automaton T2"
shows "while_algo (frp_algo T1 T2)"
proof -
interpret t1: tree_automaton T1 by fact
interpret t2: tree_automaton T2 by fact
have finite: "finite ((f_succ (\<delta>_prod (ta_rules T1) (ta_rules T2)))\<^sup>*
`` (ta_initial T1 \<times> ta_initial T2))"
proof -
have "((f_succ (\<delta>_prod (ta_rules T1) (ta_rules T2)))\<^sup>*
`` (ta_initial T1 \<times> ta_initial T2))
\<subseteq> ((ta_initial T1 \<times> ta_initial T2)
\<union> \<delta>_states (\<delta>_prod (ta_rules T1) (ta_rules T2)))"
apply rule
apply (drule f_accessible_subset[unfolded f_accessible_def])
apply auto
done
moreover have "finite \<dots>"
by auto
ultimately show ?thesis by (simp add: finite_subset)
qed
show ?thesis
apply (rule frp_ref.wa_intro)
apply (rule dfs_while_algo[OF finite])
apply (simp add: frp_algo_def dfs_algo_def frp_invar_def)
apply (auto simp add: dfs_algo_def frp_algo_def frp_\<alpha>_def
dfs_\<alpha>_def frp_invar_add_def dfs_invar_def
dfs_invar_add_def sse_invar_def
elim!: frp_step.cases) [1]
apply (force simp add: frp_algo_def frp_invar_add_def
elim!: frp_initial.cases)
done
qed
(* unused
lemma f_succ_adv:
"\<lbrakk>lhs r \<in> (f_succ \<delta>)\<^sup>* `` Q0; r\<in>\<delta>\<rbrakk> \<Longrightarrow> set (rhsq r) \<subseteq> (f_succ \<delta>)\<^sup>* `` Q0"
by (case_tac r) (auto dest: rtrancl_into_rtrancl intro: f_succ.intros)
*)
-- "If the algorithm terminates, the forward reduced product automaton
can be constructed from the result"
theorem frp_inv_final:
"\<forall>s. s\<in>wa_invar (frp_algo T1 T2) \<and> s\<notin>wa_cond (frp_algo T1 T2)
\<longrightarrow> (case s of (Q,W,\<delta>d) \<Rightarrow>
\<lparr> ta_initial = ta_initial T1 \<times> ta_initial T2,
ta_rules = \<delta>d
\<rparr> = ta_fwd_reduce (ta_prod T1 T2))"
apply (intro allI impI)
apply (case_tac s)
apply simp
apply (simp add: ta_reduce_def ta_prod_def frp_algo_def)
proof -
fix Q W \<delta>d
assume A: "(Q,W,\<delta>d)\<in>frp_invar T1 T2 \<and> (Q,W,\<delta>d)\<notin>frp_cond"
from frp_ref.transfer_correctness[OF dfs_invar_final,
unfolded frp_algo_def, simplified,
rule_format, OF A]
have [simp]: "Q = f_accessible (\<delta>_prod (ta_rules T1) (ta_rules T2))
(ta_initial T1 \<times> ta_initial T2)"
by (simp add: f_accessible_def dfs_\<alpha>_def frp_\<alpha>_def)
from A show "\<delta>d = reduce_rules
(\<delta>_prod (ta_rules T1) (ta_rules T2))
(f_accessible (\<delta>_prod (ta_rules T1) (ta_rules T2))
(ta_initial T1 \<times> ta_initial T2))"
apply (auto simp add: reduce_rules_def f_accessible_def frp_invar_def
frp_invar_add_def frp_\<alpha>_def frp_cond_def)
apply (case_tac x)
apply (auto dest: rtrancl_into_rtrancl intro: f_succ.intros)
done
qed
end
|
module Example where
open import Data.List
-- reverse
rev : ∀ {a} {A : Set a} → List A → List A
rev [] = []
rev (x ∷ xs) = rev xs ++ [ x ]
-- https://code.google.com/p/agda/issues/detail?id=1252 temporary workaround
rev' = rev
{-# COMPILED_EXPORT rev' rev' #-}
private
open import Relation.Binary.PropositionalEquality
-- proof that reversing twice gives back the original list
lemma : ∀ {a} {A : Set a} (x : A) (xs : List A) → rev (xs ∷ʳ x) ≡ x ∷ rev xs
lemma x [] = refl
lemma x (_ ∷ xs)
rewrite lemma x xs
= refl
revrev-is-id : ∀ {a} {A : Set a} (xs : List A) → rev (rev xs) ≡ xs
revrev-is-id [] = refl
revrev-is-id (x ∷ xs)
rewrite lemma x (rev xs)
| revrev-is-id xs
= refl
open import Data.Empty
head : ∀ {a} {A : Set a} (xs : List A) → {xs≢[] : xs ≢ []} → A
head [] {xs≠[]} = ⊥-elim (xs≠[] refl)
head (x ∷ xs) = x
{-
{-# COMPILED_EXPORT head safeHead' #-}
-- This results in an error:
-- The type _≡_ cannot be translated to a Haskell type.
-- when checking the pragma COMPILED_EXPORT head' safeHead
--
-- In other words, functions that take proof objects
-- often cannot be COMPILED_EXPORTed as-is.
-}
open import Data.Maybe
head' : ∀ {a} {A : Set a} (xs : List A) → Maybe A
head' = go where
go : ∀ {a} {A : Set a} (xs : List A) → Maybe A
go [] = nothing
go (x ∷ xs) = just (head (x ∷ xs) {λ ()})
{-# COMPILED_EXPORT head' safeHead' #-}
-- In other words, to use the safe head it has to be wrapped
-- appropriately: only safe inputs are passed on and yield a just
-- result, while everything else yields nothing; otherwise it
-- cannot be COMPILED_EXPORTed.
|
Set Implicit Arguments.
Set Asymmetric Patterns.
(* ================================================*)
(* 0. Functional Programming *)
(* ================================================*)
(*
Functional programming (Coq's style)
1. Types provide guidance for building and
destructing data
2. Programs are data
*)
(* ----------------------------------------------- *)
(*
Programs are data
There is no “return” keyword:
expression = statement
- The program that always returns 4 is:
4
- This program also returns (or better, computes to) 4:
if (false || true) then 2 + 2 else 7
if true then 2 + 2 else 7
2 + 2
4
- This program also computes to 4:
2 + (if 7 == 2 then 4 else 2)
2 + (if false then 4 else 2)
2 + 2
4
*)
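(* A quick sanity check of the reductions sketched above (a small
sketch using the prelude's booleans and naturals; the assumption is
that orb and Nat.eqb from the prelude play the roles of || and ==
used informally in the comment): *)
Eval compute in (if orb false true then 2 + 2 else 7).
Eval compute in (2 + (if Nat.eqb 7 2 then 4 else 2)).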
(* ----------------------------------------------- *)
(*
Programs are /really/ data
This data is actually a program that doubles
its input:
(fun x => x + x)
What does this evaluate to?
(fun x => x + x) 3
Recall: f(x) is written (f x) in Coq
This program takes in input a function f and
uses it twice
(fun f => f 3 + f 4)
What does this evaluate to?
(fun f => f 3 + f 4) (fun x => x + x)
*)
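(* The same function-programs as data, checked directly (a small
sketch; the type annotations just make the inferred types explicit): *)
Eval compute in ((fun x : nat => x + x) 3).
Eval compute in ((fun f : nat -> nat => f 3 + f 4) (fun x : nat => x + x)).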
(* ================================================*)
(* 1. Build and destruct simple data *)
(* ================================================*)
(* Booleans *)
Check bool : Type.
Check true : bool.
Check false.
(* Booleans are defined in the prelude as a data
type with exactly the two constructors
true and false:
Inductive bool : Type := true | false.
We can use this fact when we program with
booleans via the "match .. with .. end"
construct.
*)
(* Example: defining the negation *)
Definition negb (b : bool) : bool :=
match b with
| true => false
| false => true
end.
(* Let's look at the type of the function we've just
defined
*)
Check negb.
(* Given the type of negb, if we apply it to a
boolean expression we obtain a boolean.
*)
Check (negb false).
(* Actually, the outermost parentheses can be
omitted. Like in:
Check negb false.
*)
(* In this lecture we are not going to prove that
our programs are correct; that is the topic of
the next lesson. We are just going to test
our programs.
*)
Eval compute in negb true.
Eval compute in negb false.
(* The system provides syntactic sugar for
matching over a boolean.
*)
Definition another_negb (b : bool) : bool :=
if b then false else true.
(* Note that Definition is just a convenient syntax
to name an otherwise anonymous function *)
Definition yet_another_negb :=
(fun b : bool =>
if b then false else true).
(* Definition of the boolean conjunction.
Note: pattern matching over multiple values
is just syntactic sugar.
*)
Definition andb (b1 : bool) (b2 : bool) :=
match b1, b2 with
| true, true => true
| _, _ => false
end.
(* actually, this is equivalent to:
if b1 then b2 else false *)
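(* The equivalent definition mentioned above, written out under a
fresh (hypothetical) name so the two versions can be compared: *)
Definition andb_alt (b1 b2 : bool) : bool :=
  if b1 then b2 else false.
Eval compute in andb_alt true false.
Eval compute in andb_alt true true.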
(* Some more syntactic sugar *)
Notation "x && y" := (andb x y).
Eval compute in true && false.
Eval compute in true && true.
(* ----------------------------------------------- *)
(* Polymorphic data containers: the option type *)
(* The simplest generic container is the option type.
Such a container is either empty, i.e. contains
no value, or it contains some value.
The container type is parametric over the
type A of the values it contains.
Inductive option (A : Type) : Type :=
| None
| Some (a : A).
*)
Check option.
Check option bool : Type.
Check Some true. (* Implicit argument *)
About Some.
(* The @ locally disables the implicit arguments *)
Check @Some bool true.
Check @Some _ true.
(* We now define a function checking if an
option holds a value or not.
Note the A parameter needed in order to write
the type of "box"
*)
Definition is_empty A (box : option A) : bool :=
match box with
| None => true
| Some _ => false (* Here _ means discard *)
end.
(* Note the implicit argument (A not passed) *)
Eval compute in is_empty (Some true).
(* Note: the function is polymorphic! *)
Eval compute in is_empty (Some 4).
(* first example of match with a binder *)
Definition get_default A (box: option A) (a : A) : A :=
match box with
| None => a
| Some x => x
end.
Eval compute in get_default None 3.
(* Here x binds the contents of the Some container,
the value 4, which is also the result. *)
Eval compute in get_default (Some 4) 3.
(* Pairs *)
(* There is only one way to build a pair, and any
two values can be paired
Inductive prod (A B : Type) : Type :=
| pair (a : A) (b : B).
Notation "A * B" := (prod A B).
Notation "( x , y )" := (pair x y).
*)
Check (true, Some false).
Definition fst A B (p : A * B) :=
match p with (x, _) => x end.
Eval compute in fst (true, None).
Definition snd A B (p : A * B) :=
match p with (_, y) => y end.
(* Exercises<<<<<<<<< *)
(* 1.1 Write a comparison function for the bool
data type.
Such function must evaluate to true if and only if
the two input booleans b1 and b2 have the same
value
*)
Definition eq_bool (b1 b2 : bool) : bool :=
match b1, b2 with
| true, true | false, false => true
| _, _ => false
end.
(* 1.2 Test the function you just wrote *)
Eval compute in eq_bool true true.
Eval compute in eq_bool false false.
Eval compute in eq_bool true false.
Eval compute in eq_bool false true.
(* 1.3 Write a function that computes the exclusive
or of the two input booleans *)
Definition xorb (b1 b2 : bool) : bool :=
if eq_bool b1 b2 then false else true.
(* 1.4 Test the function you just wrote *)
Eval compute in
(* 1.5 Write and test a function that
applies the fst projection over the option
type.
Hint: if o has type option (A * B) and, after
scrutiny, o turns out to be "Some x", what is
the type of x?
*)
Definition ofst A B (o: option (A * B)) : option A :=
(* Exercises>>>>>>>>> *)
(* ================================================*)
(* Recursive data and fixpoints *)
(* ================================================*)
(* Datatypes can be recursive
Inductive nat : Type :=
| O
| S (n : nat).
*)
Check S (S O).
Check 1.
(* Recursive types, recursive functions
*)
Fixpoint plus (n : nat) (m : nat) : nat :=
match n with
| O => m
| S n1 => S (plus n1 m)
end.
Infix "+" := plus.
Check 1 + 2.
Eval compute in 1 + 2.
Fixpoint fast_plus (n : nat) (m : nat) : nat :=
match n with
| O => m
| S n1 => fast_plus n1 (S m)
end.
Check fast_plus 1 2.
Eval simpl in
(fun n => fast_plus (S n) 3).
Eval simpl in
(fun n => plus (S n) 3).
(* Lists are pretty much like naturals
Inductive list (A : Type) : Type :=
| nil
| cons (x : A) (xs : list A).
*)
Infix "::" := cons.
Arguments nil {A}.
Arguments cons {A}.
(* The type of lists forces all the elements to
be of the same type! *)
Check true :: false :: nil.
Fail Check 1 :: false :: nil.
(* A non recursive function on lists *)
Definition tl A (l : list A) : list A :=
match l with
| nil => nil
| _ :: xs => xs
end.
Eval compute in tl (6 :: 99 :: nil).
(* The most popular function on lists *)
Fixpoint len A (l : list A) : nat :=
match l with
| nil => O
| x :: xs => 1 + (len xs)
end.
Eval compute in len (1 :: 2 :: 3 :: nil).
(* Two other examples of function over lists:
- from a list of pairs, to a pair of lists
- from two lists, to a list of pairs
Note the let construction to name an
intermediate result used more than once.
*)
Fixpoint split A B (l : list (A * B)) : list A * list B :=
match l with
| nil => (nil, nil)
| (x,y) :: rest =>
let xs_ys := split rest in
(x :: fst xs_ys, y :: snd xs_ys)
end.
Eval compute in
split ((1,2) :: (3,4) :: nil).
Fixpoint zip A B (la : list A) (lb : list B) : list (A * B) :=
match la, lb with
| nil, nil => nil
| x::xs, y::ys => (x,y) :: zip xs ys
| _, _ => nil
end.
Eval compute in
zip (1 :: 2 :: nil) (true :: false :: nil).
Eval compute in
let xs_ys := split ((1,2) :: (3,4) :: nil) in
zip (fst xs_ys) (snd xs_ys).
(* Exercises<<<<<<<<< *)
(* 2.1 Write a function to compare two natural
numbers n1 and n2.
It must evaluate to true if and only if the
two numbers are equal *)
Fixpoint eq_nat n1 n2 :=
Eval compute in eq_nat 7 4.
Eval compute in eq_nat 7 7.
Eval compute in eq_nat 7 (3 + 4).
(* 2.2 Write a function that computes the product
of two natural numbers. Hint: you can use
(many times) the function that computes the
addition of natural numbers *)
Fixpoint mult n1 n2 :=
Infix "*" := mult.
Eval compute in 3 * 4.
Eval compute in eq_nat (3 * 4) 12.
Eval compute in eq_nat (3 * 0) 0.
Eval compute in eq_nat 0 (3 * 0).
(* 2.3 Write a function that appends two lists
Example:
append (1 :: 2 :: nil) (3 :: nil)
must evaluate to
(1 :: 2 :: 3 :: nil)
*)
Fixpoint
append A (l1 : list A) (l2 : list A) : list A
:=
Eval compute in append (1 :: 2 :: nil) (3 :: nil).
(* 2.4 Write a function that reverses a list.
Hint: use append. *)
Fixpoint rev1 A (l : list A) : list A :=
Eval compute in rev1 (1 :: 2 :: 3 :: nil).
(* 2.5 Again list reversal, but this time using
an auxiliary function that uses an accumulator. *)
Fixpoint rev2_aux A (acc l : list A) : list A :=
match l with
| nil => acc
end.
Definition rev2 A (l : list A) := rev2_aux nil l.
Eval compute in rev2 (1 :: 2 :: 3 :: nil).
(* Exercises>>>>>>>>> *)
(* ================================================*)
(* Illegal data types and recursive functions *)
(* ================================================*)
Fail
Fixpoint wrong A (l : list A) {struct l} :=
match l with
| nil => 0
| x :: xs => 1 + wrong (x :: nil)
end.
(* RUN THAT IN A PATCHED (UNSOUND) COQ
Recall:
Inductive False : Prop := .
i.e. There is no way to build a value
of type False.
*)
(*
Fixpoint loop (n : nat) : False := loop n.
Check loop 3.
Fail Timeout 2 Eval compute in loop 3.
Inductive non_positive : Type :=
| Call (f : non_positive -> False)
Definition self (t : non_positive) : False :=
match t with
| Call f => f t
end.
Definition loop2 : False := self (Call self).
Fail Timeout 2 Eval compute in loop2.
*)
(*
Note: for the experts in the room...
Yes, there are ways to use a well founded order
relation as the decreasing measure. See the
Function ... {measure ...}
and
Function ... {wf ...}
in the Reference Manual.
*)
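(* A minimal sketch of a measure-based definition (assumptions: a
recent Coq in which the Recdef and Lia libraries are available, and
div2 is a fresh, hypothetical name). Each recursive call yields a
proof obligation stating that the chosen measure decreases. *)
Require Import Recdef Lia.
Function div2 (n : nat) {measure (fun x => x) n} : nat :=
  match n with
  | 0 => 0
  | 1 => 0
  | S (S p) => S (div2 p)
  end.
Proof.
  (* only obligation: the recursive argument is smaller w.r.t. the measure *)
  intros; simpl; lia.
Defined.
Check div2.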
(* ================================================*)
(* Higher order programming *)
(* ================================================*)
(* A function can be abstracted over another
function. It is a useful mechanism to write
code that can be reused, especially in the context
of polymorphic containers
*)
Fixpoint map A B (f : A -> B) (l : list A) : list B :=
match l with
| nil => nil
| x :: xs => f x :: map f xs
end.
Eval compute in
map (fun x => x + 2) (3 :: 4 :: 7 :: nil).
Eval compute in
map negb (true :: false :: nil).
(* fold f (x1 :: x2 :: .. xn :: nil) a
=
(f xn (.. (f x2 (f x1 a))))
*)
Fixpoint fold A B (f : B -> A -> A) (l : list B) (a : A) : A :=
match l with
| nil => a
| x :: xs => fold f xs (f x a)
end.
Eval compute in fold plus (1 :: 2 :: 3 :: nil) 0.
(* Exercises<<<<<<<<< *)
(* 4.1 Write a function that reverses a list based on fold. Hint: use fold and cons.
*)
Definition rev A (l : list A) :=
Eval compute in rev (1 :: 2 :: 3 :: 4 :: nil).
(* 4.2 Write a function that appends two lists, this
time using fold and rev *)
Definition another_append A (l1 l2 : list A) :=
Eval compute in
another_append (1 :: 2 :: nil) (3 :: 4 :: nil).
(* 4.3 The higher order function iter takes a
function f, an initial value a and a number n.
The result is (f (f ... (f a))),
where f is applied n times. *)
Fixpoint iter A (f : A -> A) a n :=
(* 4.4 Write a function that computes the sum of
two natural numbers using iter *)
Definition another_plus n1 n2 :=
Eval compute in another_plus 3 4.
(* 4.5 Write a function that computes the product of
two natural numbers using iter and plus *)
Definition another_mult n1 n2 :=
Eval compute in another_mult 3 7.
Eval compute in another_mult 3 0.
Eval compute in another_mult 0 7.
Eval compute in another_mult 2 4.
(* Exercises>>>>>>>>> *)
(* ================================================*)
(* Code reuse: a taste of ad-hoc polymorphism *)
(* ================================================*)
Class Eq (A : Type) := cmp : A -> A -> bool.
Infix "==" := cmp (at level 70, no associativity).
Fixpoint mem A `{Eq A} (y : A) (l : list A) : bool :=
match l with
| nil => false
| x :: xs => if x == y then true else mem y xs
end.
Instance bool_Eq : Eq bool := eq_bool.
Check mem true (false :: false :: true :: nil).
Eval compute in
mem true (false :: false :: true :: nil).
Instance pair_Eq A `{Eq A} B `{Eq B} : Eq (A * B) :=
fun (x y : A * B) =>
(fst x == fst y) && (snd x == snd y).
Eval compute in
mem (true,false)
((false,false) :: (true,false) :: nil).
Eval compute in
mem (true,false)
((false,false) :: (false,true) :: nil).
(* Exercises<<<<<<<<< *)
(* 5.1 Register eq_nat as the comparison function
for the nat type *)
Instance nat_Eq : Eq nat :=
(* Example: an associative list mapping numbers to
boolean values.
1 is mapped to true, 2 to false. *)
Definition an_associative_list : list (nat * bool) :=
(1,true) :: (2,false) :: nil.
(* 5.2 Write a function that finds the value
associated to y in the associative list l. *)
Fixpoint
find A {e : Eq A} (y : A) B (l : list (A * B))
:
option B
:=
Definition data := 1 :: 4 :: 7 :: nil.
(* 5.3 Define a list of pairs of natural numbers
such that the first item is an element of the list
data (the one just defined) and the second item
is its square.
I.e. the list must be (1,1)::(4,16)::... but
don't write it by hand. Instead, use map. *)
Definition square_cache :=
Eval compute in find 3 square_cache.
(* 5.4 The function square takes a cache c (that is
an associative list) and a number n. It computes
a pair: the first component is the square of the
input n while the second component is
a (possibly) updated cache. *)
Definition square cache n :=
Eval compute in square square_cache 3.
(* Exercises>>>>>>>>> *)
(* vim: set tw=50 *)
|
[STATEMENT]
lemma word_test_bit_def:
\<open>bit a = bit (uint a)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bit a = bit (uint a)
[PROOF STEP]
by transfer (simp add: fun_eq_iff bit_take_bit_iff) |
Require Import Notation.
Require Import GeneralTactics.
Require Import Axioms.
Theorem inj_pair1 : forall A (B: A -> Type) (a a': A) (b: B a) (b': B a'),
⟨a, b⟩ = ⟨a', b'⟩ -> a = a'.
Proof using.
intros * H.
now inv H.
Qed.
Theorem inj_pair2_heq : forall A (B: A -> Type) (a a': A) (b: B a) (b': B a'),
⟨a, b⟩ = ⟨a', b'⟩ -> b ~=~ b'.
Proof using.
intros * H.
now inv H.
Qed.
Theorem inj_pair : forall A (B: A -> Type) (a a': A) (b: B a) (b': B a'),
⟨a, b⟩ = ⟨a', b'⟩ -> a = a' /\ b ~=~ b'.
Proof using.
intros * H.
split.
- follows eapply inj_pair1.
- follows eapply inj_pair2_heq.
Qed.
Lemma exists_unique_exists : forall A (P: A -> Prop),
(exists! a, P a) ->
exists a, P a.
Proof using.
tedious.
Qed.
Lemma forall_eq_intro : forall A (B C: A -> Type),
B = C ->
(forall a, B a) = (forall a, C a).
Proof using.
follows intros * <-.
Qed.
Require Import FinFun.
Lemma injective_neg_defn : forall A B (f: A -> B),
Injective f =
forall x y: A, x <> y -> f x <> f y.
Proof using.
intros *.
extensionality.
after split.
intros inj_neg x y f_eq.
contradict goal contra.
follows eapply inj_neg.
Qed.
(* any type *)
Definition any : Type := Σ t: Type, t.
Definition box {t} (x: t) : any := existT (λ x, x) t x.
Definition unbox {A} (a: any) (elim: forall t, t -> A) : A :=
elim (projT1 a) (projT2 a).
Definition any_destr (a: any) : Σ t (v: t), a = box v.
follows destruct a.
Defined.
Lemma any_canonical : forall a: any,
exists t (v: t), a = box v.
Proof using.
follows destruct a.
Qed.
Lemma box_eq_homo : forall t (v v': t),
box v = box v' ->
v = v'.
Proof using.
intros * H.
follows inject H.
Qed.
Lemma box_eq_hetero : forall t (v: t) t' (v': t'),
box v = box v' ->
v ~=~ v'.
Proof using.
intros * H.
follows inject H.
Qed.
Ltac unbox x :=
let t := fresh "t" in
destruct x as [t x];
change ⟨_, x⟩ with (box x) in *.
|
\<^marker>\<open>creator "Kevin Kappelmann"\<close>
subsection \<open>Restriction\<close>
theory TSFunctions_Extend_Restrict
imports TSFunctions_Base
begin
unbundle no_restrict_syntax
lemma restrict_Dep_Function_type [type]:
"set_restrict_left : ((x : A) \<rightarrow>s B x) \<Rightarrow> (P : Set \<Rightarrow> Bool) \<Rightarrow>
((x : (A & type P)) \<rightarrow>s B x) "
proof (intro Dep_fun_typeI Dep_FunctionI)
fix f P assume f_type: "f : (x : A) \<rightarrow>s B x"
let ?A' = "A & type P"
have "type_pred ?A' = (\<lambda>x. x : A \<and> P x)" by unfold_types
with f_type show "set_left_total_on ?A' f\<restriction>\<^bsub>P\<^esub>"
by (auto intro: set_left_total_on_inf_restrict_leftI)
from f_type have "set_right_unique_on (type_pred A) f"
by (auto dest: Dep_Function_set_right_unique_on)
then have "set_right_unique_on (type_pred A) f\<restriction>\<^bsub>P\<^esub>"
using antimonoD[OF antimono_set_right_unique_on_set]
by (rule le_boolD'') auto
then have "set_right_unique_on (type_pred ?A') f\<restriction>\<^bsub>P\<^esub>"
using antimonoD[OF antimono_set_right_unique_on_pred]
by (rule le_boolD'') (auto dest: Int_typeD1)
then show "set_right_unique_on ?A' f\<restriction>\<^bsub>P\<^esub>" by simp
fix x assume "x : A & type P"
then have "x : A" and "P x" by unfold_types
then have "f\<restriction>\<^bsub>P\<^esub>`x = f`x" by simp
with f_type show "f\<restriction>\<^bsub>P\<^esub>`x : B x" by auto
qed
lemma restrict_Dep_Function_set_type [type]:
"set_restrict_left : Dep_Function A B \<Rightarrow> (A' : Set) \<Rightarrow>
Dep_Function (A & Element A') B"
(*TODO: should be proved with lemma above*)
by unfold_types force
lemma restrict_Dep_Function_type_type [type]:
"set_restrict_left : Dep_Function A B \<Rightarrow> (T : Any) \<Rightarrow> Dep_Function (A & T) B"
(*TODO: should be proved with lemma above*)
by unfold_types force
lemma restrict_CDep_Function_if_Dep_Function [derive]:
assumes "f : (x : A) \<rightarrow>s B x"
shows "f\<restriction>\<^bsub>A\<^esub> : (x : A) \<rightarrow>cs B x"
proof (rule CDep_FunctionI)
have "f\<restriction>\<^bsub>A\<^esub> : (x : A & A) \<rightarrow>s B x" by discharge_types
then show "f\<restriction>\<^bsub>A\<^esub> : (x : A) \<rightarrow>s B x"
by (elim Dep_Function_contravariant_dom) discharge_types
show "f\<restriction>\<^bsub>A\<^esub> : Dep_Bin_Rel A B"
proof (rule Dep_Bin_RelI)
fix p assume "p \<in> f\<restriction>\<^bsub>A\<^esub>"
then obtain x y where "p = \<langle>x, y\<rangle>" "\<langle>x, y\<rangle> \<in> f" "x : A" by auto
moreover with assms have "y : B x" by auto
ultimately show "p : \<Sum>x : A. (B x)" by simp
qed
qed
end |
State Before: x y : ℝ≥0
⊢ 0 < ↑sqrt x ↔ 0 < x
State After: no goals
Tactic: simp [pos_iff_ne_zero] |
{-# LANGUAGE DataKinds, RankNTypes, TypeFamilies #-}
module TestMnistFCNN
( testTrees, shortTestForCITrees, mnistTestCase2T, mnistTestCase2D
) where
import Prelude
import Control.DeepSeq
import Control.Monad (foldM, when)
import Data.Array.Internal (valueOf)
import Data.Coerce (coerce)
import Data.List.Index (imap)
import Data.Proxy (Proxy (Proxy))
import Data.Time.Clock.POSIX (POSIXTime, getPOSIXTime)
import qualified Data.Vector.Generic as V
import GHC.TypeLits (KnownNat)
import qualified Numeric.LinearAlgebra as HM
import System.IO (hFlush, hPutStrLn, stderr)
import System.Random
import Test.Tasty
import Test.Tasty.HUnit hiding (assert)
import Test.Tasty.QuickCheck hiding (label, shuffle)
import Text.Printf
import HordeAd
import HordeAd.Core.OutdatedOptimizer
import HordeAd.Tool.MnistTools
import TestCommon
testTrees :: [TestTree]
testTrees = [ dumbMnistTests
, bigMnistTests
, vectorMnistTests
, matrixMnistTests
, fusedMnistTests
]
shortTestForCITrees :: [TestTree]
shortTestForCITrees = [ dumbMnistTests
, shortCIMnistTests
]
sgdShow :: HasDelta r
=> r
-> (a -> DualNumberVariables 'DModeGradient r -> DualMonadGradient r (DualNumber 'DModeGradient r))
-> [a] -- ^ training data
-> Domain0 r -- ^ initial parameters
-> r
sgdShow gamma f trainData params0Init =
let result =
fst $ sgd gamma f trainData (params0Init, V.empty, V.empty, V.empty)
in snd $ dReverse 1 (f $ head trainData) result
sgdTestCase :: String
-> IO [a]
-> (Int
-> Int
-> a
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double
(DualNumber 'DModeGradient Double))
-> Double
-> Double
-> TestTree
sgdTestCase prefix trainDataIO trainWithLoss gamma expected =
let widthHidden = 250
widthHidden2 = 50
nParams0 = fcnnMnistLen0 widthHidden widthHidden2
vec = HM.randomVector 33 HM.Uniform nParams0 - HM.scalar 0.5
name = prefix ++ " "
++ unwords [show widthHidden, show nParams0, show gamma]
in testCase name $ do
trainData <- trainDataIO
sgdShow gamma (trainWithLoss widthHidden widthHidden2)
trainData vec
@?= expected
mnistTestCase2
:: String
-> Int
-> Int
-> (Int
-> Int
-> MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double (DualNumber 'DModeGradient Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2 prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma expected =
let nParams0 = fcnnMnistLen0 widthHidden widthHidden2
params0Init = HM.randomVector 44 HM.Uniform nParams0 - HM.scalar 0.5
name = prefix ++ ": "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show gamma ]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
-- Mimic how backprop tests and display it, even though tests
-- should not print, in principle.
let runBatch :: Domain0 Double -> (Int, [MnistData Double])
-> IO (Domain0 Double)
runBatch !params0 (k, chunk) = do
let f = trainWithLoss widthHidden widthHidden2
(!res, _, _, _) =
fst $ sgd gamma f chunk (params0, V.empty, V.empty, V.empty)
!trainScore = fcnnMnistTest0 (Proxy @Double)
widthHidden widthHidden2 chunk res
!testScore = fcnnMnistTest0 (Proxy @Double)
widthHidden widthHidden2 testData res
!lenChunk = length chunk
hPutStrLn stderr $ printf "\n%s: (Batch %d with %d points)" prefix k lenChunk
hPutStrLn stderr $ printf "%s: Training error: %.2f%%" prefix ((1 - trainScore) * 100)
hPutStrLn stderr $ printf "%s: Validation error: %.2f%%" prefix ((1 - testScore ) * 100)
return res
let runEpoch :: Int -> Domain0 Double -> IO (Domain0 Double)
runEpoch n params0 | n > epochs = return params0
runEpoch n params0 = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let trainDataShuffled = shuffle (mkStdGen $ n + 5) trainData
chunks = take maxBatches
$ zip [1 ..] $ chunksOf 5000 trainDataShuffled
!res <- foldM runBatch params0 chunks
runEpoch (succ n) res
res <- runEpoch 1 params0Init
let testErrorFinal = 1 - fcnnMnistTest0 (Proxy @Double) widthHidden widthHidden2 testData res
testErrorFinal @?= expected
mnistTestCase2V
:: String
-> Int
-> Int
-> (Int
-> Int
-> MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double (DualNumber 'DModeGradient Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2V prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma expected =
let (nParams0, nParams1, _, _) = fcnnMnistLen1 widthHidden widthHidden2
params0Init = HM.randomVector 44 HM.Uniform nParams0 - HM.scalar 0.5
params1Init = V.fromList $
imap (\i nPV -> HM.randomVector (44 + nPV + i) HM.Uniform nPV
- HM.scalar 0.5)
nParams1
name = prefix ++ ": "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show (length nParams1)
, show (sum nParams1 + nParams0), show gamma ]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
-- Mimic how backprop tests and display it, even though tests
-- should not print, in principle.
let runBatch :: (Domain0 Double, Domain1 Double)
-> (Int, [MnistData Double])
-> IO (Domain0 Double, Domain1 Double)
runBatch (!params0, !params1) (k, chunk) = do
let f = trainWithLoss widthHidden widthHidden2
(resS, resV, _, _) =
fst $ sgd gamma f chunk (params0, params1, V.empty, V.empty)
res = (resS, resV)
!trainScore = fcnnMnistTest1
widthHidden widthHidden2 chunk res
!testScore = fcnnMnistTest1
widthHidden widthHidden2 testData res
!lenChunk = length chunk
hPutStrLn stderr $ printf "\n%s: (Batch %d with %d points)" prefix k lenChunk
hPutStrLn stderr $ printf "%s: Training error: %.2f%%" prefix ((1 - trainScore) * 100)
hPutStrLn stderr $ printf "%s: Validation error: %.2f%%" prefix ((1 - testScore ) * 100)
return res
let runEpoch :: Int -> (Domain0 Double, Domain1 Double)
-> IO (Domain0 Double, Domain1 Double)
runEpoch n params2 | n > epochs = return params2
runEpoch n params2 = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let trainDataShuffled = shuffle (mkStdGen $ n + 5) trainData
chunks = take maxBatches
$ zip [1 ..] $ chunksOf 5000 trainDataShuffled
!res <- foldM runBatch params2 chunks
runEpoch (succ n) res
res <- runEpoch 1 (params0Init, params1Init)
let testErrorFinal =
1 - fcnnMnistTest1 widthHidden widthHidden2 testData res
testErrorFinal @?= expected
fcnnMnistLossTanh :: DualMonad 'DModeGradient Double m
=> Int
-> Int
-> MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> m (DualNumber 'DModeGradient Double)
fcnnMnistLossTanh widthHidden widthHidden2 (xs, targ) vec = do
res <- fcnnMnist0 tanhAct softMaxAct widthHidden widthHidden2 xs vec
lossCrossEntropy targ res
fcnnMnistLossRelu :: DualMonad 'DModeGradient Double m
=> Int
-> Int
-> MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> m (DualNumber 'DModeGradient Double)
fcnnMnistLossRelu widthHidden widthHidden2 (xs, targ) vec = do
res <- fcnnMnist0 reluAct softMaxAct widthHidden widthHidden2 xs vec
lossCrossEntropy targ res
mnistTestCase2L
:: String
-> Int
-> Int
-> (MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double (DualNumber 'DModeGradient Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2L prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma expected =
let ((nParams0, nParams1, nParams2, _), totalParams, range, parameters0) =
initializerFixed 44 0.5 (fcnnMnistLen2 widthHidden widthHidden2)
name = prefix ++ ": "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show nParams1, show nParams2
, show totalParams, show gamma, show range]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
-- Mimic how backprop tests and display it, even though tests
-- should not print, in principle.
let runBatch :: Domains Double
-> (Int, [MnistData Double])
-> IO (Domains Double)
runBatch (!params0, !params1, !params2, !paramsX) (k, chunk) = do
let f = trainWithLoss
res = fst $ sgd gamma f chunk
(params0, params1, params2, paramsX)
!trainScore = fcnnMnistTest2 @Double chunk res
!testScore = fcnnMnistTest2 @Double testData res
!lenChunk = length chunk
hPutStrLn stderr $ printf "\n%s: (Batch %d with %d points)" prefix k lenChunk
hPutStrLn stderr $ printf "%s: Training error: %.2f%%" prefix ((1 - trainScore) * 100)
hPutStrLn stderr $ printf "%s: Validation error: %.2f%%" prefix ((1 - testScore ) * 100)
return res
let runEpoch :: Int
-> Domains Double
-> IO (Domains Double)
runEpoch n params2 | n > epochs = return params2
runEpoch n params2 = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let trainDataShuffled = shuffle (mkStdGen $ n + 5) trainData
chunks = take maxBatches
$ zip [1 ..] $ chunksOf 5000 trainDataShuffled
!res <- foldM runBatch params2 chunks
runEpoch (succ n) res
res <- runEpoch 1 parameters0
let testErrorFinal = 1 - fcnnMnistTest2 testData res
testErrorFinal @?= expected
mnistTestCase2T
:: Bool
-> String
-> Int
-> Int
-> (MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double (DualNumber 'DModeGradient Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2T reallyWriteFile
prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma expected =
let ((nParams0, nParams1, nParams2, _), totalParams, range, !parameters0) =
initializerFixed 44 0.5 (fcnnMnistLen2 widthHidden widthHidden2)
name = prefix ++ " "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show nParams1, show nParams2
, show totalParams, show gamma, show range]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData0 <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
let !trainData = force $ shuffle (mkStdGen 6) trainData0
-- Mimic how the backprop tests do it and display the progress, even though
-- tests should not print, in principle.
let runBatch :: (Domains Double, [(POSIXTime, Double)])
-> (Int, [MnistData Double])
-> IO (Domains Double, [(POSIXTime, Double)])
runBatch ((!params0, !params1, !params2, !paramsX), !times)
(k, chunk) = do
when (k `mod` 100 == 0) $ do
hPutStrLn stderr $ printf "%s: %d " prefix k
hFlush stderr
let f = trainWithLoss
(!params0New, !value) =
sgd gamma f chunk (params0, params1, params2, paramsX)
time <- getPOSIXTime
return (params0New, (time, value) : times)
let runEpoch :: Int
-> (Domains Double, [(POSIXTime, Double)])
-> IO (Domains Double, [(POSIXTime, Double)])
runEpoch n params2times | n > epochs = return params2times
runEpoch n (!params2, !times2) = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let !trainDataShuffled =
if n > 1
then shuffle (mkStdGen $ n + 5) trainData
else trainData
chunks = take maxBatches
$ zip [1 ..] $ chunksOf 1 trainDataShuffled
res <- foldM runBatch (params2, times2) chunks
runEpoch (succ n) res
timeBefore <- getPOSIXTime
(res, times) <- runEpoch 1 (parameters0, [])
let ppTime (t, l) = init (show (t - timeBefore)) ++ " " ++ show l
when reallyWriteFile $
writeFile "walltimeLoss.txt" $ unlines $ map ppTime times
let testErrorFinal = 1 - fcnnMnistTest2 testData res
testErrorFinal @?= expected
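-- Variant using sgdBatchForward on mini-batches of the given size, with
-- optional exponential decay of the learning rate.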
mnistTestCase2D
:: Bool
-> Int
-> Bool
-> String
-> Int
-> Int
-> (MnistData Double
-> DualNumberVariables 'DModeGradient Double
-> DualMonadGradient Double (DualNumber 'DModeGradient Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2D reallyWriteFile miniBatchSize decay
prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma0 expected =
let np = fcnnMnistLen2 widthHidden widthHidden2
((nParams0, nParams1, nParams2, _), totalParams, range, !parameters0) =
initializerFixed 44 0.5 np
name = prefix ++ " "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show nParams1, show nParams2
, show totalParams, show gamma0, show range]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData0 <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
let !trainData = force $ shuffle (mkStdGen 6) trainData0
-- Mimic how the backprop tests do it and display the progress, even though
-- tests should not print, in principle.
let runBatch :: (Domains Double, [(POSIXTime, Double)])
-> (Int, [MnistData Double])
-> IO (Domains Double, [(POSIXTime, Double)])
runBatch ((!params0, !params1, !params2, !paramsX), !times)
(k, chunk) = do
when (k `mod` 100 == 0) $ do
hPutStrLn stderr $ printf "%s: %d " prefix k
hFlush stderr
let f = trainWithLoss
gamma = if decay
then gamma0 * exp (- fromIntegral k * 1e-4)
else gamma0
(!params0New, !value) =
sgdBatchForward (33 + k * 7) miniBatchSize gamma f chunk
(params0, params1, params2, paramsX) np
time <- getPOSIXTime
return (params0New, (time, value) : times)
let runEpoch :: Int
-> (Domains Double, [(POSIXTime, Double)])
-> IO (Domains Double, [(POSIXTime, Double)])
runEpoch n params2times | n > epochs = return params2times
runEpoch n (!params2, !times2) = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let !trainDataShuffled =
if n > 1
then shuffle (mkStdGen $ n + 5) trainData
else trainData
chunks = take maxBatches
$ zip [1 ..]
$ chunksOf miniBatchSize trainDataShuffled
res <- foldM runBatch (params2, times2) chunks
runEpoch (succ n) res
timeBefore <- getPOSIXTime
(res, times) <- runEpoch 1 (parameters0, [])
let ppTime (t, l) = init (show (t - timeBefore)) ++ " " ++ show l
when reallyWriteFile $
writeFile "walltimeLoss.txt" $ unlines $ map ppTime times
let testErrorFinal = 1 - fcnnMnistTest2 testData res
testErrorFinal @?= expected
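-- Like mnistTestCase2D, but differentiates in forward mode via
-- sgdBatchFastForward.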
mnistTestCase2F
:: Bool
-> Int
-> Bool
-> String
-> Int
-> Int
-> (MnistData Double
-> DualNumberVariables 'DModeDerivative Double
-> DualMonadForward Double (DualNumber 'DModeDerivative Double))
-> Int
-> Int
-> Double
-> Double
-> TestTree
mnistTestCase2F reallyWriteFile miniBatchSize decay
prefix epochs maxBatches trainWithLoss widthHidden widthHidden2
gamma0 expected =
let np = fcnnMnistLen2 widthHidden widthHidden2
((nParams0, nParams1, nParams2, _), totalParams, range, !parameters0) =
initializerFixed 44 0.5 np
name = prefix ++ " "
++ unwords [ show epochs, show maxBatches
, show widthHidden, show widthHidden2
, show nParams0, show nParams1, show nParams2
, show totalParams, show gamma0, show range]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData0 <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
let !trainData = coerce $ force $ shuffle (mkStdGen 6) trainData0
-- Mimic how the backprop tests do it and display the progress, even though
-- tests should not print, in principle.
let runBatch :: (Domains Double, [(POSIXTime, Double)])
-> (Int, [MnistData Double])
-> IO (Domains Double, [(POSIXTime, Double)])
runBatch ((!params0, !params1, !params2, !paramsX), !times)
(k, chunk) = do
when (k `mod` 100 == 0) $ do
hPutStrLn stderr $ printf "%s: %d " prefix k
hFlush stderr
let f = trainWithLoss
gamma = if decay
then gamma0 * exp (- fromIntegral k * 1e-4)
else gamma0
(!params0New, !value) =
sgdBatchFastForward (33 + k * 7) miniBatchSize gamma f chunk
(params0, params1, params2, paramsX) np
time <- getPOSIXTime
return (params0New, (time, value) : times)
let runEpoch :: Int
-> (Domains Double, [(POSIXTime, Double)])
-> IO (Domains Double, [(POSIXTime, Double)])
runEpoch n params2times | n > epochs = return params2times
runEpoch n (!params2, !times2) = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let !trainDataShuffled =
if n > 1
then shuffle (mkStdGen $ n + 5) trainData
else trainData
chunks = take maxBatches
$ zip [1 ..]
$ chunksOf miniBatchSize trainDataShuffled
res <- foldM runBatch (params2, times2) chunks
runEpoch (succ n) res
timeBefore <- getPOSIXTime
(res, times) <- runEpoch 1 (parameters0, [])
let ppTime (t, l) = init (show (t - timeBefore)) ++ " " ++ show l
when reallyWriteFile $
writeFile "walltimeLoss.txt" $ unlines $ map ppTime times
let testErrorFinal = 1 - fcnnMnistTest2 testData res
testErrorFinal @?= expected
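-- Variant with statically sized hidden layers: the widths are passed as
-- type-level naturals via the Proxy arguments.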
mnistTestCase2S
:: forall widthHidden widthHidden2.
(KnownNat widthHidden, KnownNat widthHidden2)
=> Proxy widthHidden -> Proxy widthHidden2
-> String
-> Int
-> Int
-> (forall d r m. DualMonad d r m
=> Proxy widthHidden -> Proxy widthHidden2
-> MnistData r -> DualNumberVariables d r -> m (DualNumber d r))
-> Double
-> Double
-> TestTree
mnistTestCase2S proxy proxy2
prefix epochs maxBatches trainWithLoss gamma expected =
let ((_, _, _, nParamsX), totalParams, range, parametersInit) =
initializerFixed 44 0.5 (fcnnMnistLenS @widthHidden @widthHidden2)
name = prefix ++ ": "
++ unwords [ show epochs, show maxBatches
, show (valueOf @widthHidden :: Int)
, show (valueOf @widthHidden2 :: Int)
, show nParamsX, show totalParams
, show gamma, show range ]
in testCase name $ do
hPutStrLn stderr $ printf "\n%s: Epochs to run/max batches per epoch: %d/%d"
prefix epochs maxBatches
trainData <- loadMnistData trainGlyphsPath trainLabelsPath
testData <- loadMnistData testGlyphsPath testLabelsPath
let runBatch :: Domains Double
-> (Int, [MnistData Double])
-> IO (Domains Double)
runBatch (!params0, !params1, !params2, !paramsX) (k, chunk) = do
let f = trainWithLoss proxy proxy2
res = fst $ sgd gamma f chunk
(params0, params1, params2, paramsX)
!trainScore = fcnnMnistTestS @widthHidden @widthHidden2
chunk res
!testScore = fcnnMnistTestS @widthHidden @widthHidden2
testData res
!lenChunk = length chunk
hPutStrLn stderr $ printf "\n%s: (Batch %d with %d points)" prefix k lenChunk
hPutStrLn stderr $ printf "%s: Training error: %.2f%%" prefix ((1 - trainScore) * 100)
hPutStrLn stderr $ printf "%s: Validation error: %.2f%%" prefix ((1 - testScore ) * 100)
return res
let runEpoch :: Int
-> Domains Double
-> IO (Domains Double)
runEpoch n params2 | n > epochs = return params2
runEpoch n params2 = do
hPutStrLn stderr $ printf "\n%s: [Epoch %d]" prefix n
let trainDataShuffled = shuffle (mkStdGen $ n + 5) trainData
chunks = take maxBatches
$ zip [1 ..] $ chunksOf 5000 trainDataShuffled
!res <- foldM runBatch params2 chunks
runEpoch (succ n) res
res <- runEpoch 1 parametersInit
let testErrorFinal = 1 - fcnnMnistTestS @widthHidden @widthHidden2
testData res
testErrorFinal @?= expected
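-- Sanity checks: pretty-printing the loss expression, training on degenerate
-- data sets, test error with fixed parameters, and QuickCheck properties
-- comparing forward derivatives against gradients.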
dumbMnistTests :: TestTree
dumbMnistTests = testGroup "Dumb MNIST tests"
[ testCase "1pretty-print in grey 3 2" $ do
let (nParams0, lParams1, lParams2, _) = fcnnMnistLen2 4 3
vParams1 = V.fromList lParams1
vParams2 = V.fromList lParams2
params0 = V.replicate nParams0 (1 :: Float)
params1 = V.map (`V.replicate` 2) vParams1
params2 = V.map (HM.konst 3) vParams2
blackGlyph = V.replicate sizeMnistGlyph 4
blackLabel = V.replicate sizeMnistLabel 5
trainData = (blackGlyph, blackLabel)
output = prettyPrintDf False (fcnnMnistLoss2 trainData)
(params0, params1, params2, V.empty)
-- printf "%s" output
length output @?= 13348
, testCase "2pretty-print in grey 3 2 fused" $ do
let (nParams0, lParams1, lParams2, _) = fcnnMnistLen2 4 3
vParams1 = V.fromList lParams1
vParams2 = V.fromList lParams2
params0 = V.replicate nParams0 (1 :: Float)
params1 = V.map (`V.replicate` 2) vParams1
params2 = V.map (HM.konst 3) vParams2
blackGlyph = V.replicate sizeMnistGlyph 4
blackLabel = V.replicate sizeMnistLabel 5
trainData = (blackGlyph, blackLabel)
output = prettyPrintDf True (fcnnMnistLossFused2 trainData)
(params0, params1, params2, V.empty)
-- printf "%s" output
length output @?= 12431
, testCase "3pretty-print on testset 3 2" $ do
let (_, _, _, parameters0) = initializerFixed 44 0.5 (fcnnMnistLen2 4 3)
testData <- loadMnistData testGlyphsPath testLabelsPath
let trainDataItem = head testData
output = prettyPrintDf True (fcnnMnistLoss2 trainDataItem) parameters0
-- printf "%s" output
length output @?= 16449
, let blackGlyph = V.replicate sizeMnistGlyph 0
blackLabel = V.replicate sizeMnistLabel 0
trainData = replicate 10 (blackGlyph, blackLabel)
in sgdTestCase "black"
(return trainData) fcnnMnistLoss0 0.02 (-0.0)
, let whiteGlyph = V.replicate sizeMnistGlyph 1
whiteLabel = V.replicate sizeMnistLabel 1
trainData = replicate 20 (whiteGlyph, whiteLabel)
in sgdTestCase "white"
(return trainData) fcnnMnistLoss0 0.02 23.02585095418536
, let blackGlyph = V.replicate sizeMnistGlyph 0
whiteLabel = V.replicate sizeMnistLabel 1
trainData = replicate 50 (blackGlyph, whiteLabel)
in sgdTestCase "black/white"
(return trainData) fcnnMnistLoss0 0.02 23.025850929940457
, let glyph = V.unfoldrExactN sizeMnistGlyph (uniformR (0, 1))
label = V.unfoldrExactN sizeMnistLabel (uniformR (0, 1))
trainData = map ((\g -> (glyph g, label g)) . mkStdGen) [1 .. 100]
in sgdTestCase "random 100"
(return trainData) fcnnMnistLoss0 0.02 11.089140063760212
, sgdTestCase "first 100 trainset samples only"
(take 100 <$> loadMnistData trainGlyphsPath trainLabelsPath)
fcnnMnistLoss0 0.02 3.233123290489956
, testCase "fcnnMnistTest0 on 0.1 params0 300 100 width 10k testset" $ do
let nParams0 = fcnnMnistLen0 300 100
params0 = V.replicate nParams0 0.1
testData <- loadMnistData testGlyphsPath testLabelsPath
(1 - fcnnMnistTest0 (Proxy @Double) 300 100 testData params0)
@?= 0.902
, testCase "fcnnMnistTest2VV on 0.1 params0 300 100 width 10k testset" $ do
let (nParams0, nParams1, _, _) = fcnnMnistLen1 300 100
params0 = V.replicate nParams0 0.1
params1 = V.fromList $ map (`V.replicate` 0.1) nParams1
testData <- loadMnistData testGlyphsPath testLabelsPath
(1 - fcnnMnistTest1 300 100 testData (params0, params1))
@?= 0.902
, testCase "fcnnMnistTest2LL on 0.1 params0 300 100 width 10k testset" $ do
let (nParams0, lParams1, lParams2, _) = fcnnMnistLen2 300 100
vParams1 = V.fromList lParams1
vParams2 = V.fromList lParams2
params0 = V.replicate nParams0 0.1
params1 = V.map (`V.replicate` 0.1) vParams1
params2 = V.map (HM.konst 0.1) vParams2
testData <- loadMnistData testGlyphsPath testLabelsPath
(1 - fcnnMnistTest2 testData
(params0, params1, params2, V.empty))
@?= 0.902
, testProperty "Compare two forward derivatives and gradient for Mnist0" $
\seed seedDs ->
forAll (choose (1, 300)) $ \widthHidden ->
forAll (choose (1, 100)) $ \widthHidden2 ->
forAll (choose (0.01, 10)) $ \range ->
forAll (choose (0.01, 10)) $ \rangeDs ->
let createRandomVector n seedV = HM.randomVector seedV HM.Uniform n
glyph = createRandomVector sizeMnistGlyph seed
label = createRandomVector sizeMnistLabel seedDs
mnistData :: MnistData Double
mnistData = (glyph, label)
nParams0 = fcnnMnistLen0 widthHidden widthHidden2
paramShape = (nParams0, [], [], [])
(_, _, _, parameters) = initializerFixed seed range paramShape
(_, _, _, ds) = initializerFixed seedDs rangeDs paramShape
(_, _, _, parametersPerturbation) =
initializerFixed (seed + seedDs) 1e-7 paramShape
f :: forall d r m. (DualMonad d r m, r ~ Double)
=> DualNumberVariables d r -> m (DualNumber d r)
f = fcnnMnistLoss0 widthHidden widthHidden2 mnistData
in
qcPropDom f parameters ds parametersPerturbation 1
, testProperty "Compare two forward derivatives and gradient for Mnist1" $
\seed seedDs ->
forAll (choose (1, 2000)) $ \widthHidden ->
forAll (choose (1, 5000)) $ \widthHidden2 ->
forAll (choose (0.01, 0.5)) $ \range -> -- large nn, so NaNs fast
forAll (choose (0.01, 10)) $ \rangeDs ->
let createRandomVector n seedV = HM.randomVector seedV HM.Uniform n
glyph = createRandomVector sizeMnistGlyph seed
label = createRandomVector sizeMnistLabel seedDs
mnistData :: MnistData Double
mnistData = (glyph, label)
paramShape = fcnnMnistLen1 widthHidden widthHidden2
(_, _, _, parameters) = initializerFixed seed range paramShape
(_, _, _, ds) = initializerFixed seedDs rangeDs paramShape
(_, _, _, parametersPerturbation) =
initializerFixed (seed + seedDs) 1e-7 paramShape
f :: forall d r m. (DualMonad d r m, r ~ Double)
=> DualNumberVariables d r -> m (DualNumber d r)
f = fcnnMnistLoss1 widthHidden widthHidden2 mnistData
in
qcPropDom f parameters ds parametersPerturbation 1
, testProperty "Compare two forward derivatives and gradient for Mnist2" $
\seed ->
forAll (choose (0, sizeMnistLabel - 1)) $ \seedDs ->
forAll (choose (1, 5000)) $ \widthHidden ->
forAll (choose (1, 1000)) $ \widthHidden2 ->
forAll (choose (0.01, 1)) $ \range ->
forAll (choose (0.01, 10)) $ \rangeDs ->
let createRandomVector n seedV = HM.randomVector seedV HM.Uniform n
glyph = createRandomVector sizeMnistGlyph seed
label = createRandomVector sizeMnistLabel seedDs
labelOneHot = HM.konst 0 sizeMnistLabel V.// [(seedDs, 1)]
mnistData, mnistDataOneHot :: MnistData Double
mnistData = (glyph, label)
mnistDataOneHot = (glyph, labelOneHot)
paramShape = fcnnMnistLen2 widthHidden widthHidden2
(_, _, _, parameters) = initializerFixed seed range paramShape
(_, _, _, ds) = initializerFixed seedDs rangeDs paramShape
(_, _, _, parametersPerturbation) =
initializerFixed (seed + seedDs) 1e-7 paramShape
f, fOneHot, fFused
:: forall d r m. (DualMonad d r m, r ~ Double)
=> DualNumberVariables d r -> m (DualNumber d r)
f = fcnnMnistLoss2 mnistData
fOneHot = fcnnMnistLoss2 mnistDataOneHot
fFused = fcnnMnistLossFused2 mnistDataOneHot
in
qcPropDom f parameters ds parametersPerturbation 1 .&&.
qcPropDom fOneHot parameters ds parametersPerturbation 1 .&&.
qcPropDom fFused parameters ds parametersPerturbation 1 .&&.
cmpTwoSimple fOneHot fFused parameters ds
]
bigMnistTests :: TestTree
bigMnistTests = testGroup "MNIST tests with a 2-hidden-layer nn"
[ mnistTestCase2 "1 epoch, 1 batch" 1 1 fcnnMnistLoss0 300 100 0.02
0.1269
, mnistTestCase2 "tanh: 1 epoch, 1 batch" 1 1 fcnnMnistLossTanh 300 100 0.02
0.6406000000000001
, mnistTestCase2 "relu: 1 epoch, 1 batch" 1 1 fcnnMnistLossRelu 300 100 0.02
0.7248
, mnistTestCase2 "1 epoch, 1 batch, wider" 1 1 fcnnMnistLoss0 500 150 0.02
0.1269
, mnistTestCase2 "2 epochs, but only 1 batch" 2 1 fcnnMnistLoss0 300 100 0.02
9.809999999999997e-2
, mnistTestCase2 "artificial 1 2 3 4 5" 1 2 fcnnMnistLoss0 3 4 5
0.8972
, mnistTestCase2 "artificial 5 4 3 2 1" 5 4 fcnnMnistLoss0 3 2 1
0.8991
]
vectorMnistTests :: TestTree
vectorMnistTests = testGroup "MNIST VV tests with a 2-hidden-layer nn"
[ mnistTestCase2V "1 epoch, 1 batch" 1 1 fcnnMnistLoss1 300 100 0.02
0.12960000000000005
, mnistTestCase2V "1 epoch, 1 batch, wider" 1 1 fcnnMnistLoss1 500 150 0.02
0.13959999999999995
, mnistTestCase2V "2 epochs, but only 1 batch" 2 1 fcnnMnistLoss1 300 100 0.02
0.10019999999999996
, mnistTestCase2V "1 epoch, all batches" 1 99 fcnnMnistLoss1 300 100 0.02
5.389999999999995e-2
, mnistTestCase2V "artificial 1 2 3 4 5" 1 2 fcnnMnistLoss1 3 4 5
0.8972
, mnistTestCase2V "artificial 5 4 3 2 1" 5 4 fcnnMnistLoss1 3 2 1
0.7756000000000001
]
matrixMnistTests :: TestTree
matrixMnistTests = testGroup "MNIST LL tests with a 2-hidden-layer nn"
[ mnistTestCase2L "1 epoch, 1 batch" 1 1 fcnnMnistLoss2 300 100 0.02
0.12339999999999995
, mnistTestCase2L "1 epoch, 1 batch, wider" 1 1 fcnnMnistLoss2 500 150 0.02
0.15039999999999998
, mnistTestCase2L "2 epochs, but only 1 batch" 2 1 fcnnMnistLoss2 300 100 0.02
8.879999999999999e-2
, mnistTestCase2L "1 epoch, all batches" 1 99 fcnnMnistLoss2 300 100 0.02
5.1100000000000034e-2
, mnistTestCase2L "artificial 1 2 3 4 5" 1 2 fcnnMnistLoss2 3 4 5
0.8972
, mnistTestCase2T False
"artificial TL 5 4 3 2 1" 5 4 fcnnMnistLoss2 3 2 1
0.8865
, mnistTestCase2D False 1 False
"artificial DL 5 4 3 2 1" 5 4 fcnnMnistLoss2 3 2 1
0.8991
, mnistTestCase2F False 1 False
"artificial FL 5 4 3 2 1" 5 4 fcnnMnistLoss2 3 2 1
0.8991
-- , mnistTestCase2T True False
-- "2 epochs, all batches, TL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 0.02
-- 4.290000000000005e-2
-- , mnistTestCase2D True 1 False
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 0.02
-- 0.9079
-- , mnistTestCase2D True 64 False
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 0.02
-- 0.9261
-- , mnistTestCase2D True 64 True
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 0.02
-- 0.8993
-- , mnistTestCase2D True 64 True
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 2e-5
-- 0.9423
-- , mnistTestCase2D True 64 True
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 2e-4
-- 0.8714
-- , mnistTestCase2F True 64 True
-- "2 epochs, all batches, FL, wider, to file"
-- 2 60000 fcnnMnistLoss2 500 150 2e-4
-- 0.8714
-- , mnistTestCase2D True 64 True
-- "2 epochs, all batches, DL, wider, to file"
-- 2 60000 fcnnMnistLossFusedRelu2 1024 1024 2e-4
-- 0.902
-- , mnistTestCase2D False 64 True
-- "2 epochs, all batches, 1024DL"
-- 2 60000 fcnnMnistLoss2 1024 1024 2e-4
-- 0.7465999999999999
-- , mnistTestCase2F False 64 True
-- "2 epochs, all batches, 1024FL"
-- 2 60000 fcnnMnistLoss2 1024 1024 2e-4
-- 0.7465999999999999
]
fusedMnistTests :: TestTree
fusedMnistTests = testGroup "MNIST fused LL tests with a 2-hidden-layer nn"
[ mnistTestCase2L "1 epoch, 1 batch" 1 1 fcnnMnistLossFused2 300 100 0.02
0.12339999999999995
, mnistTestCase2L "1 epoch, 1 batch, wider" 1 1
fcnnMnistLossFused2 500 150 0.02
0.15039999999999998
, mnistTestCase2L "2 epochs, but only 1 batch" 2 1
fcnnMnistLossFused2 300 100 0.02
8.879999999999999e-2
, mnistTestCase2L "1 epoch, all batches" 1 99 fcnnMnistLossFused2 300 100 0.02
5.1100000000000034e-2
, mnistTestCase2L "artificial 1 2 3 4 5" 1 2 fcnnMnistLossFused2 3 4 5
0.8972
, mnistTestCase2L "artificial 5 4 3 2 1" 5 4 fcnnMnistLossFused2 3 2 1
0.7033
, mnistTestCase2S (Proxy @300) (Proxy @100)
"S 1 epoch, 1 batch" 1 1 fcnnMnistLossFusedS 0.02
0.1311
, mnistTestCase2S (Proxy @500) (Proxy @150)
"S 1 epoch, 1 batch, wider" 1 1 fcnnMnistLossFusedS 0.02
0.12470000000000003
, mnistTestCase2S (Proxy @300) (Proxy @100)
"S 2 epochs, but only 1 batch" 2 1 fcnnMnistLossFusedS 0.02
9.630000000000005e-2
, mnistTestCase2S (Proxy @300) (Proxy @100)
"S 1 epoch, all batches" 1 99 fcnnMnistLossFusedS 0.02
5.620000000000003e-2
, mnistTestCase2S (Proxy @3) (Proxy @4)
"S artificial 1 2 3 4 5" 1 2 fcnnMnistLossFusedS 5
0.8972
, mnistTestCase2S (Proxy @3) (Proxy @2)
"S artificial 5 4 3 2 1" 5 4 fcnnMnistLossFusedS 1
0.8246
, mnistTestCase2S (Proxy @300) (Proxy @100)
"SR 1 epoch, 1 batch" 1 1 fcnnMnistLossFusedReluS 0.02
0.7068
, mnistTestCase2S (Proxy @500) (Proxy @150)
"SR 1 epoch, 1 batch, wider" 1 1
fcnnMnistLossFusedReluS 0.02
0.8874
, mnistTestCase2S (Proxy @300) (Proxy @100)
"SR 2 epochs, but 1 batch" 2 1 fcnnMnistLossFusedReluS 0.02
0.8352999999999999
, mnistTestCase2S (Proxy @300) (Proxy @100)
"SR 1 epoch, all batches" 1 99 fcnnMnistLossFusedReluS 0.02
0.6415
, mnistTestCase2S (Proxy @3) (Proxy @4)
"SR artificial 1 2 3 4 5" 1 2 fcnnMnistLossFusedReluS 5
0.8972
, mnistTestCase2S (Proxy @3) (Proxy @2)
"SR artificial 5 4 3 2 1" 5 4 fcnnMnistLossFusedReluS 1
0.8991
]
shortCIMnistTests :: TestTree
shortCIMnistTests = testGroup "Short CI MNIST tests"
[ mnistTestCase2 "2 artificial 1 2 3 4 5" 1 2 fcnnMnistLoss0 3 4 5
0.8972
, mnistTestCase2 "2 artificial 5 4 3 2 1" 5 4 fcnnMnistLoss0 3 2 1
0.8991
, mnistTestCase2V "VV 1 epoch, 1 batch" 1 1 fcnnMnistLoss1 300 100 0.02
0.12960000000000005
, mnistTestCase2V "VV artificial 1 2 3 4 5" 1 2 fcnnMnistLoss1 3 4 5
0.8972
, mnistTestCase2V "VV artificial 5 4 3 2 1" 5 4 fcnnMnistLoss1 3 2 1
0.7756000000000001
, mnistTestCase2L "LL 1 epoch, 1 batch" 1 1 fcnnMnistLoss2 300 100 0.02
0.12339999999999995
, mnistTestCase2L "LL artificial 1 2 3 4 5" 1 2 fcnnMnistLoss2 3 4 5
0.8972
, mnistTestCase2L "LL artificial 5 4 3 2 1" 5 4 fcnnMnistLoss2 3 2 1
0.8085
, mnistTestCase2L "fused LL 1/1 batch" 1 1 fcnnMnistLossFused2 300 100 0.02
0.12339999999999995
, mnistTestCase2L "fused LL artificial 1 2 3 4 5" 1 2
fcnnMnistLossFused2 3 4 5
0.8972
, mnistTestCase2T False
"fused TL artificial 5 4 3 2 1" 5 4
fcnnMnistLossFused2 3 2 1
0.8865
, mnistTestCase2D False 1 False
"fused DL artificial 5 4 3 2 1" 5 4
fcnnMnistLossFused2 3 2 1
0.8991
, mnistTestCase2S (Proxy @300) (Proxy @100)
"S 1 epoch, 1 batch" 1 1 fcnnMnistLossFusedS 0.02
0.1311
, mnistTestCase2S (Proxy @3) (Proxy @4)
"S artificial 1 2 3 4 5" 1 2 fcnnMnistLossFusedS 5
0.8972
, mnistTestCase2S (Proxy @3) (Proxy @2)
"S artificial 5 4 3 2 1" 5 4 fcnnMnistLossFusedS 1
0.8246
, mnistTestCase2S (Proxy @3) (Proxy @4)
"SR artificial 1 2 3 4 5" 1 2 fcnnMnistLossFusedReluS 5
0.8972
, mnistTestCase2S (Proxy @3) (Proxy @2)
"SR artificial 5 4 3 2 1" 5 4 fcnnMnistLossFusedReluS 1
0.8991
]
|
C Copyright (c) 2005, NVIDIA CORPORATION. All rights reserved.
C
C Licensed under the Apache License, Version 2.0 (the "License");
C you may not use this file except in compliance with the License.
C You may obtain a copy of the License at
C
C http://www.apache.org/licenses/LICENSE-2.0
C
C Unless required by applicable law or agreed to in writing, software
C distributed under the License is distributed on an "AS IS" BASIS,
C WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
C See the License for the specific language governing permissions and
C limitations under the License.
C
C PACK intrinsic with a scalar mask fails in 6.0 with -mcmodel=medium
C Failure mode is a runtime error "0: PACK: invalid mask descriptor"
integer*4 xo(5), xe(5)
data xe/1, 2, 3, 4, 5/
xo = pack(xe,.true.)
call check(xe, xo, 5)
end
|
function this = MrDimInfo_get_add_remove(this)
% Unit test for the MrDimInfo get_dims, add_dims and remove_dims methods
%
% Y = MrUnitTest()
% run(Y, 'MrDimInfo_get_add_remove')
%
% This is a method of class MrUnitTest.
%
% IN
%
% OUT
%
% EXAMPLE
% MrDimInfo_methods
%
% See also MrUnitTest
% Author: Saskia Bollmann
% Created: 2018-01-15
% Copyright (C) 2018 Institute for Biomedical Engineering
% University of Zurich and ETH Zurich
%
% This file is part of the TAPAS UniQC Toolbox, which is released
% under the terms of the GNU General Public License (GPL), version 3.
% You can redistribute it and/or modify it under the terms of the GPL
% (either version 3 or, at your option, any later version).
% For further details, see the file COPYING or
% <http://www.gnu.org/licenses/>.
%
% construct MrDimInfo object from sampling points
dimInfo = this.make_dimInfo_reference(0);
% define expected solution
expSolution = dimInfo.copyobj;
% get, remove and add dims
% get dimInfo along x
dimInfoX = dimInfo.get_dims('x');
% remove x from dimInfo
dimInfo.remove_dims('x');
% add x back to dimInfo
dimInfo.add_dims(1, 'samplingPoints', dimInfoX.samplingPoints{1}, ...
'dimLabels', dimInfoX.dimLabels{1}, 'units', dimInfoX.units{1});
% define actual solution
actSolution = dimInfo;
% verify whether expected and actual solution are identical
% Note: convert to struct, since the PublicPropertyComparator (to allow
% nans to be treated as equal) does not compare properties of objects that
% overload subsref
warning('off', 'MATLAB:structOnObject');
this.verifyEqual(struct(actSolution), struct(expSolution), 'absTol', 10e-7);
warning('on', 'MATLAB:structOnObject');
end
|
[STATEMENT]
lemma assert_image [simp]: "assert ` A \<subseteq> assertion"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. assert ` A \<subseteq> assertion
[PROOF STEP]
by auto |
[STATEMENT]
lemma valid_bin:
"valid_state (binary_mask (get_m a), get_h a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. valid_state (binary_mask (get_m a), get_h a)
[PROOF STEP]
proof (rule valid_stateI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. valid_mask (binary_mask (get_m a))
2. \<And>hl. ppos (binary_mask (get_m a) hl) \<Longrightarrow> get_h a hl \<noteq> None
[PROOF STEP]
show "valid_mask (binary_mask (get_m a))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. valid_mask (binary_mask (get_m a))
[PROOF STEP]
by (metis PartialSA.unit_neutral binary_mask_def minus_empty option.discI plus_ab_defined unit_charact(2) valid_mask.elims(2) valid_mask.elims(3))
[PROOF STATE]
proof (state)
this:
valid_mask (binary_mask (get_m a))
goal (1 subgoal):
1. \<And>hl. ppos (binary_mask (get_m a) hl) \<Longrightarrow> get_h a hl \<noteq> None
[PROOF STEP]
show "\<And>hl. ppos (binary_mask (get_m a) hl) \<Longrightarrow> get_h a hl \<noteq> None"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>hl. ppos (binary_mask (get_m a) hl) \<Longrightarrow> get_h a hl \<noteq> None
[PROOF STEP]
by (metis Rep_prat Rep_state binary_mask_def get_h.simps get_pre(2) leD mem_Collect_eq pnone.rep_eq ppos.rep_eq prod.collapse valid_heap_def valid_state.simps)
[PROOF STATE]
proof (state)
this:
ppos (binary_mask (get_m a) ?hl) \<Longrightarrow> get_h a ?hl \<noteq> None
goal:
No subgoals!
[PROOF STEP]
qed |
# Four-axis robot KUKA KR700PA, implicit kinematic constraints
# Initialization
# Import (Library)
interface(warnlevel=0): # Suppress the following warning.
restart: # Gives a warning if started from terminal Maple via read.
interface(warnlevel=3):
kin_constraints_exist := true: # For saving
;
with(StringTools): # For time output
with(LinearAlgebra):
with(codegen):
with(CodeGeneration):
codegen_act := true:
codegen_opt := 1: # Lower optimization level; otherwise it takes too long.
codegen_debug := 0: # Code generation also for non-inert expressions
;
# Import (Hybriddyn)
read "../helper/proc_MatlabExport":
read "../transformation/proc_rotx":
read "../transformation/proc_roty":
read "../transformation/proc_rotz":
read "../transformation/proc_trotz":
read "../transformation/proc_transl":
read "../helper/proc_convert_s_t":
read "../helper/proc_convert_t_s":
read "../robot_codegen_constraints/proc_subs_kintmp_exp":
read "../helper/proc_intersect_circle":
with(RealDomain): # Restricts all functions to the real domain. Must come after the definition of MatlabExport; otherwise it does not work.
;
read "../robot_codegen_definitions/robot_env_IC": #aktuelle Roboter, MDH-Tabelle
;
read sprintf("../codeexport/%s/tmp/tree_floatb_definitions", robot_name_OL):
# Load kinematics results
read sprintf("../codeexport/%s/tmp/kinematics_floatb_%s_rotmat_maple.m", robot_name_OL, base_method_name);
read "../robot_codegen_definitions/robot_env_IC":
Trf := Trf:
Trf_c := Trf_c:
Trf:
# VGK yellow 1-6-11-9-7-2-1
# Loop (1-6)-(6-11)
T_1_11 := combine( Matrix(Trf(1..4,1..4, 6)) . Matrix(Trf(1..4,1..4,11))):
# Loop (1-2)(2-7)(7-9)
T_1_9 := combine(Matrix(Trf(1..4,1..4, 2)) . Matrix(Trf(1..4,1..4,7)) . Matrix(Trf(1..4, 1..4, 9))):
h1t := T_1_11(1..3,4) - T_1_9(1..3,4);
tmp := Transpose( Matrix(T_1_11(1..3,1..3)) ) . Matrix(T_1_9(1..3,1..3)): # display only, used for h1r
;
combine(tmp); # display only, used for h1r
;
h1r := -(-qJ6(t)+qJ2(t)+qJ7(t)+phi79+qJ9(t)) + Pi;
# VGK GREEN 2-3-4-10-12-8-7-2
# Loop (2-3)-(3-4)-(4-10)
T_2_10 := combine( Matrix(Trf(1..4,1..4, 3)) . Matrix(Trf(1..4,1..4,4)) . Matrix(Trf(1..4,1..4,10))):
# Loop (2-7)-(7-8)-(8-12)
T_2_12:= combine( Matrix(Trf(1..4,1..4, 7)) . Matrix(Trf(1..4,1..4,8)).Matrix(Trf(1..4,1..4,12)) ):
h2t := T_2_10(1..3,4) - T_2_12(1..3,4);
tmp := Transpose( Matrix(T_2_10(1..3,1..3)) ) . Matrix(T_2_12(1..3,1..3)): # display only, used for h2r
;
combine(tmp); # display only, used for h2r
;
h2r := (qJ3(t)+qJ4(t)+phi410+qJ10(t)-qJ7(t)+phi78-qJ8(t))+Pi;
# Assemble all constraint equations
implconstr_t := <h1t([1, 3],1);h2t([1, 2],1); h1r; h2r>; # TODO: h1r and h2r must contain the correct expressions.
implconstr_s := convert_t_s(implconstr_t);
# Export code for the following scripts
kin_constraints_exist:=true:
save implconstr_t, implconstr_s, kin_constraints_exist, sprintf("../codeexport/%s/tmp/kinematic_constraints_implicit_maple.m", robot_name):
# Export the complete expression
if codegen_act then
MatlabExport(implconstr_s, sprintf("../codeexport/%s/tmp/kinconstr_impl_matlab.m", robot_name), 2):
end if:
# Create list of dependent constant kinematic parameters (important for Matlab function generation)
read "../helper/proc_list_constant_expressions";
kc_symbols := Matrix(list_constant_expressions( implconstr_s )):
save kc_symbols, sprintf("../codeexport/%s/tmp/kinematic_implicit_constraints_symbols_list_maple", robot_name):
MatlabExport(Transpose(kc_symbols), sprintf("../codeexport/%s/tmp/kinematic_implicit_constraints_symbols_list_matlab.m", robot_name), 2):
|
From caml5 Require Import
prelude.
From caml5.lang Require Import
notations
proofmode.
From caml5.std Require Export
mutex.
Record condition `{!heapGS Σ} {mutex_unboxed} {mutex : mutex Σ mutex_unboxed} {unboxed : bool} := {
condition_make : val ;
condition_wait : val ;
condition_signal : val ;
condition_broadcast : val ;
condition_inv : val → iProp Σ ;
condition_inv_persistent t :
Persistent (condition_inv t) ;
condition_make_spec :
{{{ True }}}
condition_make #()
{{{ t, RET t; condition_inv t }}} ;
condition_wait_spec t mtx P :
{{{ condition_inv t ∗ mutex.(mutex_inv) mtx P ∗ mutex.(mutex_locked) mtx ∗ P }}}
condition_wait t mtx
{{{ RET #(); mutex.(mutex_locked) mtx ∗ P }}} ;
condition_signal_spec t :
{{{ condition_inv t }}}
condition_signal t
{{{ RET #(); True }}} ;
condition_broadcast_spec t :
{{{ condition_inv t }}}
condition_broadcast t
{{{ RET #(); True }}} ;
condition_unboxed :
if unboxed then ∀ t,
condition_inv t -∗
⌜val_is_unboxed t⌝
else
True ;
}.
#[global] Arguments condition _ {_ _} _ _ : assert.
#[global] Arguments Build_condition {_ _ _ _} _ {_ _ _ _ _ _} _ _ _ _ _ : assert.
#[global] Existing Instance condition_inv_persistent.
Section condition.
Context `{!heapGS Σ} {mutex_unboxed} {mutex : mutex Σ mutex_unboxed} {unboxed} (condition : condition Σ mutex unboxed).
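(* Derived combinators: [condition_wait_until cond] repeatedly waits on the
   condition variable until [cond #()] returns [true], holding the mutex and
   its invariant throughout; [condition_wait_while] is the dual loop. *)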
#[local] Definition condition_wait_until_aux (cond : val) : val :=
rec: "condition_wait_until_aux" "t" "mtx" :=
if: cond #() then #() else (
condition.(condition_wait) "t" "mtx" ;;
"condition_wait_until_aux" "t" "mtx"
).
Definition condition_wait_until cond : val :=
λ: "t" "mtx",
condition_wait_until_aux cond "t" "mtx".
Definition condition_wait_while (cond : val) :=
condition_wait_until (λ: <>, ~ cond #()).
Lemma condition_wait_until_spec (cond : val) t mtx P Φ :
{{{
condition.(condition_inv) t ∗ mutex.(mutex_inv) mtx P ∗
mutex.(mutex_locked) mtx ∗ P ∗ Φ false ∗
{{{ mutex.(mutex_locked) mtx ∗ P ∗ Φ false }}}
cond #()
{{{ (b : bool), RET #b; mutex.(mutex_locked) mtx ∗ P ∗ Φ b }}}
}}}
condition_wait_until cond t mtx
{{{
RET #();
mutex.(mutex_locked) mtx ∗ P ∗ Φ true
}}}.
Proof.
iIntros "%Ψ (#Hinv_t & #Hinv_mtx & Hlocked & HP & HΦ & #Hcond) HΨ".
wp_rec. wp_pures.
iLöb as "HLöb".
wp_rec. wp_pures.
wp_apply ("Hcond" with "[$]"). iIntros "%b (Hlocked & HP & HΦ)".
destruct b; wp_pures.
{ iApply "HΨ". iFrame. done. }
wp_apply (condition_wait_spec _ _ _ P with "[$]"). iIntros "(Hlocked & HP)".
wp_pures.
iApply ("HLöb" with "[$] [$] [$] [$]").
Qed.
Lemma condition_wait_while_spec (cond : val) t mtx P Φ :
{{{
condition.(condition_inv) t ∗ mutex.(mutex_inv) mtx P ∗
mutex.(mutex_locked) mtx ∗ P ∗ Φ true ∗
{{{ mutex.(mutex_locked) mtx ∗ P ∗ Φ true }}}
cond #()
{{{ (b : bool), RET #b; mutex.(mutex_locked) mtx ∗ P ∗ Φ b }}}
}}}
condition_wait_while cond t mtx
{{{
RET #();
mutex.(mutex_locked) mtx ∗ P ∗ Φ false
}}}.
Proof.
iIntros "%Ψ (#Hinv_t & #Hinv_mtx & Hlocked & HP & HΦ & #Hcond) HΨ".
wp_apply (condition_wait_until_spec _ _ _ P (λ b, Φ (negb b)) with "[$Hlocked $HP $HΦ]"); last done.
iFrame "#". clear. iIntros "%Ψ !> (Hlocked & HP & HΦ) HΨ".
wp_pures.
wp_apply ("Hcond" with "[$]"). iIntros "%b (Hlocked & HP & HΦ)".
destruct b; wp_pures; iApply "HΨ"; iFrame; done.
Qed.
End condition.
#[global] Opaque condition_wait_until.
#[global] Opaque condition_wait_while.
|
/-
Copyright (c) 2022 Michael Stoll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Stoll
! This file was ported from Lean 3 source module number_theory.legendre_symbol.gauss_sum
! leanprover-community/mathlib commit d11893b411025250c8e61ff2f12ccbd7ee35ab15
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.NumberTheory.LegendreSymbol.AddCharacter
import Mathbin.NumberTheory.LegendreSymbol.ZmodChar
import Mathbin.Algebra.CharP.CharAndCard
/-!
# Gauss sums
We define the Gauss sum associated to a multiplicative and an additive
character of a finite field and prove some results about them.
## Main definition
Let `R` be a finite commutative ring and let `R'` be another commutative ring.
If `χ` is a multiplicative character `R → R'` (type `mul_char R R'`) and `ψ`
is an additive character `R → R'` (type `add_char R R'`, which abbreviates
`(multiplicative R) →* R'`), then the *Gauss sum* of `χ` and `ψ` is `∑ a, χ a * ψ a`.
## Main results
Some important results are as follows.
* `gauss_sum_mul_gauss_sum_eq_card`: The product of the Gauss
sums of `χ` and `ψ` and that of `χ⁻¹` and `ψ⁻¹` is the cardinality
of the source ring `R` (if `χ` is nontrivial, `ψ` is primitive and `R` is a field).
* `gauss_sum_sq`: The square of the Gauss sum is `χ(-1)` times
the cardinality of `R` if in addition `χ` is a quadratic character.
* `quad_gauss_sum_frob`: For a quadratic character `χ`, raising
the Gauss sum to the `p`th power (where `p` is the characteristic of
the target ring `R'`) multiplies it by `χ p`.
* `char.card_pow_card`: When `F` and `F'` are finite fields and `χ : F → F'`
is a nontrivial quadratic character, then `(χ (-1) * #F)^(#F'/2) = χ (#F')`.
* `finite_field.two_pow_card`: For every finite field `F` of odd characteristic,
we have `2^(#F/2) = χ₈(#F)` in `F`.
This machinery can be used to derive (a generalization of) the Law of
Quadratic Reciprocity.
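For reference, with `g χ ψ = ∑ a, χ a * ψ a`, the first result above states
`g χ ψ * g χ⁻¹ ψ⁻¹ = #R` when `χ` is nontrivial, `ψ` is primitive and `R` is a field.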
## Tags
additive character, multiplicative character, Gauss sum
-/
universe u v
open BigOperators
open AddChar MulChar
section GaussSumDef
-- `R` is the domain of the characters
variable {R : Type u} [CommRing R] [Fintype R]
-- `R'` is the target of the characters
variable {R' : Type v} [CommRing R']
/-!
### Definition and first properties
-/
/-- Definition of the Gauss sum associated to a multiplicative and an additive character. -/
def gaussSum (χ : MulChar R R') (ψ : AddChar R R') : R' :=
∑ a, χ a * ψ a
#align gauss_sum gaussSum
/-- Replacing `ψ` by `mul_shift ψ a` and multiplying the Gauss sum by `χ a` does not change it. -/
theorem gaussSum_mulShift (χ : MulChar R R') (ψ : AddChar R R') (a : Rˣ) :
χ a * gaussSum χ (mulShift ψ a) = gaussSum χ ψ :=
by
simp only [gaussSum, mul_shift_apply, Finset.mul_sum]
simp_rw [← mul_assoc, ← map_mul]
exact Fintype.sum_bijective _ a.mul_left_bijective _ _ fun x => rfl
#align gauss_sum_mul_shift gaussSum_mulShift
end GaussSumDef
/-!
### The product of two Gauss sums
-/
section GaussSumProd
-- In the following, we need `R` to be a finite field and `R'` to be a domain.
variable {R : Type u} [Field R] [Fintype R] {R' : Type v} [CommRing R'] [IsDomain R']
-- A helper lemma for `gauss_sum_mul_gauss_sum_eq_card` below
-- Is this useful enough in other contexts to be public?
private theorem gauss_sum_mul_aux {χ : MulChar R R'} (hχ : IsNontrivial χ) (ψ : AddChar R R')
(b : R) : (∑ a, χ (a * b⁻¹) * ψ (a - b)) = ∑ c, χ c * ψ (b * (c - 1)) :=
by
cases' eq_or_ne b 0 with hb hb
· -- case `b = 0`
simp only [hb, inv_zero, MulZeroClass.mul_zero, MulChar.map_zero, MulZeroClass.zero_mul,
Finset.sum_const_zero, map_zero_one, mul_one]
exact hχ.sum_eq_zero.symm
· -- case `b ≠ 0`
refine' (Fintype.sum_bijective _ (Equiv.mulLeft_bijective₀ b hb) _ _ fun x => _).symm
rw [mul_assoc, mul_comm x, ← mul_assoc, mul_inv_cancel hb, one_mul, mul_sub, mul_one]
#align gauss_sum_mul_aux gauss_sum_mul_aux
/-- We have `gauss_sum χ ψ * gauss_sum χ⁻¹ ψ⁻¹ = fintype.card R`
when `χ` is nontrivial and `ψ` is primitive (and `R` is a field). -/
theorem gaussSum_mul_gaussSum_eq_card {χ : MulChar R R'} (hχ : IsNontrivial χ) {ψ : AddChar R R'}
(hψ : IsPrimitive ψ) : gaussSum χ ψ * gaussSum χ⁻¹ ψ⁻¹ = Fintype.card R :=
by
simp only [gaussSum, AddChar.inv_apply, Finset.sum_mul, Finset.mul_sum, MulChar.inv_apply']
conv in _ * _ * (_ * _) => rw [mul_mul_mul_comm, ← map_mul, ← map_add_mul, ← sub_eq_add_neg]
simp_rw [gauss_sum_mul_aux hχ ψ]
rw [Finset.sum_comm]
classical
-- to get `[decidable_eq R]` for `sum_mul_shift`
simp_rw [← Finset.mul_sum, sum_mul_shift _ hψ, sub_eq_zero, mul_ite, MulZeroClass.mul_zero]
rw [Finset.sum_ite_eq' Finset.univ (1 : R)]
simp only [Finset.mem_univ, map_one, one_mul, if_true]
#align gauss_sum_mul_gauss_sum_eq_card gaussSum_mul_gaussSum_eq_card
/-- When `χ` is a nontrivial quadratic character, then the square of `gauss_sum χ ψ`
is `χ(-1)` times the cardinality of `R`. -/
theorem gaussSum_sq {χ : MulChar R R'} (hχ₁ : IsNontrivial χ) (hχ₂ : IsQuadratic χ)
{ψ : AddChar R R'} (hψ : IsPrimitive ψ) : gaussSum χ ψ ^ 2 = χ (-1) * Fintype.card R :=
by
rw [pow_two, ← gaussSum_mul_gaussSum_eq_card hχ₁ hψ, hχ₂.inv, mul_rotate']
congr
rw [mul_comm, ← gaussSum_mulShift _ _ (-1 : Rˣ), inv_mul_shift]
rfl
#align gauss_sum_sq gaussSum_sq
end GaussSumProd
/-!
### Gauss sums and Frobenius
-/
section gaussSum_frob
variable {R : Type u} [CommRing R] [Fintype R] {R' : Type v} [CommRing R']
-- We assume that the target ring `R'` has prime characteristic `p`.
variable (p : ℕ) [fp : Fact p.Prime] [hch : CharP R' p]
include fp hch
/-- When `R'` has prime characteristic `p`, then the `p`th power of the Gauss sum
of `χ` and `ψ` is the Gauss sum of `χ^p` and `ψ^p`. -/
theorem gaussSum_frob (χ : MulChar R R') (ψ : AddChar R R') :
gaussSum χ ψ ^ p = gaussSum (χ ^ p) (ψ ^ p) :=
by
rw [← frobenius_def, gaussSum, gaussSum, map_sum]
simp_rw [pow_apply' χ fp.1.Pos, map_mul, frobenius_def]
rfl
#align gauss_sum_frob gaussSum_frob
/-- For a quadratic character `χ` and when the characteristic `p` of the target ring
is a unit in the source ring, the `p`th power of the Gauss sum of`χ` and `ψ` is
`χ p` times the original Gauss sum. -/
theorem MulChar.IsQuadratic.gaussSum_frob (hp : IsUnit (p : R)) {χ : MulChar R R'}
(hχ : IsQuadratic χ) (ψ : AddChar R R') : gaussSum χ ψ ^ p = χ p * gaussSum χ ψ := by
rw [gaussSum_frob, pow_mul_shift, hχ.pow_char p, ← gaussSum_mulShift χ ψ hp.unit, ← mul_assoc,
hp.unit_spec, ← pow_two, ← pow_apply' _ (by norm_num : 0 < 2), hχ.sq_eq_one, ← hp.unit_spec,
one_apply_coe, one_mul]
#align mul_char.is_quadratic.gauss_sum_frob MulChar.IsQuadratic.gaussSum_frob
/-- For a quadratic character `χ` and when the characteristic `p` of the target ring
is a unit in the source ring and `n` is a natural number, the `p^n`th power of the Gauss
sum of`χ` and `ψ` is `χ (p^n)` times the original Gauss sum. -/
theorem MulChar.IsQuadratic.gaussSum_frob_iter (n : ℕ) (hp : IsUnit (p : R)) {χ : MulChar R R'}
(hχ : IsQuadratic χ) (ψ : AddChar R R') : gaussSum χ ψ ^ p ^ n = χ (p ^ n) * gaussSum χ ψ :=
by
induction' n with n ih
· rw [pow_zero, pow_one, pow_zero, MulChar.map_one, one_mul]
·
rw [pow_succ, mul_comm p, pow_mul, ih, mul_pow, hχ.gauss_sum_frob _ hp, ← mul_assoc, pow_succ,
mul_comm (p : R), map_mul, ← pow_apply' χ fp.1.Pos (p ^ n), hχ.pow_char p]
#align mul_char.is_quadratic.gauss_sum_frob_iter MulChar.IsQuadratic.gaussSum_frob_iter
end gaussSum_frob
/-!
### Values of quadratic characters
-/
section GaussSumValues
variable {R : Type u} [CommRing R] [Fintype R] {R' : Type v} [CommRing R'] [IsDomain R']
/-- If the square of the Gauss sum of a quadratic character is `χ(-1) * #R`,
then we get, for all `n : ℕ`, the relation `(χ(-1) * #R) ^ (p^n/2) = χ(p^n)`,
where `p` is the (odd) characteristic of the target ring `R'`.
This version can be used when `R` is not a field, e.g., `ℤ/8ℤ`. -/
theorem Char.card_pow_char_pow {χ : MulChar R R'} (hχ : IsQuadratic χ) (ψ : AddChar R R') (p n : ℕ)
[fp : Fact p.Prime] [hch : CharP R' p] (hp : IsUnit (p : R)) (hp' : p ≠ 2)
(hg : gaussSum χ ψ ^ 2 = χ (-1) * Fintype.card R) :
(χ (-1) * Fintype.card R) ^ (p ^ n / 2) = χ (p ^ n) :=
by
have : gaussSum χ ψ ≠ 0 := by
intro hf
rw [hf, zero_pow (by norm_num : 0 < 2), eq_comm, mul_eq_zero] at hg
exact
not_isUnit_prime_of_dvd_card p
((CharP.cast_eq_zero_iff R' p _).mp <| hg.resolve_left (is_unit_one.neg.map χ).NeZero) hp
rw [← hg]
apply mul_right_cancel₀ this
rw [← hχ.gauss_sum_frob_iter p n hp ψ, ← pow_mul, mul_comm, ← pow_succ,
Nat.two_mul_div_two_add_one_of_odd (fp.1.eq_two_or_odd'.resolve_left hp').pow]
#align char.card_pow_char_pow Char.card_pow_char_pow
/-- When `F` and `F'` are finite fields and `χ : F → F'` is a nontrivial quadratic character,
then `(χ(-1) * #F)^(#F'/2) = χ(#F')`. -/
theorem Char.card_pow_card {F : Type _} [Field F] [Fintype F] {F' : Type _} [Field F'] [Fintype F']
{χ : MulChar F F'} (hχ₁ : IsNontrivial χ) (hχ₂ : IsQuadratic χ)
(hch₁ : ringChar F' ≠ ringChar F) (hch₂ : ringChar F' ≠ 2) :
(χ (-1) * Fintype.card F) ^ (Fintype.card F' / 2) = χ (Fintype.card F') :=
by
obtain ⟨n, hp, hc⟩ := FiniteField.card F (ringChar F)
obtain ⟨n', hp', hc'⟩ := FiniteField.card F' (ringChar F')
let ψ := primitive_char_finite_field F F' hch₁
let FF' := CyclotomicField ψ.n F'
have hchar := Algebra.ringChar_eq F' FF'
apply (algebraMap F' FF').Injective
rw [map_pow, map_mul, map_natCast, hc', hchar, Nat.cast_pow]
simp only [← MulChar.ringHomComp_apply]
haveI := Fact.mk hp'
haveI := Fact.mk (hchar.subst hp')
rw [Ne, ← Nat.prime_dvd_prime_iff_eq hp' hp, ← isUnit_iff_not_dvd_char, hchar] at hch₁
exact
Char.card_pow_char_pow (hχ₂.comp _) ψ.char (ringChar FF') n' hch₁ (hchar ▸ hch₂)
(gaussSum_sq (hχ₁.comp <| RingHom.injective _) (hχ₂.comp _) ψ.prim)
#align char.card_pow_card Char.card_pow_card
end GaussSumValues
section GaussSumTwo
/-!
### The quadratic character of 2
This section proves the following result.
For every finite field `F` of odd characteristic, we have `2^(#F/2) = χ₈(#F)` in `F`.
This can be used to show that the quadratic character of `F` takes the value
`χ₈(#F)` at `2`.
The proof uses the Gauss sum of `χ₈` and a primitive additive character on `ℤ/8ℤ`;
in this way, the result is reduced to `card_pow_char_pow`.
-/
open ZMod
/-- For every finite field `F` of odd characteristic, we have `2^(#F/2) = χ₈(#F)` in `F`. -/
theorem FiniteField.two_pow_card {F : Type _} [Fintype F] [Field F] (hF : ringChar F ≠ 2) :
(2 : F) ^ (Fintype.card F / 2) = χ₈ (Fintype.card F) :=
by
have hp2 : ∀ n : ℕ, (2 ^ n : F) ≠ 0 := fun n => pow_ne_zero n (Ring.two_ne_zero hF)
obtain ⟨n, hp, hc⟩ := FiniteField.card F (ringChar F)
-- we work in `FF`, the eighth cyclotomic field extension of `F`
let FF := (Polynomial.cyclotomic 8 F).SplittingField
haveI : FiniteDimensional F FF :=
Polynomial.IsSplittingField.finiteDimensional FF (Polynomial.cyclotomic 8 F)
haveI : Fintype FF := FiniteDimensional.fintypeOfFintype F FF
have hchar := Algebra.ringChar_eq F FF
have FFp := hchar.subst hp
haveI := Fact.mk FFp
have hFF := ne_of_eq_of_ne hchar.symm hF
-- `ring_char FF ≠ 2`
have hu : IsUnit (ringChar FF : ZMod 8) :=
by
rw [isUnit_iff_not_dvd_char, ring_char_zmod_n]
rw [Ne, ← Nat.prime_dvd_prime_iff_eq FFp Nat.prime_two] at hFF
change ¬_ ∣ 2 ^ 3
exact mt FFp.dvd_of_dvd_pow hFF
-- there is a primitive additive character `ℤ/8ℤ → FF`, sending `a + 8ℤ ↦ τ^a`
-- with a primitive eighth root of unity `τ`
let ψ₈ := primitive_zmod_char 8 F (by convert hp2 3 <;> norm_num)
let τ : FF := ψ₈.char 1
have τ_spec : τ ^ 4 = -1 := by
refine' (sq_eq_one_iff.1 _).resolve_left _ <;>
· simp only [τ, ← map_nsmul_pow]
erw [AddChar.IsPrimitive.zMod_char_eq_one_iff 8 ψ₈.prim]
decide
-- we consider `χ₈` as a multiplicative character `ℤ/8ℤ → FF`
let χ := χ₈.ring_hom_comp (Int.castRingHom FF)
have hχ : χ (-1) = 1 := NormNum.int_cast_one
have hq : is_quadratic χ := is_quadratic_χ₈.comp _
-- we now show that the Gauss sum of `χ` and `ψ₈` has the relevant property
have hg : gaussSum χ ψ₈.char ^ 2 = χ (-1) * Fintype.card (ZMod 8) :=
by
rw [hχ, one_mul, card, gaussSum]
convert← congr_arg (· ^ 2) (Fin.sum_univ_eight fun x => (χ₈ x : FF) * τ ^ x.val)
· ext
congr
apply pow_one
convert_to(0 + 1 * τ ^ 1 + 0 + -1 * τ ^ 3 + 0 + -1 * τ ^ 5 + 0 + 1 * τ ^ 7) ^ 2 = _
· simp only [χ₈_apply, Matrix.cons_val_zero, Matrix.cons_val_one, Matrix.head_cons,
Matrix.cons_vec_bit0_eq_alt0, Matrix.cons_vec_bit1_eq_alt1, Matrix.cons_vecAppend,
Matrix.cons_vecAlt0, Matrix.cons_vecAlt1, Int.cast_zero, Int.cast_one, Int.cast_neg,
MulZeroClass.zero_mul]
rfl
convert_to 8 + (τ ^ 4 + 1) * (τ ^ 10 - 2 * τ ^ 8 - 2 * τ ^ 6 + 6 * τ ^ 4 + τ ^ 2 - 8) = _
· ring
· rw [τ_spec]
norm_num
-- this allows us to apply `card_pow_char_pow` to our situation
have h := Char.card_pow_char_pow hq ψ₈.char (ringChar FF) n hu hFF hg
rw [card, ← hchar, hχ, one_mul, ← hc, ← Nat.cast_pow (ringChar F), ← hc] at h
-- finally, we change `2` to `8` on the left hand side
convert_to(8 : F) ^ (Fintype.card F / 2) = _
·
rw [(by norm_num : (8 : F) = 2 ^ 2 * 2), mul_pow,
(FiniteField.isSquare_iff hF <| hp2 2).mp ⟨2, pow_two 2⟩, one_mul]
apply (algebraMap F FF).Injective
simp only [map_pow, map_bit0, map_one, map_intCast]
convert h
norm_num
#align finite_field.two_pow_card FiniteField.two_pow_card
end GaussSumTwo
|
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testGaussianFactorGraphUnordered.cpp
* @brief Unit tests for Linear Factor Graph
* @author Christian Potthast
* @author Frank Dellaert
* @author Luca Carlone
* @author Richard Roberts
**/
#include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign;
#include <gtsam/base/TestableAssertions.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/debug.h>
#include <gtsam/base/VerticalBlockMatrix.h>
#include <gtsam/inference/VariableSlots.h>
#include <gtsam/inference/VariableIndex.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianBayesNet.h>
using namespace std;
using namespace gtsam;
//static SharedDiagonal
// sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1), sigma_02 = noiseModel::Isotropic::Sigma(2,0.2),
// constraintModel = noiseModel::Constrained::All(2);
/* ************************************************************************* */
TEST(GaussianFactorGraph, initialization) {
// Create empty graph
GaussianFactorGraph fg;
SharedDiagonal unit2 = noiseModel::Unit::Create(2);
fg +=
JacobianFactor(0, 10*eye(2), -1.0*ones(2), unit2),
JacobianFactor(0, -10*eye(2),1, 10*eye(2), (Vector(2) << 2.0, -1.0), unit2),
JacobianFactor(0, -5*eye(2), 2, 5*eye(2), (Vector(2) << 0.0, 1.0), unit2),
JacobianFactor(1, -5*eye(2), 2, 5*eye(2), (Vector(2) << -1.0, 1.5), unit2);
EXPECT_LONGS_EQUAL(4, (long)fg.size());
// Test sparse, which takes a vector and returns a matrix, used in MATLAB
  // Note that this is the augmented Jacobian and the RHS is in column 7
Matrix expectedIJS = (Matrix(3, 22) <<
1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 5., 6., 7., 8., 7., 8., 7., 8.,
1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 7., 3., 4., 5., 6., 7., 7.,
10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5., 0., 1., -5., -5., 5., 5., -1., 1.5
);
Matrix actualIJS = fg.sparseJacobian_();
EQUALITY(expectedIJS, actualIJS);
}
/* ************************************************************************* */
TEST(GaussianFactorGraph, sparseJacobian) {
// Create factor graph:
// x1 x2 x3 x4 x5 b
// 1 2 3 0 0 4
// 5 6 7 0 0 8
// 9 10 0 11 12 13
// 0 0 0 14 15 16
// Expected - NOTE that we transpose this!
Matrix expectedT = (Matrix(16, 3) <<
1., 1., 2.,
1., 2., 4.,
1., 3., 6.,
2., 1.,10.,
2., 2.,12.,
2., 3.,14.,
1., 6., 8.,
2., 6.,16.,
3., 1.,18.,
3., 2.,20.,
3., 4.,22.,
3., 5.,24.,
4., 4.,28.,
4., 5.,30.,
3., 6.,26.,
4., 6.,32.);
Matrix expected = expectedT.transpose();
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.5);
gfg.add(0, (Matrix(2, 3) << 1., 2., 3., 5., 6., 7.), (Vector(2) << 4., 8.), model);
gfg.add(0, (Matrix(2, 3) << 9.,10., 0., 0., 0., 0.), 1, (Matrix(2, 2) << 11., 12., 14., 15.), (Vector(2) << 13.,16.), model);
Matrix actual = gfg.sparseJacobian_();
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(GaussianFactorGraph, matrices) {
// Create factor graph:
// x1 x2 x3 x4 x5 b
// 1 2 3 0 0 4
// 5 6 7 0 0 8
// 9 10 0 11 12 13
// 0 0 0 14 15 16
Matrix A00 = (Matrix(2, 3) << 1, 2, 3, 5, 6, 7);
Matrix A10 = (Matrix(2, 3) << 9, 10, 0, 0, 0, 0);
Matrix A11 = (Matrix(2, 2) << 11, 12, 14, 15);
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Unit::Create(2);
gfg.add(0, A00, (Vector(2) << 4., 8.), model);
gfg.add(0, A10, 1, A11, (Vector(2) << 13.,16.), model);
Matrix Ab(4,6);
Ab <<
1, 2, 3, 0, 0, 4,
5, 6, 7, 0, 0, 8,
9,10, 0,11,12,13,
0, 0, 0,14,15,16;
// augmented versions
EXPECT(assert_equal(Ab, gfg.augmentedJacobian()));
EXPECT(assert_equal(Ab.transpose() * Ab, gfg.augmentedHessian()));
// jacobian
Matrix A = Ab.leftCols(Ab.cols()-1);
Vector b = Ab.col(Ab.cols()-1);
Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian();
EXPECT(assert_equal(A, actualA));
EXPECT(assert_equal(b, actualb));
// hessian
Matrix L = A.transpose() * A;
Vector eta = A.transpose() * b;
Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
EXPECT(assert_equal(L, actualL));
EXPECT(assert_equal(eta, actualeta));
  // hessianDiagonal
VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns
expectLdiagonal.insert(0, (Vector(3) << 1+25+81, 4+36+100, 9+49));
expectLdiagonal.insert(1, (Vector(2) << 121+196, 144+225));
EXPECT(assert_equal(expectLdiagonal, gfg.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = gfg.hessianBlockDiagonal();
LONGS_EQUAL(2,actualBD.size());
EXPECT(assert_equal(A00.transpose()*A00 + A10.transpose()*A10,actualBD[0]));
EXPECT(assert_equal(A11.transpose()*A11,actualBD[1]));
}
/* ************************************************************************* */
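// Small example graph used by several tests below: poses x1 (key 2) and
// x2 (key 0), landmark l1 (key 1), with a prior on x1, one odometry factor
// and two landmark measurements.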
static GaussianFactorGraph createSimpleGaussianFactorGraph() {
GaussianFactorGraph fg;
SharedDiagonal unit2 = noiseModel::Unit::Create(2);
// linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_]
fg += JacobianFactor(2, 10*eye(2), -1.0*ones(2), unit2);
// odometry between x1 and x2: x2-x1=[0.2;-0.1]
fg += JacobianFactor(0, 10*eye(2), 2, -10*eye(2), (Vector(2) << 2.0, -1.0), unit2);
// measurement between x1 and l1: l1-x1=[0.0;0.2]
fg += JacobianFactor(1, 5*eye(2), 2, -5*eye(2), (Vector(2) << 0.0, 1.0), unit2);
// measurement between x2 and l1: l1-x2=[-0.2;0.3]
fg += JacobianFactor(0, -5*eye(2), 1, 5*eye(2), (Vector(2) << -1.0, 1.5), unit2);
return fg;
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, gradient )
{
GaussianFactorGraph fg = createSimpleGaussianFactorGraph();
// Construct expected gradient
// 2*f(x) = 100*(x1+c[X(1)])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 + 25*(l1-x2-[-0.2;0.3])^2
  // worked out: df/dx1 = 100*[0.1;0.1] + 100*[0.2;-0.1] + 25*[0.0;0.2] = [10+20;10-10+5] = [30;5]
VectorValues expected = map_list_of<Key, Vector>
(1, (Vector(2) << 5.0, -12.5))
(2, (Vector(2) << 30.0, 5.0))
(0, (Vector(2) << -25.0, 17.5));
// Check the gradient at delta=0
VectorValues zero = VectorValues::Zero(expected);
VectorValues actual = fg.gradient(zero);
EXPECT(assert_equal(expected, actual));
EXPECT(assert_equal(expected, fg.gradientAtZero()));
// Check the gradient at the solution (should be zero)
VectorValues solution = fg.optimize();
VectorValues actual2 = fg.gradient(solution);
EXPECT(assert_equal(VectorValues::Zero(solution), actual2));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, transposeMultiplication )
{
GaussianFactorGraph A = createSimpleGaussianFactorGraph();
Errors e; e +=
(Vector(2) << 0.0, 0.0),
(Vector(2) << 15.0, 0.0),
(Vector(2) << 0.0,-5.0),
(Vector(2) << -7.5,-5.0);
VectorValues expected;
expected.insert(1, (Vector(2) << -37.5,-50.0));
expected.insert(2, (Vector(2) << -150.0, 25.0));
expected.insert(0, (Vector(2) << 187.5, 25.0));
VectorValues actual = A.transposeMultiply(e);
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(GaussianFactorGraph, eliminate_empty )
{
// eliminate an empty factor
GaussianFactorGraph gfg;
gfg.add(JacobianFactor());
GaussianBayesNet::shared_ptr actualBN;
GaussianFactorGraph::shared_ptr remainingGFG;
boost::tie(actualBN, remainingGFG) = gfg.eliminatePartialSequential(Ordering());
// expected Bayes net is empty
GaussianBayesNet expectedBN;
// expected remaining graph should be the same as the original, still containing the empty factor
GaussianFactorGraph expectedLF = gfg;
// check if the result matches
EXPECT(assert_equal(*actualBN, expectedBN));
EXPECT(assert_equal(*remainingGFG, expectedLF));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, matrices2 )
{
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();
Matrix A; Vector b; boost::tie(A,b) = gfg.jacobian();
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
EXPECT(assert_equal(A.transpose()*A, AtA));
EXPECT(assert_equal(A.transpose()*b, eta));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd )
{
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();
VectorValues x = map_list_of<Key, Vector>
(0, (Vector(2) << 1,2))
(1, (Vector(2) << 3,4))
(2, (Vector(2) << 5,6));
VectorValues expected;
expected.insert(0, (Vector(2) << -450, -450));
expected.insert(1, (Vector(2) << 0, 0));
expected.insert(2, (Vector(2) << 950, 1050));
VectorValues actual;
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(expected, actual));
// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(2*expected, actual));
}
/* ************************************************************************* */
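// Same example graph, extended with a HessianFactor on keys 1 and 2.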
static GaussianFactorGraph createGaussianFactorGraphWithHessianFactor() {
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();
gfg += HessianFactor(1, 2, 100*eye(2,2), zeros(2,2), (Vector(2) << 0.0, 1.0),
400*eye(2,2), (Vector(2) << 1.0, 1.0), 3.0);
return gfg;
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd2 )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
// brute force
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
Vector X(6); X<<1,2,3,4,5,6;
Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
EXPECT(assert_equal(Y,AtA*X));
VectorValues x = map_list_of<Key, Vector>
(0, (Vector(2) << 1,2))
(1, (Vector(2) << 3,4))
(2, (Vector(2) << 5,6));
VectorValues expected;
expected.insert(0, (Vector(2) << -450, -450));
expected.insert(1, (Vector(2) << 300, 400));
expected.insert(2, (Vector(2) << 2950, 3450));
VectorValues actual;
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(expected, actual));
// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(2*expected, actual));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd3 )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
// brute force
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
Vector X(6); X<<1,2,3,4,5,6;
Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
EXPECT(assert_equal(Y,AtA*X));
double* x = &X[0];
Vector fast_y = gtsam::zero(6);
gfg.multiplyHessianAdd(1.0, x, fast_y.data());
EXPECT(assert_equal(Y, fast_y));
// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, fast_y.data());
EXPECT(assert_equal(2*Y, fast_y));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, matricesMixed )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
Matrix A; Vector b; boost::tie(A,b) = gfg.jacobian(); // incorrect !
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian(); // correct
EXPECT(assert_equal(A.transpose()*A, AtA));
Vector expected = - (Vector(6) << -25, 17.5, 5, -13.5, 29, 4);
EXPECT(assert_equal(expected, eta));
EXPECT(assert_equal(A.transpose()*b, eta));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, gradientAtZero )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
VectorValues expected;
VectorValues actual = gfg.gradientAtZero();
expected.insert(0, (Vector(2) << -25, 17.5));
expected.insert(1, (Vector(2) << 5, -13.5));
expected.insert(2, (Vector(2) << 29, 4));
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, clone ) {
// 2 variables, frontal has dim=4
VerticalBlockMatrix blockMatrix(list_of(4)(2)(1), 4);
blockMatrix.matrix() <<
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4;
GaussianConditional cg(list_of(1)(2), 1, blockMatrix);
GaussianFactorGraph init_graph = createGaussianFactorGraphWithHessianFactor();
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
init_graph.push_back(GaussianConditional(cg));
GaussianFactorGraph exp_graph = createGaussianFactorGraphWithHessianFactor(); // Created separately
exp_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
exp_graph.push_back(GaussianConditional(cg));
GaussianFactorGraph actCloned = init_graph.clone();
EXPECT(assert_equal(init_graph, actCloned)); // Same as the original version
// Apply an in-place change to init_graph and compare
JacobianFactor::shared_ptr jacFactor0 = boost::dynamic_pointer_cast<JacobianFactor>(init_graph.at(0));
CHECK(jacFactor0);
jacFactor0->getA(jacFactor0->begin()) *= 7.;
EXPECT(assert_inequal(init_graph, exp_graph));
EXPECT(assert_equal(exp_graph, actCloned));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, negate ) {
GaussianFactorGraph init_graph = createGaussianFactorGraphWithHessianFactor();
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
GaussianFactorGraph actNegation = init_graph.negate();
GaussianFactorGraph expNegation;
expNegation.push_back(init_graph.at(0)->negate());
expNegation.push_back(init_graph.at(1)->negate());
expNegation.push_back(init_graph.at(2)->negate());
expNegation.push_back(init_graph.at(3)->negate());
expNegation.push_back(init_graph.at(4)->negate());
expNegation.push_back(GaussianFactor::shared_ptr());
EXPECT(assert_equal(expNegation, actNegation));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, hessianDiagonal )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
VectorValues expected;
Matrix infoMatrix = gfg.hessian().first;
Vector d = infoMatrix.diagonal();
VectorValues actual = gfg.hessianDiagonal();
expected.insert(0, d.segment<2>(0));
expected.insert(1, d.segment<2>(2));
expected.insert(2, d.segment<2>(4));
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */
|
import for_mathlib.split_exact
noncomputable theory
open category_theory category_theory.limits
variables {𝓐 : Type*} [category 𝓐] [abelian 𝓐]
-- move me
lemma exact_of_exact_image {X Y Z : 𝓐} (f : X ⟶ Y) (g : Y ⟶ Z) (h : exact f (factor_thru_image g)) :
exact f g :=
by { rw ← limits.image.fac g, exact exact_comp_mono h }
open_locale pseudoelement
lemma exact_factor_thru_image_iff {X Y Z : 𝓐} (f : X ⟶ Y) (g : Y ⟶ Z) :
exact f (factor_thru_image g) ↔ exact f g :=
begin
refine ⟨exact_of_exact_image f g, λ h, abelian.pseudoelement.exact_of_pseudo_exact _ _
⟨λ x, abelian.pseudoelement.zero_of_map_zero (limits.image.ι g)
(abelian.pseudoelement.pseudo_injective_of_mono _) _ _, λ y hy, _⟩⟩,
{ rw [← abelian.pseudoelement.comp_apply, limits.image.fac],
exact (abelian.pseudoelement.pseudo_exact_of_exact h).1 x },
{ replace hy := congr_arg (limits.image.ι g) hy,
rw [abelian.pseudoelement.apply_zero, ← abelian.pseudoelement.comp_apply,
limits.image.fac] at hy,
obtain ⟨a, ha ⟩ := (abelian.pseudoelement.pseudo_exact_of_exact h).2 _ hy,
exact ⟨a, ha⟩ }
end
lemma short_exact_kernel_factor_thru_image {A B : 𝓐} (f : A ⟶ B) :
short_exact (kernel.ι f) (factor_thru_image f) :=
begin
refine ⟨_⟩,
rw exact_factor_thru_image_iff,
apply exact_kernel_ι,
end
lemma iso_of_short_exact_comp_right {X Y Z W : 𝓐} (f : X ⟶ Y) (g : Y ⟶ Z) (h : Z ⟶ W)
(H1 : short_exact f g) (H2 : short_exact f (g ≫ h)) :
is_iso h :=
begin
refine (is_iso_iff_mono_and_epi _).2 ⟨abelian.pseudoelement.mono_of_zero_of_map_zero _ (λ z hz, _),
abelian.pseudoelement.epi_of_pseudo_surjective _ (λ w, _)⟩,
{ haveI := H1.epi,
obtain ⟨y, rfl⟩ := abelian.pseudoelement.pseudo_surjective_of_epi g z,
rw [← abelian.pseudoelement.comp_apply] at hz,
obtain ⟨x, rfl⟩ := (abelian.pseudoelement.pseudo_exact_of_exact H2.exact).2 _ hz,
exact (abelian.pseudoelement.pseudo_exact_of_exact H1.exact).1 x },
{ haveI := H2.epi,
obtain ⟨y, rfl⟩ := abelian.pseudoelement.pseudo_surjective_of_epi (g ≫ h) w,
refine ⟨g y, _⟩,
rw [← abelian.pseudoelement.comp_apply] }
end
|
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng, Stanislas Polu, David Renshaw, OpenAI GPT-f
-/
import mathzoo.imports.miniF2F
open_locale nat rat real big_operators topological_space
theorem mathd_algebra_484 :
real.log 27 / real.log 3 = 3 :=
begin
rw real.log_div_log,
have three_to_three : (27 : ℝ) = (3 : ℝ)^(3 : ℝ), by norm_num,
rw three_to_three,
have trivial_ineq: (0 : ℝ) < (3 : ℝ), by norm_num,
have trivial_neq: (3: ℝ) ≠ (1 : ℝ), by norm_num,
exact real.logb_rpow trivial_ineq trivial_neq,
end |
lemma is_nth_power_nth_power [simp, intro]: "is_nth_power n (x ^ n)" |
lemma synthetic_divmod_0 [simp]: "synthetic_divmod 0 c = (0, 0)" |
%
% Extract variable names from any number of polynomial strings
%
% Syntax: (pvar is the shortened alias for GetVariables)
% >> z = GetVariables(f)
% >> z = GetVariables(f,g,h)
%
% to extract variables from a cell array f of polynomial strings:
% >> z = GetVariables(f{:})
%
% Input: f --- (string) polynomial
% Output: z --- (cell) variable names of f
%
% Example: >> f = '-2*x + 9*x*y^2 - 9*x^2*y^2 + 8*x^3*y^2';
% >> g = '5 - x^3*z';
% >> z = GetVariables(f,g)
% z =
% 'x' 'y' 'z'
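%
% A minimal illustrative sketch of the idea (this is NOT the actual toolbox
% implementation): collect identifiers from each input string and return the
% sorted unique names.
%
%    function z = GetVariables(varargin)
%       names = {};
%       for k = 1:nargin
%          names = [names, regexp(varargin{k}, '[a-zA-Z]\w*', 'match')];
%       end
%       z = unique(names);
%    end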
|
# R - 3.4.1
expand <- function(s) {
unlist(strsplit(s, ""))
}
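# Quick illustrative check (not part of the original snippet):
# expand("abc") splits the string into its individual characters.
print(expand("abc"))   # "a" "b" "c"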
|
import Lean4Axiomatic.AbstractAlgebra
import Lean4Axiomatic.Integer.Core
/-! # Integer addition -/
namespace Lean4Axiomatic.Integer
/-! ## Axioms -/
/--
Definition of addition, and properties that it must satisfy.
All other properties of addition can be derived from these.
**Named parameters**
- `ℤ`: The type of integers.
**Class parameters**
- `Core ℤ`: Required to express most properties of addition.
-/
class Addition {ℕ : outParam Type} [Natural ℕ] (ℤ : Type) [Core (ℕ := ℕ) ℤ] :=
/-- Definition of and syntax for addition. -/
addOp : Add ℤ
/--
Addition preserves equivalence of integers; two equivalent integers are still
equivalent after the same quantity is added to both (on the left or right).
-/
add_substitutive : AA.Substitutive₂ (α := ℤ) (· + ·) AA.tc (· ≃ ·) (· ≃ ·)
/-- Exchanging the operands of an addition does not change the result. -/
add_commutative : AA.Commutative (α := ℤ) (· + ·)
/-- The grouping of the terms in a sum doesn't matter. -/
add_associative : AA.Associative (α := ℤ) (· + ·)
/-- Adding zero to an integer produces the same integer. -/
add_identity : AA.Identity (α := ℤ) 0 (· + ·)
/--
Adding two natural numbers and then converting to an integer gives the same
result as converting each number to an integer and then adding.
-/
add_compatible_from_natural
: AA.Compatible₂ (α := ℕ) (β := ℤ) (↑·) (· + ·) (· + ·)
attribute [instance] Addition.addOp
attribute [instance] Addition.add_associative
attribute [instance] Addition.add_commutative
attribute [instance] Addition.add_compatible_from_natural
attribute [instance] Addition.add_identity
attribute [instance] Addition.add_substitutive
export Addition (addOp)
/-! ## Derived properties -/
variable {ℕ : Type} [Natural ℕ]
variable {ℤ : Type} [Core ℤ] [Addition (ℕ := ℕ) ℤ]
/--
Non-typeclass version of `add_associative`.
Eventually, this should become the axiom and the typeclass should be derived.
-/
theorem add_assoc {a b c : ℤ} : (a + b) + c ≃ a + (b + c) := AA.assoc
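/--
Non-typeclass version of `add_commutative`, analogous to `add_assoc` above.

Illustrative sketch only: it assumes `AA.comm` is the field accessor of
`AA.Commutative`, mirroring how `AA.assoc` accesses `AA.Associative`.
-/
theorem add_comm {a b : ℤ} : a + b ≃ b + a := AA.comm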
end Lean4Axiomatic.Integer
|
[STATEMENT]
theorem IF2map_id: "IF2map id = id"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. IF2map id = id
[PROOF STEP]
apply (rule sym)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. id = IF2map id
[PROOF STEP]
apply (rule conjunct2[OF IFmap_unique])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. ?u3 \<circ> ctor1 = ctor1 \<circ> F1map id ?u3 id
2. id \<circ> ctor2 = ctor2 \<circ> F2map id ?u3 id
[PROOF STEP]
apply (rule trans[OF id_o])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. ctor1 = ctor1 \<circ> F1map id id id
2. id \<circ> ctor2 = ctor2 \<circ> F2map id id id
[PROOF STEP]
apply (rule trans[OF sym[OF o_id]])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. ctor1 \<circ> id = ctor1 \<circ> F1map id id id
2. id \<circ> ctor2 = ctor2 \<circ> F2map id id id
[PROOF STEP]
apply (rule arg_cong[OF sym[OF F1.map_id0]])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. id \<circ> ctor2 = ctor2 \<circ> F2map id id id
[PROOF STEP]
apply (rule trans[OF id_o])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ctor2 = ctor2 \<circ> F2map id id id
[PROOF STEP]
apply (rule trans[OF sym[OF o_id]])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ctor2 \<circ> id = ctor2 \<circ> F2map id id id
[PROOF STEP]
apply (rule arg_cong[OF sym[OF F2.map_id0]])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
From mathcomp.ssreflect
Require Import ssreflect ssrbool ssrnat eqtype ssrfun seq.
From mathcomp
Require Import path.
Require Import Eqdep.
Require Import Relation_Operators.
From fcsl
Require Import pred prelude ordtype finmap pcm unionmap heap.
From DiSeL
Require Import Domain Freshness State EqTypeX DepMaps Protocols Worlds NetworkSem Rely.
From DiSeL
Require Import Actions Injection Process Always HoareTriples InductiveInv.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
(*****************************************************************************
[Unary Hoare-style specifications and auxiliary lemmas].
This file borrows basic definition of binary-to-unary Hoare triple
encoding (i.e., logvar and binarify definitions) from the development
of FCSL by Nanevski et al.
(FCSL is available at http://software.imdea.org/fcsl)
*****************************************************************************)
(* Spec s is parametrized by a ghost variable of type A *)
Definition logvar {B A} (s : A -> spec B) : spec B :=
(fun i => exists x : A, (s x).1 i,
fun y i m => forall x : A, (s x).2 y i m).
(* Representing q as a unary postcondition, including the precondition *)
Definition binarify {A} (p : pre) (q : cont A) : spec A :=
(p, fun i y m => p i -> q y m).
Notation "'DHT' [ this , W ] ( p , q ) " :=
(DTbin this W (binarify p q)) (at level 0,
format "'[hv ' DHT [ this , W ] ( '[' p , '/' q ']' ) ']'").
(* A unary Hoare-style specification *)
Notation "{ x .. y }, 'DHT' [ this , W ] ( p , q )" :=
(DTbin this W (logvar (fun x => .. (logvar (fun y => binarify p q)) .. )))
(at level 0, x binder, y binder, right associativity,
format "'[hv ' { x .. y }, '/ ' DHT [ this , W ] ( '[' p , '/' q ']' ) ']'").
Section BasicRules.
Variable this : nid.
(* We can always assume coherence of the state *)
Lemma vrf_coh W A (e : DT this W A) i r :
(i \In Coh W -> verify i e r) -> verify i e r.
Proof.
by move=>H C; apply: H.
Qed.
(* stability of preconditions *)
Lemma vrf_pre W A (e : DT this W A) i i' (k : cont A) :
verify i e k -> network_rely W this i i' -> verify i' e k.
Proof.
move=>H M Ci' t H'; case: (rely_coh M)=>Ci _.
by apply: aft_imp (alw_envs (H Ci t H') M).
Qed.
(* stability of postconditions *)
Lemma vrf_post W A (e : DT this W A) i (k : cont A) :
verify i e k ->
verify i e (fun x m => forall m', network_rely W this m m' -> k x m').
Proof.
move=>H Ci t H'; move: (alw_envsq (H Ci t H')).
apply: alw_imp=>s p Cs H2 s3 M v E; apply: H2 E _ M.
Qed.
(* An inference rule for the sequential composition *)
Lemma bind_rule W A B (e1 : DT this W A) (e2 : A -> DT this W B) i
(q : cont A) (r : cont B) :
verify i e1 q ->
(forall y j, q y j -> j \In Coh W -> verify j (e2 y) r) ->
verify i (bind e1 e2) r.
Proof.
move=>H1 H2 Ci t [->|[t'][H3 H4]].
- by apply: alw_unfin=>//; move/alw_coh: (H1 Unfinished (prog_unfin e1)).
by apply: aft_bnd H3 _; move/(H1 Ci): H4; apply: aft_imp=>y j Cj H; apply: H2.
Qed.
Arguments bind_rule [W A B e1 e2 i].
Lemma step W A B (e1 : DT this W A) (e2 : A -> DT this W B) i (r : cont B) :
verify i e1 (fun y m => verify m (e2 y) r) ->
verify i (bind e1 e2) r.
Proof. by move=>H; apply: (bind_rule (fun y m => verify m (e2 y) r)). Qed.
(* Inference rules for the calls to an already verified function f *)
Lemma call_rule' W A i (f : DT this W A) (k : cont A) :
(* Verify precondition of the call *)
(i \In Coh W -> pre_of f i) ->
(* Verify the rest out of the postcondition *)
(forall x m, post_of f i x m -> m \In Coh W -> k x m) ->
verify i f k.
Proof.
case: f=>s [e] /= H H1 H2 Ci t H3.
apply: aft_imp (H i t (H1 Ci) Ci H3).
by move=>v m Cm H4; apply: H2.
Qed.
(* Same lemma for unary postconditions *)
Lemma call_rule W A (p : Pred state) (q : A -> Pred state) i
{e} (k : cont A) :
(i \In Coh W -> p i) ->
(forall x m, q x m -> m \In Coh W -> k x m) ->
verify i (@with_spec this W A (binarify p q) e) k.
Proof.
move=>H1 H2; apply: vrf_coh=>C; apply: call_rule'=>//.
by move=>x m /(_ (H1 C)); apply: H2.
Qed.
(* Lemmas for manipulating ghost variables *)
Section GhostRules.
Variables (W : world) (A B C : Type).
(* Weakening of the continuation postcondition *)
Lemma vrf_mono (e : DT this W A) i (r1 r2 : cont A) :
r1 <== r2 -> verify i e r1 -> verify i e r2.
Proof. by move=>T H1 C' t; move/(H1 C'); apply: aft_imp=>v m _; apply: T. Qed.
Variable (e : DT this W A).
(* "Uncurrying" the ghosts in the specification s *)
Lemma ghE (s : B -> C -> spec A) :
conseq e (logvar (fun x => logvar (s x))) <->
conseq e (logvar (fun xy => s xy.1 xy.2)).
Proof.
split.
- move=>/= H1 i [[x y]] H2.
have: exists x1 y1, (s x1 y1).1 i by exists x, y.
by move/H1; apply: vrf_mono=>y1 m1 T1 [x2 y2]; apply: (T1 x2 y2).
move=>/= H1 i [x][y] H2.
have: exists x, (s x.1 x.2).1 i by exists (x, y).
by move/H1; apply: vrf_mono=>y1 m1 T1 x2 y2; apply: (T1 (x2, y2)).
Qed.
(* Pulling the ghosts out of the specification *)
Lemma ghC (p : B -> pre) (q : B -> A -> pre) :
(forall i x, p x i -> i \In Coh W -> verify i e (q x)) ->
conseq e (logvar (fun x => binarify (p x) (q x))).
Proof.
move=>H i /= [x Hp] Ci t Ht.
have S : alwsafe i t by apply: alw_imp (H i x Hp Ci Ci t Ht).
by apply/aftA=>// y; apply/aftI=>// /H; apply.
Qed.
(********************************************)
(* Lemmas for instantiating ghost variables *)
(********************************************)
Variables (s : C -> spec A) (f : DTbin this W (logvar s)).
(* helper lemma, to express the instantiation *)
Lemma gh_conseq t : conseq f (s t).
Proof.
case E: (s t)=>[a b] h /= H; apply: call_rule'=>[|x m].
- by exists t; rewrite E.
by move/(_ t); rewrite E.
Qed.
(* Instantiating the ghost of a call *)
Lemma gh_ex g i (k : cont A) :
verify i (do' (@gh_conseq g)) k ->
verify i (@with_spec this W A (logvar s) f) k.
Proof. by []. Qed.
End GhostRules.
Arguments gh_ex [W A C s f].
Lemma act_rule W A (a: action W A this) i (r : cont A) :
(forall j, network_rely W this i j -> a_safe a j /\
forall y k m, (exists pf : a_safe a j, a_step pf k y) -> network_rely W this k m -> r y m) ->
verify i (act a) r.
Proof.
move=>H C p; case=>Z; subst p; first by apply: (alw_unfin C).
apply: (alw_act C)=>j R; case: (H j R)=>{H}S H; exists S.
split=>//k v m St R' v'[]<-.
have X: (exists pf : a_safe a j, a_step pf k v) by exists S.
by apply: (H _ _ _ X R').
Qed.
Lemma ret_rule W A i (v : A) (r : cont A) :
(forall m, network_rely W this i m -> r v m) ->
verify i (ret this W v) r.
Proof.
move=>H C p; case=>Z; subst p; first by apply: alw_unfin.
by apply: alw_ret=>//m R v'[]<-; apply: H.
Qed.
End BasicRules.
Section InjectLemmas.
Variable this : nid.
Variables (W V : world) (K : hooks) (A : Type) (w : injects V W K).
Notation W2 := (inj_ext w).
Variable (e1 : DT this V A).
Lemma inject_rule i j (r : cont A) :
i \In Coh V ->
verify i e1 (fun x i' => forall j',
i' \+ j' \In Coh W -> network_rely W2 this j j' -> r x (i' \+ j')) ->
verify (i \+ j) (inject w e1) r.
Proof.
move=>Ci H C t [->|[t' [H' ->{t}]]]; first by apply: alw_unfin.
move/aft_inject: {H H'} (H Ci _ H'); move/(_ _ _ w _ C).
apply: aft_imp=>v s Cs [i'][j'][E] Ci' S'.
by rewrite {s}E in Cs *; apply.
Qed.
End InjectLemmas.
Section InductiveInvLemmas.
Variable pr : protocol.
Notation l := (plab pr).
Variable I : dstatelet -> pred nid -> Prop.
Variable ii : InductiveInv pr I.
(* Tailored modal always-lemma *)
Variables (A : Type) (this: nid).
Notation V := (mkWorld pr).
Notation W := (mkWorld (ProtocolWithIndInv ii)).
Variable (e : DT this V A).
(*
[Inference rule for invariant strengthening]
This rule essentially means that we can always verify the program under
stronger assumptions (i.e., in a protocol enriched with the inductive
invariant), if we can provide this protocol in the first place. We can
then also make use of the invariant.
*)
Notation getS i := (getStatelet i l).
Lemma with_inv_rule' i (r : cont A) :
verify i e (fun x m =>
I (getS m) (nodes pr (getS m)) -> r x m) ->
verify i (with_inv ii e) r.
Proof.
move=> H C t [->|[t' [H' ->{t}]]]; first by apply: alw_unfin.
move/aft_ind_inv: {H H'}(H (with_inv_coh C) _ H')=>/(_ _ _ C).
apply: aft_imp=>v m _[C']; apply.
by case: C'=>_ _ _ _/(_ l); rewrite prEq; case.
Qed.
Lemma with_inv_rule i (r : cont A) :
verify i e (fun x m => r x m) ->
verify i (with_inv ii e) r.
Proof.
move=>H; apply: with_inv_rule'.
by move=>H1 p H2; move: (H H1 p H2)=>G; apply: (aft_imp _ G).
Qed.
End InductiveInvLemmas.
|
/* specfunc/hyperg_2F0.c
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* Author: G. Jungman */
#include <config.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_sf_hyperg.h>
#include "error.h"
#include "hyperg.h"
int
gsl_sf_hyperg_2F0_e(const double a, const double b, const double x, gsl_sf_result * result)
{
if(x < 0.0) {
/* Use "definition" 2F0(a,b,x) = (-1/x)^a U(a,1+a-b,-1/x).
*/
gsl_sf_result U;
double pre = pow(-1.0/x, a);
int stat_U = gsl_sf_hyperg_U_e(a, 1.0+a-b, -1.0/x, &U);
result->val = pre * U.val;
result->err = GSL_DBL_EPSILON * fabs(result->val) + pre * U.err;
return stat_U;
}
else if(x == 0.0) {
result->val = 1.0;
result->err = 0.0;
return GSL_SUCCESS;
}
else {
/* Use asymptotic series. ??
*/
/* return hyperg_2F0_series(a, b, x, -1, result, &prec); */
DOMAIN_ERROR(result);
}
}
/*-*-*-*-*-*-*-*-*-* Functions w/ Natural Prototypes *-*-*-*-*-*-*-*-*-*-*/
#include "eval.h"
double gsl_sf_hyperg_2F0(const double a, const double b, const double x)
{
EVAL_RESULT(gsl_sf_hyperg_2F0_e(a, b, x, &result));
}
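/* Illustrative usage sketch (not part of the original source): call the
 * error-handling variant and check the status code; the argument values
 * below are arbitrary examples with x < 0, where the U-function path applies.
 *
 *   gsl_sf_result res;
 *   int status = gsl_sf_hyperg_2F0_e(1.0, 0.5, -0.1, &res);
 *   if (status == GSL_SUCCESS)
 *     printf("2F0 = %g +/- %g\n", res.val, res.err);
 */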
|
lemma compact_UN [intro]: "finite A \<Longrightarrow> (\<And>x. x \<in> A \<Longrightarrow> compact (B x)) \<Longrightarrow> compact (\<Union>x\<in>A. B x)" |
-----------------------------------------------------------------------------
-- |
-- Module : Berp.Base.Operators
-- Copyright : (c) 2010 Bernie Pope
-- License : BSD-style
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : ghc
--
-- Implementation of Python's operators. Where possible we should try to
-- specialise them to commonly used types.
--
-- Note: Complex numbers intentionally don't have an ordering.
--
-----------------------------------------------------------------------------
module Berp.Base.Operators
( (+), (-), (*), (/), (==), (<), (>), (<=), (>=), (.), and, or, (%)
, unaryMinus, unaryPlus, invert, not
, modIntIntInt
, addIntIntInt
, addIntFloatFloat
, addIntComplexComplex
, subIntIntInt
, subIntFloatFloat
, subIntComplexComplex
, mulIntIntInt
, mulIntFloatFloat
, mulIntComplexComplex
, divIntIntInt
, divIntFloatFloat
, divIntComplexComplex
, leIntIntBool
, leIntFloatBool
, gtIntIntBool
, gtIntFloatBool
, eqIntIntBool
, eqIntFloatBool
, eqIntComplexBool
, ltIntIntBool
, ltIntFloatBool
, geIntIntBool
, geIntFloatBool
, addFloatFloatFloat
, addFloatIntFloat
, addFloatComplexComplex
, subFloatFloatFloat
, subFloatIntFloat
, subFloatComplexComplex
, mulFloatFloatFloat
, mulFloatIntFloat
, mulFloatComplexComplex
, divFloatFloatFloat
, divFloatIntFloat
, divFloatComplexComplex
, leFloatFloatBool
, leFloatIntBool
, gtFloatFloatBool
, gtFloatIntBool
, eqFloatFloatBool
, eqFloatIntBool
, eqFloatComplexBool
, ltFloatFloatBool
, ltFloatIntBool
, geFloatFloatBool
, geFloatIntBool
, addComplexComplexComplex
, addComplexIntComplex
, addComplexFloatComplex
, subComplexComplexComplex
, subComplexIntComplex
, subComplexFloatComplex
, mulComplexComplexComplex
, mulComplexIntComplex
, mulComplexFloatComplex
, divComplexComplexComplex
, divComplexIntComplex
, divComplexFloatComplex
, eqComplexComplexBool
, eqComplexIntBool
, eqComplexFloatBool
)
where
import Data.Complex (Complex (..))
import Prelude hiding ((+), (-), (*), (.), (/), (==), (<), (>), (<=), (>=), or, and, not)
import qualified Prelude ((==),(<),(>=),(*),(+),(-),(<=),(>),(.),(/), not)
import Berp.Base.Prims (callMethod, raise)
import Berp.Base.Builtins.Exceptions (typeError, zeroDivisionError)
import Berp.Base.Object (lookupAttribute)
import Berp.Base.SemanticTypes (Object (..), Eval)
import Berp.Base.Hash (Hashed)
import Berp.Base.StdNames
( specialModName, specialAddName, specialSubName, specialMulName, specialDivName, specialLeName
, specialGtName, specialEqName, specialLtName, specialGeName)
import Berp.Base.Truth (truth)
import {-# SOURCE #-} Berp.Base.StdTypes.Integer (int)
import {-# SOURCE #-} Berp.Base.StdTypes.Float (float)
import {-# SOURCE #-} Berp.Base.StdTypes.Bool (bool, true, false)
import {-# SOURCE #-} Berp.Base.StdTypes.Complex (complex)
infixl 9 .
infixl 7 *, /, %
infixl 6 +, -
infix 4 ==, <, <=, >=, >
infixr 3 `and`
infixr 2 `or`
{-
Is it possible to minimise the boilerplate?
Maybe Template Haskell?
-}
-- We specialise some operations for particular types rather than
-- going via the method lookups. We fall back to method lookups for the
-- general case.
binop :: Hashed String -> Object -> Object -> Eval Object
binop str arg1 arg2 = callMethod arg1 str [arg2]
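-- For example, (+) applied to two Integer objects short-circuits to
-- addIntIntInt below, while any operand combination without a specialised
-- case falls through to a method lookup via 'binop specialAddName'.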
specialiseOp :: (Object -> a) -> (Object -> a) ->
(a -> a -> r) -> (r -> Object) -> Object -> Object -> Eval Object
specialiseOp project1 project2 op build obj1 obj2 =
return $ build (project1 obj1 `op` project2 obj2)
specialiseOpIntIntInt :: (Integer -> Integer -> Integer) -> Object -> Object -> Eval Object
specialiseOpIntIntInt op = specialiseOp object_integer object_integer op int
specialiseOpIntIntBool :: (Integer -> Integer -> Bool) -> Object -> Object -> Eval Object
specialiseOpIntIntBool op = specialiseOp object_integer object_integer op bool
{-
specialiseOpBoolBoolBool :: (Bool -> Bool -> Bool) -> Object -> Object -> Eval Object
specialiseOpBoolBoolBool op = specialiseOp object_bool object_bool op bool
-}
specialiseOpFloatFloatFloat :: (Double -> Double -> Double) -> Object -> Object -> Eval Object
specialiseOpFloatFloatFloat op = specialiseOp object_float object_float op float
specialiseOpFloatFloatBool :: (Double -> Double -> Bool) -> Object -> Object -> Eval Object
specialiseOpFloatFloatBool op = specialiseOp object_float object_float op bool
specialiseOpIntFloatFloat :: (Double -> Double -> Double) -> Object -> Object -> Eval Object
specialiseOpIntFloatFloat op =
specialiseOp (fromInteger Prelude.. object_integer) object_float op float
specialiseOpIntFloatBool :: (Double -> Double -> Bool) -> Object -> Object -> Eval Object
specialiseOpIntFloatBool op =
specialiseOp (fromInteger Prelude.. object_integer) object_float op bool
specialiseOpFloatIntFloat :: (Double -> Double -> Double) -> Object -> Object -> Eval Object
specialiseOpFloatIntFloat op =
specialiseOp object_float (fromInteger Prelude.. object_integer) op float
specialiseOpFloatIntBool :: (Double -> Double -> Bool) -> Object -> Object -> Eval Object
specialiseOpFloatIntBool op =
specialiseOp object_float (fromInteger Prelude.. object_integer) op bool
type ComplexD = Complex Double
specialiseOpComplexComplexComplex :: (ComplexD -> ComplexD -> ComplexD) -> Object -> Object -> Eval Object
specialiseOpComplexComplexComplex op = specialiseOp object_complex object_complex op complex
specialiseOpComplexComplexBool :: (ComplexD -> ComplexD -> Bool) -> Object -> Object -> Eval Object
specialiseOpComplexComplexBool op = specialiseOp object_complex object_complex op bool
specialiseOpIntComplexBool :: (ComplexD -> ComplexD -> Bool) -> Object -> Object -> Eval Object
specialiseOpIntComplexBool op = specialiseOp (fromInteger Prelude.. object_integer) object_complex op bool
specialiseOpComplexIntBool :: (ComplexD -> ComplexD -> Bool) -> Object -> Object -> Eval Object
specialiseOpComplexIntBool op = specialiseOp object_complex (fromInteger Prelude.. object_integer) op bool
specialiseOpFloatComplexBool :: (ComplexD -> ComplexD -> Bool) -> Object -> Object -> Eval Object
specialiseOpFloatComplexBool op = specialiseOp (realToFrac Prelude.. object_float) object_complex op bool
specialiseOpComplexFloatBool :: (ComplexD -> ComplexD -> Bool) -> Object -> Object -> Eval Object
specialiseOpComplexFloatBool op = specialiseOp object_complex (realToFrac Prelude.. object_float) op bool
specialiseOpFloatComplexComplex :: (ComplexD -> ComplexD -> ComplexD) -> Object -> Object -> Eval Object
specialiseOpFloatComplexComplex op =
specialiseOp (realToFrac Prelude.. object_float) object_complex op complex
specialiseOpComplexFloatComplex :: (ComplexD -> ComplexD -> ComplexD) -> Object -> Object -> Eval Object
specialiseOpComplexFloatComplex op =
specialiseOp object_complex (realToFrac Prelude.. object_float) op complex
specialiseOpIntComplexComplex :: (ComplexD -> ComplexD -> ComplexD) -> Object -> Object -> Eval Object
specialiseOpIntComplexComplex op =
specialiseOp (fromInteger Prelude.. object_integer) object_complex op complex
specialiseOpComplexIntComplex :: (ComplexD -> ComplexD -> ComplexD) -> Object -> Object -> Eval Object
specialiseOpComplexIntComplex op =
specialiseOp object_complex (fromInteger Prelude.. object_integer) op complex
(%), (+), (-), (*), (/), (==), (<), (>), (<=), (>=), or, and :: Object -> Object -> Eval Object
modIntIntInt :: Object -> Object -> Eval Object
modIntIntInt = specialiseOpIntIntInt (Prelude.mod)
-- XXX fixme
(%) obj1@(Integer {}) obj2@(Integer {}) = modIntIntInt obj1 obj2
(%) x y = binop specialModName x y
addIntIntInt, addFloatFloatFloat, addIntFloatFloat, addFloatIntFloat, addComplexComplexComplex, addIntComplexComplex, addComplexIntComplex, addFloatComplexComplex, addComplexFloatComplex :: Object -> Object -> Eval Object
addIntIntInt = specialiseOpIntIntInt (Prelude.+)
addFloatFloatFloat = specialiseOpFloatFloatFloat (Prelude.+)
addIntFloatFloat = specialiseOpIntFloatFloat (Prelude.+)
addFloatIntFloat = specialiseOpFloatIntFloat (Prelude.+)
addComplexComplexComplex = specialiseOpComplexComplexComplex (Prelude.+)
addIntComplexComplex = specialiseOpIntComplexComplex (Prelude.+)
addComplexIntComplex = specialiseOpComplexIntComplex (Prelude.+)
addFloatComplexComplex = specialiseOpFloatComplexComplex (Prelude.+)
addComplexFloatComplex = specialiseOpComplexFloatComplex (Prelude.+)
(+) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> addIntIntInt obj1 obj2
Float {} -> addIntFloatFloat obj1 obj2
Complex {} -> addIntComplexComplex obj1 obj2
_other -> raise typeError
(+) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> addFloatFloatFloat obj1 obj2
Integer {} -> addFloatIntFloat obj1 obj2
Complex {} -> addFloatComplexComplex obj1 obj2
_other -> raise typeError
(+) obj1@(Complex {}) obj2 =
case obj2 of
Complex {} -> addComplexComplexComplex obj1 obj2
Integer {} -> addComplexIntComplex obj1 obj2
Float {} -> addComplexFloatComplex obj1 obj2
_other -> raise typeError
(+) x y = binop specialAddName x y
subIntIntInt, subFloatFloatFloat, subIntFloatFloat, subFloatIntFloat, subComplexComplexComplex, subIntComplexComplex, subComplexIntComplex, subFloatComplexComplex, subComplexFloatComplex :: Object -> Object -> Eval Object
subIntIntInt = specialiseOpIntIntInt (Prelude.-)
subFloatFloatFloat = specialiseOpFloatFloatFloat (Prelude.-)
subIntFloatFloat = specialiseOpIntFloatFloat (Prelude.-)
subFloatIntFloat = specialiseOpFloatIntFloat (Prelude.-)
subComplexComplexComplex = specialiseOpComplexComplexComplex (Prelude.-)
subIntComplexComplex = specialiseOpIntComplexComplex (Prelude.-)
subComplexIntComplex = specialiseOpComplexIntComplex (Prelude.-)
subFloatComplexComplex = specialiseOpFloatComplexComplex (Prelude.-)
subComplexFloatComplex = specialiseOpComplexFloatComplex (Prelude.-)
(-) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> subIntIntInt obj1 obj2
Float {} -> subIntFloatFloat obj1 obj2
Complex {} -> subIntComplexComplex obj1 obj2
_other -> raise typeError
(-) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> subFloatFloatFloat obj1 obj2
Integer {} -> subFloatIntFloat obj1 obj2
Complex {} -> subFloatComplexComplex obj1 obj2
_other -> raise typeError
(-) obj1@(Complex {}) obj2 =
case obj2 of
Complex {} -> subComplexComplexComplex obj1 obj2
Integer {} -> subComplexIntComplex obj1 obj2
Float {} -> subComplexFloatComplex obj1 obj2
_other -> raise typeError
(-) x y = binop specialSubName x y
mulIntIntInt, mulFloatFloatFloat, mulIntFloatFloat, mulFloatIntFloat, mulComplexComplexComplex, mulIntComplexComplex, mulComplexIntComplex, mulFloatComplexComplex, mulComplexFloatComplex :: Object -> Object -> Eval Object
mulIntIntInt = specialiseOpIntIntInt (Prelude.*)
mulFloatFloatFloat = specialiseOpFloatFloatFloat (Prelude.*)
mulIntFloatFloat = specialiseOpIntFloatFloat (Prelude.*)
mulFloatIntFloat = specialiseOpFloatIntFloat (Prelude.*)
mulComplexComplexComplex = specialiseOpComplexComplexComplex (Prelude.*)
mulIntComplexComplex = specialiseOpIntComplexComplex (Prelude.*)
mulComplexIntComplex = specialiseOpComplexIntComplex (Prelude.*)
mulFloatComplexComplex = specialiseOpFloatComplexComplex (Prelude.*)
mulComplexFloatComplex = specialiseOpComplexFloatComplex (Prelude.*)
(*) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> mulIntIntInt obj1 obj2
Float {} -> mulIntFloatFloat obj1 obj2
Complex {} -> mulIntComplexComplex obj1 obj2
_other -> raise typeError
(*) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> mulFloatFloatFloat obj1 obj2
Integer {} -> mulFloatIntFloat obj1 obj2
Complex {} -> mulFloatComplexComplex obj1 obj2
_other -> raise typeError
(*) obj1@(Complex {}) obj2 =
case obj2 of
Complex {} -> mulComplexComplexComplex obj1 obj2
Integer {} -> mulComplexIntComplex obj1 obj2
Float {} -> mulComplexFloatComplex obj1 obj2
_other -> raise typeError
(*) x y = binop specialMulName x y
checkDivByZero :: Num a => a -> Eval Object -> Eval Object
checkDivByZero denom comp
| denom Prelude.== 0 = raise zeroDivisionError
| otherwise = comp
divIntIntInt, divFloatFloatFloat, divIntFloatFloat, divFloatIntFloat, divComplexComplexComplex, divIntComplexComplex, divComplexIntComplex, divFloatComplexComplex, divComplexFloatComplex :: Object -> Object -> Eval Object
divIntIntInt obj1 obj2 =
checkDivByZero (object_integer obj2) $ specialiseOpIntIntInt (Prelude.div) obj1 obj2
divFloatFloatFloat obj1 obj2 =
checkDivByZero (object_float obj2) $ specialiseOpFloatFloatFloat (Prelude./) obj1 obj2
divIntFloatFloat obj1 obj2 =
checkDivByZero (object_float obj2) $ specialiseOpIntFloatFloat (Prelude./) obj1 obj2
divFloatIntFloat obj1 obj2 =
checkDivByZero (object_integer obj2) $ specialiseOpFloatIntFloat (Prelude./) obj1 obj2
divComplexComplexComplex obj1 obj2 =
checkDivByZero (object_complex obj2) $ specialiseOpComplexComplexComplex (Prelude./) obj1 obj2
divIntComplexComplex obj1 obj2 =
checkDivByZero (object_complex obj2) $ specialiseOpIntComplexComplex (Prelude./) obj1 obj2
divComplexIntComplex obj1 obj2 =
checkDivByZero (object_integer obj2) $ specialiseOpComplexIntComplex (Prelude./) obj1 obj2
divFloatComplexComplex obj1 obj2 =
checkDivByZero (object_complex obj2) $ specialiseOpFloatComplexComplex (Prelude./) obj1 obj2
divComplexFloatComplex obj1 obj2 =
checkDivByZero (object_float obj2) $ specialiseOpComplexFloatComplex (Prelude./) obj1 obj2
(/) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> divIntIntInt obj1 obj2
Float {} -> divIntFloatFloat obj1 obj2
Complex {} -> divIntComplexComplex obj1 obj2
_other -> raise typeError
(/) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> divFloatFloatFloat obj1 obj2
Integer {} -> divFloatIntFloat obj1 obj2
Complex {} -> divFloatComplexComplex obj1 obj2
_other -> raise typeError
(/) obj1@(Complex {}) obj2 =
case obj2 of
Complex {} -> divComplexComplexComplex obj1 obj2
Integer {} -> divComplexIntComplex obj1 obj2
Float {} -> divComplexFloatComplex obj1 obj2
_other -> raise typeError
(/) x y = binop specialDivName x y
leIntIntBool, leFloatFloatBool, leIntFloatBool, leFloatIntBool :: Object -> Object -> Eval Object
leIntIntBool = specialiseOpIntIntBool (Prelude.<=)
leFloatFloatBool = specialiseOpFloatFloatBool (Prelude.<=)
leIntFloatBool = specialiseOpIntFloatBool (Prelude.<=)
leFloatIntBool = specialiseOpFloatIntBool (Prelude.<=)
(<=) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> leIntIntBool obj1 obj2
Float {} -> leIntFloatBool obj1 obj2
_other -> raise typeError
(<=) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> leFloatFloatBool obj1 obj2
Integer {} -> leFloatIntBool obj1 obj2
_other -> raise typeError
(<=) x y = binop specialLeName x y
gtIntIntBool, gtFloatFloatBool, gtIntFloatBool, gtFloatIntBool :: Object -> Object -> Eval Object
gtIntIntBool = specialiseOpIntIntBool (Prelude.>)
gtFloatFloatBool = specialiseOpFloatFloatBool (Prelude.>)
gtIntFloatBool = specialiseOpIntFloatBool (Prelude.>)
gtFloatIntBool = specialiseOpFloatIntBool (Prelude.>)
(>) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> gtIntIntBool obj1 obj2
Float {} -> gtIntFloatBool obj1 obj2
_other -> raise typeError
(>) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> gtFloatFloatBool obj1 obj2
Integer {} -> gtFloatIntBool obj1 obj2
_other -> raise typeError
(>) x y = binop specialGtName x y
eqIntIntBool, eqFloatFloatBool, eqIntFloatBool, eqFloatIntBool, eqComplexComplexBool, eqIntComplexBool, eqComplexIntBool, eqFloatComplexBool, eqComplexFloatBool :: Object -> Object -> Eval Object
eqIntIntBool = specialiseOpIntIntBool (Prelude.==)
eqFloatFloatBool = specialiseOpFloatFloatBool (Prelude.==)
eqIntFloatBool = specialiseOpIntFloatBool (Prelude.==)
eqFloatIntBool = specialiseOpFloatIntBool (Prelude.==)
eqComplexComplexBool = specialiseOpComplexComplexBool (Prelude.==)
eqIntComplexBool = specialiseOpIntComplexBool (Prelude.==)
eqComplexIntBool = specialiseOpComplexIntBool (Prelude.==)
eqFloatComplexBool = specialiseOpFloatComplexBool (Prelude.==)
eqComplexFloatBool = specialiseOpComplexFloatBool (Prelude.==)
(==) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> eqIntIntBool obj1 obj2
Float {} -> eqIntFloatBool obj1 obj2
Complex {} -> eqIntComplexBool obj1 obj2
_other -> raise typeError
(==) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> eqFloatFloatBool obj1 obj2
Integer {} -> eqFloatIntBool obj1 obj2
Complex {} -> eqFloatComplexBool obj1 obj2
_other -> raise typeError
(==) obj1@(Complex {}) obj2 =
case obj2 of
Complex {} -> eqComplexComplexBool obj1 obj2
Integer {} -> eqComplexIntBool obj1 obj2
Float {} -> eqComplexFloatBool obj1 obj2
_other -> raise typeError
(==) x y = binop specialEqName x y
ltIntIntBool, ltFloatFloatBool, ltIntFloatBool, ltFloatIntBool :: Object -> Object -> Eval Object
ltIntIntBool = specialiseOpIntIntBool (Prelude.<)
ltFloatFloatBool = specialiseOpFloatFloatBool (Prelude.<)
ltIntFloatBool = specialiseOpIntFloatBool (Prelude.<)
ltFloatIntBool = specialiseOpFloatIntBool (Prelude.<)
(<) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> ltIntIntBool obj1 obj2
Float {} -> ltIntFloatBool obj1 obj2
_other -> raise typeError
(<) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> ltFloatFloatBool obj1 obj2
Integer {} -> ltFloatIntBool obj1 obj2
_other -> raise typeError
(<) x y = binop specialLtName x y
geIntIntBool, geFloatFloatBool, geIntFloatBool, geFloatIntBool :: Object -> Object -> Eval Object
geIntIntBool = specialiseOpIntIntBool (Prelude.>=)
geFloatFloatBool = specialiseOpFloatFloatBool (Prelude.>=)
geIntFloatBool = specialiseOpIntFloatBool (Prelude.>=)
geFloatIntBool = specialiseOpFloatIntBool (Prelude.>=)
(>=) obj1@(Integer {}) obj2 =
case obj2 of
Integer {} -> geIntIntBool obj1 obj2
Float {} -> geIntFloatBool obj1 obj2
_other -> raise typeError
(>=) obj1@(Float {}) obj2 =
case obj2 of
Float {} -> geFloatFloatBool obj1 obj2
Integer {} -> geFloatIntBool obj1 obj2
_other -> raise typeError
(>=) x y = binop specialGeName x y
{-
From the Python Language Reference, sec 5.10 "Boolean Operations"
The expression x and y first evaluates x; if x is false, its value
is returned; otherwise, y is evaluated and the resulting value is
returned.
-}
and obj1 obj2
| truth obj1 = return obj2
| otherwise = return obj1
{-
The expression x or y first evaluates x; if x is true, its value
is returned; otherwise, y is evaluated and the resulting value
is returned.
-}
or obj1 obj2
| truth obj1 = return obj1
| otherwise = return obj2
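-- For instance (mirroring CPython): @0 and 5@ returns the first operand
-- (0, which is falsy), while @0 or 5@ returns the second operand (5). Both
-- return one of the argument objects unchanged rather than a canonical
-- boolean, exactly as the language reference quoted above describes.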
(.) :: Object -> Hashed String -> Eval Object
(.) object ident = lookupAttribute object ident
unaryMinus :: Object -> Eval Object
unaryMinus obj@(Integer {}) = return $ int $ negate $ object_integer obj
unaryMinus obj@(Float {}) = return $ float $ negate $ object_float obj
unaryMinus obj@(Complex {}) = return $ complex $ negate $ object_complex obj
unaryMinus _other = error "unary minus applied to a non-numeric object"
-- This is just the identity function
unaryPlus :: Object -> Eval Object
unaryPlus obj@(Integer {}) = return obj
unaryPlus obj@(Float {}) = return obj
unaryPlus obj@(Complex {}) = return obj
-- XXX in CPython this turns the boolean into an integer
unaryPlus obj@(Bool {}) = return obj
unaryPlus _other = error "unary plus applied to a non-numeric object"
invert :: Object -> Eval Object
invert (Integer {}) = error "bitwise inversion not implemented"
invert _other = error "unary invert applied to a non integer"
not :: Object -> Eval Object
not obj
| Prelude.not $ truth obj = return true
| otherwise = return false
|
Definition foo := True.
Section foo.
Global Arguments foo / .
End foo.
|
** Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
** See https://llvm.org/LICENSE.txt for license information.
** SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
* Subprograms with many arguments
parameter(n = 4)
integer result(n), expect(n)
data expect/11,12,13,14/
call t0(result, 1,2,3,4,5,6,7,8,9,10,11,12,13,14)
call check (result, expect, n)
end
subroutine t0(a,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14)
integer a(*)
call t1 ! to force memory arg ptr to memory
a(1) = i11
a(2) = i12
a(3) = i13
a(4) = i14
end
subroutine t1
end
|
% \section{Mutual Information}
\newpage
\chapter{Conclusion}
\clearpage
\newpage
|
record Is3 (n : Nat) where
constructor MkThree
{auto prf : n === 3}
three : Is3 3
three = MkThree
|
module Data.Time.LocalTime.Internal.TimeZone
export
record TimeZone where
constructor MkTimeZone
timeZoneMinutes: Int
timeZoneSummaryOnly: Bool
timeZoneName: String
-- TODO: Implement TimeZone conversion functions
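-- A minimal illustrative sketch of what such helpers could look like; the
-- names below (utc, minutesToOffset) are assumptions, not part of the
-- original module.
export
utc : TimeZone
utc = MkTimeZone 0 False "UTC"

export
minutesToOffset : TimeZone -> (Int, Int)
minutesToOffset tz = (timeZoneMinutes tz `div` 60, timeZoneMinutes tz `mod` 60)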
|
[STATEMENT]
lemma partition_on_common_refinement:
assumes A: "\<And>P. P \<in> \<P> \<Longrightarrow> partition_on A P" and "\<P> \<noteq> {}"
shows "partition_on A (common_refinement \<P>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. partition_on A (common_refinement \<P>)
[PROOF STEP]
proof (rule partition_onI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<Union> (common_refinement \<P>) = A
2. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
3. {} \<notin> common_refinement \<P>
[PROOF STEP]
show "\<Union> (common_refinement \<P>) = A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (common_refinement \<P>) = A
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
?P \<in> \<P> \<Longrightarrow> partition_on A ?P
\<P> \<noteq> {}
goal (1 subgoal):
1. \<Union> (common_refinement \<P>) = A
[PROOF STEP]
by (simp add: partition_on_def Union_common_refinement)
[PROOF STATE]
proof (state)
this:
\<Union> (common_refinement \<P>) = A
goal (2 subgoals):
1. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
2. {} \<notin> common_refinement \<P>
[PROOF STEP]
fix P Q
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
2. {} \<notin> common_refinement \<P>
[PROOF STEP]
assume "P \<in> common_refinement \<P>" and "Q \<in> common_refinement \<P>" and "P \<noteq> Q"
[PROOF STATE]
proof (state)
this:
P \<in> common_refinement \<P>
Q \<in> common_refinement \<P>
P \<noteq> Q
goal (2 subgoals):
1. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
2. {} \<notin> common_refinement \<P>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
P \<in> common_refinement \<P>
Q \<in> common_refinement \<P>
P \<noteq> Q
[PROOF STEP]
obtain f g where f: "f \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)" and P: "P = \<Inter> (f ` \<P>)" and "P \<noteq> {}"
and g: "g \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)" and Q: "Q = \<Inter> (g ` \<P>)" and "Q \<noteq> {}"
[PROOF STATE]
proof (prove)
using this:
P \<in> common_refinement \<P>
Q \<in> common_refinement \<P>
P \<noteq> Q
goal (1 subgoal):
1. (\<And>f g. \<lbrakk>f \<in> (\<Pi>\<^sub>E P\<in>\<P>. P); P = \<Inter> (f ` \<P>); P \<noteq> {}; g \<in> (\<Pi>\<^sub>E P\<in>\<P>. P); Q = \<Inter> (g ` \<P>); Q \<noteq> {}\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp add: common_refinement_def)
[PROOF STATE]
proof (state)
this:
f \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
P = \<Inter> (f ` \<P>)
P \<noteq> {}
g \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
Q = \<Inter> (g ` \<P>)
Q \<noteq> {}
goal (2 subgoals):
1. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
2. {} \<notin> common_refinement \<P>
[PROOF STEP]
have "f=g" if "x \<in> P" "x \<in> Q" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f = g
[PROOF STEP]
proof (rule extensionalityI [of _ \<P>])
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. f \<in> extensional \<P>
2. g \<in> extensional \<P>
3. \<And>x. x \<in> \<P> \<Longrightarrow> f x = g x
[PROOF STEP]
fix R
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. f \<in> extensional \<P>
2. g \<in> extensional \<P>
3. \<And>x. x \<in> \<P> \<Longrightarrow> f x = g x
[PROOF STEP]
assume "R \<in> \<P>"
[PROOF STATE]
proof (state)
this:
R \<in> \<P>
goal (3 subgoals):
1. f \<in> extensional \<P>
2. g \<in> extensional \<P>
3. \<And>x. x \<in> \<P> \<Longrightarrow> f x = g x
[PROOF STEP]
with that P Q f g A [unfolded partition_on_def, OF \<open>R \<in> \<P>\<close>]
[PROOF STATE]
proof (chain)
picking this:
x \<in> P
x \<in> Q
P = \<Inter> (f ` \<P>)
Q = \<Inter> (g ` \<P>)
f \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
g \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
\<Union> R = A \<and> disjoint R \<and> {} \<notin> R
R \<in> \<P>
[PROOF STEP]
show "f R = g R"
[PROOF STATE]
proof (prove)
using this:
x \<in> P
x \<in> Q
P = \<Inter> (f ` \<P>)
Q = \<Inter> (g ` \<P>)
f \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
g \<in> (\<Pi>\<^sub>E P\<in>\<P>. P)
\<Union> R = A \<and> disjoint R \<and> {} \<notin> R
R \<in> \<P>
goal (1 subgoal):
1. f R = g R
[PROOF STEP]
by (metis INT_E Int_iff PiE_iff disjointD emptyE)
[PROOF STATE]
proof (state)
this:
f R = g R
goal (2 subgoals):
1. f \<in> extensional \<P>
2. g \<in> extensional \<P>
[PROOF STEP]
qed (use PiE_iff f g in auto)
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x \<in> P; ?x \<in> Q\<rbrakk> \<Longrightarrow> f = g
goal (2 subgoals):
1. \<And>p q. \<lbrakk>p \<in> common_refinement \<P>; q \<in> common_refinement \<P>; p \<noteq> q\<rbrakk> \<Longrightarrow> disjnt p q
2. {} \<notin> common_refinement \<P>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x \<in> P; ?x \<in> Q\<rbrakk> \<Longrightarrow> f = g
[PROOF STEP]
show "disjnt P Q"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> P; ?x \<in> Q\<rbrakk> \<Longrightarrow> f = g
goal (1 subgoal):
1. disjnt P Q
[PROOF STEP]
by (metis P Q \<open>P \<noteq> Q\<close> disjnt_iff)
[PROOF STATE]
proof (state)
this:
disjnt P Q
goal (1 subgoal):
1. {} \<notin> common_refinement \<P>
[PROOF STEP]
qed (simp add: common_refinement_def) |
import category_theory.equivalence
open category_theory
variables {C : Type*} [category C]
variables {D : Type*} [category D]
lemma equiv_reflects_mono {X Y : C} (f : X ⟶ Y) (e : C ≌ D)
(hef : mono (e.functor.map f)) : mono f :=
begin
split,
intros Z g h w,
apply e.functor.map_injective,
rw ← cancel_mono (e.functor.map f),
-- Now we're ready to push everything back to `C`, using the same trick.
sorry
end
|
lemma of_real_in_Ints_iff [simp]: "of_real x \<in> \<int> \<longleftrightarrow> x \<in> \<int>" |
theory PhiSem_CF_Break
imports Phi_System.PhiSem_Formalization_Tools
begin
section \<open>Semantic Model\<close>
subsection \<open>Abnormal\<close>
virtual_datatype \<phi>CF_break_abnormal = \<phi>empty_abnormal +
ABN_break :: unit
debt_axiomatization ABN_break :: \<open>unit abnormal_entry\<close>
where \<phi>CF_break_abnormal_ax: \<open>\<phi>CF_break_abnormal ABN_CONS_OF ABN_break\<close>
interpretation \<phi>CF_break_abnormal ABN_CONS_OF _ _ ABN_break
using \<phi>CF_break_abnormal_ax .
hide_fact \<phi>CF_break_abnormal_ax
subsection \<open>Resource of Scope Frames\<close>
setup \<open>Sign.mandatory_path "RES"\<close>
type_synonym brk_label = nat
type_synonym brk_frame = \<open>RES.brk_label \<rightharpoonup> VAL list option nosep\<close>
setup \<open>Sign.parent_path\<close>
resource_space \<phi>CF_break =
brk_frame :: \<open>{frames::RES.brk_frame. finite (dom frames)}\<close> (partial_map_resource) ..
hide_fact RES.\<phi>CF_break_res_ax
subsection \<open>Fiction of Scope Frames\<close>
fiction_space \<phi>CF_break =
brk_frame :: \<open>RES.brk_frame.basic_fiction \<F>_it\<close>
(identity_fiction_for_partial_mapping_resource RES.brk_frame) ..
hide_fact FIC.\<phi>CF_break_fic_ax
section \<open>\<phi>-Types\<close>
(*
abbreviation Brk_Frame' :: \<open>brk_label \<Rightarrow> (VAL list option,'a) \<phi> \<Rightarrow> (fiction,'a) \<phi>\<close>
where \<open>Brk_Frame' label T \<equiv> (FIC.brk_frame.\<phi> (label \<^bold>\<rightarrow> \<black_circle> (Nosep T)))\<close>
*)
definition Brk_Frame :: \<open>RES.brk_label \<Rightarrow> assn\<close>
where \<open>Brk_Frame label \<equiv> () \<Ztypecolon> FIC.brk_frame.\<phi> (label \<^bold>\<rightarrow> \<black_circle> (Nosep \<circle>))\<close>
definition Brking_Frame :: \<open>RES.brk_label \<Rightarrow> ('v::VALs \<phi>arg \<Rightarrow> assn) \<Rightarrow> assn\<close> ("\<^bold>b\<^bold>r\<^bold>o\<^bold>k\<^bold>e\<^bold>n _ \<^bold>w\<^bold>i\<^bold>t\<^bold>h _" [1000,10] 3)
where \<open>Brking_Frame label S =
(\<exists>*v. S v\<heavy_comma> to_vals (\<phi>arg.dest v) \<Ztypecolon> FIC.brk_frame.\<phi> (label \<^bold>\<rightarrow> \<black_circle> (Nosep (\<black_circle> Identity))))\<close>
lemma Brk_Frame_eq_identity:
\<open>Brk_Frame l = (nosep None \<Ztypecolon> FIC.brk_frame.\<phi> (l \<^bold>\<rightarrow> \<black_circle> Identity))\<close>
unfolding set_eq_iff Brk_Frame_def
by (simp add: \<phi>expns)
lemma Brking_Frame_eq_identity:
\<open>Brking_Frame l S = (\<exists>*v. S v\<heavy_comma> nosep (Some (to_vals (\<phi>arg.dest v))) \<Ztypecolon> FIC.brk_frame.\<phi> (l \<^bold>\<rightarrow> \<black_circle> Identity))\<close>
unfolding set_eq_iff Brking_Frame_def
by (simp add: \<phi>expns)
section \<open>Instruction\<close>
definition op_brk_scope :: \<open>(RES.brk_label \<Rightarrow> ('a::VALs) proc) \<Rightarrow> 'a proc\<close>
where \<open>op_brk_scope F =
RES.brk_frame.\<phi>R_allocate_res_entry (\<lambda>_. True) (Some (nosep None)) (\<lambda>l.
op_try
(F l \<bind> (\<lambda>ret. RES.brk_frame.\<phi>R_set_res (\<lambda>f. f(l := None)) \<ggreater> Return ret))
(\<lambda>a. RES.brk_frame.\<phi>R_get_res_entry l (\<lambda>brk.
RES.brk_frame.\<phi>R_set_res (\<lambda>f. f(l := None)) \<ggreater>
(case nosep.dest brk of Some vs \<Rightarrow> Return (\<phi>arg (from_vals vs))
| None \<Rightarrow> throw a)
)))
\<close>
definition op_break :: \<open>RES.brk_label \<Rightarrow> ('a::VALs, 'ret::VALs) proc'\<close>
where \<open>op_break l = (\<lambda>vs.
RES.brk_frame.\<phi>R_set_res (\<lambda>f. f(l \<mapsto> nosep (Some (to_vals (\<phi>arg.dest vs)))))
\<ggreater> throw (ABN_break.mk ())
)\<close>
lemma op_break_reduce_tail[procedure_simps,simp]:
\<open>(op_break L v \<ggreater> f) = op_break L v\<close>
unfolding op_break_def by simp
definition \<open>sift_brking_frame' l Y E = (Brking_Frame l Y) + (E\<heavy_comma> Brk_Frame l)\<close>
definition sift_brking_frame ("\<^bold>b\<^bold>r\<^bold>e\<^bold>a\<^bold>k _/ \<^bold>w\<^bold>i\<^bold>t\<^bold>h _/ \<^bold>o\<^bold>r _" [1000,10,3] 3)
where \<open>sift_brking_frame = sift_brking_frame'\<close>
declare sift_brking_frame'_def[folded sift_brking_frame_def, assertion_simps_source]
context begin
private lemma alloc_brk_scope[intro!]:
\<open>(\<And>l. \<p>\<r>\<o>\<c> F l \<lbrace> X\<heavy_comma> Brk_Frame l \<longmapsto> Y \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E )
\<Longrightarrow> \<p>\<r>\<o>\<c> RES.brk_frame.\<phi>R_allocate_res_entry (\<lambda>_. True) (Some (nosep None)) F
\<lbrace> X \<longmapsto> Y \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E \<close>
unfolding Brk_Frame_eq_identity
by (rule; simp add: finite_map_freshness)
private lemma dispose_brk_scope:
\<open>\<p>\<r>\<o>\<c> RES.brk_frame.\<phi>R_set_res (\<lambda>f. f(l := None)) \<lbrace> Brk_Frame l \<longmapsto> \<lambda>_. Void \<rbrace>\<close>
unfolding Brk_Frame_eq_identity
by (rule FIC.brk_frame.\<phi>R_dispose_res[where P=\<open>\<lambda>_. True\<close>]; simp)
lemma brk_scope:
\<open> (\<And>l. \<p>\<r>\<o>\<c> f l \<lbrace> X\<heavy_comma> Brk_Frame l \<longmapsto> \<lambda>ret. Y ret\<heavy_comma> Brk_Frame l \<rbrace>
\<t>\<h>\<r>\<o>\<w>\<s> (\<lambda>a. sift_brking_frame l Y' (E a)))
\<Longrightarrow> \<p>\<r>\<o>\<c> op_brk_scope f \<lbrace> X \<longmapsto> \<lambda>ret. Y ret + Y' ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E\<close>
unfolding op_brk_scope_def sift_brking_frame_def sift_brking_frame'_def
apply (rule, rule, rule, assumption, rule)
apply (rule \<phi>CONSEQ'E0, rule dispose_brk_scope[THEN \<phi>frame, simplified], rule)
apply (rule \<phi>CASE)
apply (simp only: Brking_Frame_eq_identity norm_precond_ex, rule, rule, simp, rule)
apply (rule FIC.brk_frame.\<phi>R_dispose_res_frm[where P=\<open>\<lambda>_. True\<close>]; simp)
apply (rule)
apply (simp only: Brk_Frame_eq_identity, rule, simp, rule)
apply (rule \<phi>CONSEQ'E0, rule FIC.brk_frame.\<phi>R_dispose_res_frm[where P=\<open>\<lambda>_. True\<close>]; simp)
by (rule, rule implies_refl)
lemma "_op_break_rule_":
\<open>\<p>\<r>\<o>\<c> op_break l vs \<lbrace> S vs\<heavy_comma> Brk_Frame l \<longmapsto> 0 \<rbrace>
\<t>\<h>\<r>\<o>\<w>\<s> (\<lambda>_. Brking_Frame l S)\<close>
unfolding op_break_def Brking_Frame_eq_identity Brk_Frame_eq_identity
by (rule, rule, simp, simp, simp, rule, \<phi>reason)
end
section \<open>Reasoning Processes\<close>
subsection \<open>sift brking frame\<close>
declare [[\<phi>reason_default_pattern
\<open>?X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' ?l ?Y ?E \<a>\<n>\<d> ?Any\<close>
\<Rightarrow> \<open>?X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' ?l _ _ \<a>\<n>\<d> _\<close> (100)
and \<open>?X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame ?l ?Y ?E \<a>\<n>\<d> ?Any\<close>
\<Rightarrow> \<open>?X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame ?l _ _ \<a>\<n>\<d> _\<close> (100)]]
lemma [\<phi>reason 1010 for \<open>?X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame ?l ?var_Y' ?var_E'\<close>]:
\<open> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y E
\<Longrightarrow> \<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y>[assertion_simps undefined] Y' : Y
\<Longrightarrow> \<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y>[assertion_simps undefined] E' : E
\<Longrightarrow> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame l Y' E'\<close>
unfolding sift_brking_frame_def Simplify_def by simp
lemma [\<phi>reason 1000]:
\<open> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y E
\<Longrightarrow> (\<And>v. Y v \<i>\<m>\<p>\<l>\<i>\<e>\<s> Y' v @action ToSA)
\<Longrightarrow> E \<i>\<m>\<p>\<l>\<i>\<e>\<s> E' @action ToSA
\<Longrightarrow> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame l Y' E'\<close>
unfolding sift_brking_frame_def Simplify_def Action_Tag_def sift_brking_frame'_def
Brking_Frame_def
\<medium_left_bracket> premises X and Y and E
X cases \<medium_left_bracket> E[THEN implies_right_prod] \<medium_right_bracket> for \<open>(\<exists>*v. Y' v\<heavy_comma> to_vals (\<phi>arg.dest v) \<Ztypecolon> _) + (E'\<heavy_comma> Brk_Frame l)\<close> ..
\<medium_left_bracket> Y[THEN implies_right_prod] \<medium_right_bracket> ..
\<medium_right_bracket>. .
lemma [\<phi>reason 3000 for \<open>_ \<i>\<m>\<p>\<l>\<i>\<e>\<s> _ * \<blangle> sift_brking_frame ?l ?Y ?E \<brangle> \<a>\<n>\<d> _\<close>]:
\<open> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame l Y E
\<Longrightarrow> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> 1 * \<blangle> sift_brking_frame l Y E \<brangle> \<a>\<n>\<d> True\<close>
unfolding FOCUS_TAG_def Action_Tag_def
by simp
lemma Brking_Frame_plus:
\<open>Brking_Frame l (Y1 + Y2) = Brking_Frame l Y1 + Brking_Frame l Y2\<close>
unfolding set_eq_iff Brking_Frame_def plus_fun_def distrib_right ExSet_plus by clarify
lemma [\<phi>reason 1200]:
\<open> X1 \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y1 E1
\<Longrightarrow> X2 \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y2 E2
\<Longrightarrow> (X1 + X2) \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l (Y1 + Y2) (E1 + E2)\<close>
unfolding sift_brking_frame'_def Brking_Frame_plus distrib_right
\<medium_left_bracket> premises X1 and X2
cases \<medium_left_bracket> X2 \<medium_right_bracket> for \<open>Brking_Frame l Y1 + Brking_Frame l Y2 + ((E1 \<heavy_comma> Brk_Frame l) + (E2 \<heavy_comma> Brk_Frame l))\<close> by fast
\<medium_left_bracket> X1 \<medium_right_bracket>.
\<medium_right_bracket>. .
(* lemma [\<phi>reason 1200]:
\<open> X1 \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y E
\<Longrightarrow> X2 \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y E
\<Longrightarrow> X1 + X2 \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y E\<close>
using \<phi>CASE_IMP by fastforce *)
lemma [\<phi>reason 1200]:
\<open>Brking_Frame l Y \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y 0\<close>
unfolding sift_brking_frame'_def \<medium_left_bracket> \<medium_right_bracket>. .
lemma Brking_Frame_absorb_item[assertion_simps]:
\<open>((Brking_Frame l Y)\<heavy_comma> X) = Brking_Frame l (\<lambda>v. Y v \<heavy_comma> X)\<close>
unfolding Brking_Frame_def
apply (intro assertion_eq_intro)
\<medium_left_bracket> ;; \<medium_right_bracket>. \<medium_left_bracket> \<medium_right_bracket>. .
lemma Brking_Frame_absorb_subj[assertion_simps]:
\<open>((Brking_Frame l Y) \<s>\<u>\<b>\<j> P) = Brking_Frame l (\<lambda>v. Y v \<s>\<u>\<b>\<j> P)\<close>
unfolding Brking_Frame_def
apply (intro assertion_eq_intro)
\<medium_left_bracket> \<medium_right_bracket>. \<medium_left_bracket> ;; \<medium_right_bracket>. .
lemma Brking_Frame_absorb_ex[assertion_simps]:
\<open>(\<exists>*x. (Brking_Frame l (Y x))) = Brking_Frame l (\<lambda>v. \<exists>*x. Y x v)\<close>
unfolding Brking_Frame_def
apply (intro assertion_eq_intro)
\<medium_left_bracket> \<medium_right_bracket>. \<medium_left_bracket> ;; \<medium_right_bracket>. .
lemma [\<phi>reason 1180]:
\<open> NO_MATCH TYPE('a) TYPE('b)
\<Longrightarrow> ERROR TEXT(\<open>The exits of scope\<close> l \<open>mismatch in return type. One is\<close>
TYPE('a) \<open>while another is\<close> TYPE('b))
\<Longrightarrow> Brking_Frame l Y \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l Y' 0\<close>
for Y :: \<open>'a::VALs \<phi>arg \<Rightarrow> _\<close> and Y' :: \<open>'b::VALs \<phi>arg \<Rightarrow> _\<close>
by blast
lemma [\<phi>reason 1000]:
\<open> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> E\<heavy_comma> \<blangle> Brk_Frame l \<brangle> \<a>\<n>\<d> Any
\<Longrightarrow> X \<i>\<m>\<p>\<l>\<i>\<e>\<s> sift_brking_frame' l 0 E\<close>
unfolding sift_brking_frame'_def FOCUS_TAG_def
\<medium_left_bracket> premises X
X
\<medium_right_bracket>. .
hide_fact Brking_Frame_plus
subsection \<open>ToSA through Brking_Frame\<close>
lemma [\<phi>reason 2200]:
(*The priority must override Void Padding*)
\<open> (\<And>v. S v \<i>\<m>\<p>\<l>\<i>\<e>\<s> R v \<heavy_comma> \<blangle> Y \<brangle> \<a>\<n>\<d> P)
\<Longrightarrow> Brking_Frame l S \<i>\<m>\<p>\<l>\<i>\<e>\<s> Brking_Frame l R \<heavy_comma> \<blangle> Y \<brangle> \<a>\<n>\<d> P\<close>
unfolding Brking_Frame_def FOCUS_TAG_def
\<medium_left_bracket> premises X
X[THEN implies_right_prod]
\<medium_right_bracket>. .
subsection \<open>Syntax hiding technical separation items\<close>
optional_translations (\<phi>hide_brk_frame)
"L" <= "CONST Brk_Frame l\<heavy_comma> L"
"R" <= "R \<heavy_comma> CONST Brk_Frame l"
"R\<heavy_comma> L" <= "R \<heavy_comma> CONST Brk_Frame l\<heavy_comma> L"
"XCONST Void" <= "CONST Brk_Frame l"
\<open>Hides technical SL assertions for control-flow breaking\<close>
declare [[\<phi>hide_brk_frame = false]]
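(* Example (illustrative only): with \<phi>hide_brk_frame enabled, an assertion such as
   \<open>R\<heavy_comma> Brk_Frame l\<heavy_comma> L\<close> is printed simply as \<open>R\<heavy_comma> L\<close>, and a lone \<open>Brk_Frame l\<close>
   prints as \<open>Void\<close>; only the display changes, the underlying terms are untouched. *)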
(*
ML \<open>
val phi_display_brk_frame = Attrib.setup_config_bool \<^binding>\<open>\<phi>display_brk_frame\<close> (K false)
val _ = Theory.setup (
Procedure_Syntax.add_item_printer (\<^const_syntax>\<open>Brk_Frame\<close>, (fn m => fn ctxt => fn X =>
if Config.get ctxt phi_display_brk_frame
then raise Match
else (case m of Phi_Kind.Procedure => NONE
| Phi_Kind.Construction => NONE)
)))
\<close> *)
section \<open>Example\<close>
proc
input \<open>x \<Ztypecolon> \<v>\<a>\<l> T\<heavy_comma> y \<Ztypecolon> \<v>\<a>\<l> U\<close>
output \<open>y \<Ztypecolon> \<v>\<a>\<l> U\<close>
\<medium_left_bracket> brk_scope \<medium_left_bracket> for l1
brk_scope \<medium_left_bracket> for l2
$y "_op_break_rule_"[of l1 \<a>\<r>\<g>2 \<open>\<lambda>ret. Brk_Frame l2\<heavy_comma> y \<Ztypecolon> \<v>\<a>\<l>[ret] U\<close>]
\<medium_right_bracket> .. ;;
assert \<bottom> (*this place is unreachable!*)
\<medium_right_bracket> ..
\<medium_right_bracket> .. .
end
|
lemma fract_poly_add [simp]: "fract_poly (p + q) = fract_poly p + fract_poly q" |
library(Hmisc)
clc <- function() cat(rep("\n",100))
clc()
set.seed(42)
read.data <- function(file) {
# read a Titanic CSV file (used for both the training and the test set)
data <- read.csv(file, sep=',', na.strings=c(''), stringsAsFactors=FALSE)
# drop some columns
data <- subset(data, select = -c(Name, Fare, Ticket))
# correct some column types
#data$Survived <- as.factor(data$Survived)
data$Sex <- as.factor(data$Sex)
data$Embarked <- as.factor(data$Embarked)
data$Pclass <- as.factor(data$Pclass)
if('Survived' %in% colnames(data)) {
data$Survived <- as.factor(data$Survived)
}
return (data)
}
train <- read.data('data/train.csv')
test <- read.data('data/test.csv')
# massage and impute missing data
cabin_to_deck <- function(data) {
data = as.character(data)
for(i in seq(along=data)) {
if (is.na(data[i]))
next
data[i] <- substr(data[i], 1, 1)
}
return (as.factor(data))
}
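# For illustration: cabin_to_deck keeps only the leading deck letter, so an input like
# c("C85", NA, "E46") becomes "C", NA, "E" as a factor (the NAs are imputed below).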
# Cabin
train$Cabin <- impute(cabin_to_deck(train$Cabin), 'random')
test$Cabin <- impute(cabin_to_deck(test$Cabin), 'random')
# Age
train$Age <- impute(train$Age, mean)
test$Age <- impute(test$Age, mean)
# Embarked
# Embarked is a factor, so imputing with the mean is not meaningful here;
# impute randomly instead, as is already done for Cabin above
train$Embarked <- impute(train$Embarked, 'random')
test$Embarked <- impute(test$Embarked, 'random')
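# The model below fits a logistic regression with the main effects plus selected
# pairwise interactions; in R formula syntax, `a : b` denotes the interaction of a and b.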
model <- glm(
Survived ~ Pclass + Sex + Age + Cabin + SibSp + Parch +
Pclass : Sex +
Pclass : Age +
Pclass : Cabin +
Sex : SibSp +
Sex : Parch +
Sex : Age +
Sex : Cabin +
Age : Cabin,
data=train,
family="binomial",
control=list(maxit = 150)
)
summary(model)
anova(model, test="Chisq")
if (TRUE) {
head(test)
test$Survived <- predict(model, newdata=test, type="response")
test$Survived <- round(test$Survived)
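# round() maps the predicted probabilities to 0/1, i.e. it thresholds them near 0.5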
head(test)
summary(test)
write.csv(test[,c("PassengerId", "Survived")], file="predictions.csv", row.names=FALSE, quote=FALSE)
} |
import analysis.convex.basic analysis.convex.combination topology.metric_space.basic
import data.set.finite
import .homotopy_invariance
local attribute [instance]
category_theory.concrete_category.has_coe_to_sort
category_theory.concrete_category.has_coe_to_fun
section subcomplexes_with_indexing
-- weird universe issues without being explicit :(
universes u v w p
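-- `spanned_by_sat R M b s` is the R-submodule of M spanned by the basis vectors `b i`
-- with `i ∈ s`.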
def spanned_by_sat (R : Type*) [comm_ring R] (M : Type*) [add_comm_monoid M] [module R M]
{ι : Type*} (b : basis ι R M) (s : set ι)
: submodule R M :=
submodule.span R (b '' { i | i ∈ s })
lemma finsupp.subtype_domain_single {α : Type*} {M : Type*} [has_zero M]
(p : α → Prop) (a : α) (ha : p a) (m : M)
: finsupp.subtype_domain p (finsupp.single a m) = finsupp.single ⟨a, ha⟩ m :=
begin
rw finsupp.eq_single_iff,
split,
{ rintros ⟨a', _⟩ h, simp at h ⊢, have h' := finsupp.single_apply_ne_zero.mp h, exact h'.left },
{ simp }
end
-- lemma finsupp.subtype_domain_desc {α : Type*} {M : Type*} [has_zero M]
-- (p : α → Prop) [decidable_pred p] (a : α) (m : M)
-- : finsupp.subtype_domain p (finsupp.single a m)
-- = if h : p a then finsupp.single ⟨a, h⟩ m else 0 :=
-- begin
-- split_ifs,
-- { exact finsupp.subtype_domain_single p a h m },
-- { rw finsupp.subtype_domain_eq_zero_iff',
-- intros x hx, apply finsupp.single_eq_of_ne,
-- intro hax, rw hax at h, contradiction }
-- end
noncomputable
def spanned_by_sat_basis (R : Type u) [comm_ring R] (M : Type w) [add_comm_monoid M] [module R M]
{ι : Type p} (b : basis ι R M) (s : set ι)
: basis s R (spanned_by_sat R M b s) := {
repr := {
to_fun := λ x, @finsupp.lsubtype_domain ι R R _ _ _ s (b.repr x),
inv_fun := λ f, ⟨b.repr.inv_fun (finsupp.lmap_domain R R subtype.val f),
by { dsimp [spanned_by_sat],
rw basis.mem_span_iff _ b _ (set.image_subset_range _ _),
intros i hi,
simp [finsupp.map_domain] at hi,
obtain ⟨j, h, h'⟩ := finset.exists_ne_zero_of_sum_ne_zero hi,
simp at h', have h'' := finsupp.single_apply_ne_zero.mp h',
rw h''.left,
exact set.mem_image_of_mem _ j.property }⟩,
map_add' := by { rintros ⟨x, hx⟩ ⟨y, hy⟩, dsimp, repeat { rw map_add } },
map_smul' := by { rintros r ⟨x, hx⟩, dsimp, repeat { rw map_smul } },
left_inv := by { rintro ⟨x, hx⟩, ext, rw subtype.coe_mk,
suffices : set.eq_on ((((b.repr.symm : (ι →₀ R) →ₗ[R] M).comp
(finsupp.lmap_domain R R subtype.val)).comp
(finsupp.lsubtype_domain s)).comp
(b.repr : M →ₗ[R] (ι →₀ R)))
(@linear_map.id R M _ _ _)
(b '' { i | i ∈ s }),
{ exact linear_map.eq_on_span this hx },
rintros y ⟨i, hi, h⟩, subst h,
dsimp [finsupp.lsubtype_domain],
rw basis.repr_self,
rw finsupp.subtype_domain_single (λ x, x ∈ s) i hi,
rw finsupp.map_domain_single,
exact basis.repr_symm_single_one b i },
right_inv := by { intro f, ext i,
dsimp [finsupp.lsubtype_domain],
rw linear_equiv.apply_symm_apply,
exact finsupp.map_domain_apply subtype.val_injective f i }
}
}
lemma spanned_by_sat_basis_apply (R : Type*) [comm_ring R] (M : Type*) [add_comm_monoid M] [module R M]
{ι : Type p} (b : basis ι R M) (s : set ι)
(i : ι) (hi : i ∈ s)
: spanned_by_sat_basis R M b s ⟨i, hi⟩
= ⟨b i, submodule.subset_span (set.mem_image_of_mem b hi)⟩ :=
begin
apply subtype.eq, simp [spanned_by_sat_basis],
end
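-- `subcomplex_spanned_by` carves a subcomplex out of C: in each degree i it takes the
-- submodule spanned by the basis vectors indexed by `s i`; the hypothesis `s_mono`
-- guarantees that these submodules are preserved by the differentials of C.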
def subcomplex_spanned_by (R : Type u) [comm_ring R] {ι' : Type*} {c : complex_shape ι'}
(C : homological_complex (Module.{w} R) c)
{ι : ι' → Type p} (b : Π (i : ι'), basis (ι i) R (C.X i))
(s : Π (i : ι'), set (ι i))
(s_mono : Π i j, c.rel i j →
submodule.map (C.d i j) (spanned_by_sat R (C.X i) (b i) (s i))
≤ spanned_by_sat R (C.X j) (b j) (s j))
: homological_complex (Module.{w} R) c :=
Module.subcomplex_of_compatible_submodules C (λ i, spanned_by_sat R (C.X i) (b i) (s i))
(by { rintros i j y ⟨x, ⟨hx, h⟩⟩,
subst h,
by_cases c.rel i j,
{ exact s_mono i j h (submodule.mem_map_of_mem hx) },
{ rw C.shape' i j h, simp } })
def subcomplex_spanned_by_map
(R : Type u) [comm_ring R] {ι' : Type*} {c : complex_shape ι'}
(C1 C2 : homological_complex (Module.{w} R) c)
(f : C1 ⟶ C2)
{ι1 ι2 : ι' → Type p}
(b1 : Π (i : ι'), basis (ι1 i) R (C1.X i))
(b2 : Π (i : ι'), basis (ι2 i) R (C2.X i))
(s1 : Π (i : ι'), set (ι1 i)) (s2 : Π (i : ι'), set (ι2 i))
(s1_mono : Π i j, c.rel i j →
submodule.map (C1.d i j) (spanned_by_sat R (C1.X i) (b1 i) (s1 i))
≤ spanned_by_sat R (C1.X j) (b1 j) (s1 j))
(s2_mono : Π i j, c.rel i j →
submodule.map (C2.d i j) (spanned_by_sat R (C2.X i) (b2 i) (s2 i))
≤ spanned_by_sat R (C2.X j) (b2 j) (s2 j))
(hcompat : ∀ i ℓ, ℓ ∈ s1 i → f.f i (b1 i ℓ) ∈ (spanned_by_sat R (C2.X i) (b2 i) (s2 i)))
: subcomplex_spanned_by R C1 b1 s1 s1_mono ⟶ subcomplex_spanned_by R C2 b2 s2 s2_mono := {
f := λ i, linear_map.cod_restrict (spanned_by_sat R (C2.X i) (b2 i) (s2 i))
((f.f i).dom_restrict (spanned_by_sat R (C1.X i) (b1 i) (s1 i)))
(λ x, (submodule.map_span_le (f.f i) _ _).mpr
(by { rintros x ⟨ℓ, hℓ, hx⟩, subst hx,
apply hcompat, exact hℓ })
(submodule.mem_map_of_mem x.property)),
comm' := by { intros i j hij, ext, cases x,
dsimp [subcomplex_spanned_by, Module.subcomplex_of_compatible_submodules],
rw ← category_theory.comp_apply _ (f.f j),
rw ← f.comm' i j hij, refl }
}.
def subcomplex_spanned_by_map_inj
(R : Type u) [comm_ring R] {ι' : Type*} {c : complex_shape ι'}
(C1 C2 : homological_complex (Module.{w} R) c)
(f : C1 ⟶ C2)
{ι1 ι2 : ι' → Type p}
(b1 : Π (i : ι'), basis (ι1 i) R (C1.X i))
(b2 : Π (i : ι'), basis (ι2 i) R (C2.X i))
(s1 : Π (i : ι'), set (ι1 i)) (s2 : Π (i : ι'), set (ι2 i))
(s1_mono : Π i j, c.rel i j →
submodule.map (C1.d i j) (spanned_by_sat R (C1.X i) (b1 i) (s1 i))
≤ spanned_by_sat R (C1.X j) (b1 j) (s1 j))
(s2_mono : Π i j, c.rel i j →
submodule.map (C2.d i j) (spanned_by_sat R (C2.X i) (b2 i) (s2 i))
≤ spanned_by_sat R (C2.X j) (b2 j) (s2 j))
(hcompat : ∀ i ℓ, ℓ ∈ s1 i → f.f i (b1 i ℓ) ∈ (spanned_by_sat R (C2.X i) (b2 i) (s2 i)))
(hinj : ∀ n, function.injective (f.f n))
: ∀ n, function.injective ((subcomplex_spanned_by_map R C1 C2 f b1 b2 s1 s2 s1_mono s2_mono hcompat).f n) :=
begin
rintros n ⟨x, hx⟩ ⟨y, hy⟩ hxy,
apply subtype.eq, change x = y,
refine @hinj n x y _,
have := congr_arg subtype.val hxy,
exact this
end
def subcomplex_spanned_by_map_comp
(R : Type u) [comm_ring R] {ι' : Type*} {c : complex_shape ι'}
(C1 C2 C3 : homological_complex (Module.{w} R) c)
(f : C1 ⟶ C2) (g : C2 ⟶ C3)
{ι1 ι2 ι3 : ι' → Type p}
(b1 : Π (i : ι'), basis (ι1 i) R (C1.X i))
(b2 : Π (i : ι'), basis (ι2 i) R (C2.X i))
(b3 : Π (i : ι'), basis (ι3 i) R (C3.X i))
(s1 : Π (i : ι'), set (ι1 i)) (s2 : Π (i : ι'), set (ι2 i)) (s3 : Π (i : ι'), set (ι3 i))
(s1_mono : Π i j, c.rel i j →
submodule.map (C1.d i j) (spanned_by_sat R (C1.X i) (b1 i) (s1 i))
≤ spanned_by_sat R (C1.X j) (b1 j) (s1 j))
(s2_mono : Π i j, c.rel i j →
submodule.map (C2.d i j) (spanned_by_sat R (C2.X i) (b2 i) (s2 i))
≤ spanned_by_sat R (C2.X j) (b2 j) (s2 j))
(s3_mono : Π i j, c.rel i j →
submodule.map (C3.d i j) (spanned_by_sat R (C3.X i) (b3 i) (s3 i))
≤ spanned_by_sat R (C3.X j) (b3 j) (s3 j))
(h12 : ∀ i ℓ, ℓ ∈ s1 i → f.f i (b1 i ℓ) ∈ (spanned_by_sat R (C2.X i) (b2 i) (s2 i)))
(h23 : ∀ i ℓ, ℓ ∈ s2 i → g.f i (b2 i ℓ) ∈ (spanned_by_sat R (C3.X i) (b3 i) (s3 i)))
: subcomplex_spanned_by_map R C1 C2 f b1 b2 s1 s2 s1_mono s2_mono h12
≫ subcomplex_spanned_by_map R C2 C3 g b2 b3 s2 s3 s2_mono s3_mono h23
= subcomplex_spanned_by_map R C1 C3 (f ≫ g) b1 b3 s1 s3 s1_mono s3_mono
(λ i ℓ hℓ, (submodule.map_span_le (g.f i) _ (spanned_by_sat R (C3.X i) (b3 i) (s3 i))).mpr
(λ y (hy : y ∈ (b2 i) '' (s2 i)), exists.elim hy (λ m hm, eq.subst hm.right (h23 i m hm.left)))
(set.mem_image_of_mem _ (h12 i ℓ hℓ))
: ∀ i ℓ, ℓ ∈ s1 i → g.f i (f.f i (b1 i ℓ))
∈ (spanned_by_sat R (C3.X i) (b3 i) (s3 i))) :=
begin
ext n : 2,
apply basis.ext (spanned_by_sat_basis R (C1.X n) (b1 n) (s1 n)),
rintro ⟨i, hi⟩,
rw spanned_by_sat_basis_apply,
apply subtype.eq,
refl,
end
end subcomplexes_with_indexing
section subcomplexes
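-- The submodule of singular n-chains generated by those singular simplices whose image
-- is contained in some member of the cover `cov`.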
noncomputable
def bounded_by_submodule (R : Type*) [comm_ring R] {X : Top} (cov : set (set X)) (n : ℕ)
: submodule R (((singular_chain_complex R).obj X).X n)
:= spanned_by_sat R (((singular_chain_complex R).obj X).X n)
((singular_chain_complex_basis R n).get_basis X)
{ p | ∃ s, s ∈ cov ∧ set.range p.2 ⊆ s }
lemma bounded_by_subcomplex_compat (R : Type) [comm_ring R] {X : Top} (cov : set (set X)) (i j : ℕ)
: submodule.map (((singular_chain_complex R).obj X).d i j) (bounded_by_submodule R cov i)
≤ bounded_by_submodule R cov j :=
begin
by_cases (j + 1 = i),
{ subst h,
refine (submodule.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨s, H, hσ⟩, h⟩, subst h, cases i,
rw ← simplex_to_chain_is_basis,
dsimp [simplex_to_chain],
rw singular_chain_complex_differential_desc,
refine submodule.sum_mem _ _,
intros k _,
rw zsmul_eq_smul_cast R,
refine submodule.smul_mem _ _ _,
refine submodule.subset_span _,
rw simplex_to_chain_is_basis, apply set.mem_image_of_mem,
existsi s,
refine ⟨H, _⟩,
refine subset_trans _ hσ,
exact set.range_comp_subset_range _ _ },
{ rw ← complex_shape.down_rel at h, rw homological_complex.shape' _ i j h, simp, }
end
noncomputable
def bounded_by_submodule_basis (R : Type*) [comm_ring R] {X : Top} (cov : set (set X)) (n : ℕ)
: basis { p : Σ (i : unit), Top.of (topological_simplex n) ⟶ X // ∃ s, s ∈ cov ∧ set.range p.2 ⊆ s }
R (bounded_by_submodule R cov n) :=
spanned_by_sat_basis R (((singular_chain_complex R).obj X).X n)
((singular_chain_complex_basis R n).get_basis X)
{ p | ∃ s, s ∈ cov ∧ set.range p.2 ⊆ s }
noncomputable
def bounded_by_subcomplex (R : Type*) [comm_ring R] {X : Top} (cov : set (set X))
: chain_complex (Module R) ℕ :=
@subcomplex_spanned_by R _ ℕ (complex_shape.down ℕ)
((singular_chain_complex R).obj X)
(λ n, Σ p : unit, Top.of (topological_simplex n) ⟶ X)
(λ n, (singular_chain_complex_basis R n).get_basis X)
(λ n, λ p, ∃ s, s ∈ cov ∧ set.range p.2 ⊆ s)
(λ i j hij, bounded_by_subcomplex_compat R cov i j)
lemma bounded_by_subcomplex_eq_bounded_by_submodule (R : Type*) [comm_ring R] {X : Top}
(cov : set (set X)) (n : ℕ)
: (bounded_by_subcomplex R cov).X n = Module.of R (bounded_by_submodule R cov n) := rfl
lemma bounded_by_submodule_refinement (R : Type) [comm_ring R] {X : Top} (cov cov' : set (set X))
(h : ∀ s, s ∈ cov → ∃ s', s' ∈ cov' ∧ s ⊆ s') (n : ℕ)
: bounded_by_submodule R cov n ≤ bounded_by_submodule R cov' n :=
begin
refine submodule.span_mono _,
apply set.image_subset,
rintros ⟨i, σ⟩ ⟨s, hs1, hs2⟩,
obtain ⟨t, ht, hst⟩ := h s hs1,
exact ⟨t, ht, subset_trans hs2 hst⟩
end
-- handles both intersecting a cover with a subset and refining covers!
noncomputable
def bounded_by_subcomplex_map (R : Type) [comm_ring R] {X Y : Top} (f : X ⟶ Y)
(covX : set (set X)) (covY : set (set Y))
(H : ∀ s, s ∈ covX → ∃ t, t ∈ covY ∧ f '' s ⊆ t)
: bounded_by_subcomplex R covX ⟶ bounded_by_subcomplex R covY :=
subcomplex_spanned_by_map R _ _ ((singular_chain_complex R).map f) _ _ _ _ _ _ (by {
rintros n ⟨i, σ⟩ ⟨s, hs, hσ⟩, cases i,
rw ← simplex_to_chain_is_basis, dsimp [simplex_to_chain],
rw singular_chain_complex_map,
refine submodule.subset_span _,
refine ⟨⟨(), σ ≫ f⟩, _, _⟩,
{ obtain ⟨t, ht, hst⟩ := H s hs,
exact ⟨t, ht, subset_trans (subset_of_eq (set.range_comp _ _))
(subset_trans (set.image_subset f hσ) hst)⟩ },
{ rw ← simplex_to_chain_is_basis, refl } })
lemma bounded_by_subcomplex_map_comp (R : Type) [comm_ring R] {X Y Z : Top}
(f : X ⟶ Y) (g : Y ⟶ Z) (covX : set (set X)) (covY : set (set Y)) (covZ : set (set Z))
(H : ∀ s, s ∈ covX → ∃ t, t ∈ covY ∧ f '' s ⊆ t)
(H' : ∀ t, t ∈ covY → ∃ u, u ∈ covZ ∧ g '' t ⊆ u)
: bounded_by_subcomplex_map R f covX covY H ≫ bounded_by_subcomplex_map R g covY covZ H'
= bounded_by_subcomplex_map R (f ≫ g) covX covZ (λ s hs, exists.elim (H s hs) (λ t ht,
exists.elim (H' t ht.left) (λ u hu, ⟨u, hu.left, subset_trans (subset_of_eq (set.image_comp g f s))
(subset_trans (set.image_subset g ht.right) hu.right)⟩))) :=
begin
delta bounded_by_subcomplex_map,
rw subcomplex_spanned_by_map_comp,
congr,
symmetry, apply (singular_chain_complex R).map_comp
end
lemma bounded_by_subcomplex_map_mono (R : Type) [comm_ring R] {X Y : Top}
(f : X ⟶ Y) (hf : function.injective f) (covX : set (set X)) (covY : set (set Y))
(H : ∀ s, s ∈ covX → ∃ t, t ∈ covY ∧ f '' s ⊆ t)
: category_theory.mono (bounded_by_subcomplex_map R f covX covY H) :=
begin
apply_with homological_complex.mono_of_eval {instances := ff},
intro, rw Module.mono_iff_injective,
delta bounded_by_subcomplex_map, apply subcomplex_spanned_by_map_inj,
apply singular_chain_complex_map_inj,
exact hf
end
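-- Chains generated by singular simplices whose image is bounded with diameter at most ε;
-- this is `bounded_by_submodule` for the cover consisting of all such subsets of X.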
noncomputable
def bounded_diam_submodule (R : Type*) [comm_ring R] (X : Type*) [pseudo_metric_space X]
(ε : nnreal) (n : ℕ)
: submodule R (((singular_chain_complex R).obj (Top.of X)).X n) :=
bounded_by_submodule R { S : set (Top.of X) | @metric.bounded X _ S ∧ @metric.diam X _ S ≤ ε } n
noncomputable
def bounded_diam_subcomplex (R : Type*) [comm_ring R] (X : Type*) [pseudo_metric_space X]
(ε : nnreal) : chain_complex (Module R) ℕ :=
bounded_by_subcomplex R { S : set (Top.of X) | @metric.bounded X _ S ∧ @metric.diam X _ S ≤ ε }
lemma bounded_diam_submodule_eq_bounded_diam_submodule (R : Type*) [comm_ring R] {X : Type}
[pseudo_metric_space X] (ε : nnreal) (n : ℕ)
: (bounded_diam_subcomplex R X ε).X n = Module.of R (bounded_diam_submodule R X ε n) := rfl
lemma bounded_diam_submodule_monotone (R : Type) [comm_ring R] {X : Type*} [pseudo_metric_space X]
(ε δ : nnreal) (h : ε ≤ δ) (n : ℕ)
: bounded_diam_submodule R X ε n ≤ bounded_diam_submodule R X δ n :=
begin
dsimp [bounded_diam_submodule],
apply bounded_by_submodule_refinement,
rintros s ⟨hs1, hs2⟩,
exact ⟨s, ⟨hs1, le_trans hs2 (nnreal.coe_le_coe.mpr h)⟩, subset_refl s⟩
end
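-- Chains generated by singular simplices whose image lies in the subset S,
-- i.e. `bounded_by_submodule` for the singleton cover {S}.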
noncomputable
def subset_submodule (R : Type*) [comm_ring R] (X : Type*) [topological_space X]
(S : set X) (n : ℕ) : submodule R (((singular_chain_complex R).obj (Top.of X)).X n) :=
bounded_by_submodule R {S} n
noncomputable
def subset_subcomplex (R : Type*) [comm_ring R] (X : Type*) [topological_space X]
(S : set X) : chain_complex (Module R) ℕ :=
bounded_by_subcomplex R ({S} : set (set (Top.of X)))
lemma subset_submodule_eq_subset_submodule (R : Type*) [comm_ring R] (X : Type)
[topological_space X] (S : set X) (n : ℕ)
: (subset_subcomplex R X S).X n = Module.of R (subset_submodule R X S n) := rfl
lemma subset_subcomplex_monotone (R : Type*) [comm_ring R]
{X : Type*} [topological_space X] (S T : set X) (h : S ⊆ T) (n : ℕ)
: subset_submodule R X S n ≤ subset_submodule R X T n :=
begin
dsimp [subset_submodule],
apply bounded_by_submodule_refinement,
simp, assumption
end
lemma subset_subcomplex_univ (R : Type*) [comm_ring R] {X : Type*} [topological_space X] (n : ℕ)
: subset_submodule R X (@set.univ X) n = ⊤ :=
begin
refine eq.trans _ ((singular_chain_complex_basis R n).spanning _),
dsimp [subset_submodule, bounded_by_submodule, spanned_by_sat],
congr,
ext, simp, split,
{ rintro ⟨b, σ, hσ⟩, subst hσ, existsi unit.star, existsi σ,
refine eq.trans (singular_chain_complex_map R n σ (𝟙 (Top.of (topological_simplex n)))) _,
dsimp [singular_chain_complex_basis, functor_basis.get_basis],
rw basis.mk_apply, dsimp [simplex_to_chain], rw singular_chain_complex_map },
{ rintros ⟨a, σ, hσ⟩, cases a, subst hσ, existsi (), existsi σ, symmetry,
refine eq.trans (singular_chain_complex_map R n σ (𝟙 (Top.of (topological_simplex n)))) _,
dsimp [singular_chain_complex_basis, functor_basis.get_basis],
rw basis.mk_apply, dsimp [simplex_to_chain], rw singular_chain_complex_map }
end
-- Should probably generalize this to a statement about covers/bounds
lemma singular_chain_complex_map_subset_subcomplex (R : Type*) [comm_ring R]
(X Y : Type*) [topological_space X] [topological_space Y]
(S : set X) (f : C(X, Y)) (n : ℕ)
: submodule.map ((@category_theory.functor.map _ _ _ _ (singular_chain_complex R) (Top.of X) (Top.of Y) f).f n)
(subset_submodule R X S n)
≤ subset_submodule R Y (f '' S) n :=
begin
refine (submodule.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨s, hs, hσ⟩, h'⟩, cases i, simp at hs, subst hs,
refine submodule.subset_span _,
refine exists.intro ⟨(), σ ≫ f⟩ _,
simp [Top.to_sSet'], split,
{ transitivity f '' set.range σ,
{ exact subset_of_eq (set.range_comp _ _) },
{ exact set.image_subset f hσ } },
{ symmetry, rw ← h',
dsimp [functor_basis.get_basis], rw [basis.mk_apply, basis.mk_apply],
dsimp [singular_chain_complex_basis, functor_basis.get_basis, simplex_to_chain],
rw [singular_chain_complex_map, singular_chain_complex_map, singular_chain_complex_map],
refl }
end
lemma subset_subcomplex_le_bounded_by_subcomplex (R : Type*) [comm_ring R] {X : Type*}
[topological_space X] (cov : set (set X)) (s : set X) (hs : s ∈ cov) (n : ℕ)
: subset_submodule R X s n ≤ bounded_by_submodule R cov n :=
begin
dsimp [subset_submodule],
apply bounded_by_submodule_refinement,
simp,
exact ⟨s, hs, subset_refl s⟩
end
lemma metric.lebesgue_number_lemma {M : Type*} [pseudo_metric_space M] (hCompact : compact_space M)
(cov : set (set M)) (cov_open : ∀ s, s ∈ cov → is_open s) (hcov : ⋃₀ cov = ⊤)
  (cov_nonempty : cov.nonempty) -- needed: if M is empty, the cover itself could be empty
: ∃ δ : nnreal, 0 < δ ∧ (∀ S : set M, metric.diam S < δ → ∃ U, U ∈ cov ∧ S ⊆ U) :=
match lebesgue_number_lemma_sUnion (is_compact_univ_iff.mpr hCompact) cov_open (set.univ_subset_iff.mpr hcov) with
| ⟨n, H, hn⟩ := match metric.mem_uniformity_dist.mp H with
| ⟨ε, ε_pos, hε⟩ := ⟨ε.to_nnreal, real.to_nnreal_pos.mpr ε_pos, λ S hS,
match em S.nonempty with
| or.inl ⟨x, hx⟩ := match hn x (set.mem_univ x) with
| ⟨U, hU, hU'⟩ := ⟨U, hU, λ y hy, hU' y (@hε x y (lt_of_le_of_lt (metric.dist_le_diam_of_mem metric.bounded_of_compact_space hx hy) (lt_of_lt_of_eq hS (real.coe_to_nnreal _ (le_of_lt ε_pos)))))⟩
end
| or.inr h' := match cov_nonempty with
| ⟨U, hU⟩ := ⟨U, hU, λ y hy, false.elim (eq.subst (set.not_nonempty_iff_eq_empty.mp h') hy : y ∈ (∅ : set M))⟩
end
end⟩
end
end
end subcomplexes
section
parameters {ι : Type} [fintype ι]
parameters {D : set (ι → ℝ)} (hConvex : convex ℝ D)
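-- `convex_combination vertices coeffs` is the point ∑ i, coeffs i • vertices i of D;
-- it lies in D because D is convex and the coefficients form a point of the standard simplex.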
def convex_combination {ι' : Type} [fintype ι'] [nonempty ι']
(vertices : ι' → D) (coeffs : std_simplex ℝ ι') : D :=
⟨finset.univ.sum (λ i, coeffs.val i • (vertices i).val),
convex.sum_mem hConvex (λ i _, coeffs.property.left i) coeffs.property.right
(λ i _, (vertices i).property)⟩
lemma convex_combination_partial_app_lipschitz {ι' : Type} [fintype ι'] [nonempty ι']
(vertices : ι' → D)
: lipschitz_with (fintype.card ι' * ∥subtype.val ∘ vertices∥₊) (convex_combination vertices) :=
begin
rw lipschitz_with_iff_dist_le_mul, intros x y,
rw [subtype.dist_eq, dist_eq_norm],
simp [convex_combination],
rw ← finset.sum_sub_distrib,
refine le_trans (norm_sum_le _ _) _,
convert le_of_eq_of_le (congr_arg finset.univ.sum (funext (λ i, congr_arg norm (sub_smul (x.val i) (y.val i) (vertices i).val).symm))) _,
refine le_of_eq_of_le (congr_arg finset.univ.sum (funext (λ i, norm_smul _ _))) _,
refine le_trans (finset.sum_le_sum (λ i _, mul_le_mul (le_refl ∥x.val i - y.val i∥) (norm_le_pi_norm (subtype.val ∘ vertices) i) (norm_nonneg _) (norm_nonneg _))
: finset.univ.sum (λ i, ∥x.val i - y.val i∥ * ∥(vertices i).val∥)
≤ finset.univ.sum (λ i, ∥x.val i - y.val i∥ * ∥subtype.val ∘ vertices∥)) _,
rw ← finset.sum_mul,
rw mul_right_comm, apply mul_le_mul,
{ dsimp [fintype.card],
convert le_of_le_of_eq _ (@finset.sum_const _ _ (@finset.univ ι' _) _ (dist x y)), simp,
apply finset.sum_le_sum,
intros i _,
rw [← real.dist_eq, subtype.dist_eq],
apply dist_le_pi_dist },
{ refl },
{ apply norm_nonneg },
{ apply mul_nonneg, { norm_cast, apply zero_le }, { apply dist_nonneg } }
end
lemma convex_combination_cont {ι' : Type} [fintype ι'] [nonempty ι']
: continuous (function.uncurry (@convex_combination ι' _ _)) :=
have continuous (λ p : (ι' → (ι → ℝ)) × (ι' → ℝ), finset.univ.sum (λ i, p.snd i • p.fst i)),
by { continuity, simp, continuity,
{ exact continuous.snd' (continuous_apply i_1) },
{ exact continuous.fst' (continuous_apply_apply i_1 i) } },
(homeomorph.subtype_prod_equiv_prod.trans
(homeomorph.Pi_to_subtype.prod_congr (homeomorph.refl _))).comp_continuous_iff'.mp
(continuous.congr
(continuous.cod_restrict (this.comp continuous_subtype_val)
(λ p, convex.sum_mem hConvex (λ i _, p.property.right.left i)
p.property.right.right
(λ i _, p.property.left i)))
(by { rintro ⟨p, h⟩, refl }))
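-- The affine singular n-simplex spanned by `vertices`: a point p of the standard simplex
-- (barycentric coordinates) is sent to the convex combination ∑ i, p i • vertices i.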
def singular_simplex_of_vertices {n : ℕ}
(vertices : fin (n + 1) → D) : C(topological_simplex n, Top.of D) :=
⟨convex_combination vertices, convex_combination_cont.comp (continuous.prod.mk vertices)⟩.
lemma simplex_category.to_Top'_map_comp_affine
{x y : simplex_category} (f : x ⟶ y) (vertices : y → D)
: simplex_category.to_Top'.map f ≫ singular_simplex_of_vertices vertices
= singular_simplex_of_vertices (λ j, vertices (f j)) :=
begin
ext p k,
delta simplex_category.to_Top',
dsimp [continuous_map.has_coe_to_fun],
simp only [simplex_category.to_Top'_map, singular_simplex_of_vertices],
dsimp [continuous_map.has_coe_to_fun, convex_combination],
simp, simp_rw finset.sum_mul,
refine eq.trans _
(@finset.sum_fiberwise_of_maps_to _ _ _ _ _ finset.univ finset.univ
f (λ _ _, finset.mem_univ _)
(λ t, p.val t * (vertices (f t)).val k)),
congr, ext j,
apply finset.sum_congr,
{ ext i, simp },
{ intros t ht, simp at ht, rw ← ht, refl }
end
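-- The submodule (resp. subcomplex) of singular chains generated by affine simplices,
-- i.e. those of the form `singular_simplex_of_vertices vs`.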
noncomputable
def affine_submodule (R : Type*) [comm_ring R] (n : ℕ)
: submodule R (((singular_chain_complex R).obj (Top.of D)).X n) :=
spanned_by_sat R (((singular_chain_complex R).obj (Top.of D)).X n)
((singular_chain_complex_basis R n).get_basis (Top.of D))
{ σ | ∃ vs : fin (n + 1) → D, σ.2 = singular_simplex_of_vertices vs }
noncomputable
def affine_subcomplex (R : Type*) [comm_ring R] : chain_complex (Module R) ℕ :=
subcomplex_spanned_by R ((singular_chain_complex R).obj (Top.of D))
(λ n, (singular_chain_complex_basis R n).get_basis (Top.of D))
(λ n, { σ | ∃ vs : fin (n + 1) → D, σ.2 = singular_simplex_of_vertices vs })
(by { intros i j h, simp at h, subst h,
refine (submodule.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨vs, hvs⟩, h⟩, subst h, cases i, dsimp at hvs,
subst hvs,
dsimp [singular_chain_complex_basis, functor_basis.get_basis],
rw [basis.mk_apply],
dsimp [simplex_to_chain],
rw [singular_chain_complex_map],
rw singular_chain_complex_differential_desc,
simp_rw zsmul_eq_smul_cast R,
refine submodule.sum_smul_mem _ _ _,
intros i _,
refine submodule.subset_span _,
refine ⟨⟨(), simplex_category.to_Top'.map (simplex_category.δ i)
≫ 𝟙 (Top.of (topological_simplex (j + 1)))
≫ singular_simplex_of_vertices vs⟩, _⟩,
rw basis.mk_apply,
split,
{ existsi (λ j, vs (simplex_category.δ i j)),
simp,
rw @category_theory.category.id_comp Top _
(Top.of (topological_simplex (j + 1)))
(Top.of D)
(singular_simplex_of_vertices vs),
apply simplex_category.to_Top'_map_comp_affine },
{ apply singular_chain_complex_map } })
lemma affine_submodule_eq_affine_submodule (R : Type*) [comm_ring R] (n : ℕ)
: (affine_subcomplex R).X n = Module.of R (affine_submodule R n) := rfl
lemma bounded_diam_subcomplex_le_cover_subcomplex
(hCompact : is_compact D) (R : Type*) [comm_ring R]
(cov : set (set D)) (cov_open : ∀ s, s ∈ cov → is_open s) (hcov : ⋃₀ cov = ⊤)
(cov_nonempty : cov.nonempty) (n : ℕ)
: ∃ δ, 0 < δ ∧ bounded_diam_submodule R D δ n ≤ bounded_by_submodule R cov n :=
begin
obtain ⟨δ, hδ, Hδ⟩ := metric.lebesgue_number_lemma (is_compact_iff_compact_space.mp hCompact) cov
cov_open hcov cov_nonempty,
refine ⟨δ/2, nnreal.half_pos hδ, _⟩,
refine submodule.span_le.mpr _,
rintros C ⟨⟨i, vs⟩, ⟨s, hs, hvs⟩, H⟩, subst H, cases i,
refine submodule.subset_span _,
apply set.mem_image_of_mem,
obtain ⟨U, hU, hsU⟩ := Hδ s _,
{ existsi U,
refine ⟨hU, _⟩,
exact subset_trans hvs hsU },
{ refine lt_of_le_of_lt hs.right _,
rw nnreal.coe_lt_coe,
apply nnreal.half_lt_self, symmetry, exact ne_of_lt hδ }
end
lemma finite_is_bounded {α : Type*} [hα : nonempty α] [linear_order α] {s : set α} (h : s.finite)
: bdd_above s :=
begin
cases h,
by_cases h' : s.nonempty,
{ rw ← set.nonempty_coe_sort at h',
rw ← @finset.univ_nonempty_iff _ h at h',
refine exists.intro ((@finset.univ _ h).max' h') _,
intros i hi,
exact finset.le_max' (@finset.univ _ h) ⟨i, hi⟩ (@finset.mem_univ _ h _) },
{ apply hα.elim, intro a, existsi a, intros i hi, exfalso, apply h', existsi i, exact hi }
end
lemma csupr_prod {α : Type*} {β : Type*} {γ : Type*}
[nonempty β] [nonempty γ] [conditionally_complete_lattice α]
{f : β × γ → α} (Hbound : bdd_above (set.range f)) :
(⨆ (x : β × γ), f x) = ⨆ (i : β) (j : γ), f (i, j) :=
begin
obtain ⟨B, hB⟩ := Hbound, simp [upper_bounds] at hB,
apply eq_of_forall_ge_iff, intro c,
split,
{ intro H, apply csupr_le, intro i, apply csupr_le, intro j,
rw csupr_le_iff at H, apply H,
{ existsi B, simp [upper_bounds], exact hB } },
{ intro H, apply csupr_le, rintro ⟨i, j⟩,
rw csupr_le_iff at H, specialize H i, rw csupr_le_iff at H, exact H j,
{ existsi B, simp [upper_bounds], intros j, exact hB i j rfl },
{ existsi B, simp [upper_bounds], intro i',
apply csupr_le, intro j', exact hB i' j' rfl } }
end
lemma affine_simplex_dist_maximized {n : ℕ} (vertices : fin (n + 1) → D)
(x0 : D) (p : topological_simplex n)
: dist x0 (singular_simplex_of_vertices vertices p) ≤ ⨆ (i : fin (n + 1)), dist x0 (vertices i) :=
begin
rw [subtype.dist_eq, dist_eq_norm],
have : x0 = singular_simplex_of_vertices (λ _, x0) p,
{ apply subtype.eq, dsimp [singular_simplex_of_vertices, convex_combination],
rw ← finset.sum_smul,
refine eq.trans (one_smul _ _).symm (congr_arg2 _ _ rfl),
exact p.property.right.symm },
transitivity ∥finset.univ.sum (λ (i : fin (n + 1)), p.val i • (x0.val - (vertices i).val))∥,
{ apply le_of_eq, apply congr_arg,
refine eq.trans _ (congr_arg _ (funext (λ i, (smul_sub (p.val i) x0.val (vertices i).val).symm))),
refine eq.trans _ finset.sum_sub_distrib.symm,
exact congr_arg2 _ (congr_arg subtype.val this) rfl, },
{ refine le_trans (norm_sum_le _ _) _,
simp_rw [norm_smul],
let d := ⨆ (i : fin (n + 1)), dist x0 (vertices i),
have d_spec : ∀ j, ∥x0.val - (vertices j).val∥ ≤ d,
{ intro j,
rw [← dist_eq_norm, subtype.val_eq_coe, subtype.val_eq_coe,
← subtype.dist_eq x0 (vertices j)],
refine le_csupr _ j,
apply finite_is_bounded, apply set.finite_range },
convert (finset.sum_le_sum (λ i _, mul_le_mul (le_refl ∥p.val i∥) (d_spec i)
(norm_nonneg _) (norm_nonneg _))),
rw ← finset.sum_mul,
simp_rw [real.norm_eq_abs],
symmetry, convert one_mul d,
convert p.property.right,
ext, simp, apply p.property.left }
end
-- This should probably be used in other places
lemma apply_affine_to_vertex_eq_vertices_apply
{n : ℕ} (vertices : fin (n + 1) → D) (i : fin (n + 1))
: singular_simplex_of_vertices vertices (vertex n i) = vertices i :=
begin
apply subtype.eq, simp [singular_simplex_of_vertices, convex_combination],
refine eq.trans (finset.sum_eq_single_of_mem i (finset.mem_univ i) _) _,
{ intros b _ hb, convert zero_smul _ (vertices b).val,
convert vertex_coord_zero n i b hb.symm },
{ convert one_smul _ (vertices i).val, convert vertex_coord_one n i }
end
lemma vertices_apply_mem_range_singular_simplex_of_vertices
{n : ℕ} (vertices : fin (n + 1) → D) (i : fin (n + 1))
: vertices i ∈ set.range (singular_simplex_of_vertices vertices) :=
⟨vertex n i, apply_affine_to_vertex_eq_vertices_apply vertices i⟩
lemma singular_simplex_of_vertices_bounded {n : ℕ} (vertices : fin (n + 1) → D)
: @metric.bounded D _ (set.range (singular_simplex_of_vertices vertices)) :=
begin
rw metric.bounded_range_iff,
existsi (((n + 1 : ℝ) * ∥subtype.val ∘ vertices∥) * metric.diam (topological_simplex n)),
intros x y,
refine le_trans ((convex_combination_partial_app_lipschitz vertices).dist_le_mul x y) _,
refine mul_le_mul (le_of_eq _) _ _ _,
{ simp },
{ rw subtype.dist_eq, refine metric.dist_le_diam_of_mem _ x.property y.property,
exact bounded_std_simplex (fin (n + 1)) },
{ apply dist_nonneg },
{ apply mul_nonneg, { norm_cast, apply zero_le }, { apply norm_nonneg } }
end
lemma affine_simplex_diam {n : ℕ} (vertices : fin (n + 1) → D)
: @metric.diam D _ (set.range (singular_simplex_of_vertices vertices))
= ⨆ (i j : fin (n + 1)), dist (vertices i) (vertices j) :=
begin
apply le_antisymm,
{ have : 0 ≤ ⨆ (x : fin (n + 1) × fin (n + 1)), dist (vertices x.fst) (vertices x.snd),
{ refine le_csupr_of_le _ (0, 0) _,
exact finite_is_bounded (set.finite_range _),
apply dist_nonneg },
refine le_of_le_of_eq _ (csupr_prod (finite_is_bounded (set.finite_range (λ p : fin (n + 1) × fin (n + 1), dist (vertices p.1) (vertices p.2))))),
apply ennreal.to_real_le_of_le_of_real this,
dsimp [emetric.diam],
refine supr_le _, intro, refine supr_le _, rintro ⟨p, Hp⟩, subst Hp,
refine supr_le _, intro, refine supr_le _, rintro ⟨q, Hq⟩, subst Hq,
rw edist_le_of_real this,
refine le_trans (affine_simplex_dist_maximized vertices (singular_simplex_of_vertices vertices p) q) _,
refine le_of_le_of_eq _ (csupr_prod (finite_is_bounded (set.finite_range (λ p : fin (n + 1) × fin (n + 1), dist (vertices p.1) (vertices p.2))))).symm,
apply csupr_mono,
{ apply finite_is_bounded, apply set.finite_range },
{ intro i, dsimp,
rw dist_comm,
exact affine_simplex_dist_maximized vertices (vertices i) p } },
{ apply csupr_le, intro i, apply csupr_le, intro j,
apply metric.dist_le_diam_of_mem,
{ apply singular_simplex_of_vertices_bounded },
{ apply vertices_apply_mem_range_singular_simplex_of_vertices },
{ apply vertices_apply_mem_range_singular_simplex_of_vertices } }
end
lemma cone_construction_lift_vertex_span {n : ℕ} (vertices : fin (n + 1) → D) (v' : D)
: @cone_construction_lift_simplex (Top.of D) v' (hConvex.contraction v') n
(singular_simplex_of_vertices vertices)
= singular_simplex_of_vertices (fin.cons v' vertices) :=
begin
ext x : 1,
obtain ⟨⟨t, y⟩, h⟩ := q_surj n x,
delta cone_construction_lift_simplex,
transitivity,
apply @lift_along_quot_map_spec (Top.of (unit_interval × topological_simplex n))
(Top.of (topological_simplex (n + 1)))
(Top.of D)
⟨function.uncurry (q_map n), q_continuous n⟩ _ _ _ x (t, y) h,
subst h, cases v' with v' hv',
delta convex.contraction star_convex.contraction,
apply subtype.eq, dsimp [cylinder, singular_simplex_of_vertices, convex_combination],
refine (eq.trans (fin.sum_univ_succ _) _).symm,
rw finset.smul_sum,
congr,
ext i j, simp, rw ← mul_assoc, congr,
dsimp [q_map],
split_ifs,
{ exfalso, exact fin.succ_ne_zero i h },
{ congr, exact fin.pred_above_succ_above (0 : fin (n + 1)) i }
end
lemma boundary_of_cone_construction_of_convex_contract_deg0 (R : Type*) [comm_ring R]
(v' : D)
(c : ((singular_chain_complex R).obj (Top.of D)).X 0)
: ((singular_chain_complex R).obj (Top.of D)).d 1 0
(@cone_construction_hom R _ (Top.of D)
v'
(hConvex.contraction v')
0
c)
= c - @ε_hom R _ (Top.of D) v' 0 c :=
begin
have := (@cone_construction R _ (Top.of D) v' (hConvex.contraction v')).comm 0,
rw ← sub_eq_iff_eq_add at this,
simp at this,
symmetry,
refine eq.trans _ (congr_fun (congr_arg coe_fn this) c),
simp, refl
end
lemma boundary_of_cone_construction_of_convex_contract (R : Type*) [comm_ring R]
{n : ℕ} (v' : D)
(c : ((singular_chain_complex R).obj (Top.of D)).X (n + 1))
: ((singular_chain_complex R).obj (Top.of D)).d (n + 2) (n + 1)
(@cone_construction_hom R _ (Top.of D)
v'
(hConvex.contraction v')
(n + 1)
c)
= c - (@cone_construction_hom R _ (Top.of D)
v'
(hConvex.contraction v')
n
(((singular_chain_complex R).obj (Top.of D)).d (n + 1) n c)) :=
begin
have := congr_fun (congr_arg coe_fn ((@cone_construction R _ (Top.of D) v' (hConvex.contraction v')).comm (n + 1))) c,
simp [ε, ε_hom, ε_map, cone_construction, cone_construction_complex_hom] at this,
rw [@add_comm (((singular_chain_complex R).obj (Top.of D)).X (n + 1)), ← sub_eq_iff_eq_add] at this,
exact this.symm
end
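-- The barycenter of the standard n-simplex: each of its n + 1 coordinates equals (n + 1)⁻¹.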
noncomputable
def barycenter (n : ℕ) : topological_simplex n :=
⟨(λ _, (n + 1)⁻¹), ⟨(λ _, inv_nonneg.mp (by { simp, exact le_of_lt (nat.cast_add_one_pos n) })),
by { simp [simplex_category.to_Top'_obj], apply mul_inv_cancel,
exact nat.cast_add_one_ne_zero n }⟩⟩
noncomputable
def convex.barycenter' {n : ℕ} (vertices : fin (n + 1) → D) : D :=
convex_combination vertices (barycenter n)
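-- Standard estimate: the barycenter of an affine simplex is within distance
-- n/(n+1) · diam(vertices) of each of its vertices.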
lemma barycenter_dist_vertex_bound {n : ℕ} (vertices : fin (n + 1) → D) (i : fin (n + 1))
: dist (hConvex.barycenter' vertices) (vertices i)
≤ n / (n + 1) * metric.diam (set.range vertices) :=
begin
norm_cast,
rw [subtype.dist_eq, dist_eq_norm],
have : vertices i = singular_simplex_of_vertices (λ _, vertices i) (barycenter n),
{ apply subtype.eq, dsimp [singular_simplex_of_vertices, convex_combination],
rw ← finset.sum_smul,
refine eq.trans (one_smul _ _).symm (congr_arg2 _ _ rfl),
exact (barycenter n).property.right.symm },
rw this,
dsimp [convex.barycenter', singular_simplex_of_vertices, convex_combination],
refine le_of_eq_of_le (congr_arg norm finset.sum_sub_distrib.symm) _,
dsimp [barycenter],
transitivity ((n + 1)⁻¹ : ℝ) * ∥finset.univ.sum (λ j : fin (n + 1), (vertices j).val - (vertices i).val)∥,
{ apply le_of_eq,
rw ← abs_eq_self.mpr (_ : 0 ≤ (n + 1 : ℝ)),
swap, norm_cast, apply zero_le,
rw [← real.norm_eq_abs],
rw ← norm_inv,
refine eq.trans _ (norm_smul (n + 1 : ℝ)⁻¹ (finset.univ.sum (λ (j : fin (n + 1)), (vertices j).val - (vertices i).val))),
rw finset.smul_sum,
congr,
ext, simp, rw mul_sub, congr; norm_cast },
{ rw [div_eq_inv_mul, mul_assoc],
refine mul_le_mul _ _ _ _,
{ norm_cast },
{ refine le_trans (norm_sum_le _ _) _,
refine le_of_le_of_eq (@finset.sum_le_sum _ _ _ _ (λ j, if i = j then 0 else metric.diam (set.range vertices)) finset.univ _) _,
{ intros j _,
dsimp, split_ifs,
{ subst h, simp },
{ rw [← dist_eq_norm, ← subtype.dist_eq],
apply metric.dist_le_diam_of_mem,
{ apply metric.bounded_of_finite, apply set.finite_range },
{ apply set.mem_range_self },
{ apply set.mem_range_self } } },
{ dsimp,
refine eq.trans (@finset.sum_filter_of_ne _ _ finset.univ (λ j, ite (i = j) 0 (metric.diam (set.range vertices))) _ (λ j, i ≠ j) _ (λ (j : fin (n + 1)) _ hj, (ite_ne_left_iff.mp hj).left)).symm _,
refine eq.trans (finset.sum_congr rfl (λ j hj, ite_eq_right_iff.mpr (λ h', absurd h' (finset.mem_filter.mp hj).right))) _,
refine eq.trans (finset.sum_const _) _,
simp,
left,
rw finset.filter_ne finset.univ i,
rw finset.card_erase_of_mem (finset.mem_univ i),
simp } },
{ apply norm_nonneg },
{ apply inv_nonneg.mpr, norm_cast, simp, } }
end
end
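-- Barycentric subdivision as a natural transformation on singular chains.
-- In degree 0 it is the identity; in degree n + 1 it is defined on the universal simplex
-- (the identity map of the standard (n+1)-simplex) by recursively subdividing its boundary
-- and coning the result off at the barycenter, and is then extended to arbitrary spaces by
-- naturality via `map_out` of the basis of singular chains.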
noncomputable
def barycentric_subdivision_in_deg (R : Type*) [comm_ring R]
: Π (n : ℕ), (singular_chain_complex R ⋙ homological_complex.eval _ _ n)
⟶ (singular_chain_complex R ⋙ homological_complex.eval _ _ n)
| 0 := 𝟙 _
| (n + 1) := (singular_chain_complex_basis R (n + 1)).map_out
(singular_chain_complex R ⋙ homological_complex.eval _ _ (n + 1))
(λ _, @cone_construction_hom R _ (Top.of (topological_simplex (n + 1)))
(barycenter (n + 1))
((convex_std_simplex ℝ (fin (n + 2))).contraction (barycenter (n + 1)))
n
((barycentric_subdivision_in_deg n).app (Top.of (topological_simplex (n + 1)))
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 1)))).d
(n + 1) n
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R))))
lemma barycentric_subdivision_subset (R : Type) [comm_ring R]
{X : Type*} [topological_space X] (S : set X) (n : ℕ)
: submodule.map ((barycentric_subdivision_in_deg R n).app (Top.of X))
(subset_submodule R X S n)
≤ subset_submodule R X S n :=
begin
refine (linear_map.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨s, hs, hσ⟩, H⟩, subst H, cases i, simp at hs, subst hs,
cases n with n,
{ simp [barycentric_subdivision_in_deg], apply submodule.subset_span,
apply set.mem_image_of_mem, refine ⟨s, rfl, hσ⟩ },
{ dsimp, simp [barycentric_subdivision_in_deg],
rw map_out_desc,
rw simplex_to_chain_is_basis,
dsimp,
have := singular_chain_complex_map_subset_subcomplex R
(topological_simplex (n + 1))
X
set.univ σ (n + 1),
rw [set.image_univ, subset_subcomplex_univ] at this,
refine subset_subcomplex_monotone R _ _ hσ (n + 1) (this _),
exact submodule.mem_map_of_mem submodule.mem_top }
end
lemma barycentric_subdivision_subset' (R : Type) [comm_ring R] (X : Type*) [topological_space X]
(n : ℕ) (σ : C(topological_simplex n, X))
: (barycentric_subdivision_in_deg R n).app (Top.of X) (finsupp.single σ 1)
∈ subset_submodule R X (set.range σ) n :=
barycentric_subdivision_subset R _ n ⟨finsupp.single σ 1,
submodule.subset_span ⟨⟨(), σ⟩, ⟨set.range σ,
set.mem_singleton _,
subset_of_eq rfl⟩,
eq.symm (simplex_to_chain_is_basis R n (Top.of X) σ)⟩, rfl⟩
local attribute [instance] classical.prop_decidable
lemma singular_simplex_of_vertices_eq_id {n : ℕ}
: singular_simplex_of_vertices (convex_std_simplex ℝ (fin (n + 1))) (vertex n)
= 𝟙 (Top.of (topological_simplex n)) :=
begin
ext p i, simp [singular_simplex_of_vertices, convex_combination],
simp_rw [← subtype.val_eq_coe],
transitivity p.val i * (vertex n i).val i,
{ apply finset.sum_eq_single_of_mem _ (finset.mem_univ _),
intros j _ hj,
simp [vertex],
right, simp [simplex_category.to_Top'_map, simplex_category.const],
apply finset.sum_eq_zero,
intros x hx, exfalso, apply hj, simp at hx, exact hx },
{ refine eq.trans _ (mul_one _), apply congr_arg,
exact (vertex_coord_one n i) }
end
lemma simplex_category.to_Top'_map_eq_affine_map {x y : simplex_category} (f : x ⟶ y)
: simplex_category.to_Top'.map f
= singular_simplex_of_vertices (convex_std_simplex ℝ (fin (y.len + 1)))
(λ j, vertex y.len (f j)) :=
begin
refine eq.trans (category_theory.category.comp_id _).symm _,
rw (_ : 𝟙 (simplex_category.to_Top'.obj y) = 𝟙 (Top.of (topological_simplex y.len))),
swap, refl,
rw ← singular_simplex_of_vertices_eq_id,
rw simplex_category.to_Top'_map_comp_affine
end
lemma cone_construction_barycentry_comp_affine_simplex (R : Type) [comm_ring R]
{ι : Type} [fintype ι] {D : set (ι → ℝ)} (hConvex : convex ℝ D)
{n : ℕ} (k : ℕ) (vertices : fin (n + 1) → D)
: @cone_construction_hom R _ (Top.of (topological_simplex n)) (barycenter n)
((convex_std_simplex ℝ (fin (n + 1))).contraction (barycenter n)) k
≫ (@category_theory.functor.map _ _ _ _ (singular_chain_complex R)
(Top.of (topological_simplex n)) _
(singular_simplex_of_vertices hConvex vertices)).f (k + 1)
= (@category_theory.functor.map _ _ _ _ (singular_chain_complex R)
(Top.of (topological_simplex n)) _
(singular_simplex_of_vertices hConvex vertices)).f k
≫ @cone_construction_hom R _ (Top.of D) (hConvex.barycenter' vertices) (hConvex.contraction (hConvex.barycenter' vertices)) k :=
begin
apply cone_construction_hom_naturality,
ext p i, cases p with t p,
delta cylinder convex.barycenter' singular_simplex_of_vertices convex_combination barycenter convex.contraction star_convex.contraction,
simp,
rw [finset.mul_sum, finset.mul_sum],
refine eq.trans finset.sum_add_distrib.symm _,
congr, ext j,
rw [right_distrib, mul_assoc, mul_assoc]
end
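-- Coning at a point b: if every point of S lies within δ of b, then the cone at b of an
-- affine chain of diameter at most δ supported in S is again affine and of diameter at
-- most δ (now one degree higher).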
lemma cone_of_barycenter_sends_bounded_to_bounded (R : Type) [comm_ring R]
{ι : Type} [fintype ι] {D : set (ι → ℝ)} (hConvex : convex ℝ D)
(n : ℕ) (δ : nnreal)
(b : D) (S : set D) (Hb : ∀ x ∈ S, dist b x ≤ δ)
(C : bounded_diam_submodule R D δ n
⊓ subset_submodule R (Top.of D) (S : set (Top.of D)) n ⊓ affine_submodule hConvex R n)
: (@cone_construction_hom R _ (Top.of D) b (hConvex.contraction b) n) C.val
∈ bounded_diam_submodule R D δ (n + 1) ⊓ affine_submodule hConvex R (n + 1) :=
begin
cases C with C hC, dsimp,
by_cases htrivial : (1 : R) = (0 : R),
{ split;
convert submodule.zero_mem _;
exact eq.trans (one_smul _ _).symm (eq.trans (congr_arg2 _ htrivial rfl) (zero_smul _ _)) },
have hnontriv : nontrivial R := ⟨⟨1, 0, htrivial⟩⟩,
dsimp [bounded_diam_submodule, subset_submodule, affine_submodule,
bounded_by_submodule, spanned_by_sat] at hC,
rw [← submodule.mem_inf, ← submodule.mem_inf] at hC,
rw [submodule.inf_spans_free R ((singular_chain_complex_basis R n).get_basis (Top.of D))] at hC,
rw [set.image_inter (@basis.injective _ R _ _ _ _
((singular_chain_complex_basis R n).get_basis (Top.of D))
hnontriv)] at hC,
rw [submodule.inf_spans_free R ((singular_chain_complex_basis R n).get_basis (Top.of D))] at hC,
rw [set.image_inter (@basis.injective _ R _ _ _ _
((singular_chain_complex_basis R n).get_basis (Top.of D))
hnontriv)] at hC,
dsimp [bounded_diam_submodule, bounded_by_submodule, affine_submodule, spanned_by_sat],
rw ← submodule.mem_inf,
rw [submodule.inf_spans_free R ((singular_chain_complex_basis R (n+1)).get_basis (Top.of D))],
rw [set.image_inter (@basis.injective _ R _ _ _ _
((singular_chain_complex_basis R (n+1)).get_basis (Top.of D))
hnontriv)],
{ refine (submodule.map_span_le _ _ _).mpr _ (submodule.mem_map_of_mem hC),
rintros x ⟨⟨i, σ⟩, ⟨⟨⟨s, hs, hσ1⟩, s', hs', hσ2⟩, vs, hσ3⟩, H⟩, cases i, subst hs',
rw ← simplex_to_chain_is_basis at H, subst H, dsimp at hσ3, rw hσ3,
simp [cone_construction_hom, simplex_to_chain],
rw cone_construction_lift_vertex_span,
refine submodule.subset_span _,
refine exists.intro ⟨(), singular_simplex_of_vertices hConvex (fin.cons b vs)⟩ _,
rw ← simplex_to_chain_is_basis,
refine and.intro _ rfl,
simp,
refine ⟨set.range (singular_simplex_of_vertices hConvex (fin.cons b vs)), ⟨_, _⟩, subset_of_eq rfl⟩,
{ apply singular_simplex_of_vertices_bounded },
{ rw affine_simplex_diam,
rw csupr_le_iff,
intro i, rw csupr_le_iff,
intro j,
{ revert i j,
suffices : ∀ i j : fin (n + 2), i < j → dist (@fin.cons _ (λ _, D) b vs i)
(@fin.cons _ (λ _, D) b vs j) ≤ δ,
{ intros i j,
rcases lt_trichotomy i j with h | h | h,
{ exact this i j h },
{ subst h, simp, },
{ rw dist_comm, exact this j i h } },
intros i j hij,
by_cases (i = 0),
{ subst h, rw ← fin.succ_pred j (ne.symm (ne_of_lt hij)),
simp only [fin.cons_zero, fin.cons_succ],
apply Hb,
rw (_ : vs (j.pred (ne.symm (ne_of_lt hij))) = σ (vertex n (j.pred (ne.symm (ne_of_lt hij))))),
exact hσ2 (set.mem_range_self _),
rw hσ3,
rw apply_affine_to_vertex_eq_vertices_apply },
{ have h' : j ≠ 0,
{ symmetry, apply ne_of_lt, exact lt_trans ((fin.pos_iff_ne_zero _).mpr h) hij },
rw [← fin.succ_pred i h, ← fin.succ_pred j h'],
simp only [fin.cons_succ],
refine le_trans _ hs.right,
apply metric.dist_le_diam_of_mem hs.left;
refine hσ1 _; dsimp; rw hσ3;
apply vertices_apply_mem_range_singular_simplex_of_vertices } },
{ apply finite_is_bounded, apply set.finite_range },
{ apply finite_is_bounded, apply set.finite_range } } },
all_goals { apply set.image_subset_range }
end
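-- Key diameter estimate: the barycentric subdivision of an affine n-simplex is a chain of
-- affine simplices, each of diameter at most n/(n+1) times the diameter of the vertex set.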
lemma barycentric_subdivison_of_affine_simplex_bound_diam (R : Type) [comm_ring R]
{ι : Type} [fintype ι] {D : set (ι → ℝ)} (hConvex : convex ℝ D)
{n : ℕ} (vertices : fin (n + 1) → D)
: (barycentric_subdivision_in_deg R n).app (Top.of D)
(simplex_to_chain (singular_simplex_of_vertices hConvex vertices) R)
∈ bounded_diam_submodule R D ((n : nnreal)/(n + 1 : nnreal)
* ⟨@metric.diam D _ (set.range vertices), metric.diam_nonneg⟩) n
⊓ affine_submodule hConvex R n :=
begin
induction n with n ih,
{ simp [barycentric_subdivision_in_deg, bounded_diam_subcomplex],
split; refine submodule.subset_span _;
rw simplex_to_chain_is_basis; apply set.mem_image_of_mem,
{ simp,
refine ⟨set.range (singular_simplex_of_vertices hConvex vertices), ⟨_, _⟩, subset_refl _⟩,
{ apply singular_simplex_of_vertices_bounded },
{ apply le_of_eq,
dsimp [metric.diam],
rw emetric.diam_eq_zero_iff.mpr _, refl,
refine set.subsingleton_of_forall_eq (singular_simplex_of_vertices hConvex vertices topological_simplex.point) _,
rintros b ⟨p, hp⟩,
rw ← hp, congr } },
{ exact ⟨vertices, rfl⟩ } },
{ dsimp [barycentric_subdivision_in_deg],
rw simplex_to_chain_is_basis R (n + 1) (Top.of D) (singular_simplex_of_vertices hConvex vertices),
rw map_out_desc,
dsimp [simplex_to_chain], rw singular_chain_complex_differential_desc,
rw [map_sum, map_sum, map_sum],
rw ← submodule.mem_inf,
refine submodule.sum_mem _ _,
intros k _,
rw zsmul_eq_smul_cast R,
rw [map_smul, map_smul, map_smul],
refine submodule.smul_mem _ _ _,
rw ← category_theory.comp_apply,
rw cone_construction_barycentry_comp_affine_simplex,
rw [category_theory.comp_apply, ← homological_complex.eval_map,
← category_theory.functor.comp_map],
rw [← category_theory.comp_apply (category_theory.nat_trans.app _ _),
← category_theory.nat_trans.naturality],
dsimp [simplex_to_chain], rw singular_chain_complex_map,
have : simplex_category.to_Top'.map (simplex_category.δ k)
≫ 𝟙 (Top.of (topological_simplex (n + 1)))
= simplex_category.to_Top'.map (simplex_category.δ k) := category_theory.category.comp_id _,
rw this, clear this,
rw simplex_category.to_Top'_map_comp_affine,
specialize ih (vertices ∘ simplex_category.δ k),
have := cone_of_barycenter_sends_bounded_to_bounded R hConvex n
(((n : nnreal) + 1) / ((n : nnreal) + 1 + 1)
* ⟨@metric.diam D _ (set.range vertices), metric.diam_nonneg⟩)
(hConvex.barycenter' vertices)
(set.range (singular_simplex_of_vertices hConvex vertices))
_
⟨((barycentric_subdivision_in_deg R n).app (Top.of ↥D))
(finsupp.single (singular_simplex_of_vertices hConvex
(λ (j : fin (n + 1)), vertices (simplex_category.δ k j))) 1),
_, _⟩,
rw ← submodule.mem_inf,
convert this,
{ norm_cast }, { norm_cast },
{ rintros x ⟨w, hx⟩, subst hx,
refine le_trans (affine_simplex_dist_maximized hConvex vertices (hConvex.barycenter' vertices) w) _,
apply csupr_le, intro i,
convert barycenter_dist_vertex_bound hConvex vertices i,
simp },
{ split,
{ refine bounded_diam_submodule_monotone R _ _ _ n ih.left,
apply mul_le_mul,
{ rw [nnreal.div_le_iff', ← mul_div_assoc, nnreal.le_div_iff_mul_le]; norm_cast,
linarith, exact nat.succ_ne_zero (n + 1), exact nat.succ_ne_zero n },
{ change metric.diam (set.range (vertices ∘ simplex_category.δ k)) ≤ metric.diam (set.range vertices),
apply metric.diam_mono,
{ apply set.range_comp_subset_range },
{ apply metric.bounded_of_finite, apply set.finite_range } },
{ exact metric.diam_nonneg },
{ exact ((↑n + 1) / (↑n + 1 + 1) : nnreal).property } },
{ refine subset_subcomplex_monotone R _ _ _ n (barycentric_subdivision_subset' R _ n _),
convert set.range_comp_subset_range (simplex_category.to_Top'.map (simplex_category.δ k))
(singular_simplex_of_vertices hConvex vertices),
symmetry,
have := simplex_category.to_Top'_map_comp_affine hConvex (simplex_category.δ k) vertices,
refine eq.trans _ (congr_arg _ this),
ext, refl } },
{ exact ih.right } }
end
lemma barycentric_subdivison_map_bound_diam_subcomplex (R : Type) [comm_ring R]
{ι : Type} [fintype ι] {D : set (ι → ℝ)} (hConvex : convex ℝ D)
(n : ℕ) (δ : nnreal)
: submodule.map ((barycentric_subdivision_in_deg R n).app (Top.of D))
(bounded_diam_submodule R D δ n ⊓ affine_submodule hConvex R n)
≤ (bounded_diam_submodule R D ((n : nnreal)/(n + 1 : nnreal) * δ) n
⊓ affine_submodule hConvex R n) :=
begin
apply @le_of_eq_of_le _ _ _
(submodule.map ((barycentric_subdivision_in_deg R n).app (Top.of D))
(submodule.span R ((singular_chain_complex_basis R n).get_basis (Top.of ↥D) ''
({i : Σ (i : (singular_chain_complex_basis R n).indices),
Top.of (topological_simplex n) ⟶ Top.of D
| (∃ (s : set D), (metric.bounded s ∧ metric.diam s ≤ δ)
∧ set.range i.snd ⊆ s)
∧ ∃ (vs : fin (n + 1) → D),
i.snd = singular_simplex_of_vertices hConvex vs})))),
{ by_cases htrivial : (1 : R) = (0 : R),
{ ext, split; intro; convert submodule.zero_mem _;
exact eq.trans (one_smul _ _).symm (eq.trans (congr_arg2 _ htrivial rfl) (zero_smul _ _)) },
have hnontriv : nontrivial R := ⟨⟨1, 0, htrivial⟩⟩,
dsimp [bounded_diam_submodule, bounded_by_submodule, affine_submodule, spanned_by_sat],
rw [submodule.inf_spans_free R ((singular_chain_complex_basis R n).get_basis (Top.of D))],
rw [set.image_inter (@basis.injective _ R _ _ _ _
((singular_chain_complex_basis R n).get_basis (Top.of D))
hnontriv)],
refl,
apply set.image_subset_range, apply set.image_subset_range },
{ refine (linear_map.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨⟨s, ⟨hs1, hs2⟩, hs3⟩, ⟨vs, hvs⟩⟩, h⟩, cases i, subst h,
dsimp at hvs, subst hvs,
split,
{ refine bounded_diam_submodule_monotone R
((n : nnreal)/(n + 1 : nnreal) * ⟨@metric.diam D _ (set.range vs), metric.diam_nonneg⟩)
((n : nnreal)/(n + 1 : nnreal) * δ) _ n _,
{ apply mul_le_mul,
{ refl },
{ rw ← nnreal.coe_le_coe, refine le_trans _ hs2, dsimp,
apply metric.diam_mono,
{ refine subset_trans _ hs3,
rintros p ⟨i, hp⟩, subst hp,
apply vertices_apply_mem_range_singular_simplex_of_vertices },
{ exact hs1 } },
{ exact metric.diam_nonneg },
{ simp } },
{ rw ← simplex_to_chain_is_basis,
exact (barycentric_subdivison_of_affine_simplex_bound_diam R hConvex vs).left } },
{ rw ← simplex_to_chain_is_basis,
exact (barycentric_subdivison_of_affine_simplex_bound_diam R hConvex vs).right } }
end
lemma barycentric_subdivison_chain_map_deg1_on_id (R : Type) [comm_ring R] :
((singular_chain_complex R).obj (Top.of (topological_simplex 1))).d 1 0
((barycentric_subdivision_in_deg R 1).app (Top.of (topological_simplex 1))
(simplex_to_chain (𝟙 (Top.of (topological_simplex 1))) R))
= (barycentric_subdivision_in_deg R 0).app (Top.of (topological_simplex 1))
(((singular_chain_complex R).obj (Top.of (topological_simplex 1))).d 1 0
(simplex_to_chain (𝟙 (Top.of (topological_simplex 1))) R)) :=
begin
transitivity ((singular_chain_complex R).obj (Top.of (topological_simplex 1))).d 1 0
(@cone_construction_hom R _ (Top.of (topological_simplex 1))
(barycenter 1)
((convex_std_simplex ℝ (fin 2)).contraction (barycenter 1))
0
((barycentric_subdivision_in_deg R 0).app (Top.of (topological_simplex 1))
(((singular_chain_complex R).obj (Top.of (topological_simplex 1))).d
1 0
(simplex_to_chain (𝟙 (Top.of (topological_simplex 1))) R)))),
{ refine congr_arg _ _,
dsimp [barycentric_subdivision_in_deg],
rw simplex_to_chain_is_basis,
rw map_out_desc,
simp,
rw (singular_chain_complex R).map_id (Top.of (topological_simplex 1)),
rw homological_complex.id_f ((singular_chain_complex R).obj (Top.of (topological_simplex 1))),
refl },
rw boundary_of_cone_construction_of_convex_contract_deg0,
rw sub_eq_self,
dsimp [simplex_to_chain], rw singular_chain_complex_differential_desc_deg_0,
rw [map_sub, simplex_to_chain_is_basis, simplex_to_chain_is_basis],
dsimp [barycentric_subdivision_in_deg],
rw map_sub, rw sub_eq_zero,
simp [ε_hom, ε_map],
rw [← simplex_to_chain_is_basis, ← simplex_to_chain_is_basis],
rw [@category_theory.category.comp_id _ _ _ (Top.of (topological_simplex 1)),
@category_theory.category.comp_id _ _ _ (Top.of (topological_simplex 1))],
simp [simplex_to_chain]
end
lemma barycentric_subdivison_chain_map_deg1 (R : Type) {X : Top} [comm_ring R] :
(barycentric_subdivision_in_deg R 1).app X ≫
((singular_chain_complex R).obj X).d 1 0 =
((singular_chain_complex R).obj X).d 1 0 ≫
(barycentric_subdivision_in_deg R 0).app X :=
begin
apply basis.ext ((singular_chain_complex_basis R 1).get_basis X),
rintro ⟨i, σ⟩,
dsimp [functor_basis.get_basis], rw basis.mk_apply,
change ((singular_chain_complex R).obj X).d 1 0
((barycentric_subdivision_in_deg R 1).app X
(((singular_chain_complex R).map σ).f 1
(simplex_to_chain (𝟙 (Top.of (topological_simplex 1))) R)))
= (barycentric_subdivision_in_deg R 0).app X
(((singular_chain_complex R).obj X).d (0 + 1) 0
(((singular_chain_complex R).map σ).f 1
(simplex_to_chain (𝟙 (Top.of (topological_simplex 1))) R))),
rw [← homological_complex.eval_map, ← category_theory.functor.comp_map,
← category_theory.comp_apply _ ((barycentric_subdivision_in_deg R 1).app X)],
rw (barycentric_subdivision_in_deg R 1).naturality,
dsimp,
rw [← category_theory.comp_apply, ((singular_chain_complex R).map σ).comm],
dsimp,
refine eq.trans (congr_arg (((singular_chain_complex R).map σ).f 0) (barycentric_subdivison_chain_map_deg1_on_id R)) _,
rw [← category_theory.comp_apply, ← homological_complex.eval_map,
← category_theory.functor.comp_map, ← (barycentric_subdivision_in_deg R 0).naturality],
dsimp,
refine congr_arg ((barycentric_subdivision_in_deg R 0).app X) _,
rw [← category_theory.comp_apply, ← category_theory.comp_apply],
refine congr_fun (congr_arg coe_fn _) _,
symmetry, exact ((singular_chain_complex R).map σ).comm 1 0
end
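-- Inductive step on the model simplex: if the chain-map square commutes in degree n + 1
-- for every space, then it commutes in degree n + 2 on the identity simplex of Δ^{n+2}.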
lemma barycentric_subdivison_chain_map_degn_on_id (R : Type) [comm_ring R] (n : ℕ) :
(∀ X, (barycentric_subdivision_in_deg R (n + 1)).app X ≫
((singular_chain_complex R).obj X).d (n + 1) n =
((singular_chain_complex R).obj X).d (n + 1) n ≫
(barycentric_subdivision_in_deg R n).app X) →
((singular_chain_complex R).obj (Top.of (topological_simplex (n + 2)))).d (n + 2) (n + 1)
((barycentric_subdivision_in_deg R (n + 2)).app (Top.of (topological_simplex (n + 2)))
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 2)))) R))
= (barycentric_subdivision_in_deg R (n + 1)).app (Top.of (topological_simplex (n + 2)))
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 2)))).d (n + 2) (n + 1)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 2)))) R)) :=
begin
intro H,
transitivity ((singular_chain_complex R).obj (Top.of (topological_simplex (n + 2)))).d (n + 2) (n + 1)
(@cone_construction_hom R _ (Top.of (topological_simplex (n + 2)))
(barycenter (n + 2))
((convex_std_simplex ℝ (fin (n + 3))).contraction (barycenter (n + 2)))
(n + 1)
((barycentric_subdivision_in_deg R (n + 1)).app (Top.of (topological_simplex (n + 2)))
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 2)))).d
(n + 2) (n + 1)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 2)))) R)))),
{ refine congr_arg _ _,
dsimp [barycentric_subdivision_in_deg],
rw simplex_to_chain_is_basis R (n + 2),
rw map_out_desc,
simp,
rw (singular_chain_complex R).map_id (Top.of (topological_simplex (n + 2))),
rw homological_complex.id_f ((singular_chain_complex R).obj (Top.of (topological_simplex (n + 2)))),
refl },
rw boundary_of_cone_construction_of_convex_contract,
rw sub_eq_self,
refine eq.trans (congr_arg _ _) (map_zero _),
rw ← category_theory.comp_apply,
rw H,
rw category_theory.comp_apply,
refine eq.trans (congr_arg _ _) (map_zero _),
rw ← category_theory.comp_apply,
simp
end
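-- Inductive step for an arbitrary space `X`, again reduced to the identity-simplex case.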
lemma barycentric_subdivison_chain_map_degn (R : Type) {X : Top} [comm_ring R] (n : ℕ) :
(∀ Y, (barycentric_subdivision_in_deg R (n + 1)).app Y ≫
((singular_chain_complex R).obj Y).d (n + 1) n =
((singular_chain_complex R).obj Y).d (n + 1) n ≫
(barycentric_subdivision_in_deg R n).app Y) →
(barycentric_subdivision_in_deg R (n + 2)).app X ≫
((singular_chain_complex R).obj X).d (n + 2) (n + 1) =
((singular_chain_complex R).obj X).d (n + 2) (n + 1) ≫
(barycentric_subdivision_in_deg R (n + 1)).app X :=
begin
intro H,
apply basis.ext ((singular_chain_complex_basis R (n + 2)).get_basis X),
rintro ⟨i, σ⟩,
dsimp [functor_basis.get_basis], rw basis.mk_apply,
change ((singular_chain_complex R).obj X).d (n + 2) (n + 1)
((barycentric_subdivision_in_deg R (n + 2)).app X
(((singular_chain_complex R).map σ).f (n + 2)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 2)))) R)))
= (barycentric_subdivision_in_deg R (n + 1)).app X
(((singular_chain_complex R).obj X).d (n + 2) (n + 1)
(((singular_chain_complex R).map σ).f (n + 2)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 2)))) R))),
rw [← homological_complex.eval_map, ← category_theory.functor.comp_map,
← category_theory.comp_apply _ ((barycentric_subdivision_in_deg R (n + 2)).app X)],
rw (barycentric_subdivision_in_deg R (n + 2)).naturality,
dsimp,
rw [← category_theory.comp_apply, ((singular_chain_complex R).map σ).comm],
dsimp,
refine eq.trans (congr_arg (((singular_chain_complex R).map σ).f (n + 1)) (barycentric_subdivison_chain_map_degn_on_id R n H)) _,
rw [← category_theory.comp_apply, ← homological_complex.eval_map,
← category_theory.functor.comp_map, ← (barycentric_subdivision_in_deg R (n + 1)).naturality],
dsimp,
refine congr_arg ((barycentric_subdivision_in_deg R (n + 1)).app X) _,
rw [← category_theory.comp_apply, ← category_theory.comp_apply],
refine congr_fun (congr_arg coe_fn _) _,
symmetry, exact ((singular_chain_complex R).map σ).comm (n + 2) (n + 1)
end
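-- Barycentric subdivision commutes with all differentials, i.e. the degreewise maps form a chain map.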
lemma barycentric_subdivison_chain_map (R : Type) {X : Top} [comm_ring R] (n : ℕ)
: (barycentric_subdivision_in_deg R (n + 1)).app X ≫
((singular_chain_complex R).obj X).d (n + 1) n =
((singular_chain_complex R).obj X).d (n + 1) n ≫
(barycentric_subdivision_in_deg R n).app X :=
begin
revert X, induction n; intro X,
apply barycentric_subdivison_chain_map_deg1,
apply barycentric_subdivison_chain_map_degn,
assumption
end
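-- Barycentric subdivision packaged as a natural endomorphism of the singular chain complex functor.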
noncomputable
def barycentric_subdivision (R : Type*) [comm_ring R]
: singular_chain_complex R ⟶ singular_chain_complex R :=
homological_complex_functor.mk_nat_trans
(barycentric_subdivision_in_deg R)
(λ i j hij X, by { dsimp at hij, subst hij, apply barycentric_subdivison_chain_map })
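-- A natural chain homotopy from the identity to barycentric subdivision, built degreewise by the
-- cone construction contracting the standard simplex to its barycenter.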
noncomputable
def barycentric_subdivision_homotopic_id (R : Type*) [comm_ring R]
: natural_chain_homotopy (𝟙 (singular_chain_complex R)) (barycentric_subdivision R) :=
@chain_complex.mk_natural_chain_homotopy_rec Top (Module R) _ _ _ _ _ _ _
(singular_chain_complex R) (singular_chain_complex R)
(𝟙 (singular_chain_complex R))
(barycentric_subdivision R)
0 (λ X, by { simp, refl })
(λ n s _,
(singular_chain_complex_basis R (n + 1)).map_out
(singular_chain_complex R
⋙ homological_complex.eval _ _ (n + 2))
(λ p, @cone_construction_hom R _
(Top.of (topological_simplex (n + 1)))
(barycenter (n + 1))
((convex_std_simplex ℝ (fin (n + 2))).contraction (barycenter (n + 1)))
(n + 1)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R
- ((barycentric_subdivision_in_deg R _).app _ (simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R))
- s.app (Top.of (topological_simplex (n + 1)))
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 1)))).d (n + 1) n
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R)))))
(by { intros,
apply basis.ext ((singular_chain_complex_basis R (n + 1)).get_basis X),
rintro ⟨i, σ⟩, cases i,
have : ∀ n Y (τ : Top.of (topological_simplex n) ⟶ Y),
@simplex_to_chain n (Top.to_sSet'.obj Y) τ R _
= ((singular_chain_complex_basis R n).get_basis Y) ⟨(), τ⟩,
{ intros, dsimp [functor_basis.get_basis, simplex_to_chain], rw basis.mk_apply,
symmetry, refine eq.trans finsupp.map_domain_single _,
congr, apply category_theory.category.id_comp },
simp,
suffices H : ∀ a b c d : (((singular_chain_complex R).obj X).X (n + 1)),
c = a - b - d → a = b + c + d,
{ apply H,
rw map_out_desc, rw ← this, simp,
rw [sub_right_comm, sub_eq_iff_eq_add],
transitivity ((singular_chain_complex R).map σ).f (n + 1)
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 1)))).d (n + 2) (n + 1)
(@cone_construction_hom R _
(Top.of (topological_simplex (n + 1)))
(barycenter (n + 1))
((convex_std_simplex ℝ (fin (n + 2))).contraction (barycenter (n + 1)))
(n + 1)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R
- s.app (Top.of (topological_simplex (n + 1)))
(((singular_chain_complex R).obj (Top.of (topological_simplex (n + 1)))).d (n + 1) n
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R))))),
rw [← category_theory.comp_apply,
← category_theory.comp_apply (((singular_chain_complex R).map σ).f (n + 2)),
← map_sub, ((singular_chain_complex R).map σ).comm],
dsimp,
refine congr_arg _ _,
refine congr_arg _ _,
symmetry, apply map_sub,
rw boundary_of_cone_construction_of_convex_contract,
rw map_sub (((singular_chain_complex R).obj (Top.of (topological_simplex (n + 1)))).d (n + 1) n),
specialize h (Top.of (topological_simplex (n + 1))),
simp at h,
rw ← sub_eq_iff_eq_add at h,
rw [← category_theory.comp_apply (s.app (Top.of (topological_simplex (n + 1)))),
← category_theory.comp_apply _ (s.app (Top.of ↥(topological_simplex (n + 1))) ≫ ((singular_chain_complex R).obj (Top.of ↥(topological_simplex (n + 1)))).d (n + 1) n)],
rw ← h, simp,
rw sub_add,
apply congr_arg2,
{ apply congr_arg2,
{ dsimp [simplex_to_chain],
rw singular_chain_complex_map,
exact congr_fun (congr_arg finsupp.single (category_theory.category.id_comp σ)) 1, },
{ dsimp [simplex_to_chain],
rw [← category_theory.comp_apply,
← homological_complex.eval_map,
← category_theory.functor.comp_map,
← s.naturality,
category_theory.functor.comp_map,
homological_complex.eval_map,
category_theory.comp_apply,
← category_theory.comp_apply _ (((singular_chain_complex R).map σ).f n)],
refine congr_arg _ _,
transitivity (((singular_chain_complex R).map σ).f (n + 1) ≫ ((singular_chain_complex R).obj X).d (n + 1) n) (finsupp.single (𝟙 (Top.of (topological_simplex (n + 1)))) 1),
{ exact congr_fun (congr_arg coe_fn (((singular_chain_complex R).map σ).comm (n + 1) n).symm) _ },
refine congr_arg (((singular_chain_complex R).obj X).d (n + 1) n) _,
rw singular_chain_complex_map,
exact congr_fun (congr_arg finsupp.single (category_theory.category.id_comp σ)) 1, } },
{ rw [← category_theory.comp_apply _ (((barycentric_subdivision R).app (Top.of (topological_simplex (n + 1)))).f n),
← ((barycentric_subdivision R).app (Top.of (topological_simplex (n + 1)))).comm,
category_theory.comp_apply],
have := boundary_of_cone_construction_of_convex_contract (convex_std_simplex ℝ (fin (n + 2))) R (barycenter (n + 1))
(((barycentric_subdivision R).app (Top.of (topological_simplex (n + 1)))).f (n + 1)
(simplex_to_chain (𝟙 (Top.of (topological_simplex (n + 1)))) R)),
rw [eq_sub_iff_add_eq, @add_comm (((singular_chain_complex R).obj (Top.of (std_simplex ℝ (fin (n + 2))))).X (n + 1)), ← eq_sub_iff_add_eq] at this,
refine eq.trans (congr_arg (((singular_chain_complex R).map σ).f (n + 1)) this) _,
rw map_sub, apply congr_arg2,
{ rw [← category_theory.comp_apply,
← homological_complex.comp_f,
← (barycentric_subdivision R).naturality,
homological_complex.comp_f,
category_theory.comp_apply],
refine congr_arg (((barycentric_subdivision R).app X).f (n + 1)) _,
dsimp [simplex_to_chain],
rw singular_chain_complex_map,
exact congr_fun (congr_arg finsupp.single (category_theory.category.id_comp σ)) 1 },
{ rw [← category_theory.comp_apply,
← category_theory.comp_apply (((singular_chain_complex R).map σ).f (n + 2))],
refine congr_fun _ _,
refine congr_arg _ _,
symmetry,
exact ((singular_chain_complex R).map σ).comm (n + 2) (n + 1), } } },
{ intros a b c d h,
rw [eq_sub_iff_add_eq, eq_sub_iff_add_eq] at h,
rw ← h,
ac_refl } })
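-- Iterating barycentric subdivision k times on an affine simplex shrinks the diameter bound by a
-- factor of (n/(n+1))^k while remaining in the affine subcomplex.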
lemma iterated_barycentric_subdivison_of_affine_simplex_bound_diam (R : Type) [comm_ring R]
{ι : Type} [fintype ι] {D : set (ι → ℝ)} (hConvex : convex ℝ D)
{n : ℕ} (vertices : fin (n + 1) → D) (k : ℕ)
: ((barycentric_subdivision_in_deg R n).app (Top.of D))^[k]
(simplex_to_chain (singular_simplex_of_vertices hConvex vertices) R)
∈ bounded_diam_submodule R D (((n : nnreal)/(n + 1 : nnreal))^k
* ⟨@metric.diam D _ (set.range vertices), metric.diam_nonneg⟩) n
⊓ affine_submodule hConvex R n :=
begin
induction k with k ih,
{ dsimp [barycentric_subdivision_in_deg],
refine ⟨submodule.subset_span _, submodule.subset_span _⟩;
rw simplex_to_chain_is_basis; apply set.mem_image_of_mem,
{ refine ⟨set.range (singular_simplex_of_vertices hConvex vertices), _, subset_of_eq rfl⟩,
simp,
split,
{ apply singular_simplex_of_vertices_bounded },
{ rw affine_simplex_diam,
rw csupr_le_iff, intro i,
rw csupr_le_iff, intro j,
refine metric.dist_le_diam_of_mem _ (set.mem_range_self i) (set.mem_range_self j),
apply metric.bounded_of_finite, apply set.finite_range,
apply finite_is_bounded, apply set.finite_range,
apply finite_is_bounded, apply set.finite_range } },
{ exact ⟨vertices, rfl⟩ } },
{ rw nat.iterate_succ,
rw [pow_succ, mul_assoc],
exact barycentric_subdivison_map_bound_diam_subcomplex R hConvex n _
(submodule.mem_map.mpr ⟨_, ih, rfl⟩) },
end
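-- Iterates of a natural endotransformation commute with the functor's action on morphisms.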
lemma nat_trans.iter_naturality {C D : Type*} [category_theory.category C]
[category_theory.category D] [category_theory.concrete_category D]
(F : C ⥤ D) (η : F ⟶ F) {X Y : C} (f : X ⟶ Y) (x : F.obj X) (n : ℕ)
: (η.app Y)^[n] (F.map f x) = F.map f ((η.app X)^[n] x) :=
begin
induction n with n ih, { simp },
{ rw nat.iterate_succ, rw ih,
rw ← category_theory.comp_apply,
rw η.naturality,
rw category_theory.comp_apply,
rw ← nat.iterate_succ (η.app X) }
end
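-- Pull a family of subsets of Y back along f : X → Y by taking preimages; the next two lemmas
-- record that this preserves being a cover and, for continuous f, being open.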
def pullback_family_of_sets {X Y : Type*} (cov : set (set Y)) (f : X → Y) := (set.preimage f) '' cov
lemma pullback_family_of_sets_covers {X Y : Type*} (cov : set (set Y)) (f : X → Y)
(hcov : ⋃₀ cov = ⊤) : ⋃₀ (pullback_family_of_sets cov f) = ⊤ :=
begin
delta pullback_family_of_sets,
rw set.sUnion_image, simp_rw ← set.preimage_Union,
rw ← set.sUnion_eq_bUnion, rw hcov, exact set.preimage_univ
end
lemma pullback_family_of_sets_by_continuous {X Y : Type*}
[topological_space X] [topological_space Y] (cov : set (set Y))
(hOpen : ∀ s, s ∈ cov → is_open s) (f : C(X, Y))
: ∀ t, t ∈ pullback_family_of_sets cov f → is_open t :=
by { rintros t ⟨s, hs, h⟩, subst h, refine (hOpen s hs).preimage f.continuous }
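-- Pushing a chain bounded by the pulled-back cover forward along f yields a chain bounded by the
-- original cover.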
lemma bounded_by_subcomplex_map_pullback_le (R : Type) [comm_ring R] {X Y : Top}
(cov : set (set Y)) (f : X ⟶ Y) (n : ℕ)
: submodule.map (((singular_chain_complex R).map f).f n)
(bounded_by_submodule R (pullback_family_of_sets cov f) n)
≤ bounded_by_submodule R cov n :=
begin
refine (linear_map.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨t, ht, hσ⟩, h⟩, subst h, cases i,
obtain ⟨s, hs, hst⟩ := ht,
rw ← simplex_to_chain_is_basis, dsimp [simplex_to_chain],
rw singular_chain_complex_map,
refine submodule.subset_span _,
refine ⟨⟨(), σ ≫ f⟩, ⟨s, hs, _⟩, _⟩,
{ subst hst,
refine subset_trans (subset_of_eq (set.range_comp _ _)) _,
exact set.image_subset_iff.mpr hσ },
{ rw ← simplex_to_chain_is_basis, refl }
end
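-- Every chain becomes bounded by a given open cover after sufficiently many barycentric
-- subdivisions; the number of subdivisions needed comes from a Lebesgue-number style bound on the
-- standard simplex.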
lemma sufficient_barycentric_lands_in_cover (R : Type) [comm_ring R] {X : Top}
(cov : set (set X)) (cov_is_open : ∀ s, s ∈ cov → is_open s) (hcov : ⋃₀ cov = ⊤) (n : ℕ)
(C : ((singular_chain_complex R).obj X).X n)
: ∃ k : ℕ, ((barycentric_subdivision_in_deg R n).app X) ^[k] C ∈ bounded_by_submodule R cov n :=
begin
have : ∀ C', (∃ k : ℕ, ((barycentric_subdivision_in_deg R n).app X) ^[k] C'
∈ bounded_by_submodule R cov n)
↔ C' ∈ ⨆ (k : ℕ), submodule.comap (((barycentric_subdivision_in_deg R n).app X)^k)
(bounded_by_submodule R cov n),
{ intro C',
rw submodule.mem_supr_of_directed, simp,
intros i j, existsi i + j, split; intro x; simp; intro H,
-- poor man's `wlog`: swap the roles of i and j so both goals have the same shape
swap, rename i temp, rename j i, rename temp j, rw add_comm j i,
all_goals
{ induction j with j ih,
{ exact H },
{ rw nat.add_succ,
rw nat.iterate_succ, revert ih,
generalize : ((barycentric_subdivision_in_deg R n).app X)^[i + j] x = y, intro H,
refine submodule.mem_comap.mp _,
refine (submodule.map_le_iff_le_comap.mp _) H,
refine (linear_map.map_span_le _ _ _).mpr _ ,
rintros x ⟨⟨i, σ⟩, ⟨s, hs, hσs⟩, h⟩, subst h, cases i,
refine subset_subcomplex_le_bounded_by_subcomplex R cov s hs n _,
refine subset_subcomplex_monotone R _ _ hσs n _,
rw ← simplex_to_chain_is_basis,
convert barycentric_subdivision_subset' R X n σ,
-- the fact that we need this suggests bad design
cases X, refl } } },
rw this,
revert C,
rw [← submodule.eq_top_iff', ← top_le_iff],
rw ← (singular_chain_complex_basis R n).spanning X,
rw submodule.span_le,
rintro C ⟨i, σ, h⟩, cases i, dsimp [singular_chain_complex_basis] at σ,
refine (this C).mp _, subst h,
let cov' := pullback_family_of_sets cov σ,
have cov'_is_open := pullback_family_of_sets_by_continuous cov cov_is_open σ,
have hcov' := pullback_family_of_sets_covers cov σ hcov,
have cov'_nonempty : cov'.nonempty := @set.nonempty.of_sUnion_eq_univ _ ⟨vertex n 0⟩ _ hcov',
obtain ⟨δ, δ_pos, hδ⟩ := @bounded_diam_subcomplex_le_cover_subcomplex (fin (n + 1)) _
(topological_simplex n)
(compact_std_simplex (fin (n + 1)))
R _ cov' cov'_is_open hcov' cov'_nonempty n,
simp_rw nat_trans.iter_naturality,
have : (n : ℝ) / (n + 1 : ℝ) < 1,
{ rw div_lt_one_iff, left, norm_cast, simp },
obtain ⟨k, hk⟩ := exists_pow_lt_of_lt_one (nnreal.coe_pos.mpr δ_pos) this,
existsi k, dsimp,
convert bounded_by_subcomplex_map_pullback_le R cov σ n _,
apply submodule.mem_map_of_mem,
refine hδ _,
convert bounded_diam_submodule_monotone R _ _ _ n
(iterated_barycentric_subdivison_of_affine_simplex_bound_diam R (convex_std_simplex ℝ (fin (n + 1))) (vertex n) k).left,
{ dsimp [singular_chain_complex_basis], congr, symmetry, exact singular_simplex_of_vertices_eq_id },
{ have hk' : ((↑n / (↑n + 1)) ^ k : nnreal) ≤ δ,
{ apply le_of_lt,
rw ← nnreal.coe_lt_coe,
convert hk },
rw ← mul_one ((↑n / (↑n + 1)) ^ k : nnreal) at hk',
refine le_trans _ hk',
apply mul_le_mul,
{ refl },
{ dsimp, apply metric.diam_le_of_forall_dist_le, simp,
rintros p _ q _,
refine (dist_pi_le_iff _).mpr _, simp,
intro i,
exact real.dist_le_of_mem_Icc_01 ⟨p.property.left i, topological_simplex.coord_le_one n i p⟩
⟨q.property.left i, topological_simplex.coord_le_one n i q⟩ },
{ exact metric.diam_nonneg },
{ simp, } }
end
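-- The inclusion of the subcomplex of chains bounded by a cover into the full singular chain complex.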
noncomputable
def bounded_by_subcomplex_inclusion (R : Type) [comm_ring R] {X : Top} (cov : set (set X))
: bounded_by_subcomplex R cov ⟶ (singular_chain_complex R).obj X :=
Module.subcomplex_of_compatible_submodules_inclusion ((singular_chain_complex R).obj X)
(λ n, spanned_by_sat R (((singular_chain_complex R).obj X).X n)
(((singular_chain_complex_basis R n).get_basis X))
{ p | ∃ s, s ∈ cov ∧ set.range p.2 ⊆ s })
(by { rintros i j y ⟨x, ⟨hx, h⟩⟩,
subst h,
by_cases (complex_shape.down ℕ).rel i j,
{ exact bounded_by_subcomplex_compat R cov i j (submodule.mem_map_of_mem hx) },
{ rw homological_complex.shape' _ i j h, simp } })
-- This does typecheck but it takes forever... why???
lemma subdivision_chain_homotopy_of_bounded_is_bounded
(R : Type) [comm_ring R] {X : Top}
(cov : set (set X)) (n : ℕ) (s : set X) (H : s ∈ cov)
(σ : Top.of (topological_simplex n) ⟶ X) (hσ : set.range σ ⊆ s)
: ((barycentric_subdivision_homotopic_id R).to_chain_htpy X).hom n (n+1) (simplex_to_chain σ R)
∈ bounded_by_submodule R cov (n + 1) :=
begin
rw simplex_to_chain_is_basis,
dsimp [barycentric_subdivision_homotopic_id, chain_complex.mk_natural_chain_homotopy_rec],
delta chain_complex.mk_natural_chain_homotopy,
unfold_projs,
dsimp,
split_ifs, swap, contradiction,
cases n with n,
{ exact submodule.zero_mem _ },
{ dsimp,
rw map_out_desc,
dsimp,
refine bounded_by_subcomplex_map_pullback_le R cov σ (n.succ + 1)
(submodule.mem_map_of_mem _),
convert submodule.mem_top,
rw eq_top_iff,
rw ← subset_subcomplex_univ,
apply subset_subcomplex_le_bounded_by_subcomplex R (pullback_family_of_sets cov σ),
dsimp [pullback_family_of_sets],
refine ⟨s, H, _⟩,
rw ← set.univ_subset_iff,
rw ← set.preimage_range σ,
exact set.preimage_mono hσ }
end
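-- For an open cover, the inclusion of the cover-bounded subcomplex is a quasi-isomorphism
-- (a form of the small simplices theorem).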
lemma cover_subcomplex_inclusion_quasi_iso
(R : Type) [comm_ring R] {X : Top}
(cov : set (set X)) (cov_is_open : ∀ s, s ∈ cov → is_open s) (hcov : ⋃₀ cov = ⊤)
: quasi_iso (bounded_by_subcomplex_inclusion R cov) :=
begin
dsimp [bounded_by_subcomplex_inclusion],
apply subcomplex_inclusion_quasi_iso_of_pseudo_projection _ _
((barycentric_subdivision R).app X)
((barycentric_subdivision_homotopic_id R).to_chain_htpy X),
{ apply sufficient_barycentric_lands_in_cover; assumption },
{ intro i,
refine (submodule.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨s, H, hσ⟩, h⟩, subst h, cases i,
rw ← simplex_to_chain_is_basis,
simp [barycentric_subdivision,
homological_complex_functor.mk_nat_trans],
cases i with i,
{ refine submodule.subset_span _,
rw simplex_to_chain_is_basis,
refine set.mem_image_of_mem _ ⟨s, H, hσ⟩ },
{ change (barycentric_subdivision_in_deg R (i+1)).app X (simplex_to_chain σ R)
∈ bounded_by_submodule R cov (i+1),
rw simplex_to_chain_is_basis,
dsimp [barycentric_subdivision_in_deg],
rw map_out_desc,
simp,
have := bounded_by_subcomplex_map_pullback_le R cov σ (i + 1),
refine this (submodule.mem_map_of_mem _), clear this,
refine subset_subcomplex_le_bounded_by_subcomplex R _ set.univ _ (i + 1) _,
{ existsi s,
refine ⟨H, _⟩,
rw ← set.univ_subset_iff,
exact subset_trans (subset_of_eq (set.preimage_range _).symm) (set.preimage_mono hσ) },
{ rw subset_subcomplex_univ, simp } } },
{ intros i j,
by_cases (i + 1 = j),
{ subst h,
refine (submodule.map_span_le _ _ _).mpr _,
rintros C ⟨⟨i, σ⟩, ⟨s, H, hσ⟩, h⟩, subst h, cases i,
change ((barycentric_subdivision_homotopic_id R).to_chain_htpy X).hom i (i + 1)
((singular_chain_complex_basis R i).get_basis X ⟨(), σ⟩)
∈ bounded_by_submodule R cov (i + 1),
rw ← simplex_to_chain_is_basis,
exact subdivision_chain_homotopy_of_bounded_is_bounded R cov i s H σ hσ },
{ rw ← complex_shape.down_rel at h, rw homotopy.zero' _ i j h,
rw submodule.map_zero,
exact bot_le } }
end
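-- The bounded-subcomplex inclusions commute with pushing chains forward along a map that
-- refines one cover into the other.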
lemma cover_inclusion_natural (R : Type) [comm_ring R] {X Y : Top} (f : X ⟶ Y)
(covX : set (set X)) (covY : set (set Y)) (H : ∀ s, s ∈ covX → ∃ t, t ∈ covY ∧ f '' s ⊆ t)
: bounded_by_subcomplex_inclusion R covX ≫ (singular_chain_complex R).map f
= bounded_by_subcomplex_map R f covX covY H ≫ bounded_by_subcomplex_inclusion R covY :=
begin
ext n : 2,
apply basis.ext (bounded_by_submodule_basis R covX n),
rintro ⟨⟨i, σ⟩, s, hs, hσ⟩, cases i,
delta bounded_by_submodule_basis,
rw spanned_by_sat_basis_apply,
refl
end
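-- The map on bounded subcomplexes induced by i.hom, from chains of i.left bounded by the
-- pulled-back cover to chains of i.right bounded by cov.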
noncomputable
def bounded_by_pullback_chain_inclusion (R : Type) [comm_ring R]
(i : category_theory.arrow Top) (cov : set (set i.right))
: bounded_by_subcomplex R (pullback_family_of_sets cov (i.hom)) ⟶ bounded_by_subcomplex R cov :=
bounded_by_subcomplex_map R i.hom (pullback_family_of_sets cov i.hom)
cov
(λ s hs, exists.elim hs (λ t ht, ⟨t, ht.left,
subset_trans (set.image_subset _ (subset_of_eq ht.right.symm)) (set.image_preimage_subset _ _)⟩)).
lemma pullback_of_refinement_is_refinement (R : Type) [comm_ring R]
{X A Y B : Top} (i : A ⟶ X) (j : B ⟶ Y)
(g : A ⟶ B) (f : X ⟶ Y) (w : g ≫ j = i ≫ f)
(cov : set (set X)) (cov' : set (set Y)) (H : ∀ S, S ∈ cov → ∃ T, T ∈ cov' ∧ f '' S ⊆ T)
: ∀ s, s ∈ pullback_family_of_sets cov i → ∃ t, t ∈ pullback_family_of_sets cov' j ∧ g '' s ⊆ t :=
begin
rintros s ⟨S, hS, hs⟩,
obtain ⟨T, hT⟩ := H S hS,
refine ⟨j ⁻¹' T, set.mem_image_of_mem _ hT.left, _⟩,
refine set.image_subset_iff.mp _,
refine subset_trans _ hT.right,
rw [← set.image_comp, ← hs],
change (g ≫ j) '' (i ⁻¹' S) ⊆ f '' S,
rw w, refine subset_trans (subset_of_eq (set.image_comp f i _)) _,
dsimp,
refine set.image_subset _ _,
exact set.image_preimage_subset _ _
end
lemma bounded_by_pullback_chain_inclusion_natural (R : Type) [comm_ring R]
(i : category_theory.arrow Top) (j : category_theory.arrow Top) (w : i ⟶ j)
(cov : set (set i.right)) (cov' : set (set j.right))
(H : ∀ S, S ∈ cov → ∃ T, T ∈ cov' ∧ w.right '' S ⊆ T)
: bounded_by_subcomplex_map R w.left (pullback_family_of_sets cov i.hom)
(pullback_family_of_sets cov' j.hom)
(pullback_of_refinement_is_refinement R i.hom j.hom w.left
w.right w.w cov cov' H)
≫ bounded_by_pullback_chain_inclusion R j cov'
= bounded_by_pullback_chain_inclusion R i cov ≫ bounded_by_subcomplex_map R w.right cov cov' H :=
begin
delta bounded_by_pullback_chain_inclusion,
rw [bounded_by_subcomplex_map_comp, bounded_by_subcomplex_map_comp],
have := w.w, dsimp at this, simp_rw this,
refl
end
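-- Relative chains of the pair i subordinate to the cover cov: the cokernel of the preceding inclusion.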
noncomputable
def singular_chain_complex_of_pair_under_cover (R : Type) [comm_ring R]
(i : category_theory.arrow Top) (cov : set (set i.right)) : chain_complex (Module R) ℕ :=
category_theory.limits.cokernel (bounded_by_pullback_chain_inclusion R i cov).
noncomputable
def singular_chain_complex_of_pair_under_cover_map (R : Type) [comm_ring R]
{i j : category_theory.arrow Top} (w : i ⟶ j)
(cov : set (set i.right)) (cov' : set (set j.right))
(H : ∀ s, s ∈ cov → ∃ t, t ∈ cov' ∧ w.right '' s ⊆ t)
: singular_chain_complex_of_pair_under_cover R i cov
⟶ singular_chain_complex_of_pair_under_cover R j cov' :=
(coker_functor (chain_complex (Module R) ℕ)).map
(category_theory.arrow.hom_mk (bounded_by_pullback_chain_inclusion_natural R i j w cov cov' H)
: category_theory.arrow.mk (bounded_by_pullback_chain_inclusion R i cov)
⟶ category_theory.arrow.mk (bounded_by_pullback_chain_inclusion R j cov'))
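-- The comparison map from the cover-subordinate relative chain complex to the usual relative
-- chain complex of the pair.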
noncomputable
def singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair
(R : Type) [comm_ring R] (i : category_theory.arrow Top) (cov : set (set i.right))
: singular_chain_complex_of_pair_under_cover R i cov ⟶ (singular_chain_complex_of_pair R).obj i :=
(coker_functor (chain_complex (Module R) ℕ)).map
(category_theory.arrow.hom_mk (cover_inclusion_natural R i.hom
(pullback_family_of_sets cov i.hom) cov
(λ s hs, exists.elim hs (λ t ht, ⟨t, ht.left,
subset_trans (set.image_subset _ (subset_of_eq ht.right.symm))
(set.image_preimage_subset _ _)⟩)))
: category_theory.arrow.mk _ ⟶ category_theory.arrow.mk _)
lemma singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair_naturality
(R : Type) [comm_ring R] {i j : category_theory.arrow Top} (w : i ⟶ j)
(cov : set (set i.right)) (cov' : set (set j.right))
(H : ∀ s, s ∈ cov → ∃ t, t ∈ cov' ∧ w.right '' s ⊆ t)
: singular_chain_complex_of_pair_under_cover_map R w cov cov' H
≫ singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair R j cov'
= singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair R i cov
≫ (singular_chain_complex_of_pair R).map w :=
begin
dsimp [singular_chain_complex_of_pair, singular_chain_complex_of_pair_under_cover_map,
singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair],
rw [← (coker_functor (chain_complex (Module R) ℕ)).map_comp,
← (coker_functor (chain_complex (Module R) ℕ)).map_comp],
refine congr_arg _ _,
ext : 1; dsimp; symmetry; apply cover_inclusion_natural,
end
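-- Relative homology of the pair computed from cover-subordinate chains.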
noncomputable
def singular_homology_of_pair_under_cover (R : Type) [comm_ring R]
(i : category_theory.arrow Top) (cov : set (set i.right)) (n : ℕ) : Module R :=
(singular_chain_complex_of_pair_under_cover R i cov).homology n
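-- If i.hom is injective and cov is an open cover of i.right, the comparison map above is a
-- quasi-isomorphism.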
lemma singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair_quasi_iso
(R : Type) [comm_ring R] (i : category_theory.arrow Top) (hi : function.injective i.hom)
(cov : set (set i.right)) (cov_is_open : ∀ s, s ∈ cov → is_open s) (hcov : ⋃₀ cov = ⊤)
: quasi_iso (singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair R i cov) :=
begin
apply coker_of_quasi_isos_between_monic_arrows_is_quasi_iso,
{ apply bounded_by_subcomplex_map_mono, exact hi },
{ apply_with homological_complex.mono_of_eval {instances := ff},
intro, rw Module.mono_iff_injective,
apply singular_chain_complex_map_inj, exact hi },
{ apply cover_subcomplex_inclusion_quasi_iso,
{ apply pullback_family_of_sets_by_continuous, assumption },
{ apply pullback_family_of_sets_covers, assumption } },
{ apply cover_subcomplex_inclusion_quasi_iso; assumption }
end.
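-- Inclusion maps setting up the excision square: A ∩ B ↪ A, B ↪ X, A ↪ X and A ∩ B ↪ B.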
def excision_inner_map {X : Type*} [topological_space X] (A B : set X)
: Top.of (A ∩ B : set X) ⟶ Top.of A := ⟨_, continuous_inclusion (set.inter_subset_left _ _)⟩
def excision_outer_map {X : Type*} [topological_space X] (A B : set X)
: Top.of B ⟶ Top.of X := ⟨_, continuous_subtype_val⟩
def excision_include {X : Type*} [topological_space X] (A : set X)
: Top.of A ⟶ Top.of X := ⟨_, continuous_subtype_val⟩
def excision_include_inter {X : Type*} [topological_space X] (A B : set X)
: Top.of (A ∩ B : set X) ⟶ Top.of B :=
⟨set.inclusion (set.inter_subset_right A B), continuous_inclusion (set.inter_subset_right A B)⟩
lemma excision_sq_comm {X : Type*} [topological_space X] (A B : set X)
: excision_inner_map A B ≫ excision_include A
= excision_include_inter A B ≫ excision_outer_map A B := by { ext, refl }
def excision_map {X : Type*} [topological_space X] (A B : set X)
: category_theory.arrow.mk (excision_inner_map A B)
⟶ category_theory.arrow.mk (excision_outer_map A B) :=
category_theory.arrow.hom_mk (excision_sq_comm A B).symm
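-- If the cover contains the whole space, the bounded subcomplex is the full complex, so the
-- inclusion is an isomorphism.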
lemma bounded_by_subcomplex_inclusion_iso_of_contains_univ (R : Type) [comm_ring R]
{X : Top} (cov : set (set X)) (h : set.univ ∈ cov)
: category_theory.is_iso (bounded_by_subcomplex_inclusion R cov) :=
begin
apply homological_complex.is_iso_of_degreewise_is_iso, intro i,
dsimp [bounded_by_subcomplex_inclusion, Module.subcomplex_of_compatible_submodules_inclusion],
refine category_theory.is_iso.of_iso (linear_equiv.to_Module_iso' (linear_equiv.of_bijective
((bounded_by_subcomplex_inclusion R cov).f i) _ _)),
{ exact submodule.injective_subtype _ },
{ rw [← set.range_iff_surjective, ← linear_map.range_coe],
refine eq.trans (congr_arg _ _) submodule.top_coe, convert submodule.range_subtype _,
symmetry, rw eq_top_iff,
rw ← subset_subcomplex_univ,
refine subset_subcomplex_le_bounded_by_subcomplex R _ set.univ _ i,
exact h }
end.
-- move this to homological algebra
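-- A square of chain maps is a pushout as soon as it is a pushout in every degree.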
lemma is_pushout_of_is_is_pushout_eval {V : Type*} [category_theory.category V]
[category_theory.limits.has_zero_morphisms V] {ι : Type*} {c : complex_shape ι}
{W X Y Z : homological_complex V c} (f : W ⟶ X) (g : W ⟶ Y) (h : X ⟶ Z) (i : Y ⟶ Z)
(H : ∀ n, category_theory.is_pushout (f.f n) (g.f n) (h.f n) (i.f n))
: category_theory.is_pushout f g h i :=
begin
refine category_theory.is_pushout.of_is_colimit' _ _,
{ constructor, ext n, dsimp, exact (H n).to_comm_sq.w },
{ apply homological_complex.is_colimit_of_is_colimit_eval, intro n,
have functors_eq : category_theory.limits.span f g ⋙ homological_complex.eval V c n
= category_theory.limits.span (f.f n) (g.f n),
{ refine category_theory.functor.hext _ _,
{ intro ℓ, cases ℓ; try { cases ℓ }; refl },
{ intros ℓ ℓ' a, cases a,
{ cases ℓ; try { cases ℓ }; refl },
{ cases a_1; refl } } },
convert (H n).is_colimit,
{ simp [category_theory.comm_sq.cocone, category_theory.is_pushout.cocone,
category_theory.functor.map_cocone, category_theory.limits.cocones.functoriality,
homological_complex.eval],
transitivity { category_theory.limits.cocone .
X := (category_theory.limits.pushout_cocone.mk (h.f n) (i.f n) (H n).to_comm_sq.w).X,
ι := { app := (category_theory.limits.pushout_cocone.mk (h.f n) (i.f n) (H n).to_comm_sq.w).ι.app,
naturality' := (category_theory.limits.pushout_cocone.mk (h.f n) (i.f n) (H n).to_comm_sq.w).ι.naturality' } },
{ congr,
{ assumption },
{ assumption },
{ ext, refl,
intros ℓ ℓ' hℓ, cases hℓ,
cases ℓ; try { cases ℓ }; refl },
{ apply proof_irrel_heq } },
{ apply heq_of_eq, congr } } }
end
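-- A commuting square that is isomorphic to a pushout square is itself a pushout.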
lemma is_pushout_of_iso_pushout {V : Type*} [category_theory.category V]
[category_theory.abelian V]
{X X' A A' B B' Y Y' : V}
(f : X ⟶ A) (g : X ⟶ B) (h : A ⟶ Y) (i : B ⟶ Y)
(f' : X' ⟶ A') (g' : X' ⟶ B') (h' : A' ⟶ Y') (i' : B' ⟶ Y')
(ϕ : X ≅ X') (α : A ≅ A') (β : B ≅ B') (ψ : Y ≅ Y')
(w1 : f ≫ α.hom = ϕ.hom ≫ f') (w2 : g ≫ β.hom = ϕ.hom ≫ g')
(w3 : h ≫ ψ.hom = α.hom ≫ h') (w4 : i ≫ ψ.hom = β.hom ≫ i')
(H : category_theory.is_pushout f' g' h' i') : category_theory.is_pushout f g h i :=
begin
have w : f ≫ h = g ≫ i,
{ rw ← category_theory.iso.eq_comp_inv at w1 w2 w3 w4,
rw [w1, w2, w3, w4],
simp, exact H.to_comm_sq.w },
refine ⟨⟨w⟩, _⟩,
constructor,
let span_iso : category_theory.limits.span f g ≅ category_theory.limits.span f' g'
:= category_theory.limits.span_ext ϕ α β w1.symm w2.symm,
refine category_theory.limits.is_colimit.of_cocone_equiv
(category_theory.limits.cocones.precompose_equivalence span_iso).symm _,
refine category_theory.limits.is_colimit.of_iso_colimit H.is_colimit _,
refine category_theory.limits.cocones.ext ψ.symm _,
intro c, cases c,
{ dsimp [category_theory.is_pushout.cocone, category_theory.comm_sq.cocone],
rw [← category_theory.iso.inv_comp_eq, ← category_theory.category.assoc,
← category_theory.iso.eq_comp_inv] at w1 w3,
rw [category_theory.category.assoc, ← w3, ← category_theory.category.assoc, ← w1],
rw category_theory.category.assoc, refl },
cases c,
{ dsimp [category_theory.is_pushout.cocone, category_theory.comm_sq.cocone],
rw [← category_theory.iso.inv_comp_eq, ← category_theory.category.assoc,
← category_theory.iso.eq_comp_inv] at w3,
exact w3.symm },
{ dsimp [category_theory.is_pushout.cocone, category_theory.comm_sq.cocone],
rw [← category_theory.iso.inv_comp_eq, ← category_theory.category.assoc,
← category_theory.iso.eq_comp_inv] at w4,
exact w4.symm }
end
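-- For submodules A and B of U, the square formed by A ⊓ B → A, A ⊓ B → B, A → A ⊔ B, B → A ⊔ B
-- is a pushout of R-modules.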
lemma Module.sum_is_pushout' (R : Type*) [comm_ring R] {U : Type*}
[add_comm_group U] [module R U] (A B : submodule R U)
: category_theory.is_pushout (Module.of_hom (submodule.of_le (@inf_le_left _ _ A B)))
(Module.of_hom (submodule.of_le (@inf_le_right _ _ A B)))
(Module.of_hom (submodule.of_le (@le_sup_left _ _ A B)))
(Module.of_hom (submodule.of_le (@le_sup_right _ _ A B))) :=
begin
refine ⟨_, _⟩,
{ constructor, ext x, cases x, refl },
{ constructor,
let f : ∀ c : category_theory.limits.pushout_cocone
(Module.of_hom (submodule.of_le (@inf_le_left _ _ A B)))
(Module.of_hom (submodule.of_le (@inf_le_right _ _ A B))),
A → B → c.X := λ c y z, c.inl y + c.inr z,
have hf : ∀ c y hy z hz y' hy' z' hz', y + z = y' + z' → f c ⟨y, hy⟩ ⟨z, hz⟩ = f c ⟨y', hy'⟩ ⟨z', hz'⟩,
{ intros c y hy z hz y' hy' z' hz' H,
dsimp [f],
rw [← eq_sub_iff_add_eq, add_sub_assoc, add_comm, ← sub_eq_iff_eq_add] at H ⊢,
have : y - y' ∈ A ⊓ B,
{ refine ⟨submodule.sub_mem _ hy hy', _⟩,
rw H, exact submodule.sub_mem _ hz' hz },
rw [← map_sub, ← map_sub],
change c.inl ⟨y - y', submodule.sub_mem _ hy hy'⟩ = c.inr ⟨z' - z, submodule.sub_mem _ hz' hz⟩,
simp_rw ← H,
change c.inl (Module.of_hom (submodule.of_le (@inf_le_left _ _ A B)) ⟨y - y', this⟩)
= c.inr (Module.of_hom (submodule.of_le (@inf_le_right _ _ A B)) ⟨y - y', this⟩),
rw [← category_theory.comp_apply, category_theory.limits.pushout_cocone.condition],
refl, },
let g := λ c (x : A ⊔ B), f c ⟨classical.some (submodule.mem_sup.mp x.property),
classical.some (classical.some_spec (submodule.mem_sup.mp x.property))⟩
⟨classical.some (classical.some_spec (classical.some_spec (submodule.mem_sup.mp x.property))),
classical.some (classical.some_spec (classical.some_spec (classical.some_spec (submodule.mem_sup.mp x.property))))⟩,
have g_spec : ∀ c (x : A ⊔ B) y hy z hz, x.val = y + z → g c x = f c ⟨y, hy⟩ ⟨z, hz⟩,
{ rintro c ⟨x, hx⟩ y hy z hz H, apply hf,
refine eq.trans _ H,
exact classical.some_spec (classical.some_spec (classical.some_spec (classical.some_spec (submodule.mem_sup.mp hx)))) },
refine category_theory.limits.pushout_cocone.is_colimit_aux _ _ _ _ _,
{ intro c,
dsimp [category_theory.limits.pushout_cocone.mk],
refine linear_map.mk (g c) _ _,
{ rintro ⟨x1, h1⟩ ⟨x2, h2⟩, rw submodule.mem_sup at h1 h2,
obtain ⟨y1, hy1, z1, hz1, H1⟩ := h1,
obtain ⟨y2, hy2, z2, hz2, H2⟩ := h2,
refine eq.trans (g_spec c _ (y1 + y2) (submodule.add_mem _ hy1 hy2)
(z1 + z2) (submodule.add_mem _ hz1 hz2) _) _,
{ simp, rw [← H1, ← H2], ac_refl },
rw [g_spec c ⟨x1, h1⟩ y1 hy1 z1 hz1 H1.symm, g_spec c ⟨x2, h2⟩ y2 hy2 z2 hz2 H2.symm],
dsimp [f],
rw [add_assoc, add_left_comm (c.inr ⟨z1, hz1⟩), ← add_assoc,
← map_add c.inl, ← map_add c.inr],
refl, },
{ rintros r ⟨x, hx⟩,
rw submodule.mem_sup at hx,
obtain ⟨y, hy, z, hz, H⟩ := hx,
rw [g_spec c ⟨x, hx⟩ y hy z hz H.symm,
g_spec c (r • ⟨x, hx⟩) (r • y) (submodule.smul_mem _ r hy)
(r • z) (submodule.smul_mem _ r hz) _],
{ simp [f], rw [ ← map_smul c.inl, ← map_smul c.inr], refl },
{ rw ← smul_add, rw H, refl } } },
{ intro c, ext x, simp,
refine eq.trans (g_spec c _ x.val x.property 0 (submodule.zero_mem _) _) _,
{ symmetry, exact add_zero x.val },
{ simp [f], exact map_zero c.inr } },
{ intro c, ext x, simp,
refine eq.trans (g_spec c _ 0 (submodule.zero_mem _) x.val x.property _) _,
{ symmetry, exact zero_add x.val },
{ simp [f], exact map_zero c.inl } },
{ intros c m h,
ext x, cases x with x hx, rw submodule.mem_sup at hx,
obtain ⟨y, hy, z, hz, H⟩ := hx,
rw ← ( _ : Module.of_hom (submodule.of_le (@le_sup_left _ _ A B)) ⟨y, hy⟩
+ Module.of_hom (submodule.of_le (@le_sup_right _ _ A B)) ⟨z, hz⟩
= ⟨x, hx⟩),
rw [map_add, map_add],
apply congr_arg2,
{ refine eq.trans _ (g_spec c _ y hy 0 (submodule.zero_mem _) _).symm,
{ transitivity c.inl ⟨y, hy⟩,
{ rw ← category_theory.comp_apply,
refine congr_fun (congr_arg _ _) _,
exact h category_theory.limits.walking_span.left },
{ dsimp [f],
refine eq.trans _ (congr_arg _ (eq.symm (map_zero c.inr))),
exact (add_zero _).symm } },
{ exact (add_zero _).symm } },
{ refine eq.trans _ (g_spec c _ 0 (submodule.zero_mem _) z hz _).symm,
{ transitivity c.inr ⟨z, hz⟩,
{ rw ← category_theory.comp_apply,
refine congr_fun (congr_arg _ _) _,
exact h category_theory.limits.walking_span.right },
{ dsimp [f],
refine eq.trans _ (congr_arg2 has_add.add (eq.symm (map_zero c.inl)) (refl (c.inr ⟨z, hz⟩))),
exact (zero_add _).symm } },
{ exact (zero_add _).symm } },
{ exact subtype.eq H } } }
end
lemma eq_to_hom_apply_heq {C : Type*} [category_theory.category C]
[category_theory.concrete_category C]
{X Y : C} (h : X = Y) (x : X) : @category_theory.eq_to_hom C _ X Y h x == x :=
begin
cases h, apply heq_of_eq, simp
end
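-- A pushout criterion for modules: embed the square into a common module U by injections and
-- check that range i = range j ⊓ range k and range ℓ = range j ⊔ range k.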
lemma Module.sum_is_pushout (R : Type*) [comm_ring R]
{X A B Y : Module R} {f : X ⟶ A} {g : X ⟶ B} {f' : A ⟶ Y} {g' : B ⟶ Y}
(U : Module R) (i : X ⟶ U) (j : A ⟶ U) (k : B ⟶ U) (ℓ : Y ⟶ U)
(hi : function.injective i) (hj : function.injective j)
(hk : function.injective k) (hℓ : function.injective ℓ)
(hf : f ≫ j = i) (hg : g ≫ k = i) (hf' : f' ≫ ℓ = j) (hg' : g' ≫ ℓ = k)
(H : linear_map.range i = linear_map.range j ⊓ linear_map.range k)
(H' : linear_map.range ℓ = linear_map.range j ⊔ linear_map.range k)
: category_theory.is_pushout f g f' g' :=
let i' := (linear_equiv.of_injective i hi).to_Module_iso'_left,
j' := (linear_equiv.of_injective j hj).to_Module_iso'_left,
k' := (linear_equiv.of_injective k hk).to_Module_iso'_left,
ℓ' := (linear_equiv.of_injective ℓ hℓ).to_Module_iso'_left
in
have hij : linear_map.range i ≤ linear_map.range j,
{ rw ← hf, exact le_of_eq_of_le (linear_map.range_comp _ _) linear_map.map_le_range },
have hik : linear_map.range i ≤ linear_map.range k,
{ rw ← hg, exact le_of_eq_of_le (linear_map.range_comp _ _) linear_map.map_le_range },
have hjℓ : linear_map.range j ≤ linear_map.range ℓ,
{ rw ← hf', exact le_of_eq_of_le (linear_map.range_comp _ _) linear_map.map_le_range },
have hkℓ : linear_map.range k ≤ linear_map.range ℓ,
{ rw ← hg', exact le_of_eq_of_le (linear_map.range_comp _ _) linear_map.map_le_range },
begin
have := is_pushout_of_iso_pushout (Module.of_hom (submodule.of_le hij))
(Module.of_hom (submodule.of_le hik))
(Module.of_hom (submodule.of_le hjℓ))
(Module.of_hom (submodule.of_le hkℓ))
_ _ _ _
(category_theory.eq_to_iso _)
(category_theory.iso.refl _) (category_theory.iso.refl _)
(category_theory.eq_to_iso _)
_ _ _ _
(Module.sum_is_pushout' R (linear_map.range j) (linear_map.range k)),
swap, { congr; exact H }, swap, { congr; exact H' },
swap, { ext, dsimp, apply eq_of_heq, congr; try { exact H },
{ ext x, rw [← linear_map.mem_range, ← linear_map.mem_range, ← linear_map.mem_range],
rw [← submodule.mem_inf, H] },
{ symmetry, apply eq_to_hom_apply_heq } },
swap, { ext, dsimp, apply eq_of_heq, congr; try { exact H },
{ ext x, rw [← linear_map.mem_range, ← linear_map.mem_range, ← linear_map.mem_range],
rw [← submodule.mem_inf, H] },
{ symmetry, apply eq_to_hom_apply_heq } },
swap, { ext, cases x with x hx, dsimp [submodule.of_le],
apply eq_of_heq,
transitivity ↑(linear_map.cod_restrict (linear_map.range ℓ) (linear_map.range j).subtype (λ c, hjℓ c.property) ⟨x, hx⟩),
congr; try { ext, rw H', },
{ apply eq_to_hom_apply_heq },
refl },
swap, { ext, cases x with x hx, dsimp [submodule.of_le],
apply eq_of_heq,
transitivity ↑(linear_map.cod_restrict (linear_map.range ℓ) (linear_map.range k).subtype (λ c, hkℓ c.property) ⟨x, hx⟩),
congr; try { ext, rw H', },
{ apply eq_to_hom_apply_heq },
refl },
refine is_pushout_of_iso_pushout _ _ _ _ _ _ _ _ i' j' k' ℓ' _ _ _ _ this,
{ ext x, dsimp [i', j'], rw [← category_theory.comp_apply, hf] },
{ ext x, dsimp [i', k'], rw [← category_theory.comp_apply, hg] },
{ ext x, dsimp [ℓ', j'], rw [← category_theory.comp_apply, hf'] },
{ ext x, dsimp [ℓ', k'], rw [← category_theory.comp_apply, hg'] },
end
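-- Naturality of the standard basis of singular chains: pushing the basis element of σ forward
-- along f gives the basis element of σ ≫ f.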
lemma singular_chain_complex_basis_natural (R : Type*) [comm_ring R] {X Y : Top}
(f : X ⟶ Y) (n : ℕ)
: ((singular_chain_complex R).map f).f n ∘ (singular_chain_complex_basis R n).get_basis X
= (singular_chain_complex_basis R n).get_basis Y ∘ (λ p, ⟨(), p.2 ≫ f⟩) :=
begin
apply funext, rintro ⟨i, σ⟩, cases i,
dsimp,
rw [← simplex_to_chain_is_basis, ← simplex_to_chain_is_basis],
dsimp [simplex_to_chain],
rw singular_chain_complex_map
end
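-- The image in the chains of X of the chains of a subspace S (bounded by a cover containing
-- set.univ) is the submodule spanned by simplices landing in S.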
lemma range_of_singular_chain_complex_include_subspace {X : Type*} [topological_space X]
(R : Type*) [comm_ring R] (S : set X) (cov : set (set S)) (h : set.univ ∈ cov) (n : ℕ)
: (linear_map.dom_restrict (((singular_chain_complex R).map (⟨subtype.val, continuous_subtype_val⟩ : Top.of S ⟶ Top.of X)).f n)
(@bounded_by_submodule R _ (Top.of S) cov n)).range
= subset_submodule R X S n :=
begin
transitivity submodule.map (((singular_chain_complex R).map (⟨subtype.val, continuous_subtype_val⟩ : Top.of S ⟶ Top.of X)).f n)
(@bounded_by_submodule R _ (Top.of S) cov n),
{ ext, simp, split,
{ rintros ⟨⟨y, hy⟩, h'⟩, exact ⟨y, hy, h'⟩ },
{ rintros ⟨y, hy, h'⟩, exact ⟨⟨y, hy⟩, h'⟩ } },
{ refine eq.trans (linear_map.map_span _ _) _,
delta subset_submodule bounded_by_submodule spanned_by_sat,
rw ← set.image_comp,
refine congr_arg _ _,
rw singular_chain_complex_basis_natural,
rw set.image_comp,
congr,
ext, cases x with i σ, cases i,
simp, split,
{ rintro ⟨i, τ, ⟨s, hs, hτ⟩, h⟩, subst h,
refine subset_trans (set.range_comp_subset_range _ _) _,
exact subset_of_eq subtype.range_val, },
{ intro h',
let τ : C(topological_simplex n, S)
:= ⟨(λ p, ⟨σ p, h' (set.mem_range_self p)⟩), _⟩,
{ refine ⟨(), τ, ⟨set.univ, h, set.subset_univ _⟩, _⟩, ext, refl },
{ continuity } } }
end
lemma range_of_bounded_by_subcomplex_inclusion {X : Type*} [topological_space X]
(R : Type*) [comm_ring R] (cov : set (set X)) (n : ℕ)
: linear_map.range ((@bounded_by_subcomplex_inclusion R _ (Top.of X) cov).f n)
= bounded_by_submodule R cov n :=
begin
simp [bounded_by_subcomplex_inclusion, Module.subcomplex_of_compatible_submodules_inclusion],
refl
end
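-- Chains bounded by cov together with chains bounded by cov' span exactly the chains bounded by
-- cov ∪ cov'.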
lemma bounded_by_sup {X : Type*} [topological_space X]
(R : Type*) [comm_ring R] (cov cov' : set (set X)) (n : ℕ)
: @bounded_by_submodule R _ (Top.of X) cov n ⊔ bounded_by_submodule R cov' n
= bounded_by_submodule R (cov ∪ cov') n :=
begin
delta bounded_by_submodule spanned_by_sat,
rw submodule.sup_spans R,
congr,
simp,
rw ← set.image_union,
congr,
ext x, split; intro h,
{ cases h with h,
{ obtain ⟨s, hs1, hs2⟩ := h, exact ⟨s, or.inl hs1, hs2⟩ },
{ obtain ⟨s, hs1, hs2⟩ := h, exact ⟨s, or.inr hs1, hs2⟩ } },
{ obtain ⟨s, h', h''⟩ := h, cases h' with h',
{ left, exact ⟨s, h', h''⟩ },
{ right, exact ⟨s, h', h''⟩ } }
end
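-- Over the zero ring every map of modules is an isomorphism; this disposes of the trivial case
-- in the excision theorem below.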
lemma zero_ring_all_iso (R : Type*) [comm_ring R] (h : (1 : R) = 0) {M N : Module R}
(f : M ⟶ N) : category_theory.is_iso f :=
⟨⟨0, by { simp, ext, change 0 = x, transitivity (1 : R) • x, rw h, simp, simp },
by { simp, ext, change 0 = x, transitivity (1 : R) • x, rw h, simp, simp }⟩⟩.
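-- Excision: for open sets A and B covering X, the inclusion of pairs (A, A ∩ B) → (X, B)
-- induces a quasi-isomorphism of relative singular chain complexes.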
theorem excision {X : Type*} [topological_space X] (R : Type*) [comm_ring R]
(A B : set X) (hA : is_open A) (hB : is_open B) (hCov : A ∪ B = ⊤)
: quasi_iso ((singular_chain_complex_of_pair R).map (excision_map A B)) :=
begin
by_cases htrivial : (1 : R) = (0 : R),
{ constructor, intro, apply zero_ring_all_iso, exact htrivial },
have hnontriv : nontrivial R := ⟨⟨1, 0, htrivial⟩⟩,
letI := singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair_quasi_iso
R (excision_outer_map A B) _ { A, B } _ _,
{ have hA : category_theory.is_iso (@bounded_by_subcomplex_inclusion R _ (Top.of A) {set.univ}),
{ apply bounded_by_subcomplex_inclusion_iso_of_contains_univ, apply set.mem_singleton },
have hInter : category_theory.is_iso (@bounded_by_subcomplex_inclusion R _ (Top.of (A ∩ B : set X))
(pullback_family_of_sets {set.univ} (excision_inner_map A B))),
{ apply bounded_by_subcomplex_inclusion_iso_of_contains_univ, existsi set.univ, simp },
let f1 := singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair R
(category_theory.arrow.mk (excision_inner_map A B)) {set.univ},
have h1 : category_theory.is_iso f1,
{ apply_with category_theory.functor.map_is_iso {instances := ff},
apply_with category_theory.arrow.is_iso_of_iso_left_of_is_iso_right {instances := ff},
exact hInter, exact hA },
letI := h1,
have H : ∀ s, s ∈ {set.univ} → (∃ t, t ∈ {A, B} ∧ excision_include A '' s ⊆ t),
{ intros s hs, existsi A, split, { apply set.mem_insert }, { simp [excision_include] } },
let f2 := singular_chain_complex_of_pair_under_cover_map R (excision_map A B) {set.univ} {A, B} H,
let f3 := (singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair R (excision_outer_map A B) {A, B}),
suffices : category_theory.is_iso f2
∧ category_theory.inv f1 ≫ f2 ≫ f3
= (singular_chain_complex_of_pair R).map (excision_map A B),
{ rw ← this.right, letI := this.left, apply_instance, },
split,
{ let i : Π n, (bounded_by_subcomplex R (pullback_family_of_sets {set.univ} (excision_inner_map A B))).X n
⟶ ((singular_chain_complex R).obj (Top.of X)).X n
:= λ n, linear_map.dom_restrict (((singular_chain_complex R).map (⟨subtype.val, continuous_subtype_val⟩ : Top.of (A ∩ B : set X) ⟶ Top.of X)).f n) _,
let j : Π n, (bounded_by_subcomplex R {set.univ}).X n
⟶ ((singular_chain_complex R).obj (Top.of X)).X n
:= λ n, linear_map.dom_restrict (((singular_chain_complex R).map (⟨subtype.val, continuous_subtype_val⟩ : Top.of A ⟶ Top.of X)).f n) _,
let k : Π n, (bounded_by_subcomplex R (pullback_family_of_sets {A, B} (excision_outer_map A B))).X n
⟶ ((singular_chain_complex R).obj (Top.of X)).X n
:= λ n, linear_map.dom_restrict (((singular_chain_complex R).map (⟨subtype.val, continuous_subtype_val⟩ : Top.of B ⟶ Top.of X)).f n) _,
let ℓ : Π n, (bounded_by_subcomplex R {A, B}).X n
⟶ ((singular_chain_complex R).obj (Top.of X)).X n
:= λ n, (bounded_by_subcomplex_inclusion R {A, B}).f n,
dsimp [f2, singular_chain_complex_of_pair_under_cover_map],
apply coker_of_cocartesian_square_is_iso,
apply is_pushout_of_is_is_pushout_eval, intro n,
refine Module.sum_is_pushout R (((singular_chain_complex R).obj (Top.of X)).X n)
(i n) (j n) (k n) (ℓ n) _ _ _ _ _ _ _ _ _ _,
{ rintros ⟨x, hx⟩ ⟨y, hy⟩ hxy, apply subtype.eq,
exact singular_chain_complex_map_inj R (⟨subtype.val, continuous_subtype_val⟩ : Top.of (A ∩ B : set X) ⟶ Top.of X) subtype.val_injective n hxy },
{ rintros ⟨x, hx⟩ ⟨y, hy⟩ hxy, apply subtype.eq,
exact singular_chain_complex_map_inj R (⟨subtype.val, continuous_subtype_val⟩ : Top.of A ⟶ Top.of X) subtype.val_injective n hxy, },
{ rintros ⟨x, hx⟩ ⟨y, hy⟩ hxy, apply subtype.eq,
exact singular_chain_complex_map_inj R (⟨subtype.val, continuous_subtype_val⟩ : Top.of B ⟶ Top.of X) subtype.val_injective n hxy },
{ rintros ⟨x, hx⟩ ⟨y, hy⟩ hxy, apply subtype.eq, exact hxy },
{ apply linear_map.ext, rintro ⟨x, hx⟩,
rw category_theory.comp_apply,
dsimp [i, j],
dsimp [bounded_by_pullback_chain_inclusion, bounded_by_subcomplex_map,
subcomplex_spanned_by_map],
rw [← category_theory.comp_apply, ← homological_complex.comp_f,
← (singular_chain_complex R).map_comp],
congr, },
{ apply linear_map.ext, rintro ⟨x, hx⟩,
rw category_theory.comp_apply,
dsimp [i, k],
delta bounded_by_subcomplex_map subcomplex_spanned_by_map,
rw [linear_map.cod_restrict_apply, linear_map.dom_restrict_apply,
← category_theory.comp_apply, ← homological_complex.comp_f,
← (singular_chain_complex R).map_comp],
congr },
{ apply linear_map.ext, rintro ⟨x, hx⟩,
rw category_theory.comp_apply,
dsimp [ℓ, j],
rw [← category_theory.comp_apply, ← homological_complex.comp_f],
rw ← cover_inclusion_natural,
rw [homological_complex.comp_f, category_theory.comp_apply],
congr, },
{ apply linear_map.ext, rintro ⟨x, hx⟩,
rw category_theory.comp_apply,
dsimp [ℓ, k],
rw [← category_theory.comp_apply, ← homological_complex.comp_f],
delta bounded_by_pullback_chain_inclusion,
rw ← cover_inclusion_natural,
rw [homological_complex.comp_f, category_theory.comp_apply],
congr, },
{ dsimp [i, j, k],
refine eq.trans (range_of_singular_chain_complex_include_subspace R _ _ _ n) _,
{ exact ⟨set.univ, set.mem_singleton _, set.preimage_univ⟩, },
refine eq.trans _ (congr_arg2 _ (range_of_singular_chain_complex_include_subspace R _ _ _ n).symm
(range_of_singular_chain_complex_include_subspace R _ _ _ n).symm),
delta subset_submodule bounded_by_submodule spanned_by_sat,
rw submodule.inf_spans_free R ((singular_chain_complex_basis R n).get_basis (Top.of X))
_ _ (set.image_subset_range _ _) (set.image_subset_range _ _),
rw [set.image_inter (@basis.injective _ R _ _ _ _
((singular_chain_complex_basis R n).get_basis (Top.of X))
hnontriv)],
congr,
simp, ext x, split; intro h,
{ exact ⟨subset_trans h (set.inter_subset_left A B),
subset_trans h (set.inter_subset_right A B)⟩ },
{ exact set.subset_inter h.left h.right },
{ exact eq.refl set.univ },
{ refine ⟨B, _, _⟩,
{ rw set.pair_comm, apply set.mem_insert },
{ apply set.eq_univ_of_univ_subset,
rw ← set.image_subset_iff,
rw set.image_univ,
exact subset_of_eq subtype.range_val, } } },
{ dsimp [ℓ, j, k],
refine eq.trans (range_of_bounded_by_subcomplex_inclusion R _ n) _,
refine eq.trans _ (congr_arg2 _ (range_of_singular_chain_complex_include_subspace R _ _ _ n).symm
(range_of_singular_chain_complex_include_subspace R _ _ _ n).symm),
delta subset_submodule,
rw bounded_by_sup,
congr,
{ exact eq.refl set.univ },
{ refine ⟨B, _, _⟩,
{ rw set.pair_comm, apply set.mem_insert },
{ apply set.eq_univ_of_univ_subset,
rw ← set.image_subset_iff,
rw set.image_univ,
exact subset_of_eq subtype.range_val, } } }, },
{ rw category_theory.is_iso.inv_comp_eq,
dsimp [f2, f3, f1],
apply singular_chain_complex_of_pair_under_cover_to_singular_chain_complex_of_pair_naturality } },
{ rintros ⟨x, hx⟩ ⟨y, hy⟩ hxy, ext, exact hxy },
{ simp, exact ⟨hA, hB⟩ },
{ simp, exact hCov }
end.
|
module Idrboot
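-- Reads the bootstrap Lisp source and wraps it in a `Provider`; presumably this is consumed by a
-- `%provide` directive elsewhere in the project.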
export
readBootCode : IO (Provider String)
readBootCode =
case !(readFile "../rosetta-lisp/boot.lisp") of
Right r => pure $ Provide r
Left err => pure $ Error $ show err
|
/-
Copyright (c) 2015 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Robert Y. Lewis
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.group_power.basic
import Mathlib.algebra.opposites
import Mathlib.data.list.basic
import Mathlib.data.int.cast
import Mathlib.data.equiv.basic
import Mathlib.data.equiv.mul_add
import Mathlib.deprecated.group
import Mathlib.PostPort
universes y u w x z u₁ u_1
namespace Mathlib
/-!
# Lemmas about power operations on monoids and groups
This file contains lemmas about `monoid.pow`, `group.pow`, `nsmul`, `gsmul`
which require additional imports besides those available in `.basic`.
-/
/-!
### (Additive) monoid
-/
@[simp] theorem nsmul_one {A : Type y} [add_monoid A] [HasOne A] (n : ℕ) : n •ℕ 1 = ↑n :=
add_monoid_hom.eq_nat_cast
(add_monoid_hom.mk (fun (n : ℕ) => n •ℕ 1) (zero_nsmul 1) fun (_x _x_1 : ℕ) => add_nsmul 1 _x _x_1) (one_nsmul 1)
@[simp] theorem list.prod_repeat {M : Type u} [monoid M] (a : M) (n : ℕ) : list.prod (list.repeat a n) = a ^ n := sorry
@[simp] theorem list.sum_repeat {A : Type y} [add_monoid A] (a : A) (n : ℕ) : list.sum (list.repeat a n) = n •ℕ a :=
list.prod_repeat
@[simp] theorem units.coe_pow {M : Type u} [monoid M] (u : units M) (n : ℕ) : ↑(u ^ n) = ↑u ^ n :=
monoid_hom.map_pow (units.coe_hom M) u n
theorem is_unit_of_pow_eq_one {M : Type u} [monoid M] (x : M) (n : ℕ) (hx : x ^ n = 1) (hn : 0 < n) : is_unit x := sorry
theorem nat.nsmul_eq_mul (m : ℕ) (n : ℕ) : m •ℕ n = m * n := sorry
theorem gsmul_one {A : Type y} [add_group A] [HasOne A] (n : ℤ) : n •ℤ 1 = ↑n := sorry
theorem gpow_add_one {G : Type w} [group G] (a : G) (n : ℤ) : a ^ (n + 1) = a ^ n * a := sorry
theorem add_one_gsmul {A : Type y} [add_group A] (a : A) (i : ℤ) : (i + 1) •ℤ a = i •ℤ a + a :=
gpow_add_one
theorem gpow_sub_one {G : Type w} [group G] (a : G) (n : ℤ) : a ^ (n - 1) = a ^ n * (a⁻¹) := sorry
theorem gpow_add {G : Type w} [group G] (a : G) (m : ℤ) (n : ℤ) : a ^ (m + n) = a ^ m * a ^ n := sorry
theorem mul_self_gpow {G : Type w} [group G] (b : G) (m : ℤ) : b * b ^ m = b ^ (m + 1) := sorry
theorem mul_gpow_self {G : Type w} [group G] (b : G) (m : ℤ) : b ^ m * b = b ^ (m + 1) := sorry
theorem add_gsmul {A : Type y} [add_group A] (a : A) (i : ℤ) (j : ℤ) : (i + j) •ℤ a = i •ℤ a + j •ℤ a :=
gpow_add
theorem gpow_sub {G : Type w} [group G] (a : G) (m : ℤ) (n : ℤ) : a ^ (m - n) = a ^ m * (a ^ n⁻¹) := sorry
theorem sub_gsmul {A : Type y} [add_group A] (m : ℤ) (n : ℤ) (a : A) : (m - n) •ℤ a = m •ℤ a - n •ℤ a := sorry
theorem gpow_one_add {G : Type w} [group G] (a : G) (i : ℤ) : a ^ (1 + i) = a * a ^ i :=
eq.mpr (id (Eq._oldrec (Eq.refl (a ^ (1 + i) = a * a ^ i)) (gpow_add a 1 i)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ^ 1 * a ^ i = a * a ^ i)) (gpow_one a))) (Eq.refl (a * a ^ i)))
theorem one_add_gsmul {A : Type y} [add_group A] (a : A) (i : ℤ) : (1 + i) •ℤ a = a + i •ℤ a :=
gpow_one_add
theorem gpow_mul_comm {G : Type w} [group G] (a : G) (i : ℤ) (j : ℤ) : a ^ i * a ^ j = a ^ j * a ^ i :=
eq.mpr (id (Eq._oldrec (Eq.refl (a ^ i * a ^ j = a ^ j * a ^ i)) (Eq.symm (gpow_add a i j))))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ^ (i + j) = a ^ j * a ^ i)) (Eq.symm (gpow_add a j i))))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ^ (i + j) = a ^ (j + i))) (add_comm i j))) (Eq.refl (a ^ (j + i)))))
theorem gsmul_add_comm {A : Type y} [add_group A] (a : A) (i : ℤ) (j : ℤ) : i •ℤ a + j •ℤ a = j •ℤ a + i •ℤ a :=
gpow_mul_comm
theorem gpow_mul {G : Type w} [group G] (a : G) (m : ℤ) (n : ℤ) : a ^ (m * n) = (a ^ m) ^ n := sorry
theorem gsmul_mul' {A : Type y} [add_group A] (a : A) (m : ℤ) (n : ℤ) : m * n •ℤ a = n •ℤ (m •ℤ a) :=
gpow_mul
theorem gpow_mul' {G : Type w} [group G] (a : G) (m : ℤ) (n : ℤ) : a ^ (m * n) = (a ^ n) ^ m :=
eq.mpr (id (Eq._oldrec (Eq.refl (a ^ (m * n) = (a ^ n) ^ m)) (mul_comm m n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ^ (n * m) = (a ^ n) ^ m)) (gpow_mul a n m))) (Eq.refl ((a ^ n) ^ m)))
theorem gsmul_mul {A : Type y} [add_group A] (a : A) (m : ℤ) (n : ℤ) : m * n •ℤ a = m •ℤ (n •ℤ a) :=
eq.mpr (id (Eq._oldrec (Eq.refl (m * n •ℤ a = m •ℤ (n •ℤ a))) (mul_comm m n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (n * m •ℤ a = m •ℤ (n •ℤ a))) (gsmul_mul' a n m))) (Eq.refl (m •ℤ (n •ℤ a))))
theorem gpow_bit0 {G : Type w} [group G] (a : G) (n : ℤ) : a ^ bit0 n = a ^ n * a ^ n :=
gpow_add a n n
theorem bit0_gsmul {A : Type y} [add_group A] (a : A) (n : ℤ) : bit0 n •ℤ a = n •ℤ a + n •ℤ a :=
gpow_add a n n
theorem gpow_bit1 {G : Type w} [group G] (a : G) (n : ℤ) : a ^ bit1 n = a ^ n * a ^ n * a := sorry
theorem bit1_gsmul {A : Type y} [add_group A] (a : A) (n : ℤ) : bit1 n •ℤ a = n •ℤ a + n •ℤ a + a :=
gpow_bit1
@[simp] theorem monoid_hom.map_gpow {G : Type w} {H : Type x} [group G] [group H] (f : G →* H) (a : G) (n : ℤ) : coe_fn f (a ^ n) = coe_fn f a ^ n :=
int.cases_on n (fun (n : ℕ) => monoid_hom.map_pow f a n)
fun (n : ℕ) =>
Eq.trans (monoid_hom.map_inv f (a ^ Nat.succ n)) (congr_arg has_inv.inv (monoid_hom.map_pow f a (Nat.succ n)))
@[simp] theorem add_monoid_hom.map_gsmul {A : Type y} {B : Type z} [add_group A] [add_group B] (f : A →+ B) (a : A) (n : ℤ) : coe_fn f (n •ℤ a) = n •ℤ coe_fn f a :=
monoid_hom.map_gpow (coe_fn add_monoid_hom.to_multiplicative f) a n
@[simp] theorem units.coe_gpow {G : Type w} [group G] (u : units G) (n : ℤ) : ↑(u ^ n) = ↑u ^ n :=
monoid_hom.map_gpow (units.coe_hom G) u n
/-! Lemmas about `gsmul` under ordering, placed here (rather than in `algebra.group_power.basic`
with their friends) because they require facts from `data.int.basic`. -/
theorem gsmul_pos {A : Type y} [ordered_add_comm_group A] {a : A} (ha : 0 < a) {k : ℤ} (hk : 0 < k) : 0 < k •ℤ a := sorry
theorem gsmul_le_gsmul {A : Type y} [ordered_add_comm_group A] {a : A} {n : ℤ} {m : ℤ} (ha : 0 ≤ a) (h : n ≤ m) : n •ℤ a ≤ m •ℤ a := sorry
theorem gsmul_lt_gsmul {A : Type y} [ordered_add_comm_group A] {a : A} {n : ℤ} {m : ℤ} (ha : 0 < a) (h : n < m) : n •ℤ a < m •ℤ a := sorry
theorem gsmul_le_gsmul_iff {A : Type y} [linear_ordered_add_comm_group A] {a : A} {n : ℤ} {m : ℤ} (ha : 0 < a) : n •ℤ a ≤ m •ℤ a ↔ n ≤ m := sorry
theorem gsmul_lt_gsmul_iff {A : Type y} [linear_ordered_add_comm_group A] {a : A} {n : ℤ} {m : ℤ} (ha : 0 < a) : n •ℤ a < m •ℤ a ↔ n < m := sorry
theorem nsmul_le_nsmul_iff {A : Type y} [linear_ordered_add_comm_group A] {a : A} {n : ℕ} {m : ℕ} (ha : 0 < a) : n •ℕ a ≤ m •ℕ a ↔ n ≤ m := sorry
theorem nsmul_lt_nsmul_iff {A : Type y} [linear_ordered_add_comm_group A] {a : A} {n : ℕ} {m : ℕ} (ha : 0 < a) : n •ℕ a < m •ℕ a ↔ n < m := sorry
@[simp] theorem with_bot.coe_nsmul {A : Type y} [add_monoid A] (a : A) (n : ℕ) : ↑(n •ℕ a) = n •ℕ ↑a :=
add_monoid_hom.map_nsmul (add_monoid_hom.mk coe with_bot.coe_zero with_bot.coe_add) a n
theorem nsmul_eq_mul' {R : Type u₁} [semiring R] (a : R) (n : ℕ) : n •ℕ a = a * ↑n := sorry
@[simp] theorem nsmul_eq_mul {R : Type u₁} [semiring R] (n : ℕ) (a : R) : n •ℕ a = ↑n * a :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℕ a = ↑n * a)) (nsmul_eq_mul' a n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a * ↑n = ↑n * a)) (commute.eq (nat.cast_commute n a)))) (Eq.refl (a * ↑n)))
theorem mul_nsmul_left {R : Type u₁} [semiring R] (a : R) (b : R) (n : ℕ) : n •ℕ (a * b) = a * (n •ℕ b) :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℕ (a * b) = a * (n •ℕ b))) (nsmul_eq_mul' (a * b) n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a * b * ↑n = a * (n •ℕ b))) (nsmul_eq_mul' b n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a * b * ↑n = a * (b * ↑n))) (mul_assoc a b ↑n))) (Eq.refl (a * (b * ↑n)))))
theorem mul_nsmul_assoc {R : Type u₁} [semiring R] (a : R) (b : R) (n : ℕ) : n •ℕ (a * b) = n •ℕ a * b :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℕ (a * b) = n •ℕ a * b)) (nsmul_eq_mul n (a * b))))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑n * (a * b) = n •ℕ a * b)) (nsmul_eq_mul n a)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑n * (a * b) = ↑n * a * b)) (mul_assoc (↑n) a b))) (Eq.refl (↑n * (a * b)))))
@[simp] theorem nat.cast_pow {R : Type u₁} [semiring R] (n : ℕ) (m : ℕ) : ↑(n ^ m) = ↑n ^ m := sorry
@[simp] theorem int.coe_nat_pow (n : ℕ) (m : ℕ) : ↑(n ^ m) = ↑n ^ m := sorry
theorem int.nat_abs_pow (n : ℤ) (k : ℕ) : int.nat_abs (n ^ k) = int.nat_abs n ^ k := sorry
-- The next four lemmas allow us to replace multiplication by a numeral with a `gsmul` expression.
-- They are used by the `noncomm_ring` tactic, to normalise expressions before passing to `abel`.
theorem bit0_mul {R : Type u₁} [ring R] {n : R} {r : R} : bit0 n * r = bit0 1 •ℤ (n * r) := sorry
theorem mul_bit0 {R : Type u₁} [ring R] {n : R} {r : R} : r * bit0 n = bit0 1 •ℤ (r * n) := sorry
theorem bit1_mul {R : Type u₁} [ring R] {n : R} {r : R} : bit1 n * r = bit0 1 •ℤ (n * r) + r := sorry
theorem mul_bit1 {R : Type u₁} [ring R] {n : R} {r : R} : r * bit1 n = bit0 1 •ℤ (r * n) + r := sorry
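-- For example, with `n = 1` the first of these lemmas reads `bit0 1 * r = bit0 1 •ℤ (1 * r)`,
-- i.e. the numeral coefficient in `2 * r` is moved into a `gsmul`, which `abel` can then normalise.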
@[simp] theorem gsmul_eq_mul {R : Type u₁} [ring R] (a : R) (n : ℤ) : n •ℤ a = ↑n * a := sorry
theorem gsmul_eq_mul' {R : Type u₁} [ring R] (a : R) (n : ℤ) : n •ℤ a = a * ↑n :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℤ a = a * ↑n)) (gsmul_eq_mul a n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑n * a = a * ↑n)) (commute.eq (int.cast_commute n a)))) (Eq.refl (a * ↑n)))
theorem mul_gsmul_left {R : Type u₁} [ring R] (a : R) (b : R) (n : ℤ) : n •ℤ (a * b) = a * (n •ℤ b) :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℤ (a * b) = a * (n •ℤ b))) (gsmul_eq_mul' (a * b) n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a * b * ↑n = a * (n •ℤ b))) (gsmul_eq_mul' b n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a * b * ↑n = a * (b * ↑n))) (mul_assoc a b ↑n))) (Eq.refl (a * (b * ↑n)))))
theorem mul_gsmul_assoc {R : Type u₁} [ring R] (a : R) (b : R) (n : ℤ) : n •ℤ (a * b) = n •ℤ a * b :=
eq.mpr (id (Eq._oldrec (Eq.refl (n •ℤ (a * b) = n •ℤ a * b)) (gsmul_eq_mul (a * b) n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑n * (a * b) = n •ℤ a * b)) (gsmul_eq_mul a n)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑n * (a * b) = ↑n * a * b)) (mul_assoc (↑n) a b))) (Eq.refl (↑n * (a * b)))))
@[simp] theorem gsmul_int_int (a : ℤ) (b : ℤ) : a •ℤ b = a * b := sorry
theorem gsmul_int_one (n : ℤ) : n •ℤ 1 = n := sorry
@[simp] theorem int.cast_pow {R : Type u₁} [ring R] (n : ℤ) (m : ℕ) : ↑(n ^ m) = ↑n ^ m := sorry
theorem neg_one_pow_eq_pow_mod_two {R : Type u₁} [ring R] {n : ℕ} : (-1) ^ n = (-1) ^ (n % bit0 1) := sorry
/-- Bernoulli's inequality. This version works for semirings but requires
additional hypotheses `0 ≤ a * a` and `0 ≤ (1 + a) * (1 + a)`. -/
theorem one_add_mul_le_pow' {R : Type u₁} [ordered_semiring R] {a : R} (Hsqr : 0 ≤ a * a) (Hsqr' : 0 ≤ (1 + a) * (1 + a)) (H : 0 ≤ bit0 1 + a) (n : ℕ) : 1 + ↑n * a ≤ (1 + a) ^ n := sorry
theorem pow_lt_pow_of_lt_one {R : Type u₁} [ordered_semiring R] {a : R} (h : 0 < a) (ha : a < 1) {i : ℕ} {j : ℕ} (hij : i < j) : a ^ j < a ^ i := sorry
theorem pow_lt_pow_iff_of_lt_one {R : Type u₁} [ordered_semiring R] {a : R} {n : ℕ} {m : ℕ} (hpos : 0 < a) (h : a < 1) : a ^ m < a ^ n ↔ n < m :=
strict_mono.lt_iff_lt fun (m n : order_dual ℕ) => pow_lt_pow_of_lt_one hpos h
theorem pow_le_pow_of_le_one {R : Type u₁} [ordered_semiring R] {a : R} (h : 0 ≤ a) (ha : a ≤ 1) {i : ℕ} {j : ℕ} (hij : i ≤ j) : a ^ j ≤ a ^ i := sorry
theorem pow_le_one {R : Type u₁} [ordered_semiring R] {x : R} (n : ℕ) (h0 : 0 ≤ x) (h1 : x ≤ 1) : x ^ n ≤ 1 := sorry
theorem sign_cases_of_C_mul_pow_nonneg {R : Type u₁} [linear_ordered_semiring R] {C : R} {r : R} (h : ∀ (n : ℕ), 0 ≤ C * r ^ n) : C = 0 ∨ 0 < C ∧ 0 ≤ r := sorry
@[simp] theorem abs_pow {R : Type u₁} [linear_ordered_ring R] (a : R) (n : ℕ) : abs (a ^ n) = abs a ^ n :=
monoid_hom.map_pow (monoid_with_zero_hom.to_monoid_hom abs_hom) a n
@[simp] theorem pow_bit1_neg_iff {R : Type u₁} [linear_ordered_ring R] {a : R} {n : ℕ} : a ^ bit1 n < 0 ↔ a < 0 :=
{ mp := fun (h : a ^ bit1 n < 0) => iff.mp not_le fun (h' : 0 ≤ a) => iff.mpr not_le h (pow_nonneg h' (bit1 n)),
mpr := fun (h : a < 0) => mul_neg_of_neg_of_pos h (pow_bit0_pos (has_lt.lt.ne h) n) }
@[simp] theorem pow_bit1_nonneg_iff {R : Type u₁} [linear_ordered_ring R] {a : R} {n : ℕ} : 0 ≤ a ^ bit1 n ↔ 0 ≤ a :=
iff.mpr le_iff_le_iff_lt_iff_lt pow_bit1_neg_iff
@[simp] theorem pow_bit1_nonpos_iff {R : Type u₁} [linear_ordered_ring R] {a : R} {n : ℕ} : a ^ bit1 n ≤ 0 ↔ a ≤ 0 := sorry
@[simp] theorem pow_bit1_pos_iff {R : Type u₁} [linear_ordered_ring R] {a : R} {n : ℕ} : 0 < a ^ bit1 n ↔ 0 < a :=
lt_iff_lt_of_le_iff_le pow_bit1_nonpos_iff
theorem strict_mono_pow_bit1 {R : Type u₁} [linear_ordered_ring R] (n : ℕ) : strict_mono fun (a : R) => a ^ bit1 n := sorry
/-- Bernoulli's inequality for `n : ℕ`, `-2 ≤ a`. -/
theorem one_add_mul_le_pow {R : Type u₁} [linear_ordered_ring R] {a : R} (H : -bit0 1 ≤ a) (n : ℕ) : 1 + ↑n * a ≤ (1 + a) ^ n :=
one_add_mul_le_pow' (mul_self_nonneg a) (mul_self_nonneg (1 + a)) (iff.mp neg_le_iff_add_nonneg' H) n
/-- Bernoulli's inequality reformulated to estimate `a^n`. -/
theorem one_add_mul_sub_le_pow {R : Type u₁} [linear_ordered_ring R] {a : R} (H : -1 ≤ a) (n : ℕ) : 1 + ↑n * (a - 1) ≤ a ^ n := sorry
/-- Bernoulli's inequality reformulated to estimate `(n : K)`. -/
theorem nat.cast_le_pow_sub_div_sub {K : Type u_1} [linear_ordered_field K] {a : K} (H : 1 < a) (n : ℕ) : ↑n ≤ (a ^ n - 1) / (a - 1) :=
iff.mpr (le_div_iff (iff.mpr sub_pos H))
(le_sub_left_of_add_le (one_add_mul_sub_le_pow (has_le.le.trans (neg_le_self zero_le_one) (has_lt.lt.le H)) n))
/-- For any `a > 1` and a natural `n` we have `n ≤ a ^ n / (a - 1)`. See also
`nat.cast_le_pow_sub_div_sub` for a stronger inequality with `a ^ n - 1` in the numerator. -/
theorem nat.cast_le_pow_div_sub {K : Type u_1} [linear_ordered_field K] {a : K} (H : 1 < a) (n : ℕ) : ↑n ≤ a ^ n / (a - 1) :=
has_le.le.trans (nat.cast_le_pow_sub_div_sub H n)
(div_le_div_of_le (iff.mpr sub_nonneg (has_lt.lt.le H)) (sub_le_self (a ^ n) zero_le_one))
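-- As an informal sanity check, taking `a = 1` in `one_add_mul_le_pow` (so that `-2 ≤ a` holds)
-- gives `1 + n ≤ 2 ^ n`, and `nat.cast_le_pow_sub_div_sub` with `a = 2` specialises to
-- `(n : K) ≤ (2 ^ n - 1) / (2 - 1) = 2 ^ n - 1`.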
namespace int
theorem units_pow_two (u : units ℤ) : u ^ bit0 1 = 1 :=
Eq.symm (pow_two u) ▸ units_mul_self u
theorem units_pow_eq_pow_mod_two (u : units ℤ) (n : ℕ) : u ^ n = u ^ (n % bit0 1) := sorry
@[simp] theorem nat_abs_pow_two (x : ℤ) : ↑(nat_abs x) ^ bit0 1 = x ^ bit0 1 :=
eq.mpr (id (Eq._oldrec (Eq.refl (↑(nat_abs x) ^ bit0 1 = x ^ bit0 1)) (pow_two ↑(nat_abs x))))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑(nat_abs x) * ↑(nat_abs x) = x ^ bit0 1)) (nat_abs_mul_self' x)))
(eq.mpr (id (Eq._oldrec (Eq.refl (x * x = x ^ bit0 1)) (pow_two x))) (Eq.refl (x * x))))
theorem abs_le_self_pow_two (a : ℤ) : ↑(nat_abs a) ≤ a ^ bit0 1 := sorry
theorem le_self_pow_two (b : ℤ) : b ≤ b ^ bit0 1 :=
le_trans le_nat_abs (abs_le_self_pow_two b)
end int
/-- Monoid homomorphisms from `multiplicative ℕ` are defined by the image
of `multiplicative.of_add 1`. -/
def powers_hom (M : Type u) [monoid M] : M ≃ (multiplicative ℕ →* M) :=
equiv.mk
(fun (x : M) => monoid_hom.mk (fun (n : multiplicative ℕ) => x ^ coe_fn multiplicative.to_add n) (pow_zero x) sorry)
(fun (f : multiplicative ℕ →* M) => coe_fn f (coe_fn multiplicative.of_add 1)) pow_one sorry
/-- Monoid homomorphisms from `multiplicative ℤ` are defined by the image
of `multiplicative.of_add 1`. -/
def gpowers_hom (G : Type w) [group G] : G ≃ (multiplicative ℤ →* G) :=
equiv.mk
(fun (x : G) => monoid_hom.mk (fun (n : multiplicative ℤ) => x ^ coe_fn multiplicative.to_add n) (gpow_zero x) sorry)
(fun (f : multiplicative ℤ →* G) => coe_fn f (coe_fn multiplicative.of_add 1)) gpow_one sorry
/-- Additive homomorphisms from `ℕ` are defined by the image of `1`. -/
def multiples_hom (A : Type y) [add_monoid A] : A ≃ (ℕ →+ A) :=
equiv.mk (fun (x : A) => add_monoid_hom.mk (fun (n : ℕ) => n •ℕ x) (zero_nsmul x) sorry)
(fun (f : ℕ →+ A) => coe_fn f 1) one_nsmul sorry
/-- Additive homomorphisms from `ℤ` are defined by the image of `1`. -/
def gmultiples_hom (A : Type y) [add_group A] : A ≃ (ℤ →+ A) :=
equiv.mk (fun (x : A) => add_monoid_hom.mk (fun (n : ℤ) => n •ℤ x) (zero_gsmul x) sorry)
(fun (f : ℤ →+ A) => coe_fn f 1) one_gsmul sorry
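-- Concretely, `powers_hom M` sends `x : M` to the homomorphism `multiplicative.of_add n ↦ x ^ n`,
-- and in the other direction a homomorphism is recovered from its value at
-- `multiplicative.of_add 1`; `gpowers_hom`, `multiples_hom` and `gmultiples_hom` above are the
-- `ℤ`-indexed and additive analogues of the same correspondence.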
@[simp] theorem powers_hom_apply {M : Type u} [monoid M] (x : M) (n : multiplicative ℕ) : coe_fn (coe_fn (powers_hom M) x) n = x ^ coe_fn multiplicative.to_add n :=
rfl
@[simp] theorem powers_hom_symm_apply {M : Type u} [monoid M] (f : multiplicative ℕ →* M) : coe_fn (equiv.symm (powers_hom M)) f = coe_fn f (coe_fn multiplicative.of_add 1) :=
rfl
@[simp] theorem gpowers_hom_apply {G : Type w} [group G] (x : G) (n : multiplicative ℤ) : coe_fn (coe_fn (gpowers_hom G) x) n = x ^ coe_fn multiplicative.to_add n :=
rfl
@[simp] theorem gpowers_hom_symm_apply {G : Type w} [group G] (f : multiplicative ℤ →* G) : coe_fn (equiv.symm (gpowers_hom G)) f = coe_fn f (coe_fn multiplicative.of_add 1) :=
rfl
@[simp] theorem multiples_hom_apply {A : Type y} [add_monoid A] (x : A) (n : ℕ) : coe_fn (coe_fn (multiples_hom A) x) n = n •ℕ x :=
rfl
@[simp] theorem multiples_hom_symm_apply {A : Type y} [add_monoid A] (f : ℕ →+ A) : coe_fn (equiv.symm (multiples_hom A)) f = coe_fn f 1 :=
rfl
@[simp] theorem gmultiples_hom_apply {A : Type y} [add_group A] (x : A) (n : ℤ) : coe_fn (coe_fn (gmultiples_hom A) x) n = n •ℤ x :=
rfl
@[simp] theorem gmultiples_hom_symm_apply {A : Type y} [add_group A] (f : ℤ →+ A) : coe_fn (equiv.symm (gmultiples_hom A)) f = coe_fn f 1 :=
rfl
theorem monoid_hom.apply_mnat {M : Type u} [monoid M] (f : multiplicative ℕ →* M) (n : multiplicative ℕ) : coe_fn f n = coe_fn f (coe_fn multiplicative.of_add 1) ^ coe_fn multiplicative.to_add n := sorry
theorem monoid_hom.ext_mnat {M : Type u} [monoid M] {f : multiplicative ℕ →* M} {g : multiplicative ℕ →* M} (h : coe_fn f (coe_fn multiplicative.of_add 1) = coe_fn g (coe_fn multiplicative.of_add 1)) : f = g := sorry
theorem monoid_hom.apply_mint {M : Type u} [group M] (f : multiplicative ℤ →* M) (n : multiplicative ℤ) : coe_fn f n = coe_fn f (coe_fn multiplicative.of_add 1) ^ coe_fn multiplicative.to_add n := sorry
theorem monoid_hom.ext_mint {M : Type u} [group M] {f : multiplicative ℤ →* M} {g : multiplicative ℤ →* M} (h : coe_fn f (coe_fn multiplicative.of_add 1) = coe_fn g (coe_fn multiplicative.of_add 1)) : f = g := sorry
theorem add_monoid_hom.apply_nat {M : Type u} [add_monoid M] (f : ℕ →+ M) (n : ℕ) : coe_fn f n = n •ℕ coe_fn f 1 := sorry
/-! `add_monoid_hom.ext_nat` is defined in `data.nat.cast` -/
theorem add_monoid_hom.apply_int {M : Type u} [add_group M] (f : ℤ →+ M) (n : ℤ) : coe_fn f n = n •ℤ coe_fn f 1 := sorry
/-! `add_monoid_hom.ext_int` is defined in `data.int.cast` -/
/-- If `M` is commutative, `powers_hom` is a multiplicative equivalence. -/
def powers_mul_hom (M : Type u) [comm_monoid M] : M ≃* (multiplicative ℕ →* M) :=
mul_equiv.mk (equiv.to_fun (powers_hom M)) (equiv.inv_fun (powers_hom M)) sorry sorry sorry
/-- If `G` is commutative, `gpowers_hom` is a multiplicative equivalence. -/
def gpowers_mul_hom (G : Type w) [comm_group G] : G ≃* (multiplicative ℤ →* G) :=
mul_equiv.mk (equiv.to_fun (gpowers_hom G)) (equiv.inv_fun (gpowers_hom G)) sorry sorry sorry
/-- If `A` is commutative, `multiples_hom` is an additive equivalence. -/
def multiples_add_hom (A : Type y) [add_comm_monoid A] : A ≃+ (ℕ →+ A) :=
add_equiv.mk (equiv.to_fun (multiples_hom A)) (equiv.inv_fun (multiples_hom A)) sorry sorry sorry
/-- If `A` is commutative, `gmultiples_hom` is an additive equivalence. -/
def gmultiples_add_hom (A : Type y) [add_comm_group A] : A ≃+ (ℤ →+ A) :=
add_equiv.mk (equiv.to_fun (gmultiples_hom A)) (equiv.inv_fun (gmultiples_hom A)) sorry sorry sorry
@[simp] theorem powers_mul_hom_apply {M : Type u} [comm_monoid M] (x : M) (n : multiplicative ℕ) : coe_fn (coe_fn (powers_mul_hom M) x) n = x ^ coe_fn multiplicative.to_add n :=
rfl
@[simp] theorem powers_mul_hom_symm_apply {M : Type u} [comm_monoid M] (f : multiplicative ℕ →* M) : coe_fn (mul_equiv.symm (powers_mul_hom M)) f = coe_fn f (coe_fn multiplicative.of_add 1) :=
rfl
@[simp] theorem gpowers_mul_hom_apply {G : Type w} [comm_group G] (x : G) (n : multiplicative ℤ) : coe_fn (coe_fn (gpowers_mul_hom G) x) n = x ^ coe_fn multiplicative.to_add n :=
rfl
@[simp] theorem gpowers_mul_hom_symm_apply {G : Type w} [comm_group G] (f : multiplicative ℤ →* G) : coe_fn (mul_equiv.symm (gpowers_mul_hom G)) f = coe_fn f (coe_fn multiplicative.of_add 1) :=
rfl
@[simp] theorem multiples_add_hom_apply {A : Type y} [add_comm_monoid A] (x : A) (n : ℕ) : coe_fn (coe_fn (multiples_add_hom A) x) n = n •ℕ x :=
rfl
@[simp] theorem multiples_add_hom_symm_apply {A : Type y} [add_comm_monoid A] (f : ℕ →+ A) : coe_fn (add_equiv.symm (multiples_add_hom A)) f = coe_fn f 1 :=
rfl
@[simp] theorem gmultiples_add_hom_apply {A : Type y} [add_comm_group A] (x : A) (n : ℤ) : coe_fn (coe_fn (gmultiples_add_hom A) x) n = n •ℤ x :=
rfl
@[simp] theorem gmultiples_add_hom_symm_apply {A : Type y} [add_comm_group A] (f : ℤ →+ A) : coe_fn (add_equiv.symm (gmultiples_add_hom A)) f = coe_fn f 1 :=
rfl
/-!
### Commutativity (again)
Facts about `semiconj_by` and `commute` that require `gpow` or `gsmul`, or the fact that integer
multiplication equals semiring multiplication.
-/
namespace semiconj_by
@[simp] theorem cast_nat_mul_right {R : Type u₁} [semiring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (n : ℕ) : semiconj_by a (↑n * x) (↑n * y) :=
mul_right (nat.commute_cast a n) h
@[simp] theorem cast_nat_mul_left {R : Type u₁} [semiring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (n : ℕ) : semiconj_by (↑n * a) x y :=
mul_left (nat.cast_commute n y) h
@[simp] theorem cast_nat_mul_cast_nat_mul {R : Type u₁} [semiring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (m : ℕ) (n : ℕ) : semiconj_by (↑m * a) (↑n * x) (↑n * y) :=
cast_nat_mul_right (cast_nat_mul_left h m) n
@[simp] theorem units_gpow_right {M : Type u} [monoid M] {a : M} {x : units M} {y : units M} (h : semiconj_by a ↑x ↑y) (m : ℤ) : semiconj_by a ↑(x ^ m) ↑(y ^ m) := sorry
@[simp] theorem cast_int_mul_right {R : Type u₁} [ring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (m : ℤ) : semiconj_by a (↑m * x) (↑m * y) :=
mul_right (int.commute_cast a m) h
@[simp] theorem cast_int_mul_left {R : Type u₁} [ring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (m : ℤ) : semiconj_by (↑m * a) x y :=
mul_left (int.cast_commute m y) h
@[simp] theorem cast_int_mul_cast_int_mul {R : Type u₁} [ring R] {a : R} {x : R} {y : R} (h : semiconj_by a x y) (m : ℤ) (n : ℤ) : semiconj_by (↑m * a) (↑n * x) (↑n * y) :=
cast_int_mul_right (cast_int_mul_left h m) n
end semiconj_by
namespace commute
@[simp] theorem cast_nat_mul_right {R : Type u₁} [semiring R] {a : R} {b : R} (h : commute a b) (n : ℕ) : commute a (↑n * b) :=
semiconj_by.cast_nat_mul_right h n
@[simp] theorem cast_nat_mul_left {R : Type u₁} [semiring R] {a : R} {b : R} (h : commute a b) (n : ℕ) : commute (↑n * a) b :=
semiconj_by.cast_nat_mul_left h n
@[simp] theorem cast_nat_mul_cast_nat_mul {R : Type u₁} [semiring R] {a : R} {b : R} (h : commute a b) (m : ℕ) (n : ℕ) : commute (↑m * a) (↑n * b) :=
semiconj_by.cast_nat_mul_cast_nat_mul h m n
@[simp] theorem self_cast_nat_mul {R : Type u₁} [semiring R] {a : R} (n : ℕ) : commute a (↑n * a) :=
cast_nat_mul_right (commute.refl a) n
@[simp] theorem cast_nat_mul_self {R : Type u₁} [semiring R] {a : R} (n : ℕ) : commute (↑n * a) a :=
cast_nat_mul_left (commute.refl a) n
@[simp] theorem self_cast_nat_mul_cast_nat_mul {R : Type u₁} [semiring R] {a : R} (m : ℕ) (n : ℕ) : commute (↑m * a) (↑n * a) :=
cast_nat_mul_cast_nat_mul (commute.refl a) m n
@[simp] theorem units_gpow_right {M : Type u} [monoid M] {a : M} {u : units M} (h : commute a ↑u) (m : ℤ) : commute a ↑(u ^ m) :=
semiconj_by.units_gpow_right h m
@[simp] theorem units_gpow_left {M : Type u} [monoid M] {u : units M} {a : M} (h : commute (↑u) a) (m : ℤ) : commute (↑(u ^ m)) a :=
commute.symm (units_gpow_right (commute.symm h) m)
@[simp] theorem cast_int_mul_right {R : Type u₁} [ring R] {a : R} {b : R} (h : commute a b) (m : ℤ) : commute a (↑m * b) :=
semiconj_by.cast_int_mul_right h m
@[simp] theorem cast_int_mul_left {R : Type u₁} [ring R] {a : R} {b : R} (h : commute a b) (m : ℤ) : commute (↑m * a) b :=
semiconj_by.cast_int_mul_left h m
theorem cast_int_mul_cast_int_mul {R : Type u₁} [ring R] {a : R} {b : R} (h : commute a b) (m : ℤ) (n : ℤ) : commute (↑m * a) (↑n * b) :=
semiconj_by.cast_int_mul_cast_int_mul h m n
@[simp] theorem self_cast_int_mul {R : Type u₁} [ring R] (a : R) (n : ℤ) : commute a (↑n * a) :=
cast_int_mul_right (commute.refl a) n
@[simp] theorem cast_int_mul_self {R : Type u₁} [ring R] (a : R) (n : ℤ) : commute (↑n * a) a :=
cast_int_mul_left (commute.refl a) n
theorem self_cast_int_mul_cast_int_mul {R : Type u₁} [ring R] (a : R) (m : ℤ) (n : ℤ) : commute (↑m * a) (↑n * a) :=
cast_int_mul_cast_int_mul (commute.refl a) m n
end commute
@[simp] theorem nat.to_add_pow (a : multiplicative ℕ) (b : ℕ) : coe_fn multiplicative.to_add (a ^ b) = coe_fn multiplicative.to_add a * b := sorry
@[simp] theorem nat.of_add_mul (a : ℕ) (b : ℕ) : coe_fn multiplicative.of_add (a * b) = coe_fn multiplicative.of_add a ^ b :=
Eq.symm (nat.to_add_pow a b)
@[simp] theorem int.to_add_pow (a : multiplicative ℤ) (b : ℕ) : coe_fn multiplicative.to_add (a ^ b) = coe_fn multiplicative.to_add a * ↑b := sorry
@[simp] theorem int.to_add_gpow (a : multiplicative ℤ) (b : ℤ) : coe_fn multiplicative.to_add (a ^ b) = coe_fn multiplicative.to_add a * b := sorry
@[simp] theorem int.of_add_mul (a : ℤ) (b : ℤ) : coe_fn multiplicative.of_add (a * b) = coe_fn multiplicative.of_add a ^ b :=
Eq.symm (int.to_add_gpow a b)
namespace units
theorem conj_pow {M : Type u} [monoid M] (u : units M) (x : M) (n : ℕ) : (↑u * x * ↑(u⁻¹)) ^ n = ↑u * x ^ n * ↑(u⁻¹) :=
Eq.symm (iff.mpr divp_eq_iff_mul_eq (Eq.symm (semiconj_by.eq (semiconj_by.pow_right (mk_semiconj_by u x) n))))
theorem conj_pow' {M : Type u} [monoid M] (u : units M) (x : M) (n : ℕ) : (↑(u⁻¹) * x * ↑u) ^ n = ↑(u⁻¹) * x ^ n * ↑u :=
conj_pow (u⁻¹) x n
/-- Moving to the opposite monoid commutes with taking powers. -/
@[simp] theorem op_pow {M : Type u} [monoid M] (x : M) (n : ℕ) : opposite.op (x ^ n) = opposite.op x ^ n := sorry
@[simp] theorem unop_pow {M : Type u} [monoid M] (x : Mᵒᵖ) (n : ℕ) : opposite.unop (x ^ n) = opposite.unop x ^ n := sorry
|
module Eq where
infix 4 _≡_
{-
data _≡_ {A : Set} : A → A → Set where
Refl : {x : A} → x ≡ x
-}
data _≡_ {a} {A : Set a} (x : A) : A → Set a where
Refl : x ≡ x
{-# BUILTIN EQUALITY _≡_ #-}
{-# BUILTIN REFL Refl #-}
|
lemma sigma_sets_single[simp]: "sigma_sets A {A} = {{}, A}" |
lemma iff_trans (P Q R : Prop) : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
intro fpq,
cases fpq with pq qp,
intro fqr,
cases fqr with qr rq,
split,
intro p,
apply qr,
apply pq,
exact p,
intro r,
apply qp,
apply rq,
exact r,
end |
{-# OPTIONS -WUnknownFixityInMixfixDecl #-}
data Nat : Set where
zero : Nat
suc : Nat → Nat
_+_ : Nat → Nat → Nat
zero + n = n
(suc m) + n = suc (m + n)
private
_*_ : Nat → Nat → Nat
zero * n = zero
(suc m) * n = n + (m * n)
data List (A : Set) : Set where
[] : List A
_∷_ : A → List A → List A
foldr : {A B : Set} → (A → B → B) → B → List A → B
foldr _<>_ n [] = n
foldr _<>_ n (x ∷ xs) = x <> (foldr _<>_ n xs)
sumOfPowers : Nat → List Nat → Nat
sumOfPowers x = foldr (λ p → (x ^ p) +_) zero where
_^_ : Nat → Nat → Nat
m ^ zero = suc zero
m ^ suc n = m * (m ^ n)
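-- For example, sumOfPowers x (p ∷ q ∷ []) unfolds to (x ^ p) + ((x ^ q) + zero); informally,
-- with x = 2, p = 1 and q = 2 this is 2 + 4 = 6 (writing decimal numerals only for readability,
-- as this module declares no numeral literals).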
|
lemma fract_content_eq_0_iff [simp]: "fract_content p = 0 \<longleftrightarrow> p = 0" |
{-# OPTIONS --safe --warning=error --without-K #-}
open import Agda.Primitive using (Level; lzero; lsuc; _⊔_)
open import Functions.Definition
module Sets.EquivalenceRelations where
Reflexive : {a b : _} {A : Set a} (r : Rel {a} {b} A) → Set (a ⊔ b)
Reflexive {A = A} r = {x : A} → r x x
Symmetric : {a b : _} {A : Set a} (r : Rel {a} {b} A) → Set (a ⊔ b)
Symmetric {A = A} r = {x y : A} → r x y → r y x
Transitive : {a b : _} {A : Set a} (r : Rel {a} {b} A) → Set (a ⊔ b)
Transitive {A = A} r = {x y z : A} → r x y → r y z → r x z
record Equivalence {a b : _} {A : Set a} (r : Rel {a} {b} A) : Set (a ⊔ lsuc b) where
field
reflexive : Reflexive r
symmetric : Symmetric r
transitive : Transitive r
-- See https://lists.chalmers.se/pipermail/agda/2016/009090.html
transitive' : {x y z : A} → r y z → r x y → r x z
transitive' p2 p1 = transitive p1 p2
|
import GMLInit.Data.Index.Basic
import GMLInit.Data.Index.Append
import GMLInit.Data.Index.Map
namespace Index
variable {α} {β : α → Type _} {f : (x : α) → List (β x)} {xs : List α}
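-- `sigma` packs a dependent pair of indices (an index `i` into `xs` together with an index into
-- `f i.val`) into a single index into the flattened list `xs.sigma f`, and `unsigma` unpacks it;
-- `unsigma_sigma` and `sigma_unsigma` below show the two maps are mutually inverse, which is
-- packaged as the equivalence `sigmaEquiv`.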
def sigma : {xs : List α} → (i : Index xs) × Index (f i.val) → Index (xs.sigma f)
| x::_, ⟨head, j⟩ => append (.inl (j.map (Sigma.mk x)))
| _::_, ⟨tail i, j⟩ => append (.inr (sigma ⟨i, j⟩))
def unsigma : {xs : List α} → Index (xs.sigma f) → (i : Index xs) × Index (f i.val)
| x::_, k =>
match unappend k with
| .inl j => ⟨head, j.unmap (Sigma.mk x)⟩
| .inr k => ⟨tail (unsigma k).fst, (unsigma k).snd⟩
theorem unsigma_sigma (i : (i : Index xs) × Index (f i.val)) : unsigma (sigma i) = i := by
induction xs with
| nil => cases i; contradiction
| cons x xs ih =>
match i with
| ⟨head, j⟩ => clean unfold sigma unsigma; rw [unappend_append]; clean; rw [unmap_map]
| ⟨tail i, j⟩ => clean unfold sigma unsigma; rw [unappend_append]; clean; rw [ih]
theorem sigma_unsigma (k : Index (xs.sigma f)) : sigma (unsigma k) = k := by
induction xs with
| nil => contradiction
| cons x xs ih =>
match h : unappend k with
| .inl j => rw [unappend_eq_iff_eq_append] at h; cases h; rw [unsigma, unappend_append, sigma, map_unmap]
| .inr k => rw [unappend_eq_iff_eq_append] at h; cases h; rw [unsigma, unappend_append, sigma, ih]
theorem sigma_eq_iff_eq_unsigma (i : (i : Index xs) × Index (f i.val)) (k : Index (xs.sigma f)) : sigma i = k ↔ i = unsigma k := by
constr
· intro h; cases h; rw [unsigma_sigma]
· intro h; cases h; rw [sigma_unsigma]
theorem unsigma_eq_iff_eq_sigma (k : Index (xs.sigma f)) (i : (i : Index xs) × Index (f i.val)) : unsigma k = i ↔ k = sigma i := by
constr
· intro h; cases h; rw [sigma_unsigma]
· intro h; cases h; rw [unsigma_sigma]
def sigmaEquiv (xs : List α) (f : (x : α) → List (β x)) : Equiv ((i : Index xs) × (Index (f i.val))) (Index (xs.sigma f)) where
fwd := sigma
rev := unsigma
spec := by
intros
constr
· intro | rfl => exact unsigma_sigma ..
· intro | rfl => exact sigma_unsigma ..
theorem val_sigma (i : (i : Index xs) × Index (f i.val)) : (sigma i).val = ⟨i.fst.val, i.snd.val⟩ := by
induction xs with
| nil => cases i; contradiction
| cons x xs ih =>
match i with
| ⟨head, j⟩ => rw [sigma, val_append_inl, val_map]
| ⟨tail i, j⟩ => rw [sigma, val_append_inr, ih]
theorem val_unsigma (k : Index (xs.sigma f)) : ⟨(unsigma k).fst.val, (unsigma k).snd.val⟩ = k.val := by
rw [←sigma_unsigma k, val_sigma, unsigma_sigma]
end Index
|
import morphisms.isomorphism
import algebraic_geometry.pullback_carrier
open opposite category_theory category_theory.limits topological_space
noncomputable theory
namespace algebraic_geometry
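-- If the diagonal of `f` is surjective on points then `f` is injective on points: two points
-- with the same image assemble into a `pullback.triplet`, hence into a point of the pullback,
-- which by assumption lifts along the diagonal; composing with the two projections then
-- identifies the original points.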
lemma injective_of_surjective_diagonal {X Y : Scheme} (f : X ⟶ Y)
(h : function.surjective (pullback.diagonal f).1.base) : function.injective f.1.base :=
begin
intros x y e,
let T : pullback.triplet f f := ⟨x, y, _, e, rfl⟩,
obtain ⟨z, hz, hz'⟩ := T.exists_preimage,
obtain ⟨z', rfl⟩ := h z,
simp only [← Scheme.comp_val_base_apply, pullback.diagonal_fst, pullback.diagonal_snd] at hz hz',
exact hz.symm.trans hz'
end
lemma surjective_diagonal_of_universally_injective {X Y : Scheme} (f : X ⟶ Y)
(h : function.surjective (pullback.diagonal f).1.base) : function.injective f.1.base :=
begin
intros x y e,
let T : pullback.triplet f f := ⟨x, y, _, e, rfl⟩,
obtain ⟨z, hz, hz'⟩ := T.exists_preimage,
obtain ⟨z', rfl⟩ := h z,
simp only [← Scheme.comp_val_base_apply, pullback.diagonal_fst, pullback.diagonal_snd] at hz hz',
exact hz.symm.trans hz'
end
lemma injective_of_mono {X Y : Scheme} (f : X ⟶ Y) [mono f] :
function.injective f.1.base :=
begin
apply injective_of_surjective_diagonal,
exact (as_iso $ (Scheme.forget_to_Top ⋙ forget Top).map
(pullback.diagonal f)).to_equiv.surjective,
end
-- move me
lemma mono_eq_diagonal :
@mono Scheme _ = morphism_property.diagonal (@is_iso Scheme _) :=
begin
ext X Y f,
split,
{ introI _, show is_iso _, apply_instance },
{ rintro (H : is_iso _),
resetI,
haveI : is_iso (pullback.fst : pullback f f ⟶ X),
{ rw (is_iso.inv_eq_of_hom_inv_id (pullback.diagonal_fst f)).symm, apply_instance },
exact is_kernel_pair.mono_of_is_iso_fst (is_pullback.of_has_pullback f f) }
end
lemma mono_is_local_at_target :
property_is_local_at_target (@mono _ _) :=
begin
rw mono_eq_diagonal,
exact is_iso_is_local_at_target.diagonal
end
end algebraic_geometry |
lemma fst_im_cbox [simp]: "cbox c d \<noteq> {} \<Longrightarrow> (fst ` cbox (a,c) (b,d)) = cbox a b" |
[STATEMENT]
lemma "(1359 + 93746*\<i>) - (2468 + 46375*\<i>) = -1109 + 47371*\<i>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1359 + 93746 * \<i> - (2468 + 46375 * \<i>) = - 1109 + 47371 * \<i>
[PROOF STEP]
by simp |
/-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
! This file was ported from Lean 3 source module probability.strong_law
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Probability.IdentDistrib
import Mathbin.MeasureTheory.Integral.IntervalIntegral
import Mathbin.Analysis.SpecificLimits.FloorPow
import Mathbin.Analysis.PSeries
import Mathbin.Analysis.Asymptotics.SpecificAsymptotics
/-!
# The strong law of large numbers
We prove the strong law of large numbers, in `probability_theory.strong_law_ae`:
If `X n` is a sequence of independent identically distributed integrable real-valued random
variables, then `∑ i in range n, X i / n` converges almost surely to `𝔼[X 0]`.
We give here the strong version, due to Etemadi, that only requires pairwise independence.
This file also contains the Lᵖ version of the strong law of large numbers, provided by
`probability_theory.strong_law_Lp`, which shows that `∑ i in range n, X i / n` converges in Lᵖ to
`𝔼[X 0]` provided the `X n` are independent, identically distributed and in Lᵖ.
## Implementation
We follow the proof by Etemadi
[Etemadi, *An elementary proof of the strong law of large numbers*][etemadi_strong_law],
which goes as follows.
It suffices to prove the result for nonnegative `X`, as one can prove the general result by
splitting a general `X` into its positive part and negative part.
Consider a sequence `Xₙ` of nonnegative, integrable, identically distributed, pairwise independent
random variables. Let `Yₙ` be the truncation of `Xₙ` up to `n`. We claim that
* Almost surely, `Xₙ = Yₙ` for all but finitely many indices. Indeed, `∑ ℙ (Xₙ ≠ Yₙ)` is bounded by
`1 + 𝔼[X]` (see `sum_prob_mem_Ioc_le` and `tsum_prob_mem_Ioi_lt_top`).
* Let `c > 1`. Along the sequence `n = c ^ k`, the quantity `(∑_{i=0}^{n-1} Yᵢ - 𝔼[Yᵢ])/n` converges
almost surely to `0`. This follows from a variance control, as
```
∑_k ℙ (|∑_{i=0}^{c^k - 1} Yᵢ - 𝔼[Yᵢ]| > c^k ε)
≤ ∑_k (c^k ε)^{-2} ∑_{i=0}^{c^k - 1} Var[Yᵢ] (by Markov inequality)
≤ ∑_i (C/i^2) Var[Yᵢ] (as ∑_{c^k > i} 1/(c^k)^2 ≤ C/i^2)
≤ ∑_i (C/i^2) 𝔼[Yᵢ^2]
≤ 2C 𝔼[X] (see `sum_variance_truncation_le`)
```
* As `𝔼[Yᵢ]` converges to `𝔼[X]`, it follows from the two previous items and Cesaro that, along
the sequence `n = c^k`, one has `(∑_{i=0}^{n-1} Xᵢ) / n → 𝔼[X]` almost surely.
* To generalize it to all indices, we use the fact that `∑_{i=0}^{n-1} Xᵢ` is nondecreasing and
that, if `c` is close enough to `1`, the gap between `c^k` and `c^(k+1)` is small.
-/
noncomputable section
open MeasureTheory Filter Finset Asymptotics
open Set (indicator)
open Topology BigOperators MeasureTheory ProbabilityTheory ENNReal NNReal
namespace ProbabilityTheory
/-! ### Prerequisites on truncations -/
section Truncation
variable {α : Type _}
/-- Truncating a real-valued function to the interval `(-A, A]`. -/
def truncation (f : α → ℝ) (A : ℝ) :=
indicator (Set.Ioc (-A) A) id ∘ f
#align probability_theory.truncation ProbabilityTheory.truncation
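-- Unfolding the definition, `truncation f A x` is `f x` when `f x ∈ Set.Ioc (-A) A` and `0`
-- otherwise; compare `truncation_eq_self` below.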
variable {m : MeasurableSpace α} {μ : Measure α} {f : α → ℝ}
theorem MeasureTheory.AeStronglyMeasurable.truncation (hf : AeStronglyMeasurable f μ) {A : ℝ} :
AeStronglyMeasurable (truncation f A) μ :=
by
apply ae_strongly_measurable.comp_ae_measurable _ hf.ae_measurable
exact (strongly_measurable_id.indicator measurableSet_Ioc).AeStronglyMeasurable
#align measure_theory.ae_strongly_measurable.truncation MeasureTheory.AeStronglyMeasurable.truncation
theorem abs_truncation_le_bound (f : α → ℝ) (A : ℝ) (x : α) : |truncation f A x| ≤ |A| :=
by
simp only [truncation, Set.indicator, Set.mem_Icc, id.def, Function.comp_apply]
split_ifs
· exact abs_le_abs h.2 (neg_le.2 h.1.le)
· simp [abs_nonneg]
#align probability_theory.abs_truncation_le_bound ProbabilityTheory.abs_truncation_le_bound
@[simp]
theorem truncation_zero (f : α → ℝ) : truncation f 0 = 0 := by simp [truncation]
#align probability_theory.truncation_zero ProbabilityTheory.truncation_zero
theorem abs_truncation_le_abs_self (f : α → ℝ) (A : ℝ) (x : α) : |truncation f A x| ≤ |f x| :=
by
simp only [truncation, indicator, Set.mem_Icc, id.def, Function.comp_apply]
split_ifs
· exact le_rfl
· simp [abs_nonneg]
#align probability_theory.abs_truncation_le_abs_self ProbabilityTheory.abs_truncation_le_abs_self
theorem truncation_eq_self {f : α → ℝ} {A : ℝ} {x : α} (h : |f x| < A) : truncation f A x = f x :=
by
simp only [truncation, indicator, Set.mem_Icc, id.def, Function.comp_apply, ite_eq_left_iff]
intro H
apply H.elim
simp [(abs_lt.1 h).1, (abs_lt.1 h).2.le]
#align probability_theory.truncation_eq_self ProbabilityTheory.truncation_eq_self
theorem truncation_eq_of_nonneg {f : α → ℝ} {A : ℝ} (h : ∀ x, 0 ≤ f x) :
truncation f A = indicator (Set.Ioc 0 A) id ∘ f :=
by
ext x
rcases(h x).lt_or_eq with (hx | hx)
· simp only [truncation, indicator, hx, Set.mem_Ioc, id.def, Function.comp_apply, true_and_iff]
by_cases h'x : f x ≤ A
· have : -A < f x := by linarith [h x]
simp only [this, true_and_iff]
· simp only [h'x, and_false_iff]
· simp only [truncation, indicator, hx, id.def, Function.comp_apply, if_t_t]
#align probability_theory.truncation_eq_of_nonneg ProbabilityTheory.truncation_eq_of_nonneg
theorem truncation_nonneg {f : α → ℝ} (A : ℝ) {x : α} (h : 0 ≤ f x) : 0 ≤ truncation f A x :=
Set.indicator_apply_nonneg fun _ => h
#align probability_theory.truncation_nonneg ProbabilityTheory.truncation_nonneg
theorem MeasureTheory.AeStronglyMeasurable.memℒpTruncation [IsFiniteMeasure μ]
(hf : AeStronglyMeasurable f μ) {A : ℝ} {p : ℝ≥0∞} : Memℒp (truncation f A) p μ :=
Memℒp.ofBound hf.truncation (|A|) (eventually_of_forall fun x => abs_truncation_le_bound _ _ _)
#align measure_theory.ae_strongly_measurable.mem_ℒp_truncation MeasureTheory.AeStronglyMeasurable.memℒpTruncation
theorem MeasureTheory.AeStronglyMeasurable.integrableTruncation [IsFiniteMeasure μ]
(hf : AeStronglyMeasurable f μ) {A : ℝ} : Integrable (truncation f A) μ :=
by
rw [← mem_ℒp_one_iff_integrable]
exact hf.mem_ℒp_truncation
#align measure_theory.ae_strongly_measurable.integrable_truncation MeasureTheory.AeStronglyMeasurable.integrableTruncation
theorem moment_truncation_eq_intervalIntegral (hf : AeStronglyMeasurable f μ) {A : ℝ} (hA : 0 ≤ A)
{n : ℕ} (hn : n ≠ 0) : (∫ x, truncation f A x ^ n ∂μ) = ∫ y in -A..A, y ^ n ∂Measure.map f μ :=
by
have M : MeasurableSet (Set.Ioc (-A) A) := measurableSet_Ioc
change (∫ x, (fun z => indicator (Set.Ioc (-A) A) id z ^ n) (f x) ∂μ) = _
rw [← integral_map hf.ae_measurable, intervalIntegral.integral_of_le, ← integral_indicator M]
· simp only [indicator, zero_pow' _ hn, id.def, ite_pow]
· linarith
· exact ((measurable_id.indicator M).pow_const n).AeStronglyMeasurable
#align probability_theory.moment_truncation_eq_interval_integral ProbabilityTheory.moment_truncation_eq_intervalIntegral
theorem moment_truncation_eq_intervalIntegral_of_nonneg (hf : AeStronglyMeasurable f μ) {A : ℝ}
{n : ℕ} (hn : n ≠ 0) (h'f : 0 ≤ f) :
(∫ x, truncation f A x ^ n ∂μ) = ∫ y in 0 ..A, y ^ n ∂Measure.map f μ :=
by
have M : MeasurableSet (Set.Ioc 0 A) := measurableSet_Ioc
have M' : MeasurableSet (Set.Ioc A 0) := measurableSet_Ioc
rw [truncation_eq_of_nonneg h'f]
change (∫ x, (fun z => indicator (Set.Ioc 0 A) id z ^ n) (f x) ∂μ) = _
rcases le_or_lt 0 A with (hA | hA)
· rw [← integral_map hf.ae_measurable, intervalIntegral.integral_of_le hA, ← integral_indicator M]
· simp only [indicator, zero_pow' _ hn, id.def, ite_pow]
· exact ((measurable_id.indicator M).pow_const n).AeStronglyMeasurable
· rw [← integral_map hf.ae_measurable, intervalIntegral.integral_of_ge hA.le, ←
integral_indicator M']
· simp only [Set.Ioc_eq_empty_of_le hA.le, zero_pow' _ hn, Set.indicator_empty, integral_zero,
zero_eq_neg]
apply integral_eq_zero_of_ae
have : ∀ᵐ x ∂measure.map f μ, (0 : ℝ) ≤ x :=
(ae_map_iff hf.ae_measurable measurableSet_Ici).2 (eventually_of_forall h'f)
filter_upwards [this]with x hx
simp only [indicator, Set.mem_Ioc, Pi.zero_apply, ite_eq_right_iff, and_imp]
intro h'x h''x
have : x = 0 := by linarith
simp [this, zero_pow' _ hn]
· exact ((measurable_id.indicator M).pow_const n).AeStronglyMeasurable
#align probability_theory.moment_truncation_eq_interval_integral_of_nonneg ProbabilityTheory.moment_truncation_eq_intervalIntegral_of_nonneg
theorem integral_truncation_eq_intervalIntegral (hf : AeStronglyMeasurable f μ) {A : ℝ}
(hA : 0 ≤ A) : (∫ x, truncation f A x ∂μ) = ∫ y in -A..A, y ∂Measure.map f μ := by
simpa using moment_truncation_eq_interval_integral hf hA one_ne_zero
#align probability_theory.integral_truncation_eq_interval_integral ProbabilityTheory.integral_truncation_eq_intervalIntegral
theorem integral_truncation_eq_intervalIntegral_of_nonneg (hf : AeStronglyMeasurable f μ) {A : ℝ}
(h'f : 0 ≤ f) : (∫ x, truncation f A x ∂μ) = ∫ y in 0 ..A, y ∂Measure.map f μ := by
simpa using moment_truncation_eq_interval_integral_of_nonneg hf one_ne_zero h'f
#align probability_theory.integral_truncation_eq_interval_integral_of_nonneg ProbabilityTheory.integral_truncation_eq_intervalIntegral_of_nonneg
theorem integral_truncation_le_integral_of_nonneg (hf : Integrable f μ) (h'f : 0 ≤ f) {A : ℝ} :
(∫ x, truncation f A x ∂μ) ≤ ∫ x, f x ∂μ :=
by
apply
integral_mono_of_nonneg (eventually_of_forall fun x => _) hf (eventually_of_forall fun x => _)
· exact truncation_nonneg _ (h'f x)
·
calc
truncation f A x ≤ |truncation f A x| := le_abs_self _
_ ≤ |f x| := (abs_truncation_le_abs_self _ _ _)
_ = f x := abs_of_nonneg (h'f x)
#align probability_theory.integral_truncation_le_integral_of_nonneg ProbabilityTheory.integral_truncation_le_integral_of_nonneg
/-- If a function is integrable, then the integral of its truncated versions converges to the
integral of the whole function. -/
theorem tendsto_integral_truncation {f : α → ℝ} (hf : Integrable f μ) :
Tendsto (fun A => ∫ x, truncation f A x ∂μ) atTop (𝓝 (∫ x, f x ∂μ)) :=
by
refine' tendsto_integral_filter_of_dominated_convergence (fun x => abs (f x)) _ _ _ _
· exact eventually_of_forall fun A => hf.ae_strongly_measurable.truncation
· apply eventually_of_forall fun A => _
apply eventually_of_forall fun x => _
rw [Real.norm_eq_abs]
exact abs_truncation_le_abs_self _ _ _
· apply hf.abs
· apply eventually_of_forall fun x => _
apply tendsto_const_nhds.congr' _
filter_upwards [Ioi_mem_at_top (abs (f x))]with A hA
exact (truncation_eq_self hA).symm
#align probability_theory.tendsto_integral_truncation ProbabilityTheory.tendsto_integral_truncation
theorem IdentDistrib.truncation {β : Type _} [MeasurableSpace β] {ν : Measure β} {f : α → ℝ}
{g : β → ℝ} (h : IdentDistrib f g μ ν) {A : ℝ} :
IdentDistrib (truncation f A) (truncation g A) μ ν :=
h.comp (measurable_id.indicator measurableSet_Ioc)
#align probability_theory.ident_distrib.truncation ProbabilityTheory.IdentDistrib.truncation
end Truncation
section StrongLawAe
variable {Ω : Type _} [MeasureSpace Ω] [IsProbabilityMeasure (ℙ : Measure Ω)]
section MomentEstimates
theorem sum_prob_mem_Ioc_le {X : Ω → ℝ} (hint : Integrable X) (hnonneg : 0 ≤ X) {K : ℕ} {N : ℕ}
(hKN : K ≤ N) :
(∑ j in range K, ℙ { ω | X ω ∈ Set.Ioc (j : ℝ) N }) ≤ ENNReal.ofReal (𝔼[X] + 1) :=
by
let ρ : Measure ℝ := measure.map X ℙ
haveI : is_probability_measure ρ := is_probability_measure_map hint.ae_measurable
have A : (∑ j in range K, ∫ x in j..N, (1 : ℝ) ∂ρ) ≤ 𝔼[X] + 1 :=
calc
(∑ j in range K, ∫ x in j..N, (1 : ℝ) ∂ρ) =
∑ j in range K, ∑ i in Ico j N, ∫ x in i..(i + 1 : ℕ), (1 : ℝ) ∂ρ :=
by
apply sum_congr rfl fun j hj => _
rw [intervalIntegral.sum_integral_adjacent_intervals_Ico ((mem_range.1 hj).le.trans hKN)]
intro k hk
exact continuous_const.interval_integrable _ _
_ = ∑ i in range N, ∑ j in range (min (i + 1) K), ∫ x in i..(i + 1 : ℕ), (1 : ℝ) ∂ρ :=
by
simp_rw [sum_sigma']
refine'
sum_bij' (fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ (fun a ha => rfl)
(fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ _ _
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range, mem_Ico] at hij
simp only [hij, Nat.lt_succ_iff.2 hij.2.1, mem_sigma, mem_range, lt_min_iff, and_self_iff]
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range, lt_min_iff] at hij
simp only [hij, Nat.lt_succ_iff.1 hij.2.1, mem_sigma, mem_range, mem_Ico, and_self_iff]
· rintro ⟨i, j⟩ hij
rfl
· rintro ⟨i, j⟩ hij
rfl
_ ≤ ∑ i in range N, (i + 1) * ∫ x in i..(i + 1 : ℕ), (1 : ℝ) ∂ρ :=
by
apply sum_le_sum fun i hi => _
simp only [Nat.cast_add, Nat.cast_one, sum_const, card_range, nsmul_eq_mul, Nat.cast_min]
refine' mul_le_mul_of_nonneg_right (min_le_left _ _) _
apply intervalIntegral.integral_nonneg
· simp only [le_add_iff_nonneg_right, zero_le_one]
· simp only [zero_le_one, imp_true_iff]
_ ≤ ∑ i in range N, ∫ x in i..(i + 1 : ℕ), x + 1 ∂ρ :=
by
apply sum_le_sum fun i hi => _
have I : (i : ℝ) ≤ (i + 1 : ℕ) := by
simp only [Nat.cast_add, Nat.cast_one, le_add_iff_nonneg_right, zero_le_one]
simp_rw [intervalIntegral.integral_of_le I, ← integral_mul_left]
apply set_integral_mono_on
· exact continuous_const.integrable_on_Ioc
· exact (continuous_id.add continuous_const).integrableOnIoc
· exact measurableSet_Ioc
· intro x hx
simp only [Nat.cast_add, Nat.cast_one, Set.mem_Ioc] at hx
simp [hx.1.le]
_ = ∫ x in 0 ..N, x + 1 ∂ρ :=
by
rw [intervalIntegral.sum_integral_adjacent_intervals fun k hk => _]
· norm_cast
· exact (continuous_id.add continuous_const).IntervalIntegrable _ _
_ = (∫ x in 0 ..N, x ∂ρ) + ∫ x in 0 ..N, 1 ∂ρ :=
by
rw [intervalIntegral.integral_add]
· exact continuous_id.interval_integrable _ _
· exact continuous_const.interval_integrable _ _
_ = 𝔼[truncation X N] + ∫ x in 0 ..N, 1 ∂ρ := by
rw [integral_truncation_eq_interval_integral_of_nonneg hint.1 hnonneg]
_ ≤ 𝔼[X] + ∫ x in 0 ..N, 1 ∂ρ :=
(add_le_add_right (integral_truncation_le_integral_of_nonneg hint hnonneg) _)
_ ≤ 𝔼[X] + 1 := by
refine' add_le_add le_rfl _
rw [intervalIntegral.integral_of_le (Nat.cast_nonneg _)]
simp only [integral_const, measure.restrict_apply', measurableSet_Ioc, Set.univ_inter,
Algebra.id.smul_eq_mul, mul_one]
rw [← ENNReal.one_toReal]
exact ENNReal.toReal_mono ENNReal.one_ne_top prob_le_one
have B : ∀ a b, ℙ { ω | X ω ∈ Set.Ioc a b } = ENNReal.ofReal (∫ x in Set.Ioc a b, (1 : ℝ) ∂ρ) :=
by
intro a b
rw [of_real_set_integral_one ρ _,
measure.map_apply_of_ae_measurable hint.ae_measurable measurableSet_Ioc]
rfl
calc
(∑ j in range K, ℙ { ω | X ω ∈ Set.Ioc (j : ℝ) N }) =
∑ j in range K, ENNReal.ofReal (∫ x in Set.Ioc (j : ℝ) N, (1 : ℝ) ∂ρ) :=
by simp_rw [B]
_ = ENNReal.ofReal (∑ j in range K, ∫ x in Set.Ioc (j : ℝ) N, (1 : ℝ) ∂ρ) :=
by
rw [ENNReal.ofReal_sum_of_nonneg]
simp only [integral_const, Algebra.id.smul_eq_mul, mul_one, ENNReal.toReal_nonneg,
imp_true_iff]
_ = ENNReal.ofReal (∑ j in range K, ∫ x in (j : ℝ)..N, (1 : ℝ) ∂ρ) :=
by
congr 1
refine' sum_congr rfl fun j hj => _
rw [intervalIntegral.integral_of_le (Nat.cast_le.2 ((mem_range.1 hj).le.trans hKN))]
_ ≤ ENNReal.ofReal (𝔼[X] + 1) := ENNReal.ofReal_le_ofReal A
#align probability_theory.sum_prob_mem_Ioc_le ProbabilityTheory.sum_prob_mem_Ioc_le
theorem tsum_prob_mem_Ioi_lt_top {X : Ω → ℝ} (hint : Integrable X) (hnonneg : 0 ≤ X) :
(∑' j : ℕ, ℙ { ω | X ω ∈ Set.Ioi (j : ℝ) }) < ∞ :=
by
suffices : ∀ K : ℕ, (∑ j in range K, ℙ { ω | X ω ∈ Set.Ioi (j : ℝ) }) ≤ ENNReal.ofReal (𝔼[X] + 1)
exact
(le_of_tendsto_of_tendsto (ENNReal.tendsto_nat_tsum _) tendsto_const_nhds
(eventually_of_forall this)).trans_lt
ENNReal.ofReal_lt_top
intro K
have A :
tendsto (fun N : ℕ => ∑ j in range K, ℙ { ω | X ω ∈ Set.Ioc (j : ℝ) N }) at_top
(𝓝 (∑ j in range K, ℙ { ω | X ω ∈ Set.Ioi (j : ℝ) })) :=
by
refine' tendsto_finset_sum _ fun i hi => _
have : { ω | X ω ∈ Set.Ioi (i : ℝ) } = ⋃ N : ℕ, { ω | X ω ∈ Set.Ioc (i : ℝ) N } :=
by
apply Set.Subset.antisymm _ _
· intro ω hω
obtain ⟨N, hN⟩ : ∃ N : ℕ, X ω ≤ N := exists_nat_ge (X ω)
exact Set.mem_unionᵢ.2 ⟨N, hω, hN⟩
·
simp (config := { contextual := true }) only [Set.mem_Ioc, Set.mem_Ioi,
Set.unionᵢ_subset_iff, Set.setOf_subset_setOf, imp_true_iff]
rw [this]
apply tendsto_measure_Union
intro m n hmn x hx
exact ⟨hx.1, hx.2.trans (Nat.cast_le.2 hmn)⟩
apply le_of_tendsto_of_tendsto A tendsto_const_nhds
filter_upwards [Ici_mem_at_top K]with N hN
exact sum_prob_mem_Ioc_le hint hnonneg hN
#align probability_theory.tsum_prob_mem_Ioi_lt_top ProbabilityTheory.tsum_prob_mem_Ioi_lt_top
theorem sum_variance_truncation_le {X : Ω → ℝ} (hint : Integrable X) (hnonneg : 0 ≤ X) (K : ℕ) :
(∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * 𝔼[truncation X j ^ 2]) ≤ 2 * 𝔼[X] :=
by
set Y := fun n : ℕ => truncation X n
let ρ : Measure ℝ := measure.map X ℙ
have Y2 : ∀ n, 𝔼[Y n ^ 2] = ∫ x in 0 ..n, x ^ 2 ∂ρ :=
by
intro n
change 𝔼[fun x => Y n x ^ 2] = _
rw [moment_truncation_eq_interval_integral_of_nonneg hint.1 two_ne_zero hnonneg]
calc
(∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * 𝔼[Y j ^ 2]) =
∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * ∫ x in 0 ..j, x ^ 2 ∂ρ :=
by simp_rw [Y2]
_ = ∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * ∑ k in range j, ∫ x in k..(k + 1 : ℕ), x ^ 2 ∂ρ :=
by
congr 1 with j
congr 1
rw [intervalIntegral.sum_integral_adjacent_intervals]
· norm_cast
intro k hk
exact (continuous_id.pow _).IntervalIntegrable _ _
_ = ∑ k in range K, (∑ j in Ioo k K, ((j : ℝ) ^ 2)⁻¹) * ∫ x in k..(k + 1 : ℕ), x ^ 2 ∂ρ :=
by
simp_rw [mul_sum, sum_mul, sum_sigma']
refine'
sum_bij' (fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ (fun a ha => rfl)
(fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ _ _
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range, mem_filter] at hij
simp [hij, mem_sigma, mem_range, and_self_iff, hij.2.trans hij.1]
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range, mem_Ioo] at hij
simp only [hij, mem_sigma, mem_range, and_self_iff]
· rintro ⟨i, j⟩ hij
rfl
· rintro ⟨i, j⟩ hij
rfl
_ ≤ ∑ k in range K, 2 / (k + 1) * ∫ x in k..(k + 1 : ℕ), x ^ 2 ∂ρ :=
by
apply sum_le_sum fun k hk => _
refine' mul_le_mul_of_nonneg_right (sum_Ioo_inv_sq_le _ _) _
refine' intervalIntegral.integral_nonneg_of_forall _ fun u => sq_nonneg _
simp only [Nat.cast_add, Nat.cast_one, le_add_iff_nonneg_right, zero_le_one]
_ ≤ ∑ k in range K, ∫ x in k..(k + 1 : ℕ), 2 * x ∂ρ :=
by
apply sum_le_sum fun k hk => _
have Ik : (k : ℝ) ≤ (k + 1 : ℕ) := by simp
rw [← intervalIntegral.integral_const_mul, intervalIntegral.integral_of_le Ik,
intervalIntegral.integral_of_le Ik]
refine' set_integral_mono_on _ _ measurableSet_Ioc fun x hx => _
· apply Continuous.integrableOnIoc
exact continuous_const.mul (continuous_pow 2)
· apply Continuous.integrableOnIoc
exact continuous_const.mul continuous_id'
·
calc
2 / (↑k + 1) * x ^ 2 = x / (k + 1) * (2 * x) := by ring
_ ≤ 1 * (2 * x) :=
(mul_le_mul_of_nonneg_right
(by
apply_mod_cast (div_le_one _).2 hx.2
simp only [Nat.cast_add, Nat.cast_one]
linarith only [show (0 : ℝ) ≤ k from Nat.cast_nonneg k])
(mul_nonneg zero_le_two ((Nat.cast_nonneg k).trans hx.1.le)))
_ = 2 * x := by rw [one_mul]
_ = 2 * ∫ x in (0 : ℝ)..K, x ∂ρ :=
by
rw [intervalIntegral.sum_integral_adjacent_intervals fun k hk => _]
swap; · exact (continuous_const.mul continuous_id').IntervalIntegrable _ _
rw [intervalIntegral.integral_const_mul]
norm_cast
_ ≤ 2 * 𝔼[X] :=
mul_le_mul_of_nonneg_left
(by
rw [← integral_truncation_eq_interval_integral_of_nonneg hint.1 hnonneg]
exact integral_truncation_le_integral_of_nonneg hint hnonneg)
zero_le_two
#align probability_theory.sum_variance_truncation_le ProbabilityTheory.sum_variance_truncation_le
end MomentEstimates
section StrongLawNonneg
/- This paragraph proves the strong law of large numbers (almost sure version, assuming only
pairwise independence) for nonnegative random variables, following Etemadi's proof. -/
variable (X : ℕ → Ω → ℝ) (hint : Integrable (X 0))
(hindep : Pairwise fun i j => IndepFunCat (X i) (X j)) (hident : ∀ i, IdentDistrib (X i) (X 0))
(hnonneg : ∀ i ω, 0 ≤ X i ω)
include X hint hindep hident hnonneg
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers (with respect to
the truncated expectation) along the sequence `c^n`, for any `c > 1`, up to a given `ε > 0`.
This follows from a variance control. -/
theorem strong_law_aux1 {c : ℝ} (c_one : 1 < c) {ε : ℝ} (εpos : 0 < ε) :
∀ᵐ ω,
∀ᶠ n : ℕ in atTop,
|(∑ i in range ⌊c ^ n⌋₊, truncation (X i) i ω) -
𝔼[∑ i in range ⌊c ^ n⌋₊, truncation (X i) i]| <
ε * ⌊c ^ n⌋₊ :=
by
/- Let `S n = ∑ i in range n, Y i` where `Y i = truncation (X i) i`. We should show that
`|S k - 𝔼[S k]| / k ≤ ε` along the sequence of powers of `c`. For this, we apply Borel-Cantelli:
it suffices to show that the probabilities of the complementary events are summable. By Chebyshev's inequality, this
will follow from a variance control `∑' Var[S (c^i)] / (c^i)^2 < ∞`. This is checked in `I2` using
pairwise independence to expand the variance of the sum as the sum of the variances, and then
a straightforward but tedious computation (essentially boiling down to the fact that the sum of
`1/(c ^ i)^2` beyond a threshold `j` is comparable to `1/j^2`).
Note that we have written `c^i` in the above proof sketch, but rigorously one should put integer
parts everywhere, making things more painful. We write `u i = ⌊c^i⌋₊` for brevity. -/
have c_pos : 0 < c := zero_lt_one.trans c_one
let ρ : Measure ℝ := measure.map (X 0) ℙ
have hX : ∀ i, ae_strongly_measurable (X i) ℙ := fun i =>
(hident i).symm.aeStronglyMeasurableSnd hint.1
have A : ∀ i, strongly_measurable (indicator (Set.Ioc (-i : ℝ) i) id) := fun i =>
strongly_measurable_id.indicator measurableSet_Ioc
set Y := fun n : ℕ => truncation (X n) n with hY
set S := fun n => ∑ i in range n, Y i with hS
let u : ℕ → ℕ := fun n => ⌊c ^ n⌋₊
have u_mono : Monotone u := fun i j hij => Nat.floor_mono (pow_le_pow c_one.le hij)
have I1 : ∀ K, (∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * Var[Y j]) ≤ 2 * 𝔼[X 0] :=
by
intro K
calc
(∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * Var[Y j]) ≤
∑ j in range K, ((j : ℝ) ^ 2)⁻¹ * 𝔼[truncation (X 0) j ^ 2] :=
by
apply sum_le_sum fun j hj => _
refine' mul_le_mul_of_nonneg_left _ (inv_nonneg.2 (sq_nonneg _))
rw [(hident j).truncation.variance_eq]
exact variance_le_expectation_sq (hX 0).truncation
_ ≤ 2 * 𝔼[X 0] := sum_variance_truncation_le hint (hnonneg 0) K
let C := c ^ 5 * (c - 1)⁻¹ ^ 3 * (2 * 𝔼[X 0])
have I2 : ∀ N, (∑ i in range N, ((u i : ℝ) ^ 2)⁻¹ * Var[S (u i)]) ≤ C :=
by
intro N
calc
(∑ i in range N, ((u i : ℝ) ^ 2)⁻¹ * Var[S (u i)]) =
∑ i in range N, ((u i : ℝ) ^ 2)⁻¹ * ∑ j in range (u i), Var[Y j] :=
by
congr 1 with i
congr 1
rw [hS, indep_fun.variance_sum]
· intro j hj
exact (hident j).aeStronglyMeasurableFst.memℒpTruncation
· intro k hk l hl hkl
exact (hindep hkl).comp (A k).Measurable (A l).Measurable
_ =
∑ j in range (u (N - 1)),
(∑ i in (range N).filterₓ fun i => j < u i, ((u i : ℝ) ^ 2)⁻¹) * Var[Y j] :=
by
simp_rw [mul_sum, sum_mul, sum_sigma']
refine'
sum_bij' (fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ (fun a ha => rfl)
(fun (p : Σi : ℕ, ℕ) hp => (⟨p.2, p.1⟩ : Σi : ℕ, ℕ)) _ _ _
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range] at hij
simp only [hij.1, hij.2, mem_sigma, mem_range, mem_filter, and_true_iff]
exact hij.2.trans_le (u_mono (Nat.le_pred_of_lt hij.1))
· rintro ⟨i, j⟩ hij
simp only [mem_sigma, mem_range, mem_filter] at hij
simp only [hij.2.1, hij.2.2, mem_sigma, mem_range, and_self_iff]
· rintro ⟨i, j⟩ hij
rfl
· rintro ⟨i, j⟩ hij
rfl
_ ≤ ∑ j in range (u (N - 1)), c ^ 5 * (c - 1)⁻¹ ^ 3 / j ^ 2 * Var[Y j] :=
by
apply sum_le_sum fun j hj => _
rcases@eq_zero_or_pos _ _ j with (rfl | hj)
· simp only [Y, Nat.cast_zero, zero_pow', Ne.def, bit0_eq_zero, Nat.one_ne_zero,
not_false_iff, div_zero, MulZeroClass.zero_mul]
simp only [Nat.cast_zero, truncation_zero, variance_zero, MulZeroClass.mul_zero]
apply mul_le_mul_of_nonneg_right _ (variance_nonneg _ _)
convert sum_div_nat_floor_pow_sq_le_div_sq N (Nat.cast_pos.2 hj) c_one
· simp only [Nat.cast_lt]
· simp only [one_div]
_ = c ^ 5 * (c - 1)⁻¹ ^ 3 * ∑ j in range (u (N - 1)), ((j : ℝ) ^ 2)⁻¹ * Var[Y j] :=
by
simp_rw [mul_sum, div_eq_mul_inv]
ring_nf
_ ≤ c ^ 5 * (c - 1)⁻¹ ^ 3 * (2 * 𝔼[X 0]) :=
by
apply mul_le_mul_of_nonneg_left (I1 _)
apply mul_nonneg (pow_nonneg c_pos.le _)
exact pow_nonneg (inv_nonneg.2 (sub_nonneg.2 c_one.le)) _
have I3 :
∀ N,
(∑ i in range N, ℙ { ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]| }) ≤
ENNReal.ofReal (ε⁻¹ ^ 2 * C) :=
by
intro N
calc
(∑ i in range N, ℙ { ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]| }) ≤
∑ i in range N, ENNReal.ofReal (Var[S (u i)] / (u i * ε) ^ 2) :=
by
refine' sum_le_sum fun i hi => _
apply meas_ge_le_variance_div_sq
· exact mem_ℒp_finset_sum' _ fun j hj => (hident j).aeStronglyMeasurableFst.memℒpTruncation
· apply mul_pos (Nat.cast_pos.2 _) εpos
refine' zero_lt_one.trans_le _
apply Nat.le_floor
rw [Nat.cast_one]
apply one_le_pow_of_one_le c_one.le
_ = ENNReal.ofReal (∑ i in range N, Var[S (u i)] / (u i * ε) ^ 2) :=
by
rw [ENNReal.ofReal_sum_of_nonneg fun i hi => _]
exact div_nonneg (variance_nonneg _ _) (sq_nonneg _)
_ ≤ ENNReal.ofReal (ε⁻¹ ^ 2 * C) :=
by
apply ENNReal.ofReal_le_ofReal
simp_rw [div_eq_inv_mul, ← inv_pow, mul_inv, mul_comm _ ε⁻¹, mul_pow, mul_assoc, ← mul_sum]
refine' mul_le_mul_of_nonneg_left _ (sq_nonneg _)
simp_rw [inv_pow]
exact I2 N
have I4 : (∑' i, ℙ { ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]| }) < ∞ :=
(le_of_tendsto_of_tendsto' (ENNReal.tendsto_nat_tsum _) tendsto_const_nhds I3).trans_lt
ENNReal.ofReal_lt_top
filter_upwards [ae_eventually_not_mem I4.ne]with ω hω
simp_rw [not_le, mul_comm, S, sum_apply] at hω
exact hω
#align probability_theory.strong_law_aux1 ProbabilityTheory.strong_law_aux1
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers
(with respect to the truncated expectation) along the sequence
`c^n`, for any `c > 1`. This follows from `strong_law_aux1` by varying `ε`. -/
theorem strong_law_aux2 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω,
(fun n : ℕ =>
(∑ i in range ⌊c ^ n⌋₊, truncation (X i) i ω) -
𝔼[∑ i in range ⌊c ^ n⌋₊, truncation (X i) i]) =o[atTop]
fun n : ℕ => (⌊c ^ n⌋₊ : ℝ) :=
by
obtain ⟨v, -, v_pos, v_lim⟩ :
∃ v : ℕ → ℝ, StrictAnti v ∧ (∀ n : ℕ, 0 < v n) ∧ tendsto v at_top (𝓝 0) :=
exists_seq_strictAnti_tendsto (0 : ℝ)
have := fun i => strong_law_aux1 X hint hindep hident hnonneg c_one (v_pos i)
filter_upwards [ae_all_iff.2 this]with ω hω
apply Asymptotics.isOCat_iff.2 fun ε εpos => _
obtain ⟨i, hi⟩ : ∃ i, v i < ε := ((tendsto_order.1 v_lim).2 ε εpos).exists
filter_upwards [hω i]with n hn
simp only [Real.norm_eq_abs, LatticeOrderedCommGroup.abs_abs, Nat.abs_cast]
exact hn.le.trans (mul_le_mul_of_nonneg_right hi.le (Nat.cast_nonneg _))
#align probability_theory.strong_law_aux2 ProbabilityTheory.strong_law_aux2
omit hindep hnonneg
/-- The expectation of the truncated version of `Xᵢ` behaves asymptotically like the whole
expectation. This follows from convergence and Cesaro averaging. -/
theorem strong_law_aux3 :
(fun n => 𝔼[∑ i in range n, truncation (X i) i] - n * 𝔼[X 0]) =o[atTop] (coe : ℕ → ℝ) :=
by
have A : tendsto (fun i => 𝔼[truncation (X i) i]) at_top (𝓝 𝔼[X 0]) :=
by
convert(tendsto_integral_truncation hint).comp tendsto_nat_cast_atTop_atTop
ext i
exact (hident i).truncation.integral_eq
convert Asymptotics.isOCat_sum_range_of_tendsto_zero (tendsto_sub_nhds_zero_iff.2 A)
ext1 n
simp only [sum_sub_distrib, sum_const, card_range, nsmul_eq_mul, sum_apply, sub_left_inj]
rw [integral_finset_sum _ fun i hi => _]
exact ((hident i).symm.integrableSnd hint).1.integrableTruncation
#align probability_theory.strong_law_aux3 ProbabilityTheory.strong_law_aux3
include hindep hnonneg
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers
(with respect to the original expectation) along the sequence
`c^n`, for any `c > 1`. This follows from the version from the truncated expectation, and the
fact that the truncated and the original expectations have the same asymptotic behavior. -/
theorem strong_law_aux4 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω,
(fun n : ℕ => (∑ i in range ⌊c ^ n⌋₊, truncation (X i) i ω) - ⌊c ^ n⌋₊ * 𝔼[X 0]) =o[atTop]
fun n : ℕ => (⌊c ^ n⌋₊ : ℝ) :=
by
filter_upwards [strong_law_aux2 X hint hindep hident hnonneg c_one]with ω hω
have A : tendsto (fun n : ℕ => ⌊c ^ n⌋₊) at_top at_top :=
tendsto_nat_floor_at_top.comp (tendsto_pow_atTop_atTop_of_one_lt c_one)
convert hω.add ((strong_law_aux3 X hint hident).comp_tendsto A)
ext1 n
simp
#align probability_theory.strong_law_aux4 ProbabilityTheory.strong_law_aux4
omit hindep
/-- The truncated and non-truncated versions of `Xᵢ` have the same asymptotic behavior, as they
almost surely coincide at all but finitely many steps. This follows from a probability computation
and Borel-Cantelli. -/
theorem strong_law_aux5 :
∀ᵐ ω,
(fun n : ℕ => (∑ i in range n, truncation (X i) i ω) - ∑ i in range n, X i ω) =o[atTop]
fun n : ℕ => (n : ℝ) :=
by
have A : (∑' j : ℕ, ℙ { ω | X j ω ∈ Set.Ioi (j : ℝ) }) < ∞ :=
by
convert tsum_prob_mem_Ioi_lt_top hint (hnonneg 0)
ext1 j
exact (hident j).measure_mem_eq measurableSet_Ioi
have B : ∀ᵐ ω, tendsto (fun n : ℕ => truncation (X n) n ω - X n ω) at_top (𝓝 0) :=
by
filter_upwards [ae_eventually_not_mem A.ne]with ω hω
apply tendsto_const_nhds.congr' _
filter_upwards [hω, Ioi_mem_at_top 0]with n hn npos
simp only [truncation, indicator, Set.mem_Ioc, id.def, Function.comp_apply]
split_ifs
· exact (sub_self _).symm
· have : -(n : ℝ) < X n ω := by
apply lt_of_lt_of_le _ (hnonneg n ω)
simpa only [Right.neg_neg_iff, Nat.cast_pos] using npos
simp only [this, true_and_iff, not_le] at h
exact (hn h).elim
filter_upwards [B]with ω hω
convert is_o_sum_range_of_tendsto_zero hω
ext n
rw [sum_sub_distrib]
#align probability_theory.strong_law_aux5 ProbabilityTheory.strong_law_aux5
include hindep
/- `Xᵢ` satisfies the strong law of large numbers along the sequence
`c^n`, for any `c > 1`. This follows from the version for the truncated `Xᵢ`, and the fact that
`Xᵢ` and its truncated version have the same asymptotic behavior. -/
theorem strong_law_aux6 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω, Tendsto (fun n : ℕ => (∑ i in range ⌊c ^ n⌋₊, X i ω) / ⌊c ^ n⌋₊) atTop (𝓝 𝔼[X 0]) :=
by
have H : ∀ n : ℕ, (0 : ℝ) < ⌊c ^ n⌋₊ := by
intro n
refine' zero_lt_one.trans_le _
simp only [Nat.one_le_cast, Nat.one_le_floor_iff, one_le_pow_of_one_le c_one.le n]
filter_upwards [strong_law_aux4 X hint hindep hident hnonneg c_one,
strong_law_aux5 X hint hident hnonneg]with ω hω h'ω
rw [← tendsto_sub_nhds_zero_iff, ← Asymptotics.isOCat_one_iff ℝ]
have L :
(fun n : ℕ => (∑ i in range ⌊c ^ n⌋₊, X i ω) - ⌊c ^ n⌋₊ * 𝔼[X 0]) =o[at_top] fun n =>
(⌊c ^ n⌋₊ : ℝ) :=
by
have A : tendsto (fun n : ℕ => ⌊c ^ n⌋₊) at_top at_top :=
tendsto_nat_floor_at_top.comp (tendsto_pow_atTop_atTop_of_one_lt c_one)
convert hω.sub (h'ω.comp_tendsto A)
ext1 n
simp only [sub_sub_sub_cancel_left]
convert L.mul_is_O (is_O_refl (fun n : ℕ => (⌊c ^ n⌋₊ : ℝ)⁻¹) at_top) <;>
· ext1 n
field_simp [(H n).ne']
#align probability_theory.strong_law_aux6 ProbabilityTheory.strong_law_aux6
/-- `Xᵢ` satisfies the strong law of large numbers along all integers. This follows from the
corresponding fact along the sequences `c^n`, and the fact that any integer can be sandwiched
between `c^n` and `c^(n+1)` with comparably small error if `c` is close enough to `1`
(which is formalized in `tendsto_div_of_monotone_of_tendsto_div_floor_pow`). -/
theorem strong_law_aux7 :
∀ᵐ ω, Tendsto (fun n : ℕ => (∑ i in range n, X i ω) / n) atTop (𝓝 𝔼[X 0]) :=
by
obtain ⟨c, -, cone, clim⟩ :
∃ c : ℕ → ℝ, StrictAnti c ∧ (∀ n : ℕ, 1 < c n) ∧ tendsto c at_top (𝓝 1) :=
exists_seq_strictAnti_tendsto (1 : ℝ)
have :
∀ k,
∀ᵐ ω,
tendsto (fun n : ℕ => (∑ i in range ⌊c k ^ n⌋₊, X i ω) / ⌊c k ^ n⌋₊) at_top (𝓝 𝔼[X 0]) :=
fun k => strong_law_aux6 X hint hindep hident hnonneg (cone k)
filter_upwards [ae_all_iff.2 this]with ω hω
apply tendsto_div_of_monotone_of_tendsto_div_floor_pow _ _ _ c cone clim _
· intro m n hmn
exact sum_le_sum_of_subset_of_nonneg (range_mono hmn) fun i hi h'i => hnonneg i ω
· exact hω
#align probability_theory.strong_law_aux7 ProbabilityTheory.strong_law_aux7
end StrongLawNonneg
/-- *Strong law of large numbers*, almost sure version: if `X n` is a sequence of independent
identically distributed integrable real-valued random variables, then `∑ i in range n, X i / n`
converges almost surely to `𝔼[X 0]`. We give here the strong version, due to Etemadi, that only
requires pairwise independence. -/
theorem strong_law_ae (X : ℕ → Ω → ℝ) (hint : Integrable (X 0))
(hindep : Pairwise fun i j => IndepFunCat (X i) (X j))
(hident : ∀ i, IdentDistrib (X i) (X 0)) :
∀ᵐ ω, Tendsto (fun n : ℕ => (∑ i in range n, X i ω) / n) atTop (𝓝 𝔼[X 0]) :=
by
let pos : ℝ → ℝ := fun x => max x 0
let neg : ℝ → ℝ := fun x => max (-x) 0
have posm : Measurable pos := measurable_id'.max measurable_const
have negm : Measurable neg := measurable_id'.neg.max measurable_const
have A :
∀ᵐ ω, tendsto (fun n : ℕ => (∑ i in range n, (pos ∘ X i) ω) / n) at_top (𝓝 𝔼[pos ∘ X 0]) :=
strong_law_aux7 _ hint.pos_part (fun i j hij => (hindep hij).comp posm posm)
(fun i => (hident i).comp posm) fun i ω => le_max_right _ _
have B :
∀ᵐ ω, tendsto (fun n : ℕ => (∑ i in range n, (neg ∘ X i) ω) / n) at_top (𝓝 𝔼[neg ∘ X 0]) :=
strong_law_aux7 _ hint.neg_part (fun i j hij => (hindep hij).comp negm negm)
(fun i => (hident i).comp negm) fun i ω => le_max_right _ _
filter_upwards [A, B]with ω hωpos hωneg
convert hωpos.sub hωneg
· simp only [← sub_div, ← sum_sub_distrib, max_zero_sub_max_neg_zero_eq_self]
· simp only [← integral_sub hint.pos_part hint.neg_part, max_zero_sub_max_neg_zero_eq_self]
#align probability_theory.strong_law_ae ProbabilityTheory.strong_law_ae
end StrongLawAe
section StrongLawLp
variable {Ω : Type _} [MeasureSpace Ω] [IsProbabilityMeasure (ℙ : Measure Ω)]
/-- *Strong law of large numbers*, Lᵖ version: if `X n` is a sequence of independent
identically distributed real-valued random variables in Lᵖ, then `∑ i in range n, X i / n`
converges in Lᵖ to `𝔼[X 0]`. -/
theorem strong_law_Lp {p : ℝ≥0∞} (hp : 1 ≤ p) (hp' : p ≠ ∞) (X : ℕ → Ω → ℝ) (hℒp : Memℒp (X 0) p)
(hindep : Pairwise fun i j => IndepFunCat (X i) (X j))
(hident : ∀ i, IdentDistrib (X i) (X 0)) :
Tendsto (fun n => snorm (fun ω => (∑ i in range n, X i ω) / n - 𝔼[X 0]) p ℙ) atTop (𝓝 0) :=
by
have hmeas : ∀ i, ae_strongly_measurable (X i) ℙ := fun i =>
(hident i).aeStronglyMeasurable_iff.2 hℒp.1
have hint : integrable (X 0) ℙ := hℒp.integrable hp
have havg : ∀ n, ae_strongly_measurable (fun ω => (∑ i in range n, X i ω) / n) ℙ :=
by
intro n
simp_rw [div_eq_mul_inv]
exact ae_strongly_measurable.mul_const (ae_strongly_measurable_sum _ fun i _ => hmeas i) _
refine'
tendsto_Lp_of_tendsto_in_measure _ hp hp' havg (mem_ℒp_const _) _
(tendsto_in_measure_of_tendsto_ae havg (strong_law_ae _ hint hindep hident))
rw [(_ : (fun n ω => (∑ i in range n, X i ω) / ↑n) = fun n => (∑ i in range n, X i) / ↑n)]
·
exact
(uniform_integrable_average hp <|
mem_ℒp.uniform_integrable_of_ident_distrib hp hp' hℒp hident).2.1
· ext (n ω)
simp only [Pi.coe_nat, Pi.div_apply, sum_apply]
#align probability_theory.strong_law_Lp ProbabilityTheory.strong_law_Lp
end StrongLawLp
end ProbabilityTheory
|
[STATEMENT]
lemma Ext_foo_accessible [simp]:
"tprg\<turnstile>(Ext,mdecl Ext_foo) of Ext accessible_from S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tprg \<turnstile> (Ext, mdecl Ext_foo) of Ext accessible_from S
[PROOF STEP]
by (auto intro: accessible_fromR.Immediate
Ext_foo_member_of_Ext Ext_foo_permits_acc) |
(* Title: Setsum.thy
Author: Minamide Yasuhiko
*)
theory Setsum
imports Main
begin
lemma sum1:
fixes f::"'a \<Rightarrow> nat"
assumes "finite s" "sum f s = 1"
shows "\<exists>y \<in> s. f y = 1 \<and> sum f (s - {y}) = 0"
using assms apply (simp add: sum_eq_1_iff[OF assms(1), simplified]) by fastforce
lemma sumk:
fixes f::"'a \<Rightarrow> nat"
assumes "finite s" "x \<in> s" "sum f s \<le> k"
shows "f x \<le> k"
proof (rule contrapos_pp)
assume H: "\<not> f x \<le> k"
have "sum f s = sum f (insert x (s - {x}))"
using assms by (simp add: insert_absorb)
also have "... = f x + sum f (s - {x})"
by (rule sum.insert, insert assms, auto)
finally have "sum f s = f x + sum f (s - {x})" .
thus "\<not> sum f s \<le> k"
proof (simp)
show "\<not> f x + sum f (s - {x}) \<le> k"
using H by auto
qed
qed (fact)
end
|
A set $S$ is simply connected if and only if it is open, connected, and Borsukian. |
lemma is_interval_neg_translationI: assumes "is_interval X" shows "is_interval ((-) x ` X)" |
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
notation, basic datatypes and type classes
-/
prelude
import Init.Core
set_option linter.missingDocs true -- keep it documented
theorem of_eq_true (h : p = True) : p := h ▸ trivial
theorem eq_true (h : p) : p = True :=
propext ⟨fun _ => trivial, fun _ => h⟩
theorem eq_false (h : ¬ p) : p = False :=
propext ⟨fun h' => absurd h' h, fun h' => False.elim h'⟩
theorem eq_false' (h : p → False) : p = False := eq_false h
theorem eq_true_of_decide {p : Prop} {_ : Decidable p} (h : decide p = true) : p = True :=
eq_true (of_decide_eq_true h)
theorem eq_false_of_decide {p : Prop} {_ : Decidable p} (h : decide p = false) : p = False :=
eq_false (of_decide_eq_false h)
@[simp] theorem eq_self (a : α) : (a = a) = True := eq_true rfl
theorem implies_congr {p₁ p₂ : Sort u} {q₁ q₂ : Sort v} (h₁ : p₁ = p₂) (h₂ : q₁ = q₂) : (p₁ → q₁) = (p₂ → q₂) :=
h₁ ▸ h₂ ▸ rfl
theorem implies_dep_congr_ctx {p₁ p₂ q₁ : Prop} (h₁ : p₁ = p₂) {q₂ : p₂ → Prop} (h₂ : (h : p₂) → q₁ = q₂ h) : (p₁ → q₁) = ((h : p₂) → q₂ h) :=
propext ⟨
fun hl hp₂ => (h₂ hp₂).mp (hl (h₁.mpr hp₂)),
fun hr hp₁ => (h₂ (h₁.mp hp₁)).mpr (hr (h₁.mp hp₁))⟩
theorem implies_congr_ctx {p₁ p₂ q₁ q₂ : Prop} (h₁ : p₁ = p₂) (h₂ : p₂ → q₁ = q₂) : (p₁ → q₁) = (p₂ → q₂) :=
implies_dep_congr_ctx h₁ h₂
theorem forall_congr {α : Sort u} {p q : α → Prop} (h : ∀ a, p a = q a) : (∀ a, p a) = (∀ a, q a) :=
(funext h : p = q) ▸ rfl
theorem let_congr {α : Sort u} {β : Sort v} {a a' : α} {b b' : α → β}
(h₁ : a = a') (h₂ : ∀ x, b x = b' x) : (let x := a; b x) = (let x := a'; b' x) :=
h₁ ▸ (funext h₂ : b = b') ▸ rfl
theorem let_val_congr {α : Sort u} {β : Sort v} {a a' : α}
(b : α → β) (h : a = a') : (let x := a; b x) = (let x := a'; b x) := h ▸ rfl
theorem let_body_congr {α : Sort u} {β : α → Sort v} {b b' : (a : α) → β a}
(a : α) (h : ∀ x, b x = b' x) : (let x := a; b x) = (let x := a; b' x) :=
(funext h : b = b') ▸ rfl
@[congr]
theorem ite_congr {x y u v : α} {s : Decidable b} [Decidable c]
(h₁ : b = c) (h₂ : c → x = u) (h₃ : ¬ c → y = v) : ite b x y = ite c u v := by
cases Decidable.em c with
| inl h => rw [if_pos h]; subst b; rw [if_pos h]; exact h₂ h
| inr h => rw [if_neg h]; subst b; rw [if_neg h]; exact h₃ h
theorem Eq.mpr_prop {p q : Prop} (h₁ : p = q) (h₂ : q) : p := h₁ ▸ h₂
theorem Eq.mpr_not {p q : Prop} (h₁ : p = q) (h₂ : ¬q) : ¬p := h₁ ▸ h₂
@[congr]
theorem dite_congr {_ : Decidable b} [Decidable c]
{x : b → α} {u : c → α} {y : ¬b → α} {v : ¬c → α}
(h₁ : b = c)
(h₂ : (h : c) → x (h₁.mpr_prop h) = u h)
(h₃ : (h : ¬c) → y (h₁.mpr_not h) = v h) :
dite b x y = dite c u v := by
cases Decidable.em c with
| inl h => rw [dif_pos h]; subst b; rw [dif_pos h]; exact h₂ h
| inr h => rw [dif_neg h]; subst b; rw [dif_neg h]; exact h₃ h
@[simp] theorem ne_eq (a b : α) : (a ≠ b) = ¬(a = b) := rfl
@[simp] theorem ite_true (a b : α) : (if True then a else b) = a := rfl
@[simp] theorem ite_false (a b : α) : (if False then a else b) = b := rfl
@[simp] theorem dite_true {α : Sort u} {t : True → α} {e : ¬ True → α} : (dite True t e) = t True.intro := rfl
@[simp] theorem dite_false {α : Sort u} {t : False → α} {e : ¬ False → α} : (dite False t e) = e not_false := rfl
@[simp] theorem ite_self {α : Sort u} {c : Prop} {d : Decidable c} (a : α) : ite c a a = a := by cases d <;> rfl
@[simp] theorem and_self (p : Prop) : (p ∧ p) = p := propext ⟨(·.1), fun h => ⟨h, h⟩⟩
@[simp] theorem and_true (p : Prop) : (p ∧ True) = p := propext ⟨(·.1), (⟨·, trivial⟩)⟩
@[simp] theorem true_and (p : Prop) : (True ∧ p) = p := propext ⟨(·.2), (⟨trivial, ·⟩)⟩
@[simp] theorem and_false (p : Prop) : (p ∧ False) = False := eq_false (·.2)
@[simp] theorem false_and (p : Prop) : (False ∧ p) = False := eq_false (·.1)
@[simp] theorem or_self (p : Prop) : (p ∨ p) = p := propext ⟨fun | .inl h | .inr h => h, .inl⟩
@[simp] theorem or_true (p : Prop) : (p ∨ True) = True := eq_true (.inr trivial)
@[simp] theorem true_or (p : Prop) : (True ∨ p) = True := eq_true (.inl trivial)
@[simp] theorem or_false (p : Prop) : (p ∨ False) = p := propext ⟨fun (.inl h) => h, .inl⟩
@[simp] theorem false_or (p : Prop) : (False ∨ p) = p := propext ⟨fun (.inr h) => h, .inr⟩
@[simp] theorem iff_self (p : Prop) : (p ↔ p) = True := eq_true .rfl
@[simp] theorem iff_true (p : Prop) : (p ↔ True) = p := propext ⟨(·.2 trivial), fun h => ⟨fun _ => trivial, fun _ => h⟩⟩
@[simp] theorem true_iff (p : Prop) : (True ↔ p) = p := propext ⟨(·.1 trivial), fun h => ⟨fun _ => h, fun _ => trivial⟩⟩
@[simp] theorem iff_false (p : Prop) : (p ↔ False) = ¬p := propext ⟨(·.1), (⟨·, False.elim⟩)⟩
@[simp] theorem false_iff (p : Prop) : (False ↔ p) = ¬p := propext ⟨(·.2), (⟨False.elim, ·⟩)⟩
@[simp] theorem false_implies (p : Prop) : (False → p) = True := eq_true False.elim
@[simp] theorem implies_true (α : Sort u) : (α → True) = True := eq_true fun _ => trivial
@[simp] theorem true_implies (p : Prop) : (True → p) = p := propext ⟨(· trivial), (fun _ => ·)⟩
@[simp] theorem Bool.or_false (b : Bool) : (b || false) = b := by cases b <;> rfl
@[simp] theorem Bool.or_true (b : Bool) : (b || true) = true := by cases b <;> rfl
@[simp] theorem Bool.false_or (b : Bool) : (false || b) = b := by cases b <;> rfl
@[simp] theorem Bool.true_or (b : Bool) : (true || b) = true := by cases b <;> rfl
@[simp] theorem Bool.or_self (b : Bool) : (b || b) = b := by cases b <;> rfl
@[simp] theorem Bool.or_eq_true (a b : Bool) : ((a || b) = true) = (a = true ∨ b = true) := by
cases a <;> cases b <;> decide
@[simp] theorem Bool.and_false (b : Bool) : (b && false) = false := by cases b <;> rfl
@[simp] theorem Bool.and_true (b : Bool) : (b && true) = b := by cases b <;> rfl
@[simp] theorem Bool.false_and (b : Bool) : (false && b) = false := by cases b <;> rfl
@[simp] theorem Bool.true_and (b : Bool) : (true && b) = b := by cases b <;> rfl
@[simp] theorem Bool.and_self (b : Bool) : (b && b) = b := by cases b <;> rfl
@[simp] theorem Bool.and_eq_true (a b : Bool) : ((a && b) = true) = (a = true ∧ b = true) := by
cases a <;> cases b <;> decide
theorem Bool.and_assoc (a b c : Bool) : (a && b && c) = (a && (b && c)) := by
cases a <;> cases b <;> cases c <;> decide
theorem Bool.or_assoc (a b c : Bool) : (a || b || c) = (a || (b || c)) := by
cases a <;> cases b <;> cases c <;> decide
@[simp] theorem Bool.not_not (b : Bool) : (!!b) = b := by cases b <;> rfl
@[simp] theorem Bool.not_true : (!true) = false := by decide
@[simp] theorem Bool.not_false : (!false) = true := by decide
@[simp] theorem Bool.not_beq_true (b : Bool) : (!(b == true)) = (b == false) := by cases b <;> rfl
@[simp] theorem Bool.not_beq_false (b : Bool) : (!(b == false)) = (b == true) := by cases b <;> rfl
@[simp] theorem Bool.not_eq_true' (b : Bool) : ((!b) = true) = (b = false) := by cases b <;> simp
@[simp] theorem Bool.not_eq_false' (b : Bool) : ((!b) = false) = (b = true) := by cases b <;> simp
@[simp] theorem Bool.beq_to_eq (a b : Bool) :
(a == b) = (a = b) := by cases a <;> cases b <;> decide
@[simp] theorem Bool.not_beq_to_not_eq (a b : Bool) :
(!(a == b)) = ¬(a = b) := by cases a <;> cases b <;> decide
@[simp] theorem Bool.not_eq_true (b : Bool) : (¬(b = true)) = (b = false) := by cases b <;> decide
@[simp] theorem Bool.not_eq_false (b : Bool) : (¬(b = false)) = (b = true) := by cases b <;> decide
@[simp] theorem decide_eq_true_eq [Decidable p] : (decide p = true) = p := propext <| Iff.intro of_decide_eq_true decide_eq_true
@[simp] theorem decide_not [h : Decidable p] : decide (¬ p) = !decide p := by cases h <;> rfl
@[simp] theorem not_decide_eq_true [h : Decidable p] : ((!decide p) = true) = ¬ p := by cases h <;> simp [decide, *]
@[simp] theorem heq_eq_eq {α : Sort u} (a b : α) : HEq a b = (a = b) := propext <| Iff.intro eq_of_heq heq_of_eq
@[simp] theorem cond_true (a b : α) : cond true a b = a := rfl
@[simp] theorem cond_false (a b : α) : cond false a b = b := rfl
@[simp] theorem beq_self_eq_true [BEq α] [LawfulBEq α] (a : α) : (a == a) = true := LawfulBEq.rfl
@[simp] theorem beq_self_eq_true' [DecidableEq α] (a : α) : (a == a) = true := by simp [BEq.beq]
@[simp] theorem bne_self_eq_false [BEq α] [LawfulBEq α] (a : α) : (a != a) = false := by simp [bne]
@[simp] theorem bne_self_eq_false' [DecidableEq α] (a : α) : (a != a) = false := by simp [bne]
@[simp] theorem Nat.le_zero_eq (a : Nat) : (a ≤ 0) = (a = 0) :=
propext ⟨fun h => Nat.le_antisymm h (Nat.zero_le ..), fun h => by simp [h]⟩
@[simp] theorem decide_False : decide False = false := rfl
@[simp] theorem decide_True : decide True = true := rfl
|
module Naive
(
reduceByRowsV
, reduceByColumnsV
)
where
import qualified Numeric.LinearAlgebra as LA
type Matrix = LA.Matrix LA.R
type Vector = LA.Vector LA.R
type R = LA.R
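-- | Collapse each row of a matrix to a scalar with the given reduction function,
--   returning a vector of the results. For example (illustration only, assuming
--   hmatrix's LA.sumElements): `reduceByRowsV LA.sumElements m` is the vector of row sums.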
reduceByRowsV :: (Vector -> R) -> Matrix -> Vector
reduceByRowsV f = LA.vector . map f . LA.toRows
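-- | Same as 'reduceByRowsV', but the reduction function is applied to each column.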
reduceByColumnsV :: (Vector -> R) -> Matrix -> Vector
reduceByColumnsV f = LA.vector . map f . LA.toColumns
|
-- Copyright (c) Microsoft Corporation. All rights reserved.
-- Licensed under the MIT license.
import ..smtexpr
import ..smtcompile
import ..bitvector
import .spec
import .lemmas
import .lemmas_basic
import .irstate
import .freevar
import .equiv
import .closed
import smt2.syntax
import system.io
import init.meta.tactic
import init.meta.interactive
namespace spec
open irsem
open freevar
-- get_free_*_name
lemma get_free_name_diff: ∀ n,
get_free_sbitvec_name n ≠ get_free_sbool_name n
:= begin
intros,
intros H,
unfold get_free_sbitvec_name at H,
unfold get_free_sbool_name at H,
rw string.eq_list at H,
rw string.append_to_list at H,
rw string.append_to_list at H,
have H' := list.append_eq2 H,
cases H'
end
lemma closed_regfile_apply_add_b_bv: ∀ (rf:regfile irsem_smt) (η:freevar.env)
vname vz bname vb
(HC:closed_regfile (rf.apply_to_values irsem_smt (env.replace_valty η))),
closed_regfile (rf.apply_to_values irsem_smt
(env.replace_valty ((η.add_bv vname vz).add_b bname vb)))
:= begin
intros,
revert HC,
apply regfile.induction rf,
{
unfold closed_regfile,
intros,
rw regfile.empty_apply_empty,
apply closed_regfile_empty
},
{
intros rf IH,
intros,
unfold closed_regfile,
intros,
rw regfile.apply_update_comm at HC,
rw regfile.apply_update_comm,
unfold closed_regfile at IH,
rw closed_regfile_update_split at HC,
cases HC with HC HCval,
have HC := IH HC,
rw closed_regfile_update_split,
split,
{
assumption
},
{
cases v,
unfold freevar.env.replace_valty at HCval,
rw closed_ival_split at HCval,
cases HCval with HCval1 HCval2,
unfold freevar.env.replace_valty,
rw closed_ival_split,
split,
{
have H := closed_b_add_bv vname vz HCval1,
have H := closed_b_add_b bname vb H,
assumption
},
{
have H := closed_bv_add_bv vname vz HCval2,
have H := closed_bv_add_b bname vb H,
assumption
}
}
}
end
lemma regfile_update_ival_closed: ∀ rf rf' (η:freevar.env) regn sz
vn pn bvn p
(HCRF: closed_regfile (regfile.apply_to_values irsem_smt rf
(env.replace_valty η)))
(HRF': rf' = regfile.update irsem_smt rf regn
(valty.ival sz (sbitvec.var sz vn) (sbool.var pn))),
closed_regfile (regfile.apply_to_values irsem_smt rf'
(env.replace_valty (env.add_b (env.add_bv η vn bvn) pn p)))
:= begin
intros,
have H1 : closed_regfile (regfile.apply_to_values irsem_smt
rf (env.replace_valty (env.add_b (env.add_bv η vn bvn) pn p))),
{
apply closed_regfile_apply_add_b_bv,
assumption
},
have H2 : closed_valty
((env.add_b (env.add_bv η vn bvn) pn p)
⟦valty.ival sz (sbitvec.var sz vn) (sbool.var pn)⟧),
{ apply ival_closed },
rw HRF',
rw regfile.apply_update_comm,
rw closed_regfile_update_split,
split, assumption, assumption
end
lemma updatereg_closed: ∀ (ss ss':irstate_smt) (η:freevar.env)
regn sz vn pn bvn p
(HC:closed_irstate (η⟦ss⟧))
(HNOTIN1: vn ∉ η)
(HNOTIN2: pn ∉ η)
(HNOTEQ: vn ≠ pn)
(HS:ss' = irstate.updatereg irsem_smt ss regn
(irsem.valty.ival sz (sbitvec.var sz vn) (sbool.var pn))),
closed_irstate (((η.add_bv vn bvn).add_b pn p)⟦ss'⟧)
:= begin
intros,
cases ss with sub srf,
cases ss' with sub' srf',
unfold freevar.env.replace at *,
rw ← irstate.setub_apply_to_values at *,
unfold irstate.getub at *,
simp at *,
unfold irstate.setub at *,
unfold irstate.apply_to_values at *,
rw closed_irstate_split,
rw closed_irstate_split at HC,
cases HC with HCUB HCRF,
unfold irstate.updatereg at HS,
simp at *,
injection HS,
subst h_1,
split,
{
have H0: closed_b ((env.add_bv η vn bvn)⟦sub'⟧),
{
apply closed_b_add_bv,
apply HCUB
},
apply closed_b_add_b,
{ assumption }
},
{
apply regfile_update_ival_closed, assumption, assumption
}
end
-- encode
-- Note that `irstate_equiv η⟦iss⟧ ise` does not imply
-- closed_irstate η⟦iss⟧. This is because, for example,
-- `b_equiv (sbool.and (sbool.var _) (sbool.ff)) ff` holds.
-- Then why is `encode iss ise` needed? Because `encode` is
-- the only way to relate `ise` and `iss`.
lemma init_var_encode_intty: ∀ ise iss ise' iss' (sg sg':std_gen) η n t
(HENC: encode iss ise η) (HCLOSED: closed_irstate (η⟦iss⟧))
(HNOTIN1: get_free_sbitvec_name n ∉ η)
(HNOTIN2: get_free_sbool_name n ∉ η)
(HIE:(ise', sg') = create_init_var_exec n t (ise, sg))
(HIS:iss' = create_init_var_smt n t iss),
∃ η', (encode iss' ise' η' ∧ closed_irstate (η'⟦iss'⟧) ∧
env.added2 η (get_free_sbitvec_name n)
(get_free_sbool_name n) η')
:= begin
intros,
unfold create_init_var_smt at HIS,
simp at HIS,
unfold create_init_var_exec at HIE,
simp at HIE,
generalize Hrbv':(get_rand_bv (get_sz_from_ty t) sg) = rbv',
cases rbv' with rbv' sg'',
rw Hrbv' at *,
unfold create_init_var_exec._match_2 at HIE,
generalize Hrb':(get_rand_bool sg'') = rb',
cases rb' with rb' sg''',
rw Hrb' at *,
unfold create_init_var_exec._match_1 at HIE,
injection HIE with HIE HIE_sg,
simp at HIE,
existsi ((η.add_b (get_free_sbool_name n) rb')
.add_bv (get_free_sbitvec_name n) rbv'.to_int),
split,
{
unfold encode,
rw HIS,
rw replace_updatereg,
rw HIE,
rw env.not_in_add_bv_irstate_comm,
rw env.not_in_add_b_irstate_comm,
rw HCLOSED, rw HCLOSED,
rw env.not_in_add_bv_valty_comm,
rw env.not_in_add_b_valty_comm,
unfold freevar.env.replace_valty,
-- making value..
unfold get_free_sbitvec,
rw env.not_in_replace_sbv,
rw env.add_b_replace_sbv,
rw env.empty_replace_sbv,
rw env.add_bv_replace_match,
-- making poison..
unfold get_free_sbool,
rw env.not_in_replace_sb,
rw env.add_b_replace_match,
rw env.replace_sb_of_bool,
apply irstate.updatereg_equiv,
{
intros,
cases rb',
{ -- poison
apply val_equiv.poison_intty,
{ constructor, constructor },
{ refl },
{ refl }
},
{
apply val_equiv.concrete_intty,
{ constructor, constructor },
{
cases rbv',
rw sbitvec_of_int_const,
constructor
},
{ refl }
}
},
{ rw sbitvec_of_int_const, unfold equals_size, simp },
{ apply HENC },
any_goals { assumption },
any_goals {
apply env.not_in_add_b,
apply get_free_name_diff,
assumption
},
},
split,
{
unfold closed_irstate,
intros,
rw HIS,
rw replace_updatereg,
rw env.not_in_add_bv_irstate_comm,
rw env.not_in_add_b_irstate_comm,
rw HCLOSED, rw HCLOSED,
rw env.not_in_add_bv_valty_comm,
rw env.not_in_add_b_valty_comm,
unfold freevar.env.replace_valty,
-- making value..
unfold get_free_sbitvec,
rw env.not_in_replace_sbv,
rw env.add_b_replace_sbv,
rw env.empty_replace_sbv,
rw env.add_bv_replace_match,
-- making poison..
unfold get_free_sbool,
rw env.not_in_replace_sb,
rw env.add_b_replace_match,
rw env.replace_sb_of_bool,
rw replace_updatereg,
unfold freevar.env.replace_valty,
rw env.replace_sbv_of_int,
rw env.replace_sb_of_bool,
rw HCLOSED,
any_goals { assumption },
apply env.not_in_add_b, apply get_free_name_diff, assumption,
apply env.not_in_add_b, apply get_free_name_diff, assumption
},
{
unfold env.added2,
split, {
intros n_1 H1 H2,
cases H2,
apply env.not_in_add_bv,
assumption,
apply env.not_in_add_b,
assumption,
rw env.not_in_split at H1,
rw env.not_in_split,
assumption
},
{
intros n_1 H,
unfold env.add_b,
unfold env.add_bv,
unfold has_mem.mem, simp,
cases H,
{
rw if_neg, rw if_neg, unfold has_mem.mem at H,
cases H, left, assumption, right, assumption,
intros H', rw H' at H, apply HNOTIN1, assumption,
intros H', rw H' at H, apply HNOTIN2, assumption,
},
{
cases H,
{ right, rw if_pos, intros H0, cases H0, assumption },
{ left, rw if_pos, intros H0, cases H0, assumption }
}
}
}
end
def fv_smt_names (fvnames:list string) :=
fvnames.map get_free_sbitvec_name ++
fvnames.map get_free_sbool_name
lemma init_state_encode_strong: ∀ (freevars:list (string × ty)) (sg sg':std_gen) ise iss
(HUNQ: list.unique $ freevars.map prod.fst)
(HIE:(ise, sg') = create_init_state_exec freevars sg)
(HIS:iss = create_init_state_smt freevars),
∃ η, (encode iss ise η ∧ closed_irstate (η⟦iss⟧)
∧ env.has_only η (fv_smt_names $ freevars.map prod.fst))
:= begin
intros,
revert ise iss sg sg',
induction freevars,
{
intros,
unfold create_init_state_exec at HIE,
unfold create_init_state_smt at HIS,
simp at HIE,simp at HIS,
injection HIE with HIE _,
rw [HIS, HIE],
existsi (freevar.env.empty),
unfold encode, rw empty_replace_st,
constructor, constructor, constructor,
any_goals { constructor },
{
apply closed_irstate_empty
},
{
unfold fv_smt_names, simp,
unfold env.has_only, intros, split,
{ intros H, cases H },
{ intros H, have H := (env.not_in_empty name) H, cases H }
}
},
{
intros,
rename freevars_tl tl,
cases freevars_hd with vname vty,
have HEtmp: ∀ h t, create_init_state_exec (h::t) sg
= create_init_var_exec h.1 h.2 (create_init_state_exec t sg),
{ intros, refl },
rw HEtmp at HIE,
clear HEtmp,
have HStmp: ∀ h t, create_init_state_smt (h::t)
= create_init_var_smt h.1 h.2 (create_init_state_smt t),
{ intros, refl },
rw HStmp at HIS,
clear HStmp,
generalize HE0: create_init_state_exec tl sg = ise_sg0,
generalize HS0: create_init_state_smt tl = iss0,
rw HE0 at *,
rw HS0 at *,
cases ise_sg0 with ise0 sg0,
simp at HIE HIS,
have HEX: (∃ (η0 : env), encode iss0 ise0 η0 ∧ closed_irstate (η0⟦iss0⟧)
∧ env.has_only η0 (fv_smt_names $ tl.map prod.fst)),
{
apply freevars_ih,
{
simp at HUNQ, cases HUNQ, assumption
}, apply (eq.symm HE0), refl
},
cases HEX with η0 HEX,
cases HEX with HEX1 HEX2,
cases HEX2 with HEX2 HEX3,
-- Now add a new variable to each irstate
have HUPDATED: ∃ η', (encode iss ise η' ∧ closed_irstate (η'⟦iss⟧) ∧
env.added2 η0 (get_free_sbitvec_name vname)
(get_free_sbool_name vname) η'),
{
apply init_var_encode_intty,
apply HEX1,
apply HEX2,
{ -- get_free_sbitvec_name vname ∉ η0
simp at HUNQ, cases HUNQ,
apply env.has_only_not_in,
{ apply HEX3 },
{ unfold fv_smt_names,
unfold get_free_sbitvec_name,
apply list.not_mem_append,
apply slist_prefix_notin, assumption,
apply slist_prefix_notin2 "v_" "b_" 'v' 'b', assumption,
{ intros H0, cases H0 }, refl, refl
}
},
{ -- get_free_sbool_name vname ∉ η0
simp at HUNQ, cases HUNQ,
apply env.has_only_not_in,
{ apply HEX3 },
{ unfold fv_smt_names,
unfold get_free_sbool_name,
apply list.not_mem_append,
apply slist_prefix_notin2 "b_" "v_" 'b' 'v', assumption,
{ intros H0, cases H0 }, refl, refl,
apply slist_prefix_notin, assumption,
}
},
assumption, assumption
},
cases HUPDATED with η HUPDATED,
cases HUPDATED with HUPDATED Htmp, -- env.has_only_added2 (Honly) (Hadd2)
cases Htmp with HUPDATED2 HUPDATED3,
have Hη := env.has_only_added2 HEX3 HUPDATED3,
existsi η,
split, assumption, split, assumption,
{
unfold fv_smt_names, simp,
unfold fv_smt_names at Hη, simp at Hη,
rw ← list.cons_append,
rw ← env.has_only_shuffle (get_free_sbool_name vname),
simp,
rw env.has_only_shuffle2,
apply Hη
}
}
end
theorem init_state_encode_prf: init_state_encode
:= begin
unfold init_state_encode,
intros,
have H := init_state_encode_strong freevars sg sg' ise iss HUNQ
HIE HIS,
cases H with η H,
cases H, existsi η, assumption
end
-- Future work: theorem that `freevars.get` correctly returns all
-- free variables.
end spec |
axiom q : Nat → Prop
axiom p : Nat → Prop
axiom q_eq_p : q = p
example (h₁ : ¬ q 0) (h₂ : ¬ q 0) : ¬ p 0 := by
trace_state
/-
h₁ : ¬ q 0
h₂ : ¬ q 0
⊢ ¬ p 0
-/
simp_all
/-
h₂ : ¬ q 0
⊢ ¬ p 0
-/
trace_state
rw [← q_eq_p]
assumption
|
lemma homeomorphism_moving_points_exists_gen: assumes K: "finite K" "\<And>i. i \<in> K \<Longrightarrow> x i \<in> S \<and> y i \<in> S" "pairwise (\<lambda>i j. (x i \<noteq> x j) \<and> (y i \<noteq> y j)) K" and "2 \<le> aff_dim S" and ope: "openin (top_of_set (affine hull S)) S" and "S \<subseteq> T" "T \<subseteq> affine hull S" "connected S" shows "\<exists>f g. homeomorphism T T f g \<and> (\<forall>i \<in> K. f(x i) = y i) \<and> {x. \<not> (f x = x \<and> g x = x)} \<subseteq> S \<and> bounded {x. \<not> (f x = x \<and> g x = x)}" |
constant mynat : Type
-- https://leanprover.github.io/reference/declarations.html?highlight=assume
-- (Remember that ∀ is syntactic sugar for Π, and assume is syntactic sugar for λ.)
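-- A quick illustration (my own example, not from the lectures): the ∀-statement
-- below is a Π-type, and `assume` builds exactly the λ-term that `intro` would.
example : ∀ n : nat, n + 0 = n := assume n, rfl
#check (λ n : nat, rfl : ∀ n : nat, n + 0 = n) -- the same proof term, written as a λ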
lemma func : nat → nat :=
begin
intro n,
exact 3*n+2,
end
#reduce func 5 -- 17
#reduce func 1 -- 5
#check func -- func : ℕ → ℕ
-- #eval func 7 -- code generation failed, VM does not have code for 'func'
-- level 3
lemma example2 (P Q R S T U: Type)
(p : P)
(h : P → Q)
(i : Q → R)
(j : Q → T)
(k : S → T)
(l : T → U)
: U :=
begin
suffices q : Q,
-- have q := h p,
have t := j q, -- <=> have t : T, from j q,
have u := l t,
show U, from u,
show Q, from h p, -- <=> exact h p,
end
#print example2
/-
λ (P Q R S T U : Type)
(p : P) (h : P → Q) (i : Q → R)
(j : Q → T) (k : S → T) (l : T → U),
l (j (h p))
-/
-- level 4
example (P Q R S T U: Type)
(p : P)
(h : P → Q)
(i : Q → R)
(j : Q → T)
(k : S → T)
(l : T → U)
: U :=
begin
-- exact l (j (h p)),
apply l,
-- exact j (h p),
apply j,
-- exact h p,
apply h,
exact p,
end
-- level 5
example (P Q : Type) : P → (Q → P) :=
begin
intro p, -- let p element in set P
intro q, -- let q some element in set Q
exact p, -- we already know that p is element in P, from hypothesis "p"
end
-- level 6
example (P Q R : Type) : (P → (Q → R)) → ((P → Q) → (P → R)) :=
begin
intros f h p,
apply f,
-- first case P:
exact p,
-- second case Q:
exact h p,
end
-- with refine
lemma example_refine (P Q R : Type) : (P → (Q → R)) → ((P → Q) → (P → R)) :=
begin
intros f h p,
-- refine f p _, exact h p, => goals accomplished
-- refine f p (h p), -- => goals accomplished
exact f p (h p),
end
#print example_refine
-- λ (P Q R : Type) (f : P → Q → R) (h : P → Q) (p : P), f p (h p)
-- with assume
lemma example_assume (P Q R : Type) : (P → (Q → R)) → ((P → Q) → (P → R)) :=
begin
assume (f' : P → Q → R) (h' : P → Q) (p' : P),
exact f' p' (h' p'),
end
#print example_assume
-- λ (P Q R : Type) (f' : P → Q → R) (h' : P → Q) (p' : P), f' p' (h' p')
-- level 8
example (P Q : Type) : (P → Q) → ((Q → empty) → (P → empty)) :=
begin
intros f g h,
-- apply g (f _),
-- apply h,
-- <=>
exact g (f h),
end
-- level 9
example (A B C D E F G H I J K L : Type)
(f1 : A → B) (f2 : B → E) (f3 : E → D) (f4 : D → A) (f5 : E → F)
(f6 : F → C) (f7 : B → C) (f8 : F → G) (f9 : G → J) (f10 : I → J)
(f11 : J → I) (f12 : I → H) (f13 : E → H) (f14 : H → K) (f15 : I → L)
: A → L :=
begin
intro a,
apply f15, -- goal is now I
-- apply f11, apply f10, -- back to goal I: same state as in the previous line
apply f11, -- goal: J
apply f9,  -- goal: G
apply f8,  -- goal: F
apply f5,  -- goal: E
apply f2,  -- goal: B
exact f1 a,
end
-- Advanced proposition world.
-- Level 9: exfalso and proof by contradiction.
lemma contra (P Q : Prop) : (P ∧ ¬ P) → Q :=
begin
intro hpnp,
cases hpnp with hp hnp,
exfalso,
exact hnp hp,
end
|
/-
Copyright (c) 2022 Kevin H. Wilson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin H. Wilson
! This file was ported from Lean 3 source module analysis.calculus.uniform_limits_deriv
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.Calculus.MeanValue
import Mathbin.Analysis.NormedSpace.IsROrC
import Mathbin.Order.Filter.Curry
/-!
# Swapping limits and derivatives via uniform convergence
The purpose of this file is to prove that the derivative of the pointwise limit of a sequence of
functions is the pointwise limit of the functions' derivatives when the derivatives converge
_uniformly_. The formal statement appears as `has_fderiv_at_of_tendsto_locally_uniformly_at`.
## Main statements
* `uniform_cauchy_seq_on_filter_of_fderiv`: If
1. `f : ℕ → E → G` is a sequence of functions which have derivatives
`f' : ℕ → E → (E →L[𝕜] G)` on a neighborhood of `x`,
2. the functions `f` converge at `x`, and
3. the derivatives `f'` form a Cauchy sequence uniformly on a neighborhood of `x`,
then the `f` form a Cauchy sequence _uniformly_ on a neighborhood of `x`
* `has_fderiv_at_of_tendsto_uniformly_on_filter` : Suppose (1), (2), and (3) above are true. Let
`g` (resp. `g'`) be the limiting function of the `f` (resp. `g'`). Then `f'` is the derivative of
`g` on a neighborhood of `x`
* `has_fderiv_at_of_tendsto_uniformly_on`: An often-easier-to-use version of the above theorem when
*all* the derivatives exist and functions converge on a common open set and the derivatives
converge uniformly there.
Each of the above statements also has variations that support `deriv` instead of `fderiv`.
## Implementation notes
Our technique for proving the main result is the famous "`ε / 3` proof." In words, you can find it
explained, for instance, at [this StackExchange post](https://math.stackexchange.com/questions/214218/uniform-convergence-of-derivatives-tao-14-2-7).
The subtlety is that we want to prove that the difference quotients of the `g` converge to the `g'`.
That is, we want to prove something like:
```
∀ ε > 0, ∃ δ > 0, ∀ y ∈ B_δ(x), |y - x|⁻¹ * |(g y - g x) - g' x (y - x)| < ε.
```
To do so, we will need to introduce a pair of quantifiers
```lean
∀ ε > 0, ∃ N, ∀ n ≥ N, ∃ δ > 0, ∀ y ∈ B_δ(x), |y - x|⁻¹ * |(g y - g x) - g' x (y - x)| < ε.
```
So how do we write this in terms of filters? Well, the initial definition of the derivative is
```lean
tendsto (|y - x|⁻¹ * |(g y - g x) - g' x (y - x)|) (𝓝 x) (𝓝 0)
```
There are two ways we might introduce `n`. We could do:
```lean
∀ᶠ (n : ℕ) in at_top, tendsto (|y - x|⁻¹ * |(g y - g x) - g' x (y - x)|) (𝓝 x) (𝓝 0)
```
but this is equivalent to the quantifier order `∃ N, ∀ n ≥ N, ∀ ε > 0, ∃ δ > 0, ∀ y ∈ B_δ(x)`,
which _implies_ our desired `∀ ∃ ∀ ∃ ∀` but is _not_ equivalent to it. On the other hand, we might
try
```lean
tendsto (|y - x|⁻¹ * |(g y - g x) - g' x (y - x)|) (at_top ×ᶠ 𝓝 x) (𝓝 0)
```
but this is equivalent to the quantifier order `∀ ε > 0, ∃ N, ∃ δ > 0, ∀ n ≥ N, ∀ y ∈ B_δ(x)`, which
again _implies_ our desired `∀ ∃ ∀ ∃ ∀` but is not equivalent to it.
So to get the quantifier order we want, we need to introduce a new filter construction, which we
call a "curried filter"
```lean
tendsto (|y - x|⁻¹ * |(g y - g x) - g' x (y - x)|) (at_top.curry (𝓝 x)) (𝓝 0)
```
Then the above implications are `filter.tendsto.curry` and
`filter.tendsto.mono_left filter.curry_le_prod`. We will use both of these deductions as part of
our proof.
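For reference (an aside added here, not part of the original exposition), the curried filter is
characterized by `eventually_curry_iff`, which is used later in this file:
```lean
(∀ᶠ x in l.curry l', p x) ↔ ∀ᶠ a in l, ∀ᶠ b in l', p (a, b)
```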
We note that if you loosen the assumptions of the main theorem then the proof becomes quite a bit
easier. In particular, if you assume there is a common neighborhood `s` where all of the three
assumptions of `has_fderiv_at_of_tendsto_uniformly_on_filter` hold and that the `f'` are
continuous, then you can avoid the mean value theorem and much of the work around curried filters.
## Tags
uniform convergence, limits of derivatives
-/
open Filter
open uniformity Filter Topology
section LimitsOfDerivatives
variable {ι : Type _} {l : Filter ι} {E : Type _} [NormedAddCommGroup E] {𝕜 : Type _} [IsROrC 𝕜]
[NormedSpace 𝕜 E] {G : Type _} [NormedAddCommGroup G] [NormedSpace 𝕜 G] {f : ι → E → G}
{g : E → G} {f' : ι → E → E →L[𝕜] G} {g' : E → E →L[𝕜] G} {x : E}
/-- If a sequence of functions between real or complex normed spaces are eventually differentiable on a
neighborhood of `x`, they are Cauchy _at_ `x`, and their derivatives
are a uniform Cauchy sequence in a neighborhood of `x`, then the functions form a uniform Cauchy
sequence in a neighborhood of `x`. -/
theorem uniformCauchySeqOnFilter_of_fderiv (hf' : UniformCauchySeqOnFilter f' l (𝓝 x))
(hf : ∀ᶠ n : ι × E in l ×ᶠ 𝓝 x, HasFderivAt (f n.1) (f' n.1 n.2) n.2)
(hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOnFilter f l (𝓝 x) :=
by
let : NormedSpace ℝ E
exact NormedSpace.restrictScalars ℝ 𝕜 _
rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero] at hf'⊢
suffices
TendstoUniformlyOnFilter (fun (n : ι × ι) (z : E) => f n.1 z - f n.2 z - (f n.1 x - f n.2 x)) 0
(l ×ᶠ l) (𝓝 x) ∧
TendstoUniformlyOnFilter (fun (n : ι × ι) (z : E) => f n.1 x - f n.2 x) 0 (l ×ᶠ l) (𝓝 x)
by
have := this.1.add this.2
rw [add_zero] at this
exact this.congr (by simp)
constructor
· -- This inequality follows from the mean value theorem. To apply it, we will need to shrink our
-- neighborhood to small enough ball
rw [Metric.tendstoUniformlyOnFilter_iff] at hf'⊢
intro ε hε
have := (tendsto_swap4_prod.eventually (hf.prod_mk hf)).diag_of_prod_right
obtain ⟨a, b, c, d, e⟩ := eventually_prod_iff.1 ((hf' ε hε).And this)
obtain ⟨R, hR, hR'⟩ := metric.nhds_basis_ball.eventually_iff.mp d
let r := min 1 R
have hr : 0 < r := by simp [hR]
have hr' : ∀ ⦃y : E⦄, y ∈ Metric.ball x r → c y := fun y hy =>
hR' (lt_of_lt_of_le (metric.mem_ball.mp hy) (min_le_right _ _))
have hxy : ∀ y : E, y ∈ Metric.ball x r → ‖y - x‖ < 1 :=
by
intro y hy
rw [Metric.mem_ball, dist_eq_norm] at hy
exact lt_of_lt_of_le hy (min_le_left _ _)
have hxyε : ∀ y : E, y ∈ Metric.ball x r → ε * ‖y - x‖ < ε :=
by
intro y hy
exact (mul_lt_iff_lt_one_right hε.lt).mpr (hxy y hy)
-- With a small ball in hand, apply the mean value theorem
refine'
eventually_prod_iff.mpr
⟨_, b, fun e : E => Metric.ball x r e,
eventually_mem_set.mpr (metric.nhds_basis_ball.mem_of_mem hr), fun n hn y hy => _⟩
simp only [Pi.zero_apply, dist_zero_left] at e⊢
refine' lt_of_le_of_lt _ (hxyε y hy)
exact
Convex.norm_image_sub_le_of_norm_has_fderiv_within_le
(fun y hy => ((e hn (hr' hy)).2.1.sub (e hn (hr' hy)).2.2).HasFderivWithinAt)
(fun y hy => (e hn (hr' hy)).1.le) (convex_ball x r) (Metric.mem_ball_self hr) hy
· -- This is just `hfg` run through `eventually_prod_iff`
refine' metric.tendsto_uniformly_on_filter_iff.mpr fun ε hε => _
obtain ⟨t, ht, ht'⟩ := (metric.cauchy_iff.mp hfg).2 ε hε
exact
eventually_prod_iff.mpr
⟨fun n : ι × ι => f n.1 x ∈ t ∧ f n.2 x ∈ t,
eventually_prod_iff.mpr ⟨_, ht, _, ht, fun n hn n' hn' => ⟨hn, hn'⟩⟩, fun y => True, by
simp, fun n hn y hy => by simpa [norm_sub_rev, dist_eq_norm] using ht' _ hn.1 _ hn.2⟩
#align uniform_cauchy_seq_on_filter_of_fderiv uniformCauchySeqOnFilter_of_fderiv
/-- A variant of the second fundamental theorem of calculus (FTC-2): If a sequence of functions
between real or complex normed spaces are differentiable on a ball centered at `x`, they
form a Cauchy sequence _at_ `x`, and their derivatives are Cauchy uniformly on the ball, then the
functions form a uniform Cauchy sequence on the ball.
NOTE: The fact that we work on a ball is typically all that is necessary to work with power series
and Dirichlet series (our primary use case). However, this can be generalized by replacing the ball
with any connected, bounded, open set and replacing uniform convergence with local uniform
convergence. See `cauchy_map_of_uniform_cauchy_seq_on_fderiv`.
-/
theorem uniformCauchySeqOn_ball_of_fderiv {r : ℝ} (hf' : UniformCauchySeqOn f' l (Metric.ball x r))
(hf : ∀ n : ι, ∀ y : E, y ∈ Metric.ball x r → HasFderivAt (f n) (f' n y) y)
(hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOn f l (Metric.ball x r) :=
by
let : NormedSpace ℝ E
exact NormedSpace.restrictScalars ℝ 𝕜 _
have : ne_bot l := (cauchy_map_iff.1 hfg).1
rcases le_or_lt r 0 with (hr | hr)
·
simp only [Metric.ball_eq_empty.2 hr, UniformCauchySeqOn, Set.mem_empty_iff_false,
IsEmpty.forall_iff, eventually_const, imp_true_iff]
rw [SeminormedAddGroup.uniformCauchySeqOn_iff_tendstoUniformlyOn_zero] at hf'⊢
suffices
TendstoUniformlyOn (fun (n : ι × ι) (z : E) => f n.1 z - f n.2 z - (f n.1 x - f n.2 x)) 0
(l ×ᶠ l) (Metric.ball x r) ∧
TendstoUniformlyOn (fun (n : ι × ι) (z : E) => f n.1 x - f n.2 x) 0 (l ×ᶠ l) (Metric.ball x r)
by
have := this.1.add this.2
rw [add_zero] at this
refine' this.congr _
apply eventually_of_forall
intro n z hz
simp
constructor
· -- This inequality follows from the mean value theorem
rw [Metric.tendstoUniformlyOn_iff] at hf'⊢
intro ε hε
obtain ⟨q, hqpos, hq⟩ : ∃ q : ℝ, 0 < q ∧ q * r < ε :=
by
simp_rw [mul_comm]
exact exists_pos_mul_lt hε.lt r
apply (hf' q hqpos.gt).mono
intro n hn y hy
simp_rw [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg] at hn⊢
have mvt :=
Convex.norm_image_sub_le_of_norm_has_fderiv_within_le
(fun z hz => ((hf n.1 z hz).sub (hf n.2 z hz)).HasFderivWithinAt) (fun z hz => (hn z hz).le)
(convex_ball x r) (Metric.mem_ball_self hr) hy
refine' lt_of_le_of_lt mvt _
have : q * ‖y - x‖ < q * r :=
mul_lt_mul' rfl.le (by simpa only [dist_eq_norm] using metric.mem_ball.mp hy) (norm_nonneg _)
hqpos
exact this.trans hq
· -- This is just `hfg` run through `eventually_prod_iff`
refine' metric.tendsto_uniformly_on_iff.mpr fun ε hε => _
obtain ⟨t, ht, ht'⟩ := (metric.cauchy_iff.mp hfg).2 ε hε
rw [eventually_prod_iff]
refine' ⟨fun n => f n x ∈ t, ht, fun n => f n x ∈ t, ht, _⟩
intro n hn n' hn' z hz
rw [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg, ← dist_eq_norm]
exact ht' _ hn _ hn'
#align uniform_cauchy_seq_on_ball_of_fderiv uniformCauchySeqOn_ball_of_fderiv
/-- If a sequence of functions between real or complex normed spaces are differentiable on a
preconnected open set, they form a Cauchy sequence _at_ `x`, and their derivatives are Cauchy
uniformly on the set, then the functions form a Cauchy sequence at any point in the set. -/
theorem cauchy_map_of_uniformCauchySeqOn_fderiv {s : Set E} (hs : IsOpen s) (h's : IsPreconnected s)
(hf' : UniformCauchySeqOn f' l s) (hf : ∀ n : ι, ∀ y : E, y ∈ s → HasFderivAt (f n) (f' n y) y)
{x₀ x : E} (hx₀ : x₀ ∈ s) (hx : x ∈ s) (hfg : Cauchy (map (fun n => f n x₀) l)) :
Cauchy (map (fun n => f n x) l) :=
by
have : ne_bot l := (cauchy_map_iff.1 hfg).1
let t := { y | y ∈ s ∧ Cauchy (map (fun n => f n y) l) }
suffices H : s ⊆ t
exact (H hx).2
have A : ∀ x ε, x ∈ t → Metric.ball x ε ⊆ s → Metric.ball x ε ⊆ t := fun x ε xt hx y hy =>
⟨hx hy,
(uniformCauchySeqOn_ball_of_fderiv (hf'.mono hx) (fun n y hy => hf n y (hx hy))
xt.2).cauchy_map
hy⟩
have open_t : IsOpen t := by
rw [Metric.isOpen_iff]
intro x hx
rcases Metric.isOpen_iff.1 hs x hx.1 with ⟨ε, εpos, hε⟩
exact ⟨ε, εpos, A x ε hx hε⟩
have st_nonempty : (s ∩ t).Nonempty := ⟨x₀, hx₀, ⟨hx₀, hfg⟩⟩
suffices H : closure t ∩ s ⊆ t
exact h's.subset_of_closure_inter_subset open_t st_nonempty H
rintro x ⟨xt, xs⟩
obtain ⟨ε, εpos, hε⟩ : ∃ (ε : ℝ)(H : ε > 0), Metric.ball x ε ⊆ s
exact Metric.isOpen_iff.1 hs x xs
obtain ⟨y, yt, hxy⟩ : ∃ (y : E)(yt : y ∈ t), dist x y < ε / 2
exact Metric.mem_closure_iff.1 xt _ (half_pos εpos)
have B : Metric.ball y (ε / 2) ⊆ Metric.ball x ε :=
by
apply Metric.ball_subset_ball'
rw [dist_comm]
linarith
exact A y (ε / 2) yt (B.trans hε) (Metric.mem_ball.2 hxy)
#align cauchy_map_of_uniform_cauchy_seq_on_fderiv cauchy_map_of_uniformCauchySeqOn_fderiv
/-- If `f_n → g` pointwise and the derivatives `(f_n)' → h` _uniformly_ converge, then
in fact for a fixed `y`, the difference quotients `‖z - y‖⁻¹ • (f_n z - f_n y)` converge
_uniformly_ to `‖z - y‖⁻¹ • (g z - g y)` -/
theorem difference_quotients_converge_uniformly (hf' : TendstoUniformlyOnFilter f' g' l (𝓝 x))
(hf : ∀ᶠ n : ι × E in l ×ᶠ 𝓝 x, HasFderivAt (f n.1) (f' n.1 n.2) n.2)
(hfg : ∀ᶠ y : E in 𝓝 x, Tendsto (fun n => f n y) l (𝓝 (g y))) :
TendstoUniformlyOnFilter (fun n : ι => fun y : E => (‖y - x‖⁻¹ : 𝕜) • (f n y - f n x))
(fun y : E => (‖y - x‖⁻¹ : 𝕜) • (g y - g x)) l (𝓝 x) :=
by
let : NormedSpace ℝ E
exact NormedSpace.restrictScalars ℝ 𝕜 _
rcases eq_or_ne l ⊥ with (hl | hl)
· simp only [hl, TendstoUniformlyOnFilter, bot_prod, eventually_bot, imp_true_iff]
haveI : ne_bot l := ⟨hl⟩
refine'
UniformCauchySeqOnFilter.tendstoUniformlyOnFilter_of_tendsto _
((hfg.and (eventually_const.mpr hfg.self_of_nhds)).mono fun y hy =>
(hy.1.sub hy.2).const_smul _)
rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero]
rw [Metric.tendstoUniformlyOnFilter_iff]
have hfg' := hf'.uniform_cauchy_seq_on_filter
rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero] at hfg'
rw [Metric.tendstoUniformlyOnFilter_iff] at hfg'
intro ε hε
obtain ⟨q, hqpos, hqε⟩ := exists_pos_rat_lt hε
specialize hfg' (q : ℝ) (by simp [hqpos])
have := (tendsto_swap4_prod.eventually (hf.prod_mk hf)).diag_of_prod_right
obtain ⟨a, b, c, d, e⟩ := eventually_prod_iff.1 (hfg'.and this)
obtain ⟨r, hr, hr'⟩ := metric.nhds_basis_ball.eventually_iff.mp d
rw [eventually_prod_iff]
refine'
⟨_, b, fun e : E => Metric.ball x r e,
eventually_mem_set.mpr (metric.nhds_basis_ball.mem_of_mem hr), fun n hn y hy => _⟩
simp only [Pi.zero_apply, dist_zero_left]
rw [← smul_sub, norm_smul, norm_inv, IsROrC.norm_coe_norm]
refine' lt_of_le_of_lt _ hqε
by_cases hyz' : x = y
· simp [hyz', hqpos.le]
have hyz : 0 < ‖y - x‖ := by
rw [norm_pos_iff]
intro hy'
exact hyz' (eq_of_sub_eq_zero hy').symm
rw [inv_mul_le_iff hyz, mul_comm, sub_sub_sub_comm]
simp only [Pi.zero_apply, dist_zero_left] at e
refine'
Convex.norm_image_sub_le_of_norm_has_fderiv_within_le
(fun y hy => ((e hn (hr' hy)).2.1.sub (e hn (hr' hy)).2.2).HasFderivWithinAt)
(fun y hy => (e hn (hr' hy)).1.le) (convex_ball x r) (Metric.mem_ball_self hr) hy
#align difference_quotients_converge_uniformly difference_quotients_converge_uniformly
/-- `(d/dx) lim_{n → ∞} f n x = lim_{n → ∞} f' n x` when the `f' n` converge
_uniformly_ to their limit at `x`.
In words the assumptions mean the following:
* `hf'`: The `f'` converge "uniformly at" `x` to `g'`. This does not mean that the `f' n` even
converge away from `x`!
* `hf`: For all `(y, n)` with `y` sufficiently close to `x` and `n` sufficiently large, `f' n` is
the derivative of `f n`
* `hfg`: The `f n` converge pointwise to `g` on a neighborhood of `x` -/
theorem hasFderivAt_of_tendstoUniformlyOnFilter [NeBot l]
(hf' : TendstoUniformlyOnFilter f' g' l (𝓝 x))
(hf : ∀ᶠ n : ι × E in l ×ᶠ 𝓝 x, HasFderivAt (f n.1) (f' n.1 n.2) n.2)
(hfg : ∀ᶠ y in 𝓝 x, Tendsto (fun n => f n y) l (𝓝 (g y))) : HasFderivAt g (g' x) x :=
by
-- The proof strategy follows several steps:
-- 1. The quantifiers in the definition of the derivative are
-- `∀ ε > 0, ∃δ > 0, ∀y ∈ B_δ(x)`. We will introduce a quantifier in the middle:
-- `∀ ε > 0, ∃N, ∀n ≥ N, ∃δ > 0, ∀y ∈ B_δ(x)` which will allow us to introduce the `f(') n`
-- 2. The order of the quantifiers `hfg` are opposite to what we need. We will be able to swap
-- the quantifiers using the uniform convergence assumption
rw [hasFderivAt_iff_tendsto]
-- Introduce extra quantifier via curried filters
suffices
tendsto (fun y : ι × E => ‖y.2 - x‖⁻¹ * ‖g y.2 - g x - (g' x) (y.2 - x)‖) (l.curry (𝓝 x)) (𝓝 0)
by
rw [Metric.tendsto_nhds] at this⊢
intro ε hε
specialize this ε hε
rw [eventually_curry_iff] at this
simp only at this
exact (eventually_const.mp this).mono (by simp only [imp_self, forall_const])
-- With the new quantifier in hand, we can perform the famous `ε/3` proof. Specifically,
-- we will break up the limit (the difference functions minus the derivative go to 0) into 3:
-- * The difference functions of the `f n` converge *uniformly* to the difference functions
-- of the `g n`
-- * The `f' n` are the derivatives of the `f n`
-- * The `f' n` converge to `g'` at `x`
conv =>
congr
ext
rw [← norm_norm, ← norm_inv, ← @IsROrC.norm_of_real 𝕜 _ _, IsROrC.of_real_inv, ← norm_smul]
rw [← tendsto_zero_iff_norm_tendsto_zero]
have :
(fun a : ι × E => (‖a.2 - x‖⁻¹ : 𝕜) • (g a.2 - g x - (g' x) (a.2 - x))) =
((fun a : ι × E => (‖a.2 - x‖⁻¹ : 𝕜) • (g a.2 - g x - (f a.1 a.2 - f a.1 x))) +
fun a : ι × E =>
(‖a.2 - x‖⁻¹ : 𝕜) • (f a.1 a.2 - f a.1 x - ((f' a.1 x) a.2 - (f' a.1 x) x))) +
fun a : ι × E => (‖a.2 - x‖⁻¹ : 𝕜) • (f' a.1 x - g' x) (a.2 - x) :=
by
ext
simp only [Pi.add_apply]
rw [← smul_add, ← smul_add]
congr
simp only [map_sub, sub_add_sub_cancel, ContinuousLinearMap.coe_sub', Pi.sub_apply]
simp_rw [this]
have : 𝓝 (0 : G) = 𝓝 (0 + 0 + 0)
simp only [add_zero]
rw [this]
refine' tendsto.add (tendsto.add _ _) _
simp only
· have := difference_quotients_converge_uniformly hf' hf hfg
rw [Metric.tendstoUniformlyOnFilter_iff] at this
rw [Metric.tendsto_nhds]
intro ε hε
apply ((this ε hε).filter_mono curry_le_prod).mono
intro n hn
rw [dist_eq_norm] at hn⊢
rw [← smul_sub] at hn
rwa [sub_zero]
· -- (Almost) the definition of the derivatives
rw [Metric.tendsto_nhds]
intro ε hε
rw [eventually_curry_iff]
refine' hf.curry.mono fun n hn => _
have := hn.self_of_nhds
rw [hasFderivAt_iff_tendsto, Metric.tendsto_nhds] at this
refine' (this ε hε).mono fun y hy => _
rw [dist_eq_norm] at hy⊢
simp only [sub_zero, map_sub, norm_mul, norm_inv, norm_norm] at hy⊢
rw [norm_smul, norm_inv, IsROrC.norm_coe_norm]
exact hy
· -- hfg' after specializing to `x` and applying the definition of the operator norm
refine' tendsto.mono_left _ curry_le_prod
have h1 : tendsto (fun n : ι × E => g' n.2 - f' n.1 n.2) (l ×ᶠ 𝓝 x) (𝓝 0) :=
by
rw [Metric.tendstoUniformlyOnFilter_iff] at hf'
exact metric.tendsto_nhds.mpr fun ε hε => by simpa using hf' ε hε
have h2 : tendsto (fun n : ι => g' x - f' n x) l (𝓝 0) :=
by
rw [Metric.tendsto_nhds] at h1⊢
exact fun ε hε => (h1 ε hε).curry.mono fun n hn => hn.self_of_nhds
have := tendsto_fst.comp (h2.prod_map tendsto_id)
refine' squeeze_zero_norm _ (tendsto_zero_iff_norm_tendsto_zero.mp this)
intro n
simp_rw [norm_smul, norm_inv, IsROrC.norm_coe_norm]
by_cases hx : x = n.2
· simp [hx]
have hnx : 0 < ‖n.2 - x‖ := by
rw [norm_pos_iff]
intro hx'
exact hx (eq_of_sub_eq_zero hx').symm
rw [inv_mul_le_iff hnx, mul_comm]
simp only [Function.comp_apply, Prod_map]
rw [norm_sub_rev]
exact (f' n.1 x - g' x).le_op_norm (n.2 - x)
#align has_fderiv_at_of_tendsto_uniformly_on_filter hasFderivAt_of_tendstoUniformlyOnFilter
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
theorem hasFderivAt_of_tendstoLocallyUniformlyOn [NeBot l] {s : Set E} (hs : IsOpen s)
(hf' : TendstoLocallyUniformlyOn f' g' l s) (hf : ∀ n, ∀ x ∈ s, HasFderivAt (f n) (f' n x) x)
(hfg : ∀ x ∈ s, Tendsto (fun n => f n x) l (𝓝 (g x))) (hx : x ∈ s) : HasFderivAt g (g' x) x :=
by
have h1 : s ∈ 𝓝 x := hs.mem_nhds hx
have h3 : Set.univ ×ˢ s ∈ l ×ᶠ 𝓝 x := by simp only [h1, prod_mem_prod_iff, univ_mem, and_self_iff]
have h4 : ∀ᶠ n : ι × E in l ×ᶠ 𝓝 x, HasFderivAt (f n.1) (f' n.1 n.2) n.2 :=
eventually_of_mem h3 fun ⟨n, z⟩ ⟨hn, hz⟩ => hf n z hz
refine' hasFderivAt_of_tendstoUniformlyOnFilter _ h4 (eventually_of_mem h1 hfg)
simpa [IsOpen.nhdsWithin_eq hs hx] using tendsto_locally_uniformly_on_iff_filter.mp hf' x hx
#align has_fderiv_at_of_tendsto_locally_uniformly_on hasFderivAt_of_tendstoLocallyUniformlyOn
/-- A slight variant of `has_fderiv_at_of_tendsto_locally_uniformly_on` with the assumption stated
in terms of `differentiable_on` rather than `has_fderiv_at`. This makes a few proofs nicer in
complex analysis where holomorphicity is assumed but the derivative is not known a priori. -/
theorem hasFderivAt_of_tendsto_locally_uniformly_on' [NeBot l] {s : Set E} (hs : IsOpen s)
(hf' : TendstoLocallyUniformlyOn (fderiv 𝕜 ∘ f) g' l s) (hf : ∀ n, DifferentiableOn 𝕜 (f n) s)
(hfg : ∀ x ∈ s, Tendsto (fun n => f n x) l (𝓝 (g x))) (hx : x ∈ s) : HasFderivAt g (g' x) x :=
by
refine' hasFderivAt_of_tendstoLocallyUniformlyOn hs hf' (fun n z hz => _) hfg hx
exact ((hf n z hz).DifferentiableAt (hs.mem_nhds hz)).HasFderivAt
#align has_fderiv_at_of_tendsto_locally_uniformly_on' hasFderivAt_of_tendsto_locally_uniformly_on'
/-- `(d/dx) lim_{n → ∞} f n x = lim_{n → ∞} f' n x` when the `f' n` converge
_uniformly_ to their limit on an open set containing `x`. -/
theorem hasFderivAt_of_tendstoUniformlyOn [NeBot l] {s : Set E} (hs : IsOpen s)
(hf' : TendstoUniformlyOn f' g' l s)
(hf : ∀ n : ι, ∀ x : E, x ∈ s → HasFderivAt (f n) (f' n x) x)
(hfg : ∀ x : E, x ∈ s → Tendsto (fun n => f n x) l (𝓝 (g x))) :
∀ x : E, x ∈ s → HasFderivAt g (g' x) x := fun x =>
hasFderivAt_of_tendstoLocallyUniformlyOn hs hf'.TendstoLocallyUniformlyOn hf hfg
#align has_fderiv_at_of_tendsto_uniformly_on hasFderivAt_of_tendstoUniformlyOn
/-- `(d/dx) lim_{n → ∞} f n x = lim_{n → ∞} f' n x` when the `f' n` converge
_uniformly_ to their limit. -/
theorem hasFderivAt_of_tendstoUniformly [NeBot l] (hf' : TendstoUniformly f' g' l)
(hf : ∀ n : ι, ∀ x : E, HasFderivAt (f n) (f' n x) x)
(hfg : ∀ x : E, Tendsto (fun n => f n x) l (𝓝 (g x))) : ∀ x : E, HasFderivAt g (g' x) x :=
by
intro x
have hf : ∀ n : ι, ∀ x : E, x ∈ Set.univ → HasFderivAt (f n) (f' n x) x := by simp [hf]
have hfg : ∀ x : E, x ∈ Set.univ → tendsto (fun n => f n x) l (𝓝 (g x)) := by simp [hfg]
have hf' : TendstoUniformlyOn f' g' l Set.univ := by rwa [tendstoUniformlyOn_univ]
refine' hasFderivAt_of_tendstoUniformlyOn isOpen_univ hf' hf hfg x (Set.mem_univ x)
#align has_fderiv_at_of_tendsto_uniformly hasFderivAt_of_tendstoUniformly
end LimitsOfDerivatives
section deriv
/-! ### `deriv` versions of above theorems
In this section, we provide `deriv` equivalents of the `fderiv` lemmas in the previous section.
The protected function `promote_deriv` provides the translation between derivatives and Fréchet
derivatives
-/
variable {ι : Type _} {l : Filter ι} {𝕜 : Type _} [IsROrC 𝕜] {G : Type _} [NormedAddCommGroup G]
[NormedSpace 𝕜 G] {f : ι → 𝕜 → G} {g : 𝕜 → G} {f' : ι → 𝕜 → G} {g' : 𝕜 → G} {x : 𝕜}
/-- If our derivatives converge uniformly, then the Fréchet derivatives converge uniformly -/
theorem UniformCauchySeqOnFilter.one_smulRight {l' : Filter 𝕜}
(hf' : UniformCauchySeqOnFilter f' l l') :
UniformCauchySeqOnFilter (fun n => fun z => (1 : 𝕜 →L[𝕜] 𝕜).smul_right (f' n z)) l l' :=
by
-- The tricky part of this proof is that operator norms are written in terms of `≤` whereas
-- metrics are written in terms of `<`. So we need to shrink `ε` utilizing the archimedean
-- property of `ℝ`
rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero,
Metric.tendstoUniformlyOnFilter_iff] at hf'⊢
intro ε hε
obtain ⟨q, hq, hq'⟩ := exists_between hε.lt
apply (hf' q hq).mono
intro n hn
refine' lt_of_le_of_lt _ hq'
simp only [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg] at hn⊢
refine' ContinuousLinearMap.op_norm_le_bound _ hq.le _
intro z
simp only [ContinuousLinearMap.coe_sub', Pi.sub_apply, ContinuousLinearMap.smulRight_apply,
ContinuousLinearMap.one_apply]
rw [← smul_sub, norm_smul, mul_comm]
exact mul_le_mul hn.le rfl.le (norm_nonneg _) hq.le
#align uniform_cauchy_seq_on_filter.one_smul_right UniformCauchySeqOnFilter.one_smulRight
theorem uniformCauchySeqOnFilter_of_deriv (hf' : UniformCauchySeqOnFilter f' l (𝓝 x))
(hf : ∀ᶠ n : ι × 𝕜 in l ×ᶠ 𝓝 x, HasDerivAt (f n.1) (f' n.1 n.2) n.2)
(hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOnFilter f l (𝓝 x) :=
by
simp_rw [hasDerivAt_iff_hasFderivAt] at hf
exact uniformCauchySeqOnFilter_of_fderiv hf'.one_smul_right hf hfg
#align uniform_cauchy_seq_on_filter_of_deriv uniformCauchySeqOnFilter_of_deriv
theorem uniformCauchySeqOn_ball_of_deriv {r : ℝ} (hf' : UniformCauchySeqOn f' l (Metric.ball x r))
(hf : ∀ n : ι, ∀ y : 𝕜, y ∈ Metric.ball x r → HasDerivAt (f n) (f' n y) y)
(hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOn f l (Metric.ball x r) :=
by
simp_rw [hasDerivAt_iff_hasFderivAt] at hf
rw [uniformCauchySeqOn_iff_uniformCauchySeqOnFilter] at hf'
have hf' :
UniformCauchySeqOn (fun n => fun z => (1 : 𝕜 →L[𝕜] 𝕜).smul_right (f' n z)) l
(Metric.ball x r) :=
by
rw [uniformCauchySeqOn_iff_uniformCauchySeqOnFilter]
exact hf'.one_smul_right
exact uniformCauchySeqOn_ball_of_fderiv hf' hf hfg
#align uniform_cauchy_seq_on_ball_of_deriv uniformCauchySeqOn_ball_of_deriv
theorem hasDerivAt_of_tendstoUniformlyOnFilter [NeBot l]
(hf' : TendstoUniformlyOnFilter f' g' l (𝓝 x))
(hf : ∀ᶠ n : ι × 𝕜 in l ×ᶠ 𝓝 x, HasDerivAt (f n.1) (f' n.1 n.2) n.2)
(hfg : ∀ᶠ y in 𝓝 x, Tendsto (fun n => f n y) l (𝓝 (g y))) : HasDerivAt g (g' x) x :=
by
-- The first part of the proof rewrites `hf` and the goal to be functions so that Lean
-- can recognize them when we apply `has_fderiv_at_of_tendsto_uniformly_on_filter`
let F' n z := (1 : 𝕜 →L[𝕜] 𝕜).smul_right (f' n z)
let G' z := (1 : 𝕜 →L[𝕜] 𝕜).smul_right (g' z)
simp_rw [hasDerivAt_iff_hasFderivAt] at hf⊢
-- Now we need to rewrite hf' in terms of continuous_linear_maps. The tricky part is that
-- operator norms are written in terms of `≤` whereas metrics are written in terms of `<`. So we
-- need to shrink `ε` utilizing the archimedean property of `ℝ`
have hf' : TendstoUniformlyOnFilter F' G' l (𝓝 x) :=
by
rw [Metric.tendstoUniformlyOnFilter_iff] at hf'⊢
intro ε hε
obtain ⟨q, hq, hq'⟩ := exists_between hε.lt
apply (hf' q hq).mono
intro n hn
refine' lt_of_le_of_lt _ hq'
simp only [F', G', dist_eq_norm] at hn⊢
refine' ContinuousLinearMap.op_norm_le_bound _ hq.le _
intro z
simp only [ContinuousLinearMap.coe_sub', Pi.sub_apply, ContinuousLinearMap.smulRight_apply,
ContinuousLinearMap.one_apply]
rw [← smul_sub, norm_smul, mul_comm]
exact mul_le_mul hn.le rfl.le (norm_nonneg _) hq.le
exact hasFderivAt_of_tendstoUniformlyOnFilter hf' hf hfg
#align has_deriv_at_of_tendsto_uniformly_on_filter hasDerivAt_of_tendstoUniformlyOnFilter
theorem hasDerivAt_of_tendstoLocallyUniformlyOn [NeBot l] {s : Set 𝕜} (hs : IsOpen s)
(hf' : TendstoLocallyUniformlyOn f' g' l s)
(hf : ∀ᶠ n in l, ∀ x ∈ s, HasDerivAt (f n) (f' n x) x)
(hfg : ∀ x ∈ s, Tendsto (fun n => f n x) l (𝓝 (g x))) (hx : x ∈ s) : HasDerivAt g (g' x) x :=
by
have h1 : s ∈ 𝓝 x := hs.mem_nhds hx
have h2 : ∀ᶠ n : ι × 𝕜 in l ×ᶠ 𝓝 x, HasDerivAt (f n.1) (f' n.1 n.2) n.2 :=
eventually_prod_iff.2 ⟨_, hf, fun x => x ∈ s, h1, fun n => id⟩
refine' hasDerivAt_of_tendstoUniformlyOnFilter _ h2 (eventually_of_mem h1 hfg)
simpa [IsOpen.nhdsWithin_eq hs hx] using tendsto_locally_uniformly_on_iff_filter.mp hf' x hx
#align has_deriv_at_of_tendsto_locally_uniformly_on hasDerivAt_of_tendstoLocallyUniformlyOn
/-- A slight variant of `has_deriv_at_of_tendsto_locally_uniformly_on` with the assumption stated in
terms of `differentiable_on` rather than `has_deriv_at`. This makes a few proofs nicer in complex
analysis where holomorphicity is assumed but the derivative is not known a priori. -/
theorem hasDerivAt_of_tendsto_locally_uniformly_on' [NeBot l] {s : Set 𝕜} (hs : IsOpen s)
(hf' : TendstoLocallyUniformlyOn (deriv ∘ f) g' l s)
(hf : ∀ᶠ n in l, DifferentiableOn 𝕜 (f n) s)
(hfg : ∀ x ∈ s, Tendsto (fun n => f n x) l (𝓝 (g x))) (hx : x ∈ s) : HasDerivAt g (g' x) x :=
by
refine' hasDerivAt_of_tendstoLocallyUniformlyOn hs hf' _ hfg hx
filter_upwards [hf]with n h z hz using((h z hz).DifferentiableAt (hs.mem_nhds hz)).HasDerivAt
#align has_deriv_at_of_tendsto_locally_uniformly_on' hasDerivAt_of_tendsto_locally_uniformly_on'
theorem hasDerivAt_of_tendstoUniformlyOn [NeBot l] {s : Set 𝕜} (hs : IsOpen s)
(hf' : TendstoUniformlyOn f' g' l s)
(hf : ∀ᶠ n in l, ∀ x : 𝕜, x ∈ s → HasDerivAt (f n) (f' n x) x)
(hfg : ∀ x : 𝕜, x ∈ s → Tendsto (fun n => f n x) l (𝓝 (g x))) :
∀ x : 𝕜, x ∈ s → HasDerivAt g (g' x) x := fun x =>
hasDerivAt_of_tendstoLocallyUniformlyOn hs hf'.TendstoLocallyUniformlyOn hf hfg
#align has_deriv_at_of_tendsto_uniformly_on hasDerivAt_of_tendstoUniformlyOn
theorem hasDerivAt_of_tendstoUniformly [NeBot l] (hf' : TendstoUniformly f' g' l)
(hf : ∀ᶠ n in l, ∀ x : 𝕜, HasDerivAt (f n) (f' n x) x)
(hfg : ∀ x : 𝕜, Tendsto (fun n => f n x) l (𝓝 (g x))) : ∀ x : 𝕜, HasDerivAt g (g' x) x :=
by
intro x
have hf : ∀ᶠ n in l, ∀ x : 𝕜, x ∈ Set.univ → HasDerivAt (f n) (f' n x) x := by
filter_upwards [hf]with n h x hx using h x
have hfg : ∀ x : 𝕜, x ∈ Set.univ → tendsto (fun n => f n x) l (𝓝 (g x)) := by simp [hfg]
have hf' : TendstoUniformlyOn f' g' l Set.univ := by rwa [tendstoUniformlyOn_univ]
exact hasDerivAt_of_tendstoUniformlyOn isOpen_univ hf' hf hfg x (Set.mem_univ x)
#align has_deriv_at_of_tendsto_uniformly hasDerivAt_of_tendstoUniformly
end deriv
|
# This file is a part of JuliaFEM.
# License is MIT: see https://github.com/JuliaFEM/FEMBase.jl/blob/master/LICENSE
using FEMBase
using Test
# Time interpolation of fields
element = Element(Seg2, [1, 2])
update!(element, "force", 1.0 => 15.0)
update!(element, "force", 2.0 => 30.0)
@test isapprox(element("force", 1.0), 15.0)
@test isapprox(element("force", 1.2), 18.0)
@test isapprox(element("force", 1.6), 24.0)
@test isapprox(element("force", 2.0), 30.0)
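# Between the stored time instants the field is interpolated linearly, e.g.
# force(1.2) = 15.0 + (1.2 - 1.0)/(2.0 - 1.0)*(30.0 - 15.0) = 18.0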
update!(element, "temperature", 0.0 => (1.0, 3.0))
update!(element, "temperature", 1.0 => (2.0, 14.0))
@test isapprox(element("temperature", (0.0,), 0.0), 2.0)
@test isapprox(element("temperature", (0.0,), 0.3), 3.8)
@test isapprox(element("temperature", (0.0,), 1.0), 8.0)
|
% vgg_mrdivs Solves equation system Y*diag(s) = A*X with unknowns A, s.
%
% A = vgg_mrdivs(X,Y) solves (overdetermined) equation system Y*diag(s) = A*X
% by linear method (DLT algorithm).
% Parameters:
% X ... double (N,K)
% Y ... double (M,K)
% A ... double (M,N)
% s ... double (1,K)
%
% Preconditioning of the points not included in the function. Use vgg_conditioner_*.
%
% Typical usage:
% 1. Estimating an image homography from K pairs of corresponding points.
% If 3-by-K matrices x and y are the points in homogeneous coordinates, the 3-by-3 homography
% matrix is obtained as H = vgg_mrdivs(x,y).
%
% 2. Estimating 3-by-4 camera projection matrix P from corresponding pairs of image and scene points.
% For image points x (3xK matrix) and scene points X (4xK matrix) do P = vgg_mrdivs(X,x).
% (c) [email protected]
% Algorithm:
%
% For the n-th point pair X(:,n) and Y(:,n) we have
%   s(n)*Y(:,n) = A*X(:,n)
% Eliminating s(n) results in MY*(MY-1)/2 linear homogeneous equations per point
% in the elements of A. We solve this system by svd or eig.
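%
% Sketch of the elimination step (one equation per point n and row pair i > j):
%   s(n)*Y(i,n) = A(i,:)*X(:,n)   and   s(n)*Y(j,n) = A(j,:)*X(:,n)
%   =>  Y(j,n)*( A(i,:)*X(:,n) ) - Y(i,n)*( A(j,:)*X(:,n) ) = 0
% These homogeneous equations are stacked into the measurement matrix W below.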
function A = vgg_mrdivs(X,Y)
[MX,N] = size(X);
[MY,NY] = size(Y);
if N ~= NY, error('Matrices X, Y must have an equal number of columns.'); end
% construct the measurement matrix
W = zeros(MX*MY,MY*(MY-1)/2*N);
k = 1;
for i = 1:MY
for j = 1:i-1
W([[1:MX]+MX*(j-1) [1:MX]+MX*(i-1)],[1:N]+N*(k-1)) = [(ones(MX,1)*Y(i,:)).*X; -(ones(MX,1)*Y(j,:)).*X];
k = k+1;
end
end
% solve the system || A'(:)' * W || ---> min
[dummy,s,A] = svd(W',0);
A = reshape(A(:,end),MX,MY)';
return
|
lemma big_prod_in_1: assumes "\<And>x. x \<in> A \<Longrightarrow> f x \<in> L F (\<lambda>_. 1)" shows "(\<lambda>y. \<Prod>x\<in>A. f x y) \<in> L F (\<lambda>_. 1)"
|
/-
# Formalising IUM 2021 in Lean (unofficial version)
Things about the logical foundations of Lean are skipped here...
(I just assume basic knowledge of "universes", "Π types", "inductive types" and their "recursors";
you also need to know how the logical connectives, the naturals and equality are defined in Lean...)
(For them you may want to read *Theorem Proving in Lean*:
https://leanprover.github.io/theorem_proving_in_lean/
I also made some notes about that...)
-/
import tactic
universe u
--------------------------------------------------------------------------------
-- ## Using sets in Lean
namespace using_sets_in_lean
#print set
/-
In Lean, the `set` is a "type constructor":
`def set : Type u → Type u :=`
` λ (α : Type u), α → Prop`
"A set on a type α" (`set α`) is just the type `α → Prop`!
Note that:
1. This is a function type, which contains all "predicates on `α`"
(i.e. function that receives an element of `α` and returns a `Prop`).
2. This function type lives in the same universe as `α`.
-/
#check set ℕ
#reduce set ℕ -- `set ℕ` and `ℕ → Prop` are the same (definitionally equal)!
-- This is the set {0, 1, 12}.
def some_subset : set ℕ := λ n, n = 0 ∨ n = 1 ∨ n = 12
-- You can also define it as:
-- `def some_subset : ℕ → Prop := λ n, n = 0 ∨ n = 1 ∨ n = 12`
#print notation ∈
#print set.mem
/-
The `mem` is defined as:
`def set.mem : Π {α : Type u} (a : α) (s : set α), Prop :=`
`λ α a s, s a`
which is just a function that "takes an implicit `α : Type`,
an element `a` of `α`, and a "set" `s` on `α` (i.e. `s : α → Prop`),
and returns the proposition obtained by putting `a` into the predicate `s`."
(i.e. it is just an application of the predicate `s` on `a`!)
-/
#check (set.mem 4 some_subset) -- `Prop`
#check (4 ∈ some_subset) -- An alternative notation for the above line
#check (some_subset 4) -- An alternative notation for the above line!
#reduce (4 ∈ some_subset) -- `4 = 0 ∨ 4 = 1 ∨ 4 = 12`
-- Let's prove that 1 ∈ {0, 1, 12}...
lemma l1 : (1 ∈ some_subset) :=
begin
unfold some_subset,
right, left, refl,
end
-- (Term mode version)
example : (1 ∈ some_subset) := or.inr (or.inl rfl)
-- Now prove that 4 ∉ {0, 1, 12}...
example : (4 ∉ some_subset) :=
begin
intros h,
unfold some_subset at h,
cases h with h₁ h₂,
{ injection h₁ },
{ cases h₂ with h₂ h₃,
{ injections },
{ injections }}
end
-- (Term mode version)
example : (4 ∉ some_subset) :=
(λ h : (some_subset 4), h.elim
(λ h, nat.no_confusion h)
(λ h, h.elim
(λ h, nat.no_confusion (nat.succ.inj h))
(λ h, nat.no_confusion ((nat.succ.inj ∘ nat.succ.inj ∘ nat.succ.inj ∘ nat.succ.inj) h))))
#print notation ⊆
#print set.subset
/-
In Lean, the `subset` takes two `set`s and emits a `Prop`:
`def set.subset : Π {α : Type u}, set α → set α → Prop :=`
` λ α s₁ s₂, (∀ (a : α), a ∈ s₁ → a ∈ s₂)`
-/
#print set.univ
#print notation ∅
#print set.has_emptyc
/-
In Lean, the `univ` emits a `set` (α is implicit):
`def set.univ : Π {α : Type u}, set α :=`
` λ α, (λ a, true)`
-/
#reduce (some_subset ⊆ (set.univ : set ℕ))
-- `∀ (a : ℕ), (a = 0 ∨ a = 1 ∨ a = 12) → true`
example : some_subset ⊆ (set.univ : set ℕ) :=
λ x (hx : some_subset x), trivial
-- TODO: intersection, union, complement
-- TODO: indexed intersection / union
-- TODO: extensionality of sets
-- https://leanprover.github.io/logic_and_proof/sets_in_lean.html
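-- A minimal sketch of the intersection/union/complement TODOs above. These should
-- all hold by `iff.rfl`, because mathlib defines the set operations via the
-- corresponding logical connectives (treat this as an unchecked sketch):
example (a : ℕ) (s t : set ℕ) : a ∈ s ∩ t ↔ a ∈ s ∧ a ∈ t := iff.rfl
example (a : ℕ) (s t : set ℕ) : a ∈ s ∪ t ↔ a ∈ s ∨ a ∈ t := iff.rfl
example (a : ℕ) (s : set ℕ) : a ∈ sᶜ ↔ a ∉ s := iff.rfl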
end using_sets_in_lean
--------------------------------------------------------------------------------
-- ## Using functions in Lean
-- TODO: complete
|
{-|
Module : ODMatrix.ElementaryDecomposition
Description : Representation of elementary operations and the elementary decomposition of the path between two Origin-Destination matrices.
-}
module ODMatrix.ElementaryDecomposition (
ElementaryMatrix(..)
, getChildren
, applyElementary
, applyPath
, applicables
, elementaryValue
, opposite
, isNullElementary
, ODM
, Bounds
) where
import Numeric.LinearAlgebra
--import Data.Tree (Tree(..), unfoldTree)
import ODMatrix (ODM)
data ElementaryMatrix =
NullElementary Int |
Elementary Int (Int, Int) (Int, Int)
deriving (Show)
-- | Size of the elementary matrix
emSize :: ElementaryMatrix -> Int
emSize (NullElementary s) = s
emSize (Elementary s _ _) = s
-- | Bounds of the capacity of the unit (Lower bound, Upper bound).
type Bounds = (Double, Double)
-- An elementary matrix paired with the ODM it acts on.
type ES = (ElementaryMatrix, ODM)
applyElementaries :: ODM -> [ElementaryMatrix] -> [ODM]
applyElementaries s es = map (applyElementary s) es
-- | Apply a elementary operation to the given ODM.
applyElementary :: ODM -> ElementaryMatrix -> ODM
applyElementary a e = a + (toMatrix e)
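-- For intuition: applying @Elementary n (r1,c1) (r2,c2)@ adds one unit at
-- (r1,c1) and (r2,c2) and removes one unit at (r1,c2) and (r2,c1); see
-- 'toMatrix' below. A hypothetical sketch (the concrete matrix is made up):
--
--   let a = assoc (3,3) 0 [((0,2),1),((1,1),1)] :: ODM
--   applyElementary a (Elementary 3 (0,1) (1,2))
--     -- moves the two units to cells (0,1) and (1,2)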
-- | Apply a sequence of elementary operations to the given ODM.
applyPath :: ODM -> [ElementaryMatrix] -> ODM
applyPath = foldl applyElementary
-- | Given S, such that A + S = B, this function computes the candidate elementary matrices E that can serve as the next step, i.e. with S' + E = S for some S'.
getChildren :: ODM -- ^ Full path permutation matrix
-> [ElementaryMatrix] -- ^ All possible elementary matrices
getChildren odm = es ++ map opposite es
where al = toAssocList odm
s = rows odm
es = [ Elementary s (r1,c1) (r2,c2) |
((r1,c1),_) <- al,
((r2,c2),_) <- al,
r1 > r2, c1 < c2 ]
opposite :: ElementaryMatrix -> ElementaryMatrix
opposite (Elementary s (r1,c1) (r2,c2)) = Elementary s (r2,c1) (r1,c2)
opposite (NullElementary s) = NullElementary s
-- getChildren s = foldl (\acc p -> acc ++ opposites s p pivots) [] pivots
-- where -- Filtra solo los potenciales pivotes a los que puede agregarse o quitarse un pasajero.
-- pivots = [ p | p@((i,j),x) <- toAssocList s, i <= j, x /= 0]
-- reciprocals :: Pivot -> [Pivot] -> [Pivot]
-- reciprocals ((i,j),x) pivots =
-- [ p | p@((h,k),y) <- pivots, h < i, j < k, x/y > 0]
-- opposites :: ODM -> Pivot -> [Pivot] -> [ElementaryMatrix]
-- opposites s p@(i,j) pivots =
-- [ Elementary (rows s) (i,j) (h,k)
-- | ((h,k),_) <- reciprocals p pivots,
-- atIndex s (h,j) / x < 0, -- Opposite signs
-- atIndex s (i,k) / x < 0 ]
-- | Filter the elementary matrices that cannot be applied to the odm
applicables :: Bounds -- ^ Bounds of the values of the target
-> ODM -- ^ Target Origin-Destination Matrix
-> [ElementaryMatrix] -- ^ List of potential elementary operations
-> [ElementaryMatrix] -- ^ List of applicable elementary operations
applicables b s = filter (applicable b s)
-- | Indicates if an elementary operation is applicable for a given origin destination matrix with the given bounds.
applicable :: Bounds -- ^ Bounds of the values of the target
-> ODM -- ^ Target Origin-Destination Matrix
-> ElementaryMatrix -- ^ Elementary operation to apply
-> Bool
applicable _ _ (NullElementary _) = True
applicable (mn,mx) s (Elementary _ (i,j) (h,k)) =
p (i,k) > mn && p (h,k) < mx &&
p (i,j) < mx && p (h,j) > mn
where p = atIndex s
-- | Compute the value of an elementary matrix based on a distance function between cells.
elementaryValue :: (Int -> Int -> Double) -- ^ Measure function
-> ElementaryMatrix -- ^ E
-> Double -- ^ Value of E
elementaryValue _ (NullElementary _) = 0
elementaryValue m (Elementary _ (r1,_ ) (r2,_)) = s * 2 * m r1 r2
where s = fromIntegral . signum $ (r1 - r2)
-- * Convertions
-- | Transform the internal representation of a Elementary Matrix in a Matrix Double
toMatrix :: ElementaryMatrix -> ODM
toMatrix (NullElementary n) = assoc (n,n) 0 []
toMatrix (Elementary n idx1@(r1,c1) idx2@(r2,c2)) =
assoc (n,n) 0 [(idx1,1), (idx2,1), ((r1,c2),-1), ((r2,c1),-1)]
-- | Transform a matrix to an assoc list.
-- Recovers only the valid (upper-triangular), nonzero elements of the ODM.
toAssocList :: ODM -> [((Int, Int), Double)]
toAssocList s = [ c | c@((i,j),v) <- zip idxs elems, i <= j, v /= 0]
where n = rows s
idxs = [ (i,j) | i <- [0..(n-1)], j <- [0..(n-1)] ]
elems = toList . flatten $ s
-- | Return True if the elementary matrix is a null elementary matrix.
isNullElementary :: ElementaryMatrix -> Bool
isNullElementary (NullElementary s) = True
isNullElementary _ = False
|
function I = haze_linear(R, t, L)
%HAZE_LINEAR Generate hazy image from clean image using the linear haze model
%corresponding to Lambert-Beer law
% Inputs:
% -|R|: H-by-W-by-|image_channels| clean image representing true radiance
% of scene.
% -|t|: H-by-W transmission map.
% -|L|: 1-by-1-by-|image_channels| homogeneous atmospheric light.
%
% Outputs:
% -|I|: synthetic hazy image, with same size as input clean image |R|.
image_channels = size(L, 3);
% Auxiliary matrix with replicates of transmission map for all channels,
% allowing to express hazy image conveniently.
t_replicated = repmat(t, 1, 1, image_channels);
% Apply linear haze model.
I = t_replicated .* R + (1 - t_replicated) .* repmat(L, size(t));
end
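% Example usage (hypothetical file name and values, for illustration only):
%   R = im2double(imread('clean.png'));          % H-by-W-by-3 clean image
%   t = 0.6 * ones(size(R, 1), size(R, 2));      % constant transmission map
%   L = reshape([0.95 0.95 0.98], 1, 1, 3);      % homogeneous atmospheric light
%   I = haze_linear(R, t, L);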
|
A set is connected if and only if it has exactly one connected component.
|
lemma in_smallomega_zero [simp]: "f \<in> \<omega>[F](\<lambda>x. 0)"
|
-- WARNING: This file was generated automatically by Vehicle
-- and should not be modified manually!
-- Metadata
-- - Agda version: 2.6.2
-- - AISEC version: 0.1.0.1
-- - Time generated: ???
open import Data.Unit
open import Data.Int as ℤ using (ℤ)
open import Data.List
open import Data.List.Relation.Unary.All as List
module MyTestModule where
emptyList : List ℤ
emptyList = []
empty : List.All (λ (x : ℤ) → ⊤) emptyList
empty = checkProperty record
{ databasePath = DATABASE_PATH
; propertyUUID = ????
  }
|
import Euclid.synthetic
import Euclid.proportion
import Mathlib.Tactic.SwapVar
import Mathlib.Tactic.LibrarySearch
open incidence_geometry
variable [i: incidence_geometry]
/-- find two different points on line -/
lemma pts_of_line (L : line) :
∃ a b : point, online a L ∧ online b L ∧ a ≠ b := by
-- start with two points
obtain ⟨ a , ha ⟩ := more_pts ∅ Set.finite_empty
obtain ⟨ b , hb ⟩ := more_pts {a} (Set.finite_singleton a)
-- if they are both on the line, we are done
by_cases online a L ∧ online b L
· use a
use b
have h_a_ne_b : a ≠ b := by refine ne_of_mem_of_not_mem (Set.mem_singleton a) hb
exact ⟨ h.1, h.2, h_a_ne_b ⟩
-- select the one that is not on the line
by_cases h_nonline_a: online a L
simp only [not_and] at h
have h_nonline_a := h h_nonline_a
-- swap a and b, so the proof can be done for both goals at once
swap_var a ↔ b
-- at this stage, the two goals can be achieved with the same steps
all_goals
-- find point on other side of line
obtain ⟨ c, hc ⟩ := diffside_of_not_online h_nonline_a
have : a ≠ c := by
by_contra contra
rw [contra.symm] at hc
exact hc.2 (sameside_rfl_of_not_online h_nonline_a)
-- circle through c centered at a
obtain ⟨ C, hC ⟩ := circle_of_ne this
-- intersections
have' := line_circle_inter_of_not_sameside hc.2 _ _
swap
exact C
swap
left
exact hC.1
swap
right
exact inside_circle_of_center hC.2
obtain ⟨ e, f, hef ⟩ := pts_of_line_circle_inter this
use e
use f
exact ⟨ hef.2.1, hef.2.2.1, hef.1 ⟩
/-- construct point on line -/
lemma pt_of_line (L : line) :
∃ a : point, online a L := by
obtain ⟨ a, b, hab ⟩ := pts_of_line L
use a
exact hab.1
/-- find second point on line -/
lemma pt_of_line_ne_pt {a : point} {L : line} (haL: online a L) :
∃ b : point, (b ≠ a) ∧ (online b L) := by
obtain ⟨ b, c, hbc ⟩ := pts_of_line L
by_cases b = a
· use c
rw [h] at hbc
exact ⟨ hbc.2.2.symm, hbc.2.1 ⟩
· use b
exact ⟨ h, hbc.1 ⟩
/-- intersection of non_parallel lines -/
lemma pt_of_line_line {L M : line} (hp: ¬ para L M) :
∃ a:point, (online a L)∧(online a M) := by
dsimp [para] at hp
rw [not_forall] at hp
obtain ⟨ e, he ⟩ := hp
use e
rw [not_or,not_not,not_not] at he
exact he
/-- parallel lines don't intersect -/
lemma neq_of_para {a b : point} {L M : line}
(haL: online a L)
(hbM: online b M)
(hpar: para L M) :
a ≠ b := by
by_contra contra
have := online_of_online_para haL hpar
rw [contra] at this
exact this hbM
/-- ## Euclid I.30
parallel is (almost) transitive (almost because parallel means not equal) -/
theorem para_trans {L M N : line}
(pLM: para L M)
(pLN: para L N) :
M=N ∨ (para M N) := by sorry
--
/-begin
by_cases MN: M = N,
left,
exact MN,
right,
-- assume that M, N intersect at c; drop a line from it perpendicular to L
by_contra npMN,
rcases pt_of_line_line npMN with ⟨c, hcM, hcN⟩,
have hncL := online_of_online_para hcN (para_symm pLN),
rcases perppointnon hncL with ⟨-, d, -, -, hdL, -, -, -, -⟩,
obtain ⟨O, hcO, hdO⟩ := line_of_pts c d,
have cd : c ≠ d := neq_of_para hcM hdL (para_symm pLM),
have hLO : L ≠ O := λ LO, hncL (by rwa ← LO at hcO),
-- draw a circle α with center c and radius d; find its intersections with M,N
obtain ⟨α, hα⟩ := circle_of_ne cd,
have cα := inside_circle_of_center hα.2,
have αM := line_circle_inter_of_inside_online hcM cα,
have αN := line_circle_inter_of_inside_online hcN cα,
obtain ⟨a, a', aa', haM, ha'M, aα⟩ := pts_of_line_circle_inter αM,
obtain ⟨b, b', bb', hbN, hb'N, bα⟩ := pts_of_line_circle_inter αN,
have Baca' := B_of_line_circle_inter aa' hcM haM ha'M aα.1 aα.2 cα,
have Bbcb' := B_of_line_circle_inter bb' hcN hbN hb'N bα.1 bα.2 cα,
have ac := ne_12_of_B Baca',
have bc := ne_12_of_B Bbcb',
have b'c := (ne_23_of_B Bbcb').symm,
have hnaO: ¬ online a O := λ haO, neq_of_para hdL hdO (by rwa ← line_unique_of_pts ac haO hcO haM hcM at pLM) (by refl),
have hnbO : ¬ online b O := λ hbO, neq_of_para hdL hdO (by rwa ← line_unique_of_pts bc hbO hcO hbN hcN at pLN) (by refl),
have hnb'O : ¬ online b' O := λ hb'O, neq_of_para hdL hdO (by rwa ← line_unique_of_pts b'c hb'O hcO hb'N hcN at pLN) (by refl),
have hnaN: ¬ online a N := λ haN, MN (line_unique_of_pts ac haM hcM haN hcN),
have hnbM: ¬ online b M := λ hbM, MN (line_unique_of_pts bc hbM hcM hbN hcN),
have hnb'M: ¬ online b' M := λ hb'M, MN (line_unique_of_pts b'c hb'M hcM hb'N hcN),
have hNO : N ≠ O := λ NO, hnbO (by rwa NO at hbN),
have hMO : M ≠ O := λ MO, hnaO (by rwa MO at haM),
-- choose b so that a, b that lie on the same side of O by symmetry
by_cases ssbaO: sameside b a O,
have ssabO := sameside_symm ssbaO,
swap,
have nsbb'O := not_sameside13_of_B123_online2 Bbcb' hcO,
have ssabO := sameside_of_diffside_diffside ⟨hnbO, hnaO, ssbaO⟩ ⟨hnbO, hnb'O, nsbb'O⟩,
swap_var [b b'],
swap_var [hbN hb'N],
swap_var [hnbM hnb'M],
swap_var [hnbO hnb'O],
swap_var [bc b'c],
all_goals {
have ss1 := sameside_of_sameside_not_sameside cd hcO hcN hcM hdO hbN haM hnaN (sameside_symm ssabO),
have ss2 := not_sameside_of_sameside_sameside hcO hcM hcN hdO haM hbN ssabO,
have ss: sameside d a N ∨ sameside d b M := by
begin
by_cases ss: sameside d a N,
left,
exact ss,
right,
exact ss1 ss,
end,
-- choose e on L so that it lies on the opposite side w.r.t. to O than a, b
obtain ⟨e0, e0d, -⟩ := pt_of_line_ne_pt hdL,
obtain ⟨β, hβ⟩ := circle_of_ne e0d.symm,
have dβ := inside_circle_of_center hβ.2,
have βL := line_circle_inter_of_inside_online hdL dβ,
obtain ⟨e, e', ee', heL, he'L, eβ⟩ := pts_of_line_circle_inter βL,
have Bede' := B_of_line_circle_inter ee' hdL heL he'L eβ.1 eβ.2 dβ,
have ed := ne_12_of_B Bede',
have e'd := (ne_23_of_B Bede').symm,
have hneO : ¬ online e O := λ heO, hLO (line_unique_of_pts ed heL hdL heO hdO),
have hne'O : ¬ online e' O := λ he'O, hLO (line_unique_of_pts e'd he'L hdL he'O hdO),
by_cases nsaeO: sameside a e' O,
have nse'eO := not_sameside13_of_B123_online2 (B_symm Bede') hdO,
have dsaeO := difsamedif (sameside_symm nsaeO) ⟨hne'O, hneO, nse'eO⟩,
swap,
swap_var [e e'],
swap_var [heL he'L],
swap_var [ed e'd],
swap_var [hneO hne'O],
have dsaeO: diffside a e O := ⟨hnaO, hneO, nsaeO⟩,
all_goals {
have dsbeO := difsamedif ssabO dsaeO,
have acd := parapostcor ed haM hcM hdL heL hdO hcO (para_symm pLM) dsaeO,
have bcd := parapostcor ed hbN hcN hdL heL hdO hcO (para_symm pLN) dsbeO,
-- argue about angles given by parallel assumptions in two symmetric cases
cases ss,
-- sameside d a N
have sum := (angle_add_iff_sameside bc.symm cd hcN hbN hcO hdO hnaO hnaN hNO).2 ⟨sameside_symm ssabO, ss⟩,
rwa [acd, bcd] at sum,
simp at sum,
exact hnaN ((angle_zero_iff_online bc.symm ac.symm hcN hbN).2 sum).1,
-- sameside d b M
have sum := (angle_add_iff_sameside ac.symm cd hcM haM hcO hdO hnbO hnbM hMO).2 ⟨ssabO, ss⟩,
rwa [acd, bcd] at sum,
simp at sum,
exact hnbM ((angle_zero_iff_online ac.symm bc.symm hcM haM).2 sum).1,
},
},
end-/
/-- diffside is symmetric -/
lemma diffside_symm {a b : point} {L: line}
(hdiff: diffside a b L) :
diffside b a L := by
dsimp [diffside]
dsimp [diffside] at hdiff
exact ⟨ hdiff.2.1, hdiff.1, (difsym hdiff.2.2) ⟩
/-- reorder areas -/
lemma area_invariant_132 {a b c : point} :
area a b c = area a c b := by
exact (area_invariant a b c).2
lemma area_invariant_213 {a b c : point} :
area a b c = area b a c := by
rw [(area_invariant a b c).2]
rw [(area_invariant a c b).1]
lemma area_invariant_231 {a b c : point} :
area a b c = area b c a := by
rw [(area_invariant a b c).1]
rw [(area_invariant c a b).1]
lemma area_invariant_312 {a b c : point} :
area a b c = area c a b := by
exact (area_invariant a b c).1
lemma area_invariant_321 {a b c : point} :
area a b c = area c b a := by
rw [(area_invariant a b c).2]
rw [(area_invariant c b a).1]
/-- degenerate area: more general statement -/
lemma area_of_eq (a b c : point)
(h: a=b ∨ a=c ∨ b=c) :
area a b c =0 := by
cases h with
| inl ab =>
rw [ab]
exact degenerate_area b c
| inr h =>
cases h with
| inl ac =>
rw [ac,area_invariant_132]
exact degenerate_area c b
| inr bc =>
rw [bc,area_invariant_321]
exact degenerate_area c a
/-- equivalent areas of paralellogram -/
lemma area_of_parallelogram {a b c d : point} {L M N O : line}
(haL: online a L) (hbL: online b L)
(hbM: online b M) (hcM: online c M)
(hcN: online c N) (hdN: online d N)
(hdO: online d O) (haO: online a O)
(parLN: para L N)
(parMO: para M O) :
area a b c + area a d c = 2*(area a b c)
∧ area b a d + area b c d = 2*(area a b c) := by
constructor
have := (parasianar hbL haL hcN hdN hbM hcM haO hdO parLN parMO).2.2
rw [area_invariant_321] at this
rw [this.symm]
ring_nf
have := (parasianar haL hbL hdN hcN haO hdO hbM hcM parLN (para_symm parMO)).2.2
rw [area_invariant_321] at this
rw [this.symm]
ring_nf
rw [area_invariant_312, @area_invariant_321 i a b c]
field_simp
exact triarea hdN hcN hbL haL (para_symm parLN)
/-- non-degeneracy of triangle -/
lemma not_online_of_triangle {a b c : point} {L M : line}
(haL: online a L)
(hbL: online b L)
(hbM: online b M)
(hcM: online c M)
(hn: ¬ online a M)
(hdeg: b ≠ c) :
¬ online c L := by
by_contra contra
rw [line_unique_of_pts hdeg hbL contra hbM hcM] at haL
exact hn haL
/--parallel line through point -/
lemma parallel_of_line_pt {a : point} {L : line}
(haL: ¬ online a L) :
∃ M : line, (online a M) ∧ (para L M) := by
obtain ⟨ b, hb ⟩ := pt_of_line L
obtain ⟨ c, hc ⟩ := pt_of_line_ne_pt hb
have := drawpar hc.1 hc.2 hb haL
obtain ⟨ throwaway,O,hO ⟩ := drawpar hc.1 hc.2 hb haL
use O
exact ⟨ hO.2.1, para_symm hO.2.2.2.2 ⟩
/-- parallel projection of point -/
lemma parallel_projection {a : point}{L M N : line}
(haM: online a M)
(hpar: para M N)
(h_L_npara_M: ¬ para L M)
(haL: ¬ online a L) :
∃ b : point, ∃ O : line, (online b N) ∧ (online b O) ∧ (online a O) ∧ (para L O) := by
-- intersections with L
obtain ⟨ c, hc ⟩ := pt_of_line_line h_L_npara_M
have h_L_npara_N : ¬ para L N := by
by_contra contra
cases para_trans (para_symm hpar) (para_symm contra) with
| inl h =>
rw [h] at haM
exact haL haM
| inr h =>
exact h_L_npara_M (para_symm h)
obtain ⟨ d, hd ⟩ := pt_of_line_line h_L_npara_N
have h_c_ne_d : c ≠ d := by
by_contra contra
rw [contra] at hc
dsimp [para] at hpar
cases hpar d with
| inl h =>
exact h hc.2
| inr h =>
exact h hd.2
-- construct parallel to L through a
obtain ⟨ throwaway,O,hO ⟩ := drawpar h_c_ne_d hc.1 hd.1 haL
-- construct intersection of O and M
have h_O_npara_N : ¬ para O N := by
by_contra contra
have pNO := para_trans contra hO.2.2.2.2
cases pNO with
| inl pNO =>
rw [pNO] at hpar
exact h_L_npara_M (para_symm hpar)
| inr pNO =>
exact (online_of_online_para hd.1 (para_symm pNO)) hd.2
obtain ⟨ ap, hap ⟩ := pt_of_line_line h_O_npara_N
use ap
use O
exact ⟨ hap.2, hap.1, hO.2.1, (para_symm hO.2.2.2.2) ⟩
/-- intersecting lines cannot be parallel -/
lemma not_para_of_online_online {a : point} {L M : line} :
(online a L) → (online a M) → ¬ para L M := by
intro hL hM
dsimp [para]
simp only [not_forall]
use a
rw [not_or,not_not,not_not]
exact ⟨ hL, hM ⟩
/-- diagonals of a trapezoid imply diffside -/
theorem diffside_of_trapezoid {a b c d : point} {L M N : line}
(haL: online a L) (hbL: online b L)
(hbM: online b M) (hcM: online c M)
(hcN: online c N) (hdN: online d N)
{D : line}
(hbD: online b D) (hdD: online d D)
(parLN: para L N)
(h_nondeg: a ≠ b ∧ c ≠ d) :
diffside a c D ∨ diffside a d M := by
dsimp [diffside]
by_cases h_ss : sameside a c D
right
constructor
exact not_online_of_triangle hcM hbM hbL haL (online_of_online_para hcN (para_symm parLN)) h_nondeg.1.symm
constructor
exact not_online_of_triangle hbM hcM hcN hdN (online_of_online_para hbL parLN) h_nondeg.2
have := sameside_of_online_online_para hcN hdN (para_symm parLN)
exact not_sameside_of_sameside_sameside hbL hbM hbD haL hcM hdD this h_ss
left
constructor
exact not_online_of_triangle hdD hbD hbL haL (online_of_online_para hdN (para_symm parLN)) h_nondeg.1.symm
constructor
exact not_online_of_triangle hbD hdD hdN hcN (online_of_online_para hbL parLN) h_nondeg.2.symm
exact h_ss
/-- cannot have B a b c if lengths don't match up -/
lemma not_B_of_short {a b c : point}
(hlen: length a b < length a c) :
¬ B a c b := by
by_contra contra
rw [(length_sum_of_B contra).symm] at hlen
linarith [length_nonneg c b]
/-- B_of_three_online_ne but with one length too short -/
lemma B_of_three_online_ne_short {a b c : point} {L : line}
(hlen: length a b < length a c) :
a ≠ b → a ≠ c → b ≠ c → online a L → online b L → online c L → B a b c ∨ B b a c := by
intro ab ac bc aL bL cL
have := B_of_three_online_ne ab ac bc aL bL cL
convert this
simp [not_B_of_short hlen]
/-- complement to same_length_of_ne_le -/
theorem same_length_B_of_ne_ge {a b c d : point} (a_ne_b : a ≠ b) (big : length a b < length c d) :
∃ (p : point), B a b p ∧ length a p = length c d := by
have c_ne_d : c ≠ d := by
by_contra contra
rw [contra] at big
rw [length_eq_zero_iff.mpr (Eq.refl d)] at big
have := length_nonneg a b
exact not_lt_of_ge this big
obtain ⟨q,hq⟩ := same_length_B_of_ne_four a_ne_b.symm c_ne_d
have a_ne_q : a ≠ q := by
by_contra contra
rw [contra] at hq
rw [length_eq_zero_iff.mpr (Eq.refl q)] at hq
rw [hq.2.symm] at big
have := length_nonneg a b
exact not_lt_of_ge this big
obtain ⟨C, hC⟩ := circle_of_ne a_ne_q
obtain ⟨p, hp⟩ := pt_oncircle_of_inside_ne a_ne_q (inside_circle_of_center hC.2)
obtain ⟨AB, hAB⟩ := line_of_pts a b
have q_online_AB := online_3_of_B hq.1 hAB.2 hAB.1
have p_online_AB := online_3_of_B (B_symm hp.1) q_online_AB hAB.1
have := (oncircle_iff_length_eq hp.2 hC.2).mpr hC.1
rw [this.symm] at hq
have b_ne_p : b ≠ p := by
by_contra contra
rw [contra.symm] at hq
rw [contra] at hq big
rw [hq.2] at big
simp at big
rw [hq.2.symm] at big
have a_ne_p := (ne_12_of_B hp.1).symm
use p
refine' ⟨_, hq.2⟩
have B3 := B_of_three_online_ne_short big a_ne_b a_ne_p b_ne_p hAB.1 hAB.2 p_online_AB
cases B3 with
| inl B3 =>
exact B3
| inr B3 =>
exfalso
exact (not_B324_of_B123_B124 (B_symm hq.1) (B_symm hp.1)) B3
/-- ## Euclid I.33
lines which join the ends of equal and parallel lines in the same directions are themselves equal and parallel
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI33.html -/
theorem para_len_parallelogram {a b c d : point} {L M N O P : line}
(haL: online a L) (hbL: online b L)
(hbM: online b M) (hcM: online c M)
(hcN: online c N) (hdN: online d N)
(hdO: online d O) (haO: online a O)
(hcP: online c P) (haP: online a P)
(hdiff: d ≠ c)
(hside: diffside b d P)
(pLN: para L N)
(hlen: length a b = length c d) :
para O M := by
have :=parapostcor hdiff hbL haL hcN hdN hcP haP pLN hside
rw [angle_symm a c d] at this
have := ((sas hlen (length_symm a c) this).2.2).symm
rw [angle_symm c a d] at this
exact angeqpar (neq_of_para hdN haL (para_symm pLN)) (neq_of_para haL hcN pLN) (neq_of_para hcN hbL (para_symm pLN)) hdO haO hcM hbM haP hcP this (diffside_symm hside)
/-- ## Euclid I.36
parallelograms which are on equal bases and in the same parallels equal one another
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI36.html -/
theorem eq_of_parallelogram_of_eq_basis_of_diffside {a b c d e f g h: point} {L M K N O P: line}
(haL: online a L) (hdL: online d L) (heL: online e L) (hhL: online h L)
(hbM: online b M) (hcM: online c M) (hfM: online f M) (hgM: online g M)
(haK: online a K) (hbK: online b K)
(hdN: online d N) (hcN: online c N)
(heO: online e O) (hfO: online f O)
(hhP: online h P) (hgP: online g P)
(parLM: para L M) (parKN: para K N) (parOP: para O P)
(hlen: length b c = length f g)
{S: line}
(hcS: online c S) (heS: online e S)
(hside: diffside b h S)
(h_b_ne_c: b ≠ c) :
area a b c + area a d c = area e f g + area e h g := by
obtain ⟨ Q, hQ ⟩ := line_of_pts b e
obtain ⟨ R, hR ⟩ := line_of_pts c h
have' parQR := para_len_parallelogram heL hhL hR.2 hR.1 hcM hbM hQ.1 hQ.2 hcS heS h_b_ne_c (diffside_symm hside) parLM _
have eq1 := parallelarea haL hdL hbM hcM heL hhL haK hbK hdN hcN hQ.1 hQ.2 hR.1 hR.2 parLM parKN parQR
have eq2 := parallelarea hbM hcM heL hhL hfM hgM hQ.1 hQ.2 hR.1 hR.2 heO hfO hhP hgP (para_symm parLM) parQR parOP
rw [(area_invariant e b c).2] at eq2
rw [(area_invariant c e h).2] at eq2
rw [add_comm] at eq2
rw [eq2] at eq1
have arp := (area_of_parallelogram haL hdL hdN hcN hcM hbM hbK haK parLM (para_symm parKN)).1
rw [add_comm] at arp
rw [arp]
have arp := (area_of_parallelogram heL hhL hhP hgP hgM hfM hfO heO parLM (para_symm parOP)).1
rw [add_comm] at arp
rw [arp]
have arp := (area_of_parallelogram haL hdL hdN hcN hcM hbM hbK haK parLM (para_symm parKN)).2
rw [area_invariant_321] at arp
rw [(area_invariant d c b).2] at arp
rw [arp] at eq1
rw [eq1]
have arp := (area_of_parallelogram heL hhL hhP hgP hgM hfM hfO heO parLM (para_symm parOP)).2
rw [(area_invariant h e f).1] at arp
rw [arp.symm, add_comm]
rw [Eq.symm (parasianar hfM hgM heL hhL hfO heO hgP hhP (para_symm parLM) parOP).1]
rw [hlen.symm]
exact length_symm b c
theorem eq_of_parallelogram_of_eq_basis {a b c d e f g h: point} {L M K N O P: line}
(haL: online a L) (hdL: online d L) (heL: online e L) (hhL: online h L)
(hbM: online b M) (hcM: online c M) (hfM: online f M) (hgM: online g M)
(haK: online a K) (hbK: online b K)
(hdN: online d N) (hcN: online c N)
(heO: online e O) (hfO: online f O)
(hhP: online h P) (hgP: online g P)
(parLM: para L M) (parKN: para K N) (parOP: para O P)
(hlen: length b c = length f g) :
area a b c + area a d c = area e f g + area e h g := by
have h_fg_eq_eh := (parasianar heL hhL hfM hgM heO hfO hhP hgP parLM parOP).1
-- trivial case: b=c
by_cases h_b_ne_c: b=c
· have h_f_eq_g := (length_eq_zero_iff.mp (Eq.trans hlen.symm (length_eq_zero_iff.mpr h_b_ne_c)))
have := (parasianar haL hdL hbM hcM haK hbK hdN hcN parLM parKN).1
have h_a_eq_d := (length_eq_zero_iff.mp (Eq.trans this (length_eq_zero_iff.mpr h_b_ne_c)))
have h_e_eq_h := (length_eq_zero_iff.mp (Eq.trans h_fg_eq_eh (length_eq_zero_iff.mpr h_f_eq_g)))
rw [(area_of_eq a b c _)]
rw [(area_of_eq a d c _)]
rw [(area_of_eq e f g _)]
rw [(area_of_eq e h g _)]
left
exact h_e_eq_h
right
right
exact h_f_eq_g
left
exact h_a_eq_d
right
right
exact h_b_ne_c
have h_e_ne_h : e ≠ h := by
by_contra contra
rw [hlen.symm] at h_fg_eq_eh
exact h_b_ne_c (length_eq_zero_iff.mp (Eq.trans h_fg_eq_eh.symm (length_eq_zero_iff.mpr contra)))
rw [(Ne.def b c).symm] at h_b_ne_c
obtain ⟨ S, hS ⟩ := line_of_pts c e
obtain ⟨ Q, hQ ⟩ := line_of_pts b e
obtain ⟨ R, hR ⟩ := line_of_pts c h
have hside := diffside_of_trapezoid hhL heL hQ.2 hQ.1 hbM hcM hS.2 hS.1 parLM ⟨ h_e_ne_h.symm, h_b_ne_c ⟩
cases hside with
| inl hside =>
exact eq_of_parallelogram_of_eq_basis_of_diffside haL hdL heL hhL hbM hcM hfM hgM haK hbK hdN hcN heO hfO hhP hgP parLM parKN parOP hlen hS.1 hS.2 (diffside_symm hside) h_b_ne_c
| inr hside =>
-- invert parallelogram
rw [length_symm b c] at hlen
have := eq_of_parallelogram_of_eq_basis_of_diffside hdL haL heL hhL hcM hbM hfM hgM hdN hcN haK hbK heO hfO hhP hgP parLM (para_symm parKN) parOP hlen hQ.1 hQ.2 (diffside_symm hside) h_b_ne_c.symm
rw [area_invariant_321] at this
rw [@area_invariant_321 i d a b, add_comm] at this
rw [this.symm]
rw [(area_of_parallelogram haK hbK hbM hcM hcN hdN hdL haL parKN (para_symm parLM)).2]
rw [(area_of_parallelogram haK hbK hbM hcM hcN hdN hdL haL parKN (para_symm parLM)).1]
/-- ## Euclid I.38
triangles which are on equal bases and in the same parallels equal one another (version where the vertex is different for both triangles)
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI38.html -/
theorem eq_area_of_eq_base {a b c d e f : point} {L M : line}
(haM: online a M)
(hbL: online b L)
(hcL: online c L)
(hdM: online d M)
(heL: online e L)
(hfL: online f L)
(pLM: para L M)
(hlen: length b c = length e f) :
area a b c=area d e f := by
-- trivial case: b=c
by_cases h_b_ne_c: b=c
· rw [area_of_eq a b c _]
rw [area_of_eq d e f _]
right
right
exact length_eq_zero_iff.mp (Eq.trans hlen.symm (length_eq_zero_iff.mpr h_b_ne_c))
right
right
exact h_b_ne_c
have h_e_ne_f : e ≠ f := by
by_contra contra
exact h_b_ne_c (length_eq_zero_iff.mp (Eq.trans hlen (length_eq_zero_iff.mpr contra)))
rw [(Ne.def b c).symm] at h_b_ne_c
-- line through a c abd d e
obtain ⟨ K, hK ⟩ := line_of_pts a c
obtain ⟨ N, hN ⟩ := line_of_pts d e
-- construct parallel projection of b through a c
have h_a_nonline_L := online_of_online_para haM (para_symm pLM)
have := not_online_of_triangle hK.1 hK.2 hcL hbL h_a_nonline_L h_b_ne_c.symm
obtain ⟨ g,O,hg ⟩ := parallel_projection hbL pLM (not_para_of_online_online hK.2 hcL) this
-- construct parallel projection of f through d e
have h_d_nonline_L := online_of_online_para hdM (para_symm pLM)
have := not_online_of_triangle hN.1 hN.2 heL hfL h_d_nonline_L h_e_ne_f
obtain ⟨ h,P,hh ⟩ := parallel_projection hfL pLM (not_para_of_online_online hN.2 heL) this
have := eq_of_parallelogram_of_eq_basis
hg.1 haM hdM hh.1 hbL hcL heL hfL hg.2.1 hg.2.2.1 hK.1 hK.2 hN.1 hN.2 hh.2.1 hh.2.2.1
(para_symm pLM) (para_symm hg.2.2.2) hh.2.2.2
hlen
rw [@area_invariant_321 i g b c] at this
rw [@area_invariant_321 i g a c] at this
rw [(area_of_parallelogram hbL hcL hK.2 hK.1 haM hg.1 hg.2.1 hg.2.2.1 pLM hg.2.2.2).2] at this
rw [(area_invariant b c a).1] at this
rw [(area_of_parallelogram hN.1 hN.2 heL hfL hh.2.2.1 hh.2.1 hh.1 hdM hh.2.2.2 pLM).1] at this
simp at this
exact this
/-- ## Euclid I.38
triangles which are on equal bases and in the same parallels equal one another (version where the vertex is the same for both triangles)
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI38.html -/
theorem eq_area_of_eq_base_samevertex (a : point) {b c e f : point} {L : line}
(hbL: online b L)
(hcL: online c L)
(heL: online e L)
(hfL: online f L)
(hlen: length b c = length e f) :
area a b c=area a e f := by
-- trivial case: b=c
by_cases h_b_ne_c : b=c
· rw [length_eq_zero_iff.mpr h_b_ne_c] at hlen
have := length_eq_zero_iff.mp hlen.symm
rw [(area_of_eq a b c _)]
rw [(area_of_eq a e f _)]
right
right
exact this
right
right
exact h_b_ne_c
have h_e_ne_f : e ≠ f := by
have := length_eq_zero_iff.not.mpr h_b_ne_c
rw [hlen] at this
exact length_eq_zero_iff.not.mp this
-- trivial case online a L
by_cases h_a_nonline_L : online a L
· have := (area_zero_iff_online h_b_ne_c hbL hcL).mpr h_a_nonline_L
rw [@area_invariant_231 i a b c]
rw [this]
have := (area_zero_iff_online h_e_ne_f heL hfL).mpr h_a_nonline_L
rw [@area_invariant_231 i a e f]
exact this.symm
obtain ⟨ M, hM ⟩ := parallel_of_line_pt h_a_nonline_L
exact eq_area_of_eq_base hM.1 hbL hcL hM.1 heL hfL hM.2 hlen
/-- ## Euclid I.37
triangles which are on the same base and in the same parallels equal one another (a special case of I.38)
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI37.html -/
theorem para_implies_eq_area_of_same_base {a b c d : point} {L M : line}
(haM: online a M)
(hbL: online b L)
(hcL: online c L)
(hdM: online d M)
(pLM: para L M) :
area a b c = area d b c := by sorry
--
/-begin
apply eq_area_of_eq_base haM hbL hcL hdM hbL hcL pLM,
simp,
end-/
/-- area of a triangle cannot equal the area of its subtriangle -/
lemma tri_sum_contra {b c d e: point} {O : line}
(hbO: online b O)
(hdO: online d O)
(heO: online e O)
(hncO: ¬ online c O)
(bd: b ≠ d)
(de: d ≠ e)
(eb: e ≠ b)
(hBbed: B b e d)
(harea: area b c d = area e b c) :
false := by sorry
--
/-begin
have sum:= (area_add_iff_B bd de eb hbO hdO heO hncO).1 hBbed,
have bec_eq_ebc : area b e c = area e b c := by
rw [(area_invariant b e c).2, (area_invariant b c e).1],
have ced_eq_dec : area c e d = area d e c:= by
rw [(area_invariant c e d).1,(area_invariant d e c).2],
rw [harea, bec_eq_ebc, ced_eq_dec] at sum,
simp at sum,
have hcO := (area_zero_iff_online de hdO heO).1 (sum),
apply hncO(hcO),
end-/
/-- ## Euclid I.39
equal triangles which are on the same base and on the same side are also in the same parallels
https://mathcs.clarku.edu/~djoyce/java/elements/bookI/propI39.html -/
theorem eq_area_of_same_base_implies_para {a b c d : point} {L M O : line}
(hbL: online b L)
(hcL: online c L)
(hnaL: ¬ online a L)
(haM: online a M)
(hdM: online d M)
(hbO: online b O)
(hdO: online d O)
(hncO: ¬ online c O)
(ad: a ≠ d)
(bc: b ≠ c)
(bd: b ≠ d)
(ssadL: sameside a d L)
(harea: area a b c = area d b c) :
para L M := by sorry
--
/-begin
rcases drawpar bc hbL hcL hnaL with ⟨-, N,_,haN,-,-,pNL⟩,
have pLN:= para_symm pNL,
-- show that N and O intersect
have npNO: ¬ para N O :=
begin
by_contra' pNO,
have LO_or_pLO := para_trans pNL pNO,
cases LO_or_pLO,
-- L = O
rw ← LO_or_pLO at hncO,
exact hncO(hcL),
-- L is parallel to O
apply neq_of_para hbL hbO LO_or_pLO,
simp,
end,
-- contruct e as intersection of N and O
rcases pt_of_line_line npNO with ⟨e, heN, heO⟩,
have harea2: area a b c = area e b c := by
begin
apply eq_area_of_eq_base haN hbL hcL heN hbL hcL pLN,
simp,
end,
have dbc_eq_bcd : area d b c = area b c d := by rw (area_invariant b c d).1,
rw [harea, dbc_eq_bcd] at harea2,
have be := neq_of_para hbL heN pLN,
by_cases de: d = e,
-- case d = e
rw ← de at heN,
rwa line_unique_of_pts ad haM hdM haN heN,
-- case d != e (cannot actually occur)
rw ← ne.def d e at de,
exfalso,
cases B_of_three_online_ne be bd de.symm hbO heO hdO with hBbed hB,
-- case B b e d
apply tri_sum_contra hbO hdO heO hncO bd de be.symm hBbed harea2,
cases hB with hBebd hBbde,
swap,
-- case B b d e
have ebc_eq_bce : area e b c = area b c e := by rw (area_invariant b c e).1,
rw ← dbc_eq_bcd at harea2,
rw ebc_eq_bce at harea2,
apply tri_sum_contra hbO heO hdO hncO be de.symm bd.symm hBbde harea2.symm,
-- case B e b d
have ssaeL := sameside_of_online_online_para haN heN pNL,
have ssdeL := sameside_trans ssadL ssaeL,
have dsedL:= not_sameside13_of_B123_online2 hBebd hbL,
exact dsedL(sameside_symm ssdeL),
end-/
|
During the 2011–12 NBA season, which was shortened to 66 games, the Bobcats posted a 7–59 record. Their <unk> winning percentage was the worst in NBA history. "I'm not real happy about the record book scenario last year. It's very, very frustrating", Jordan said later that year.
|
# Create some plots
source("heston.r")
system("mkdir -p images")
fn <- "images/impvol.png"
png(file=fn, width=480, height=480)
PlotHestonSurface()
dev.off()
system(paste("convert -trim", fn, fn))
|
stepUpIter <- function() {
i <- 0
while ( ! i) {
i <- i - 1 + (2 * step())
}
}
|
import copy
import numpy as np
import xarray as xr
from starfish.core.test.factories import SyntheticData
from starfish.core.types import Axes, Levels
from .factories import synthetic_stack
from ..imagestack import ImageStack
def divide(array, value):
return array / value
def test_apply():
"""test that apply correctly applies a simple function across 2d tiles of a Stack"""
stack = synthetic_stack()
assert (stack.xarray == 1).all()
output = stack.apply(divide, value=2)
assert (output.xarray == 0.5).all()
def test_apply_positional():
"""test that apply correctly applies a simple function across 2d tiles of a Stack. Unlike
test_apply, the parameter is passed in as a positional parameter."""
stack = synthetic_stack()
assert (stack.xarray == 1).all()
output = stack.apply(divide, 2, n_processes=1)
assert (output.xarray == 0.5).all()
def test_apply_3d():
"""test that apply correctly applies a simple function across 3d volumes of a Stack"""
stack = synthetic_stack()
assert np.all(stack.xarray == 1)
stack.apply(divide, in_place=True, value=4,
group_by={Axes.ROUND, Axes.CH})
assert (stack.xarray == 0.25).all()
def test_apply_labeled_dataset():
"""
test that apply correctly applies a simple function across starfish-generated synthetic data
"""
original = SyntheticData().spots()
image = original.apply(divide, value=2)
assert np.all(image.xarray == original.xarray / 2)
def test_apply_in_place():
"""
test that apply correctly applies a simple function across a starfish stack without modifying
original data
"""
image = SyntheticData().spots()
original = copy.deepcopy(image)
image.apply(divide, value=2, in_place=True)
assert np.all(image.xarray == original.xarray / 2)
def test_apply_single_process():
"""test that apply correctly applies a simple function across 2d tiles of a Stack"""
stack = synthetic_stack()
assert (stack.xarray == 1).all()
output = stack.apply(divide, value=2, n_processes=1)
assert (output.xarray == 0.5).all()
def test_apply_clipping_methods():
"""test that apply properly clips the imagestack"""
# create a half-valued float array
data = np.full((2, 2, 2, 5, 5), fill_value=0.5, dtype=np.float32)
# set one value to max
data[1, 1, 1, 1, 1] = 1
imagestack = ImageStack.from_numpy(data)
# max value after multiplication == 2, all other values == 1
def apply_function(x):
return x * 2
# Levels.CLIP
# all data are clipped to 1, setting all values to 1 (np.unique(pre_scaled) == [1, 2])
res = imagestack.apply(apply_function, level_method=Levels.CLIP, in_place=False, n_processes=1)
assert np.allclose(res.xarray.values, 1)
# Levels.SCALE_SATURATED_BY_IMAGE
# all data are scaled, resulting in values being multiplied by 0.5, replicating the original
# data
res = imagestack.apply(
apply_function, level_method=Levels.SCALE_SATURATED_BY_IMAGE, in_place=False, n_processes=1
)
assert np.allclose(imagestack.xarray, res.xarray)
assert isinstance(imagestack.xarray, xr.DataArray)
# Levels.SCALE_SATURATED_BY_CHUNK
res = imagestack.apply(
apply_function, level_method=Levels.SCALE_SATURATED_BY_CHUNK, in_place=False, n_processes=1,
group_by={Axes.CH, Axes.ROUND},
)
# any (round, ch) combination that was all 0.5 should now be all 1.
assert np.allclose(res.sel({Axes.ROUND: 0, Axes.CH: 0}).xarray, 1)
assert np.allclose(res.sel({Axes.ROUND: 1, Axes.CH: 0}).xarray, 1)
assert np.allclose(res.sel({Axes.ROUND: 0, Axes.CH: 1}).xarray, 1)
# the specific (round, ch) combination with the single "1" value should be scaled, and due to
# construction, look like the original data.
assert np.allclose(
res.sel({Axes.ROUND: 1, Axes.CH: 1}).xarray,
imagestack.sel({Axes.ROUND: 1, Axes.CH: 1}).xarray
)
|
Formal statement is: lemma bigoI [intro]: assumes "eventually (\<lambda>x. (norm (f x)) \<le> c * (norm (g x))) F" shows "f \<in> O[F](g)" Informal statement is: If $f$ is eventually bounded by $cg$ for some constant $c$, then $f$ is big-O of $g$.
|
using DrWatson
@quickactivate "StatReth"
# %%
# Bayes: Pr(Earth|Land) = Pr(Land|Earth) * Pr(Earth) / Pr(Land)
p_land_earth = 1 - 0.7
p_land_mars = 1.0
p_earth = 0.5
p_land = p_land_earth * p_earth + p_land_mars * (1 - p_earth)
p_earth_land = p_land_earth * p_earth / p_land
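# Sanity check by hand: p_land = 0.3 * 0.5 + 1.0 * 0.5 = 0.65,
# so p_earth_land = 0.15 / 0.65 ≈ 0.2308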
|
[GOAL]
M : Type u_1
X : Type u_2
Y : Type u_3
α : Type u_4
inst✝⁶ : TopologicalSpace M
inst✝⁵ : TopologicalSpace X
inst✝⁴ : TopologicalSpace Y
inst✝³ : SMul M X
inst✝² : ContinuousSMul M X
f : Y → M
g : Y → X
b : Y
s : Set Y
inst✝¹ : SMul Mᵐᵒᵖ X
inst✝ : IsCentralScalar M X
⊢ Continuous fun p => p.fst • p.snd
[PROOFSTEP]
suffices Continuous fun p : M × X => MulOpposite.op p.fst • p.snd from
this.comp (MulOpposite.continuous_unop.prod_map continuous_id)
[GOAL]
M : Type u_1
X : Type u_2
Y : Type u_3
α : Type u_4
inst✝⁶ : TopologicalSpace M
inst✝⁵ : TopologicalSpace X
inst✝⁴ : TopologicalSpace Y
inst✝³ : SMul M X
inst✝² : ContinuousSMul M X
f : Y → M
g : Y → X
b : Y
s : Set Y
inst✝¹ : SMul Mᵐᵒᵖ X
inst✝ : IsCentralScalar M X
⊢ Continuous fun p => MulOpposite.op p.fst • p.snd
[PROOFSTEP]
simpa only [op_smul_eq_smul] using (continuous_smul : Continuous fun p : M × X => _)
[GOAL]
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝¹ : TopologicalSpace M
inst✝ : SMul M X
ts : Set (TopologicalSpace X)
h : ∀ (t : TopologicalSpace X), t ∈ ts → ContinuousSMul M X
⊢ Continuous fun p => p.fst • p.snd
[PROOFSTEP]
rw [← (@sInf_singleton _ _ ‹TopologicalSpace M› :)]
[GOAL]
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝¹ : TopologicalSpace M
inst✝ : SMul M X
ts : Set (TopologicalSpace X)
h : ∀ (t : TopologicalSpace X), t ∈ ts → ContinuousSMul M X
⊢ Continuous fun p => p.fst • p.snd
[PROOFSTEP]
exact
continuous_sInf_rng.2 fun t ht =>
continuous_sInf_dom₂ (Eq.refl _) ht (@ContinuousSMul.continuous_smul _ _ _ _ t (h t ht))
[GOAL]
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝³ : TopologicalSpace M
inst✝² : SMul M X
t₁ t₂ : TopologicalSpace X
inst✝¹ : ContinuousSMul M X
inst✝ : ContinuousSMul M X
⊢ ContinuousSMul M X
[PROOFSTEP]
rw [inf_eq_iInf]
[GOAL]
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝³ : TopologicalSpace M
inst✝² : SMul M X
t₁ t₂ : TopologicalSpace X
inst✝¹ : ContinuousSMul M X
inst✝ : ContinuousSMul M X
⊢ ContinuousSMul M X
[PROOFSTEP]
refine' continuousSMul_iInf fun b => _
[GOAL]
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝³ : TopologicalSpace M
inst✝² : SMul M X
t₁ t₂ : TopologicalSpace X
inst✝¹ : ContinuousSMul M X
inst✝ : ContinuousSMul M X
b : Bool
⊢ ContinuousSMul M X
[PROOFSTEP]
cases b
[GOAL]
case false
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝³ : TopologicalSpace M
inst✝² : SMul M X
t₁ t₂ : TopologicalSpace X
inst✝¹ : ContinuousSMul M X
inst✝ : ContinuousSMul M X
⊢ ContinuousSMul M X
[PROOFSTEP]
assumption
[GOAL]
case true
ι : Sort u_1
M : Type u_2
X : Type u_3
inst✝³ : TopologicalSpace M
inst✝² : SMul M X
t₁ t₂ : TopologicalSpace X
inst✝¹ : ContinuousSMul M X
inst✝ : ContinuousSMul M X
⊢ ContinuousSMul M X
[PROOFSTEP]
assumption
[GOAL]
G : Type u_1
P : Type u_2
inst✝⁵ : AddGroup G
inst✝⁴ : AddTorsor G P
inst✝³ : TopologicalSpace G
inst✝² : PreconnectedSpace G
inst✝¹ : TopologicalSpace P
inst✝ : ContinuousVAdd G P
⊢ IsPreconnected Set.univ
[PROOFSTEP]
convert
isPreconnected_univ.image (Equiv.vaddConst (Classical.arbitrary P) : G → P)
(continuous_id.vadd continuous_const).continuousOn
[GOAL]
case h.e'_3
G : Type u_1
P : Type u_2
inst✝⁵ : AddGroup G
inst✝⁴ : AddTorsor G P
inst✝³ : TopologicalSpace G
inst✝² : PreconnectedSpace G
inst✝¹ : TopologicalSpace P
inst✝ : ContinuousVAdd G P
⊢ Set.univ = ↑(Equiv.vaddConst (Classical.arbitrary P)) '' Set.univ
[PROOFSTEP]
rw [Set.image_univ, Equiv.range_eq_univ]
|
module Graphics.Rendering.Gl
import Data.Fin
import Graphics.Util.Mesh
import Graphics.Util.Transforms
import public Graphics.Rendering.Gl.Types
import public Graphics.Rendering.Gl.Buffers
import public Graphics.Rendering.Gl.Gl41
import Data.Matrix
import Data.Vect
import Control.Algebra
%include C "GL/glew.h"
%include C "gl_idris.h"
%link C "gl_idris.o"
%access public export
-- GLEW
||| initialises the GL function pointers
export
glewInit : IO Int
glewInit = foreign FFI_C "idr_init_glew" (IO Int)
-- ----------------------------------------------------------------- [ Helpers ]
||| upload a list of doubles to the GPU into an array buffer
export
loadDoubleData : BufferUsageARB -> List Double -> IO ()
loadDoubleData usage data' = do
ds <- sizeofDouble
ptr <- doublesToBuffer data'
glBufferData GL_ARRAY_BUFFER (ds * (cast $ length data')) ptr usage
free ptr
pure ()
toList' : Vect n (Vect m a) -> List a
toList' [] = []
toList' (x :: xs) = (toList x) ++ toList' xs
-- ----------------------------------------------------------------- [ Simple API ]
||| location of a texture on the GPU
export
record Texture where
constructor MkTexture
textureLocation: Int
||| locations of a shading program
export
record Shader where
constructor MkShader
||| location of the shader program
program: Int
||| locations of all shaders for this program. minimum of two shaders is required (vertex and fragment shader)
shaders: Vect (S (S n)) Int
||| create a shader from a pair of a shader type an a string containing the shader source
createShader : (GLenum, String) -> IO Int
createShader (shaderType, shaderSource) = do
shaderLoc <- glCreateShader shaderType
glShaderSource shaderLoc 1 [shaderSource] [(cast $ length shaderSource)]
glCompileShader shaderLoc
pure shaderLoc
||| creates and returns a shader program
||| @ shaderSpec a list of pairs of shader type and shader source
export
createShaders : (shaderSpec: Vect (S (S n)) (GLenum, String)) -> IO Shader
createShaders shaderSpec = do
locs <- traverse createShader shaderSpec
programLoc <- glCreateProgram
traverse (glAttachShader programLoc) locs
glLinkProgram programLoc
glUseProgram 0
pure $ MkShader programLoc locs
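-- A hypothetical usage sketch (the shader sources and the exact enum names are
-- assumptions, not taken from this module):
--
--   shader <- createShaders [ (GL_VERTEX_SHADER,   vertexShaderSource)
--                           , (GL_FRAGMENT_SHADER, fragmentShaderSource) ]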
||| deletes the shader program
export
deleteShaders : Shader -> IO ()
deleteShaders (MkShader programLoc shaderLocs) = do
glUseProgram 0
traverse (glDetachShader programLoc) shaderLocs
traverse glDeleteShader shaderLocs
glDeleteProgram programLoc
pure ()
||| the model on the GPU
|||
||| the minimum information needed is the location of the vertex array object (VAO)
||| and the locations of the vertex buffer objects (VBO)
||| textures are optional
export
data Model : Type where
||| creates a textured model
||| @ vao the location of the vertex array object
||| @ vbos the locations of the vertex buffer objects
||| @ indices the number of indices
||| @ textures locations of the textures for the model
TexturedModel : (vao: Int) ->
(vbos: Vect (S n) Int) ->
(indices: Int) ->
(textures: Vect m Texture) -> Model
||| creates a model from a mesh and some texture locations
||| @ m the mesh. should be uv unwrapped
||| @ textures texture locations we need to bind when using the model
export
createModel : (m: Mesh) -> (textures: (Vect n Texture)) -> IO Model
createModel (UvMesh positions normals uvs indices) textures = do
(vaoLoc :: _) <- glGenVertexArrays 1
glBindVertexArray vaoLoc
(positionBuffer :: normalBuffer :: uvBuffer :: indexBuffer :: _) <- glGenBuffers 4
glBindBuffer GL_ARRAY_BUFFER positionBuffer
loadDoubleData GL_STATIC_DRAW (toList' positions)
glEnableVertexAttribArray 0
glVertexAttribPointer 0 3 GL_DOUBLE GL_FALSE 0 prim__null
glBindBuffer GL_ARRAY_BUFFER normalBuffer
loadDoubleData GL_STATIC_DRAW (toList' normals)
glEnableVertexAttribArray 1
glVertexAttribPointer 1 3 GL_DOUBLE GL_FALSE 0 prim__null
glBindBuffer GL_ARRAY_BUFFER uvBuffer
loadDoubleData GL_STATIC_DRAW (toList' uvs)
glEnableVertexAttribArray 2
glVertexAttribPointer 2 2 GL_DOUBLE GL_FALSE 0 prim__null
is <- sizeofInt
glBindBuffer GL_ELEMENT_ARRAY_BUFFER indexBuffer
ptr <- intsToBuffer indices
glBufferData GL_ELEMENT_ARRAY_BUFFER (is * (cast $ length indices)) ptr GL_STATIC_DRAW
free ptr
glDisableVertexAttribArray 2 -- uvs
glDisableVertexAttribArray 1 -- normals
glDisableVertexAttribArray 0 -- positions
glBindVertexArray 0
pure $ TexturedModel vaoLoc [positionBuffer, normalBuffer, uvBuffer, indexBuffer] (cast $ length indices) textures
export
deleteModel : Model -> IO ()
deleteModel (TexturedModel vao vbos _ _) = do
glDisableVertexAttribArray 2 -- uvs
glDisableVertexAttribArray 1 -- normals
glDisableVertexAttribArray 0 -- positions
glBindBuffer GL_ARRAY_BUFFER 0
glBindBuffer GL_ELEMENT_ARRAY_BUFFER 0
glDeleteBuffers (cast $ length vbos) $ toList vbos
glBindVertexArray 0
glDeleteVertexArrays 1 [vao]
pure ()
||| data type for entities.
||| an entity is like an instance of a model. It consists of the model and
||| instance-specific data like location, rotation, etc.
|||
export
data Entity : Type -> Type where
||| a simple entity: model, shader, texture and instance data
||| @ model the model ('class') of the entity
||| @ shader shader program to use
||| @ position 3D position of the entity
||| @ rotation 3D rotation of the entity
||| @ location uniform location of the transform matrix in the shader program
||| @ val arbitrary data
SimpleEntity : (model: Model)
-> (shader: Shader)
-> (position: Vec3)
-> (rotation: Vect 3 Angle)
-> (location: Int)
-> (val: a)
-> Entity a
export
render : Entity a -> (prepare: a -> IO ()) -> IO ()
render (SimpleEntity (TexturedModel vao _ numIndices textures) (MkShader prog _) entityPosition rotation location val) prepare = do
glBindVertexArray vao
glEnableVertexAttribArray 0
glEnableVertexAttribArray 1
glEnableVertexAttribArray 2
glUseProgram prog
prepare val
traverse (\t => glBindTexture GL_TEXTURE_2D (textureLocation t)) textures
glDrawElements GL_TRIANGLES numIndices GL_UNSIGNED_INT prim__null
glDisableVertexAttribArray 2 -- uvs
glDisableVertexAttribArray 1 -- normals
glDisableVertexAttribArray 0 -- positions
glBindVertexArray 0
pure ()
||| load a png file to the currently bound texture
export
glLoadPNGTexture : Int -> Int -> String -> IO Int
glLoadPNGTexture target level filename = foreign FFI_C "png_texture" (Int -> Int -> String -> IO Int) target level filename
export
loadTexture : String -> Fin 30 -> IO Texture
loadTexture filename index = do
putStrLn $ "Loading " ++ filename ++ " to texture unit " ++ (show $ finToNat index)
glActiveTexture (GL_TEXTURE0 + (cast $ finToNat index))
(texture :: _) <- glGenTextures 1
glBindTexture GL_TEXTURE_2D texture
ret <- glLoadPNGTexture (toGlInt Graphics.Rendering.Gl.GL41.TextureTarget.GL_TEXTURE_2D) 0 filename
-- the texture is bound ... so we can set some params
glTexParameteri GL_TEXTURE_2D GL_TEXTURE_WRAP_S GL_CLAMP_TO_EDGE
glTexParameteri GL_TEXTURE_2D GL_TEXTURE_WRAP_T GL_CLAMP_TO_EDGE
glTexParameteri GL_TEXTURE_2D GL_TEXTURE_MIN_FILTER GL_LINEAR
glTexParameteri GL_TEXTURE_2D GL_TEXTURE_MAG_FILTER GL_LINEAR
pure $ MkTexture texture
export
deleteTextures : List Texture -> IO ()
deleteTextures xs = glDeleteTextures (cast $ length xs) $ map textureLocation xs
-- ----------------------------------------------------------------- [ Helper ]
export
printShaderLog : Int -> IO ()
printShaderLog id = foreign FFI_C "printShaderLog" (Int -> IO()) id
export
glGetInfo : IO String
glGetInfo = do vendor <- glGetString GL_VENDOR
renderer <- glGetString GL_RENDERER
version <- glGetString GL_VERSION
pure $ foldl1 (++) (the (List String) ["Vendor = ", vendor, "\nRenderer = ", renderer, "\nVersion = ", version, "\n"])
|
/-
Copyright (c) 2022 Frédéric Dupuis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Frédéric Dupuis
-/
import topology.algebra.module.weak_dual
import algebra.algebra.spectrum
/-!
# Character space of a topological algebra
The character space of a topological algebra is the subset of elements of the weak dual that
are also algebra homomorphisms. This space is used in the Gelfand transform, which gives an
isomorphism between a commutative C⋆-algebra and continuous functions on the character space
of the algebra. This, in turn, is used to construct the continuous functional calculus on
C⋆-algebras.
## Implementation notes
We define `character_space 𝕜 A` as a subset of the weak dual, which automatically puts the
correct topology on the space. We then define `to_alg_hom` which provides the algebra homomorphism
corresponding to any element. We also provide `to_clm` which provides the element as a
continuous linear map. (Even though `weak_dual 𝕜 A` is a type copy of `A →L[𝕜] 𝕜`, this is
often more convenient.)
## Tags
character space, Gelfand transform, functional calculus
-/
namespace weak_dual
/-- The character space of a topological algebra is the subset of elements of the weak dual that
are also algebra homomorphisms. -/
def character_space (𝕜 : Type*) (A : Type*) [comm_semiring 𝕜] [topological_space 𝕜]
[has_continuous_add 𝕜] [has_continuous_const_smul 𝕜 𝕜]
[non_unital_non_assoc_semiring A] [topological_space A] [module 𝕜 A] :=
{φ : weak_dual 𝕜 A | (φ ≠ 0) ∧ (∀ (x y : A), φ (x * y) = (φ x) * (φ y))}
variables {𝕜 : Type*} {A : Type*}
namespace character_space
section non_unital_non_assoc_semiring
variables [comm_semiring 𝕜] [topological_space 𝕜] [has_continuous_add 𝕜]
[has_continuous_const_smul 𝕜 𝕜] [non_unital_non_assoc_semiring A] [topological_space A]
[module 𝕜 A]
lemma coe_apply (φ : character_space 𝕜 A) (x : A) : (φ : weak_dual 𝕜 A) x = φ x := rfl
/-- An element of the character space, as a continuous linear map. -/
def to_clm (φ : character_space 𝕜 A) : A →L[𝕜] 𝕜 := (φ : weak_dual 𝕜 A)
lemma to_clm_apply (φ : character_space 𝕜 A) (x : A) : φ x = to_clm φ x := rfl
/-- An element of the character space, as an non-unital algebra homomorphism. -/
@[simps] def to_non_unital_alg_hom (φ : character_space 𝕜 A) : A →ₙₐ[𝕜] 𝕜 :=
{ to_fun := (φ : A → 𝕜),
map_mul' := φ.prop.2,
map_smul' := (to_clm φ).map_smul,
map_zero' := continuous_linear_map.map_zero _,
map_add' := continuous_linear_map.map_add _ }
lemma map_zero (φ : character_space 𝕜 A) : φ 0 = 0 := (to_non_unital_alg_hom φ).map_zero
lemma map_add (φ : character_space 𝕜 A) (x y : A) : φ (x + y) = φ x + φ y :=
(to_non_unital_alg_hom φ).map_add _ _
lemma map_smul (φ : character_space 𝕜 A) (r : 𝕜) (x : A) : φ (r • x) = r • (φ x) :=
(to_clm φ).map_smul _ _
lemma map_mul (φ : character_space 𝕜 A) (x y : A) : φ (x * y) = φ x * φ y :=
(to_non_unital_alg_hom φ).map_mul _ _
lemma continuous (φ : character_space 𝕜 A) : continuous φ := (to_clm φ).continuous
end non_unital_non_assoc_semiring
section unital
variables [comm_ring 𝕜] [no_zero_divisors 𝕜] [topological_space 𝕜] [has_continuous_add 𝕜]
[has_continuous_const_smul 𝕜 𝕜] [topological_space A] [semiring A] [algebra 𝕜 A]
lemma map_one (φ : character_space 𝕜 A) : φ 1 = 1 :=
begin
have h₁ : (φ 1) * (1 - φ 1) = 0 := by rw [mul_sub, sub_eq_zero, mul_one, ←map_mul φ, one_mul],
rcases mul_eq_zero.mp h₁ with h₂|h₂,
{ exfalso,
apply φ.prop.1,
ext,
rw [continuous_linear_map.zero_apply, ←one_mul x, coe_apply, map_mul φ, h₂, zero_mul] },
{ rw [sub_eq_zero] at h₂,
exact h₂.symm },
end
/-- An element of the character space, as an algebra homomorphism. -/
@[simps] def to_alg_hom (φ : character_space 𝕜 A) : A →ₐ[𝕜] 𝕜 :=
{ map_one' := map_one φ,
commutes' := λ r, by
{ rw [algebra.algebra_map_eq_smul_one, algebra.id.map_eq_id, ring_hom.id_apply],
change ((φ : weak_dual 𝕜 A) : A →L[𝕜] 𝕜) (r • 1) = r,
rw [continuous_linear_map.map_smul, algebra.id.smul_eq_mul, coe_apply, map_one φ, mul_one] },
..to_non_unital_alg_hom φ }
lemma eq_set_map_one_map_mul [nontrivial 𝕜] : character_space 𝕜 A =
{φ : weak_dual 𝕜 A | (φ 1 = 1) ∧ (∀ (x y : A), φ (x * y) = (φ x) * (φ y))} :=
begin
ext x,
refine ⟨λ h, ⟨map_one ⟨x, h⟩, h.2⟩, λ h, ⟨_, h.2⟩⟩,
rintro rfl,
simpa using h.1,
end
lemma is_closed [nontrivial 𝕜] [t2_space 𝕜] [has_continuous_mul 𝕜] :
is_closed (character_space 𝕜 A) :=
begin
rw [eq_set_map_one_map_mul],
refine is_closed.inter (is_closed_eq (eval_continuous _) continuous_const) _,
change is_closed {φ : weak_dual 𝕜 A | ∀ x y : A, φ (x * y) = φ x * φ y},
rw [set.set_of_forall],
refine is_closed_Inter (λ a, _),
rw [set.set_of_forall],
exact is_closed_Inter (λ _, is_closed_eq (eval_continuous _)
((eval_continuous _).mul (eval_continuous _)))
end
end unital
section ring
variables [comm_ring 𝕜] [no_zero_divisors 𝕜] [topological_space 𝕜] [has_continuous_add 𝕜]
[has_continuous_const_smul 𝕜 𝕜] [topological_space A] [ring A] [algebra 𝕜 A]
lemma apply_mem_spectrum [nontrivial 𝕜] (φ : character_space 𝕜 A) (a : A) : φ a ∈ spectrum 𝕜 a :=
(to_alg_hom φ).apply_mem_spectrum a
end ring
end character_space
end weak_dual
|
Formal statement is: lemma norm_ii [simp]: "norm \<i> = 1" Informal statement is: The norm of $\i$ is $1$. |
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.abelian.pseudoelements
import Mathlib.PostPort
universes v u
namespace Mathlib
/-!
# The four lemma
Consider the following commutative diagram with exact rows in an abelian category:
A ---f--> B ---g--> C ---h--> D
| | | |
α β γ δ
| | | |
v v v v
A' --f'-> B' --g'-> C' --h'-> D'
We prove the "mono" version of the four lemma: if α is an epimorphism and β and δ are monomorphisms,
then γ is a monomorphism.
## Future work
The "epi" four lemma and the five lemma, which is then an easy corollary.
## Tags
four lemma, diagram lemma, diagram chase
-/
namespace category_theory.abelian
/-- The four lemma, mono version. For names of objects and morphisms, consider the following
diagram:
```
A ---f--> B ---g--> C ---h--> D
| | | |
α β γ δ
| | | |
v v v v
A' --f'-> B' --g'-> C' --h'-> D'
```
-/
theorem mono_of_epi_of_mono_of_mono {V : Type u} [category V] [abelian V]
    {A : V} {B : V} {C : V} {D : V} {A' : V} {B' : V} {C' : V} {D' : V}
    {f : A ⟶ B} {g : B ⟶ C} {h : C ⟶ D} {f' : A' ⟶ B'} {g' : B' ⟶ C'} {h' : C' ⟶ D'}
    {α : A ⟶ A'} {β : B ⟶ B'} {γ : C ⟶ C'} {δ : D ⟶ D'}
    [exact f g] [exact g h] [exact f' g']
    (comm₁ : α ≫ f' = f ≫ β) (comm₂ : β ≫ g' = g ≫ γ) (comm₃ : γ ≫ h' = h ≫ δ)
    (hα : epi α) (hβ : mono β) (hδ : mono δ) : mono γ := sorry
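/- The proof is left as `sorry` in this port; in mathlib the lemma is established by a
   diagram chase using the pseudoelements imported above
   (`category_theory.abelian.pseudoelements`). -/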
|
If $f \in L_F(g)$ and $t(x, g(x)) \in \Theta_F(h)$, then $t(x, f(x)) \in L_F(h)$. |
## Display a demo, pausing between pages
demo(graphics, package = "graphics", ask = TRUE)
|
function srcPatch = sc_prep_source_patch(img, uvTform, optS)
% SC_PREP_SOURCE_PATCH
%
% Prepare source patches according to uvTform
%
% Input:
% - img: input image
% - uvTform: source patch transformations, one 3x3 homography per target pixel ([numUvPix] x [9])
% - optS: algorithm parameters (including optS.refPatchPos, the within-patch pixel offsets)
% Output:
% - srcPatch: [pNumPix] x [3] x [numUvPix]
numUvPix = size(uvTform, 1);
% Prepare source patch sampling position
% srcPatchPos = zeros(optS.pNumPix, 3, numUvPix);
% Get srcPatchPos
c1 = reshape(uvTform(:,1:3)', 1, 3, numUvPix);
c2 = reshape(uvTform(:,4:6)', 1, 3, numUvPix);
c3 = reshape(uvTform(:,7:9)', 1, 3, numUvPix);
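% c1, c2, c3 hold, for every target pixel, the three entry-triples (columns 1:3, 4:6, 7:9)
% of its 3x3 homography, reshaped to 1 x 3 x numUvPix so that bsxfun can broadcast them
% over the patch offsets in optS.refPatchPos.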
% Get the source patch pixel positions
srcPatchPos = bsxfun(@times, optS.refPatchPos(:,1), c1) + ...
bsxfun(@times, optS.refPatchPos(:,2), c2);
srcPatchPos = bsxfun(@plus, srcPatchPos, c3);
% Convert back to Euclidean coordinates
srcPatchPos = bsxfun(@rdivide, srcPatchPos, srcPatchPos(:, 3, :));
% Grab the color values of source patch using bilinear interpolation
srcPatch = vgg_interp2(img, srcPatchPos(:,1,:), srcPatchPos(:,2,:), 'linear', 0);
% Convert to the format [pNumPix] x [3] x [numUvPix]
srcPatch = permute(srcPatch, [1,3,2]);
% for i = 1 : optS.pNumPix
% dx = optS.refPatchPos(1,i);
% dy = optS.refPatchPos(2,i);
%
% srcPatchPos(i, 1, :) = uvTform(1,:)*dx + uvTform(4,:)*dy + uvTform(7,:);
% srcPatchPos(i, 2, :) = uvTform(2,:)*dx + uvTform(5,:)*dy + uvTform(8,:);
% srcPatchPos(i, 3, :) = uvTform(3,:)*dx + uvTform(6,:)*dy + uvTform(9,:);
% end
% srcPatchPos(:, 1:2,:) = bsxfun(@rdivide, srcPatchPos(:, 1:2,:), srcPatchPos(:, 3,:));
% % Avoid sample out of boundary positions
% % srcPatchPos(:,1,:) = sc_clamp(srcPatchPos(:,1,:), 1, size(img,2));
% % srcPatchPos(:,2,:) = sc_clamp(srcPatchPos(:,2,:), 1, size(img,1));
%
% % Sampling source patch
% srcPatch = mirt2D_mexinterp(img, srcPatchPos(:, 1, :), srcPatchPos(:, 2, :));
% srcPatch = permute(srcPatch, [1,3,2]);
end |
[STATEMENT]
lemma rel_frontier_convex_hull_explicit:
fixes S :: "'a::euclidean_space set"
assumes "\<not> affine_dependent S"
shows "rel_frontier(convex hull S) =
{y. \<exists>u. (\<forall>x \<in> S. 0 \<le> u x) \<and> (\<exists>x \<in> S. u x = 0) \<and> sum u S = 1 \<and> sum (\<lambda>x. u x *\<^sub>R x) S = y}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
have fs: "finite S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite S
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<not> affine_dependent S
goal (1 subgoal):
1. finite S
[PROOF STEP]
by (simp add: aff_independent_finite)
[PROOF STATE]
proof (state)
this:
finite S
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
have "\<And>u y v.
\<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x;
sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk>
\<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>u y v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a)
[PROOF STEP]
apply (rule_tac x = "\<lambda>x. u x - v x" in exI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>u y v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> (\<Sum>x\<in>S. u x - v x) = 0 \<and> (\<exists>va\<in>S. u va - v va \<noteq> 0) \<and> (\<Sum>va\<in>S. (u va - v va) *\<^sub>R va) = (0::'a)
[PROOF STEP]
apply (force simp: sum_subtractf scaleR_diff_left)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<lbrakk>?y \<in> S; ?u ?y = 0; sum ?u S = 1; \<forall>x\<in>S. 0 < ?v x; sum ?v S = 1; (\<Sum>x\<in>S. ?v x *\<^sub>R x) = (\<Sum>x\<in>S. ?u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a)
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?y \<in> S; ?u ?y = 0; sum ?u S = 1; \<forall>x\<in>S. 0 < ?v x; sum ?v S = 1; (\<Sum>x\<in>S. ?v x *\<^sub>R x) = (\<Sum>x\<in>S. ?u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y \<in> S; ?u ?y = 0; sum ?u S = 1; \<forall>x\<in>S. 0 < ?v x; sum ?v S = 1; (\<Sum>x\<in>S. ?v x *\<^sub>R x) = (\<Sum>x\<in>S. ?u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a)
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
using fs assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y \<in> S; ?u ?y = 0; sum ?u S = 1; \<forall>x\<in>S. 0 < ?v x; sum ?v S = 1; (\<Sum>x\<in>S. ?v x *\<^sub>R x) = (\<Sum>x\<in>S. ?u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a)
finite S
\<not> affine_dependent S
goal (1 subgoal):
1. rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
apply (simp add: rel_frontier_def finite_imp_compact rel_interior_convex_hull_explicit)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>y u v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a); finite S; \<not> affine_dependent S\<rbrakk> \<Longrightarrow> convex hull S - {y. \<exists>u. (\<forall>x\<in>S. 0 < u x) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y} = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
[PROOF STEP]
apply (auto simp: convex_hull_finite)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>u. \<lbrakk>\<And>y u v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a); finite S; \<not> affine_dependent S; \<forall>ua. sum ua S = 1 \<longrightarrow> (\<exists>x\<in>S. \<not> 0 < ua x) \<or> (\<Sum>x\<in>S. ua x *\<^sub>R x) \<noteq> (\<Sum>x\<in>S. u x *\<^sub>R x); \<forall>x\<in>S. 0 \<le> u x; sum u S = 1\<rbrakk> \<Longrightarrow> \<exists>ua. (\<forall>x\<in>S. 0 \<le> ua x) \<and> (\<exists>x\<in>S. ua x = 0) \<and> sum ua S = 1 \<and> (\<Sum>x\<in>S. ua x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)
2. \<And>u xa ua. \<lbrakk>\<And>y u v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a); finite S; \<not> affine_dependent S; \<forall>x\<in>S. 0 \<le> u x; xa \<in> S; u xa = 0; sum u S = 1; \<forall>x\<in>S. 0 < ua x; sum ua S = 1; (\<Sum>x\<in>S. ua x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (metis less_eq_real_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>u xa ua. \<lbrakk>\<And>y u v. \<lbrakk>y \<in> S; u y = 0; sum u S = 1; \<forall>x\<in>S. 0 < v x; sum v S = 1; (\<Sum>x\<in>S. v x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> \<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = (0::'a); finite S; \<not> affine_dependent S; \<forall>x\<in>S. 0 \<le> u x; xa \<in> S; u xa = 0; sum u S = 1; \<forall>x\<in>S. 0 < ua x; sum ua S = 1; (\<Sum>x\<in>S. ua x *\<^sub>R x) = (\<Sum>x\<in>S. u x *\<^sub>R x)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by (simp add: affine_dependent_explicit_finite)
[PROOF STATE]
proof (state)
this:
rel_frontier (convex hull S) = {y. \<exists>u. (\<forall>x\<in>S. 0 \<le> u x) \<and> (\<exists>x\<in>S. u x = 0) \<and> sum u S = 1 \<and> (\<Sum>x\<in>S. u x *\<^sub>R x) = y}
goal:
No subgoals!
[PROOF STEP]
qed |
import Mathlib
import ClassicalMechanicsLean.JetSpace_1D
/-!
# Formal Calculus
We introduce formal structures for integration and differentiation. Additional properties should be added to make these mathematically sound; for now, correctness can be ensured by checking that the individual definitions are correct.
## Formal Integrals
-/
/--
Integrability of `f`, i.e., given an interval `[a, b]`, we can compute the integral of `f` over that interval. Additivity over intervals is also required.
-/
class Integrable (f: ℝ → ℝ) where
integral (a b : ℝ) : ℝ
interval_union (a b c : ℝ) :
integral a c = integral a b + integral b c
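/- A hedged sketch of a concrete instance: the constant zero function is integrable, with
   every interval integral equal to `0`; additivity then reduces to `0 = 0 + 0`. -/
instance : Integrable (fun _ : ℝ => (0 : ℝ)) where
  integral _ _ := 0
  interval_union _ _ _ := by simp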
/-- The integral of a function, with the typeclass derived -/
def integral (f: ℝ → ℝ)[int : Integrable f]
(a b : ℝ ) :=
int.integral a b
/-- The integral over a single point is zero, proved as an illustration. -/
theorem integral_point(f: ℝ → ℝ)[int : Integrable f]
(a : ℝ ) : integral f a a = 0 := by
unfold integral
have l := int.interval_union a a a
simp at l
assumption
/-!
As an exercise, prove that flipping the ends of an interval gives the negative of the integral.
-/
theorem integral_flip (f : ℝ → ℝ) [int : Integrable f]
(a b : ℝ ) : integral f a b = - integral f b a := by
unfold integral
have l := int.interval_union a b b
have lem1 : int.integral a b + int.integral b a = 0 := by
trans
· rw [<- int.interval_union a b a]
· apply integral_point f a
have lem2 : int.integral a b = 0 - int.integral b a := by
apply eq_zero_sub_of_add_eq_zero_left lem1
simp at lem2
apply lem2
/-!
## Formal Derivatives
We define so-called __one-jets__ as a value and a derivative at a point. A differentiable function has a one-jet at each point.
-/
/--
A _one-jet_ is a value and a derivative at a point.
-/
structure OneJet where
value : ℝ
derivative : ℝ
/--
A differentiable function is a function that has a one-jet at each point.
-/
structure SmoothFunction where
jet: ℝ → OneJet
/--
Derivative of a smooth function, i.e., the derivative of the one-jet at a point.
-/
def Jet.SmoothFunction.derivative (f: Jet.SmoothFunction 1) : ℝ → ℝ :=
fun x => f.grad (Vector.cons x Vector.nil)
/--
The value of a smooth function, i.e., the value of the one-jet at a point.
-/
def Jet.SmoothFunction.value (f: Jet.SmoothFunction 1) : ℝ → ℝ :=
fun x => f.asFunc (Vector.cons x Vector.nil)
/--
Integrable functions can be obtained from smooth functions via the fundamental theorem of calculus.
-/
instance fundThm (f: Jet.SmoothFunction 1) :
Integrable (f.derivative) where
integral (a b) := f.value b - f.value a
interval_union (a b c) := by
simp [integral]
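/- A hedged usage sketch: with the `fundThm` instance above, integrating the derivative of a
   smooth function recovers the difference of its values. `rfl` is expected to close this by
   definitional unfolding; otherwise unfold `integral` first. -/
example (f : Jet.SmoothFunction 1) (a b : ℝ) :
    integral f.derivative a b = f.value b - f.value a := rfl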
/-!
## Constructions of smooth functions
To use the above we need to construct a few smooth functions
-/
namespace SmoothFunction
/--
Constant functions as smooth functions.
-/
def constant (c : ℝ) : SmoothFunction :=
⟨fun _ ↦ ⟨c, 0⟩⟩
/--
Sum of smooth functions.
-/
def sum (f g : Jet.SmoothFunction 1) : Jet.SmoothFunction 1 :=
⟨fun x => f.value x + g.value x, fun x => Vector.cons (f.derivative x + g.derivative x) Vector.nil⟩
/--
Product of smooth functions using the Leibniz rule.
-/
def prod (f g : Jet.SmoothFunction 1) : Jet.SmoothFunction 1 :=
⟨fun x => f.value x * g.value x, fun x => Vector.cons (f.derivative x * g.value x + f.value x * g.derivative x) Vector.nil⟩
/--
Product of a scalar and a smooth function.
-/
def scalarProd (c : ℝ) (f : Jet.SmoothFunction 1) : Jet.SmoothFunction 1 :=
⟨fun x => c * f.value x, fun x => Vector.cons (c * f.derivative x) Vector.nil⟩
/-- Addition operation on smooth functions -/
instance : Add (Jet.SmoothFunction 1) := ⟨sum⟩
/-- Multiplication operation on smooth functions -/
instance : Mul (Jet.SmoothFunction 1) := ⟨prod⟩
/-- Scalar multiplication for smooth functions -/
instance : SMul ℝ (Jet.SmoothFunction 1) := ⟨scalarProd⟩
/-!
This gives polynomial functions as a special case. As an exercise, prove that smooth functions form a Ring (indeed an Algebra over ℝ).
We will define some polynomials as smooth functions as an example.
-/
/- Can we use extends here -/
theorem Jet.SmoothFunction.add.comm (f g : Jet.SmoothFunction 1) : f + g = g + f := by
have lem1 : (fun x => f.asFunc x + g.asFunc x) = (fun x => g.asFunc x + f.asFunc x) := by
apply add_comm
have lem2 : (fun x => f.grad x + g.grad x) = (fun x => g.grad x + f.grad x) := by
sorry
--apply add_comm
sorry
theorem Jet.smoothfunction.add.assoc (f g h : Jet.SmoothFunction 1) : (f + g) + h = f + (g + h) := by
have lem1 : (fun x => (f.asFunc x + g.asFunc x) + h.asFunc x) = (fun x => f.asFunc x + (g.asFunc x + h.asFunc x)) := by
apply add_assoc
have lem2 : (fun x => (f.grad x + g.grad x) + h.grad x) = (fun x => f.grad x + (g.grad x + h.grad x)) := by
sorry
sorry
theorem Jet.smoothfunction.add.zero (f : Jet.SmoothFunction 1) : f + 0 = f := by
have lem1 : (fun x => f.asFunc x + 0) = (fun x => f.asFunc x) := by
apply add_zero
have lem2 : (fun x => f.grad x + 0) = (fun x => f.grad x) := by
sorry
--simp[Jet.SmoothFunction.add]
sorry
/-- The coordinate function -/
def x : Jet.SmoothFunction 1 := ⟨fun x => x, fun x => ⟨[1], rfl⟩⟩
/-- The power function for a smooth function (automatic if ring is proved) -/
def pow (f: Jet.SmoothFunction 1): ℕ → Jet.SmoothFunction 1
| 0 => Jet.SmoothFunction.const 1 1
| n + 1 => f * (pow f n)
instance : HPow (Jet.SmoothFunction 1) ℕ (Jet.SmoothFunction 1) := ⟨pow⟩
instance : Coe ℝ SmoothFunction := ⟨constant⟩
/-- A polynomial. We can have cleaner notation but the goal is to illustrate the construction -/
def poly_example := (Jet.SmoothFunction.const 1 2) * x+ (Jet.SmoothFunction.const 1 3) * x^3 + (Jet.SmoothFunction.const 1 7)
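/- A hedged further illustration built from the same constructors: the polynomial x² + 3x. -/
def poly_example' := x^2 + (Jet.SmoothFunction.const 1 3) * x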
end SmoothFunction |
'Similarly to the example discussed above, create a function in which the passed
parameter x is raised to the power y and then divided by the parameter z.
The result is printed to the screen. In the case of division by zero, the function
must return an error message in the parameter z.
'
# Function
calc <- function(x, y, z) {
  if ((typeof(x) != "double") ||
      (typeof(y) != "double") || (typeof(z) != "double")) {
    print(
      "Type mismatch warning! You may be passing variables of an incorrect type!"
    )
  }
  # Division is by z, so z == 0 is the error case
  if (z == 0) {
    z <- "Error: division by zero"
  } else {
    z <- (x ^ y) / z
  }
  return(z)
}
# Main program
{
  # Run the function normally
  result <- calc(4, 6, 7)
  print(result)
  # Run the function with division by zero (z = 0)
  result <- calc(4, 6, 0)
print(result)
} |
def and_emoji (a b : Prop) := a ∧ b
def or_emoji (a b : Prop) := a ∨ b
def iff_emoji (a b : Prop) := a ↔ b
def impl_emoji (a b : Prop) := a → b
infix `😂` : 55 := and_emoji
infix `😆` : 45 := iff_emoji
infix `😶` : 55 := or_emoji
infix `😉` : 50 := and.intro
infix `😇` : 40 := iff.intro
notation `🤜` a : 50 := or.inl a
notation a `🤛` : 50 := or.inr a
notation a `👈` : 90 := and.elim_right a
notation `👉` a : 90 := and.elim_left a
notation `👍` a `👌` b `👌` c := or.elim a b c
example : ∀ p q r, p 😂 (q 😶 r) 😆 (p 😂 q) 😶 (p 😂 r) :=
λ p q r,
(λ pqr : p 😂 (q 😶 r),
(👍 (pqr👈)
👌(λ pfq, 🤜 (👉pqr 😉 pfq))
👌(λ pfr, (👉pqr 😉 pfr) 🤛)
)
)
😇
(λ pqpr : (p 😂 q) 😶 (p 😂 r),
(👍 pqpr
👌(λ pq, 👉pq 😉 (🤜 pq👈))
👌(λ pr, 👉pr 😉 (pr👈 🤛))
)
)
example : ∀ P Q, P 😂 Q 😆 Q 😂 P :=
λ P Q,
(λ pq, pq👈 😉 👉pq)
😇
(λ qp, qp👈 😉 👉qp)
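-- Another hedged illustration in the same notation: extracting the right conjunct.
example : ∀ P Q, P 😂 Q → Q :=
λ P Q pq, pq👈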
|
#install.packages("devtools")
### Test case. Let's say we want to find which Ordovician graptolite
### species have taxonomic authority data and which do not. This will
### look at all graptolite records entered after the 19th of November 1998
### (i.e., the entire history of the PaleoDB) and return three files.
### 1. A file of species with authority data plus all of the genus-
### species combinations under which it has occurrences
### 2. A file of species without authority data, including the epithet
### in a separate column (allowing you to sort all "smithi" together)
### and the number of times that species occurs in the PaleoDB.
### 3. A file giving the sources for the species that do not have authority
### data.
#### Note: once you have done this, you can use a later oldest_entry.
taxa <- "Graptolithina"
onset <- "Ordovician"
end <- "Ordovician"
oldest_entry <- "1998-11-19"
# use today's date
latest_entry <- strsplit(as.character(Sys.time())," ")[[1]][1]
# If "TRUE" then the routine automatically outputs relevant files
save_files <- TRUE
# If you are saving the files, then you can specify how to append the files.
# I like to append with ".xls" so that Excel will open it directly.
# If you use ".cvs" then the output is comma-delimited rather than tab delimited
output_type <- ".xls"
data_breakdowns <- paleodb_entered_species_for_curation(taxa,onset,end,oldest_entry,latest_entry,save_files,output_type)
### Test case #2. Let's say we want information about marine rock units from
### the Ordovician. This routine downloads all collections with animals,
### plants or protists from the PaleoDB, and then separates out those with
### marine environments. It then returns for each formation-member combination
### 1. A file giving all of the intervals to which that rock unit is
### assigned;
### 2. A file giving all of the zones to which that rock unit is assigned.
### 3. A file of rock units that are considered both formations and
### members by different workers, with the sources providing the
### different rank opinions.
end <- onset <- "Ordovician"
system <- "Marine"
delete_rock_type <- TRUE # removes rock types from names for comparisons
save_files <- TRUE # saves three files for stages, zones & inconsistent formations & members
output_type <- ".xls" #
strata_summaries <- paleodb_rock_units_for_curation(onset,end,taxa=c("Animalia","Protista","Plantae"),system,delete_rock_type,save_files,output_type)
### Test case #3. The same as #2, but this time for terrestrial squamates in the Cenozoic
end <- onset <- "Cenozoic"
system <- "Terrestrial"
taxa <- "Squamata"
delete_rock_type <- TRUE # removes rock types from names for comparisons
save_files <- TRUE # saves three files for stages, zones & inconsistent formations & members
output_type <- ".xls" #
strata_summaries <- paleodb_rock_units_for_curation(onset,end,taxa,system,delete_rock_type,save_files,output_type)
|