theory T75
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end
using DINA
using Test
import CSV

@testset "DINA.jl" begin
    # Load the 1980 DINA table and check the available years
    tbl = get_dina(1980)
    @test tbl isa DINA.StatFiles.StatFile
    @test sort(dina_years()) == [1962; 1964; 1966:2019]

    # Build a decile panel for a selection of income and housing variables
    var = [:fiinc, :fninc, :ownermort, :ownerhome, :rentalmort, :rentalhome]
    df = dina_quantile_panel(var, :fiinc, 10)
    @test df isa DINA.DataFrames.DataFrame
    @test size(df) == (1530, 11)

    # Write the aggregated panel next to this test file
    filename = joinpath(@__DIR__, "dina-aggregated.csv")
    df |> CSV.write(filename)
    @show filename
end
[STATEMENT]
lemma ntcf_cf_comp_ntcf_cf_comp_assoc:
assumes "\<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>"
and "\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
and "\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>"
shows "(\<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<GG>) \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<FF> = \<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F (\<GG> \<circ>\<^sub>C\<^sub>F \<FF>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
interpret \<NN>: is_ntcf \<alpha> \<CC> \<DD> \<HH> \<HH>' \<NN>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
[PROOF STEP]
by (rule assms(1))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
interpret \<GG>: is_functor \<alpha> \<BB> \<CC> \<GG>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[PROOF STEP]
by (rule assms(2))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
interpret \<FF>: is_functor \<alpha> \<AA> \<BB> \<FF>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
[PROOF STEP]
by (rule assms(3))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
[PROOF STEP]
proof(rule ntcf_ntsmcf_eqI)
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : ?\<FF> \<mapsto>\<^sub>C\<^sub>F ?\<GG> : ?\<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>?\<alpha>\<^esub> ?\<BB>
2. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : ?\<FF>' \<mapsto>\<^sub>C\<^sub>F ?\<GG>' : ?\<AA>' \<mapsto>\<mapsto>\<^sub>C\<^bsub>?\<alpha>\<^esub> ?\<BB>'
3. ?\<FF> = ?\<FF>'
4. ?\<GG> = ?\<GG>'
5. ?\<AA> = ?\<AA>'
6. ?\<BB> = ?\<BB>'
7. ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>))
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
\<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
[PROOF STEP]
show
"(\<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<GG>) \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<FF> :
\<HH> \<circ>\<^sub>C\<^sub>F \<GG> \<circ>\<^sub>C\<^sub>F \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>C\<^sub>F \<GG> \<circ>\<^sub>C\<^sub>F \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>"
[PROOF STATE]
proof (prove)
using this:
\<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
[PROOF STEP]
by (cs_concl cs_shallow cs_intro: cat_cs_intros)
[PROOF STATE]
proof (state)
this:
\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
goal (6 subgoals):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : ?\<FF>' \<mapsto>\<^sub>C\<^sub>F ?\<GG>' : ?\<AA>' \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> ?\<BB>'
2. \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = ?\<FF>'
3. \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = ?\<GG>'
4. \<AA> = ?\<AA>'
5. \<DD> = ?\<BB>'
6. ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>))
[PROOF STEP]
show "\<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F (\<GG> \<circ>\<^sub>C\<^sub>F \<FF>) :
\<HH> \<circ>\<^sub>C\<^sub>F \<GG> \<circ>\<^sub>C\<^sub>F \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>C\<^sub>F \<GG> \<circ>\<^sub>C\<^sub>F \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
[PROOF STEP]
by (cs_concl cs_simp: cat_cs_simps cs_intro: cat_cs_intros)
[PROOF STATE]
proof (state)
this:
\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) : \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> \<mapsto>\<^sub>C\<^sub>F \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
goal (5 subgoals):
1. \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>
2. \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>
3. \<AA> = \<AA>
4. \<DD> = \<DD>
5. ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>))
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
\<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
[PROOF STEP]
show
"ntcf_ntsmcf ((\<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<GG>) \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F \<FF>) =
ntcf_ntsmcf (\<NN> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F (\<GG> \<circ>\<^sub>C\<^sub>F \<FF>))"
[PROOF STATE]
proof (prove)
using this:
\<NN> : \<HH> \<mapsto>\<^sub>C\<^sub>F \<HH>' : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<DD>
\<GG> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>
goal (1 subgoal):
1. ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>))
[PROOF STEP]
by
(
cs_concl
cs_simp: slicing_commute[symmetric]
cs_intro: slicing_intros ntsmcf_smcf_comp_ntsmcf_smcf_comp_assoc
)
[PROOF STATE]
proof (state)
this:
ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>) = ntcf_ntsmcf (\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>))
goal (4 subgoals):
1. \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<HH> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>
2. \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<HH>' \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>
3. \<AA> = \<AA>
4. \<DD> = \<DD>
[PROOF STEP]
qed simp_all
[PROOF STATE]
proof (state)
this:
\<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<GG> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF> = \<NN> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M (\<GG> \<circ>\<^sub>D\<^sub>G\<^sub>H\<^sub>M \<FF>)
goal:
No subgoals!
[PROOF STEP]
qed
State Before:
α : Type u_1
inst✝¹ : DistribLattice α
inst✝ : BoundedOrder α
a b c : α
hab : Disjoint a b
hbc : Codisjoint b c
⊢ a ≤ c
State After:
α : Type u_1
inst✝¹ : DistribLattice α
inst✝ : BoundedOrder α
a b c : α
hab : Disjoint a b
hbc : Codisjoint b c
⊢ a ⊓ (b ⊔ c) ≤ (a ⊔ c) ⊓ (b ⊔ c)
Tactic: rw [← @inf_top_eq _ _ _ a, ← @bot_sup_eq _ _ _ c, ← hab.eq_bot, ← hbc.eq_top, sup_inf_right]

State Before:
α : Type u_1
inst✝¹ : DistribLattice α
inst✝ : BoundedOrder α
a b c : α
hab : Disjoint a b
hbc : Codisjoint b c
⊢ a ⊓ (b ⊔ c) ≤ (a ⊔ c) ⊓ (b ⊔ c)
State After: no goals
Tactic: exact inf_le_inf_right _ le_sup_left
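The two tactic blocks above come from one short proof; the following is a hedged reconstruction of the full lemma, with the statement and tactics copied verbatim from the transcript (the import line and the anonymous `example` form are assumptions, not part of the source):

```lean
import Mathlib.Order.Disjoint

-- Hedged reconstruction: statement and tactics taken from the transcript above;
-- the import and the anonymous `example` form are assumptions.
example {α : Type _} [DistribLattice α] [BoundedOrder α] {a b c : α}
    (hab : Disjoint a b) (hbc : Codisjoint b c) : a ≤ c := by
  rw [← @inf_top_eq _ _ _ a, ← @bot_sup_eq _ _ _ c, ← hab.eq_bot, ← hbc.eq_top, sup_inf_right]
  exact inf_le_inf_right _ le_sup_left
```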
import data.set.lattice
import data.nat.parity
import tactic
section
variable {α : Type*}
variables (s t u : set α)
open set
example (h : s ⊆ t) : s ∩ u ⊆ t ∩ u :=
begin
rw [subset_def, inter_def, inter_def],
rw subset_def at h,
dsimp,
rintros x ⟨xs, xu⟩,
from ⟨h x xs, xu⟩,
end
example (h : s ⊆ t) : s ∩ u ⊆ t ∩ u :=
begin
simp only [subset_def, mem_inter_eq] at *,
rintros x ⟨xs, xu⟩,
from ⟨h x xs, xu⟩,
end
example (h : s ⊆ t) : s ∩ u ⊆ t ∩ u :=
begin
rintros x ⟨xs, xu⟩,
from ⟨h xs, xu⟩,
end
example : s ∩ (t ∪ u) ⊆ (s ∩ t) ∪ (s ∩ u) :=
begin
rintros x ⟨hs, htu⟩,
cases htu with ht hu,
{ left,
from ⟨hs, ht⟩,},
{ right,
from ⟨hs, hu⟩,}
end
example : (s ∩ t) ∪ (s ∩ u) ⊆ s ∩ (t ∪ u):=
begin
rintros x (⟨hs, ht⟩ | ⟨hs, hu⟩),
{ from ⟨hs, or.inl ht⟩,},
{ from ⟨hs, or.inr hu⟩,},
end
example : s \ t \ u ⊆ s \ (t ∪ u) :=
begin
rintros x ⟨⟨hs, hnt⟩, hnu⟩,
split,
{ from hs},
{ dsimp,
rintro (ht | hu),
{ from hnt ht,},
{ from hnu hu},},
end
example : s \ t \ u ⊆ s \ (t ∪ u) :=
begin
rintros x ⟨⟨xs, xnt⟩, xnu⟩,
use xs,
rintros (xt | xu); contradiction,
end
example : s \ (t ∪ u) ⊆ s \ t \ u :=
begin
rintros x ⟨xs, xsu⟩,
dsimp at xsu,
use xs,
intro xt,
from xsu (or.inl xt),
intro xu,
from xsu (or.inr xu),
end
example : s ∩ t = t ∩ s :=
begin
ext x,
simp only [mem_inter_eq],
split,
{ rintros ⟨xs, xt⟩, exact ⟨xt, xs⟩,},
{ rintros ⟨xt, xs⟩, exact ⟨xs, xt⟩},
end
example : s ∩ t = t ∩ s :=
subset.antisymm (λ x ⟨xs, xt⟩, ⟨xt, xs⟩) (λ x ⟨xt, xs⟩, ⟨xs, xt⟩)
example : s ∩ (s ∪ t) = s :=
begin
ext x,
dsimp,
split,
{ intro xsst,
from xsst.1,},
{ intro xs,
from ⟨xs, or.inl xs⟩,},
end
example : s ∪ (s ∩ t) = s :=
begin
ext x,
dsimp,
split,
{ rintro (xs | ⟨xs, xt⟩);
from xs,},
{ intro xs,
left,
from xs,},
end
example : (s \ t) ∪ t = s ∪ t :=
begin
ext x,
split,
{ rintro (⟨xs, xnt⟩ | xt),
{ from or.inl xs,},
{ from or.inr xt,},},
{ by_cases h : x ∈ t,
{ intro _,
right,
from h,},
{ rintro (xs | xt),
{ left,
from ⟨xs, h⟩,},
{ contradiction,}}},
end
example : (s \ t) ∪ (t \ s) = (s ∪ t) \ (s ∩ t) :=
begin
ext x, split,
{ rintro (⟨xs, xnt⟩ | ⟨xt, xns⟩),
{ split,
{ left,
from xs,},
{ rintro ⟨_, xt⟩,
contradiction,},},
{ split,
{ right,
from xt,},
{ rintro ⟨xs, xt⟩,
contradiction,}}},
{ rintro ⟨xs | xt, xnst⟩,
{ dsimp at xnst,
push_neg at xnst,
left,
from ⟨xs, xnst xs⟩,},
{ dsimp at xnst,
push_neg at xnst,
right,
split,
from xt,
intro xs,
from (xnst xs) xt,}},
end
def evens : set ℕ := {n | even n}
def odds : set ℕ := {n | ¬ even n}
example : evens ∪ odds = univ :=
begin
rw [evens, odds],
ext n,
simp,
apply classical.em,
end
example : { n | nat.prime n } ∩ { n | n > 2} ⊆ { n | ¬ even n } :=
begin
intro n,
dsimp,
rintros ⟨pp, ngt⟩,
apply nat.odd_iff_not_even.mp,
cases nat.prime.eq_two_or_odd' pp with neq2 odn,
linarith,
from odn,
end
example (n : ℕ) (h : prime n) : nat.prime n :=
by rwa nat.prime_iff
end
section
variables (s t : set ℕ)
example (h₀ : ∀ x ∈ s, ¬ even x) (h₁ : ∀ x ∈ s, prime x) :
∀ x ∈ s, ¬ even x ∧ prime x :=
begin
intros x xs,
split,
{ apply h₀ x xs },
apply h₁ x xs
end
example (h : ∃ x ∈ s, ¬ even x ∧ prime x) :
∃ x ∈ s, prime x :=
begin
rcases h with ⟨x, xs, _, prime_x⟩,
use [x, xs, prime_x]
end
section
variable (ssubt : s ⊆ t)
include ssubt
example (h₀ : ∀ x ∈ t, ¬ even x) (h₁ : ∀ x ∈ t, prime x) :
∀ x ∈ s, ¬ even x ∧ prime x :=
begin
intros x xs,
have : x ∈ t := ssubt xs,
split,
from h₀ x this,
from h₁ x this,
end
example (h : ∃ x ∈ s, ¬ even x ∧ prime x) :
∃ x ∈ t, prime x :=
begin
rcases h with ⟨x, xs, ne, px⟩,
use [x, ssubt xs, px],
end
end
end
section
variables {α I : Type*}
variables A B : I → set α
variable s : set α
open set
example : s ∩ (⋃ i, A i) = ⋃ i, (A i ∩ s) :=
begin
ext x,
simp only [mem_inter_eq, mem_Union],
split,
{ rintro ⟨xs, ⟨i, xAi⟩⟩,
use [i, xAi, xs],},
{ rintro ⟨i, ⟨xAi, xs⟩⟩,
use [xs, i, xAi],},
end
example : (⋂ i, A i ∩ B i) = (⋂ i, A i) ∩ (⋂ i, B i) :=
begin
ext x,
simp only [mem_inter_eq, mem_Inter],
split,
{ intro h,
split,
{ intro i,
exact (h i).1 },
intro i,
exact (h i).2 },
rintros ⟨h1, h2⟩ i,
split,
{ exact h1 i },
exact h2 i
end
open_locale classical
example : s ∪ (⋂ i, A i) = ⋂ i, (A i ∪ s) :=
begin
ext x,
simp only [mem_union_eq, mem_Inter],
split,
{ rintros (xs | xAi) i,
right, from xs,
left, from xAi i,},
{ rintros xsAi,
by_cases h : x ∈ s,
{ left, from h,},
{ right, intro i,
cases xsAi i with xAi xs,
{ from xAi,},
{ contradiction,},}},
end
def primes : set ℕ := {x | nat.prime x}
example : (⋃ p ∈ primes, {x | p^2 ∣ x}) = {x | ∃ p ∈ primes, p^2 ∣ x} :=
begin
ext x,
rw mem_Union₂,
refl,
end
example : (⋃ p ∈ primes, {x | x ≤ p}) = univ :=
begin
apply eq_univ_of_forall,
intros x,
rw mem_Union₂,
dsimp,
rcases nat.exists_infinite_primes x with ⟨p, pge, pp⟩,
use [p, pp, pge],
end
end |
{-# OPTIONS --without-K --safe #-}
open import Categories.Category
module Categories.Category.Cocomplete.Finitely {o ℓ e} (C : Category o ℓ e) where
open import Level
open import Categories.Category.Cocartesian C
open import Categories.Diagram.Coequalizer C
open import Categories.Diagram.Pushout C
open import Categories.Diagram.Pushout.Properties C
open Category C
record FinitelyCocomplete : Set (levelOfTerm C) where
field
cocartesian : Cocartesian
coequalizer : ∀ {A B} (f g : A ⇒ B) → Coequalizer f g
module cocartesian = Cocartesian cocartesian
module coequalizer {A B} (f g : A ⇒ B) = Coequalizer (coequalizer f g)
open cocartesian public
pushout : ∀ {X Y Z} (f : X ⇒ Y) (g : X ⇒ Z) → Pushout f g
pushout f g = Coproduct×Coequalizer⇒Pushout coproduct (coequalizer _ _)
module pushout {X Y Z} (f : X ⇒ Y) (g : X ⇒ Z) = Pushout (pushout f g)
/-
Copyright (c) 2021 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen
! This file was ported from Lean 3 source module data.int.absolute_value
! leanprover-community/mathlib commit 9aba7801eeecebb61f58a5763c2b6dd1b47dc6ef
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.Module.Basic
import Mathlib.Algebra.Order.AbsoluteValue
import Mathlib.Data.Int.Cast.Lemmas
import Mathlib.Data.Int.Units
import Mathlib.GroupTheory.GroupAction.Units
/-!
# Absolute values and the integers
This file contains some results on absolute values applied to integers.
## Main results
* `AbsoluteValue.map_units_int`: an absolute value sends all units of `ℤ` to `1`
* `Int.natAbsHom`: `Int.natAbs` bundled as a `MonoidWithZeroHom`
-/
variable {R S : Type _} [Ring R] [LinearOrderedCommRing S]
@[simp]
theorem AbsoluteValue.map_units_int (abv : AbsoluteValue ℤ S) (x : ℤˣ) : abv x = 1 := by
rcases Int.units_eq_one_or x with (rfl | rfl) <;> simp
#align absolute_value.map_units_int AbsoluteValue.map_units_int
@[simp]
theorem AbsoluteValue.map_units_int_cast [Nontrivial R] (abv : AbsoluteValue R S) (x : ℤˣ) :
abv ((x : ℤ) : R) = 1 := by rcases Int.units_eq_one_or x with (rfl | rfl) <;> simp
#align absolute_value.map_units_int_cast AbsoluteValue.map_units_int_cast
@[simp]
theorem AbsoluteValue.map_units_int_smul (abv : AbsoluteValue R S) (x : ℤˣ) (y : R) :
abv (x • y) = abv y := by rcases Int.units_eq_one_or x with (rfl | rfl) <;> simp
#align absolute_value.map_units_int_smul AbsoluteValue.map_units_int_smul
/-- `Int.natAbs` as a bundled monoid with zero hom. -/
@[simps]
def Int.natAbsHom : ℤ →*₀ ℕ where
toFun := Int.natAbs
map_mul' := Int.natAbs_mul
map_one' := Int.natAbs_one
map_zero' := Int.natAbs_zero
#align int.nat_abs_hom Int.natAbsHom
#align int.nat_abs_hom_apply Int.natAbsHom_apply
# Background Subtraction Assignment
In this second assignment of the sequence-analysis block we will work with three background-subtraction techniques.
First, you must implement your own background-removal method based on the exponential-filter idea explained in class. You may adapt the general idea of the method however you find necessary to improve its performance.
Second, you must use the MOG and MOG2 implementations found in the
[OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html) library.
Third, you must compare your method against the two Mixture-of-Gaussians-based ones, using the dataset of the 2012 [changedetection.net](http://changedetection.net/) competition.
Specifically, you must use one sequence from each category, except *Thermal*.
The metrics to use for each sequence are:
* **TP**: True Positives
* **FP**: False Positives
* **FN**: False Negatives
* **TN**: True Negatives
* **Re (Recall)**: TP / (TP + FN)
* **Precision**: TP / (TP + FP)
* **F-Measure**: (2 * Precision * Recall) / (Precision + Recall)
Finally, you are asked to analyse the results obtained for each sequence: problems, shortcomings, strengths of the algorithms, and so on.
**Deliverables:**
* Explanation of the implemented algorithm and its source code.
* Results of the sequences as images or video.
* Results of the sequences as a summary table of the metrics.
* Analysis of the results.
Sequences to use:
- baseline: all
- cameraJitter: badminton, traffic
- dynamicBackground: canoe, fall, fountain02
- intermittentObjectMotion: sofa
- shadow: busStation, copyMachine, cubicle, peopleInShade
### Metrics
```python
import numpy as np  # needed below; the notebook also imports it in the next cell

def recall(tp, fn):
    return tp / (tp + fn)

def precision(tp, fp):
    return tp / (tp + fp)

def f_measure(precision, recall):
    return (2 * precision * recall) / (precision + recall)

def all_metrics(resultPath, groundtruthPath, start_frame, end_frame):
    # Load the output of the requested filter and the corresponding ground truth
    result = loadImages(resultPath)
    groundTruth = loadImages(groundtruthPath)
    # The ground truth starts at start_frame; frames are numbered 1 to n,
    # but the frame array is indexed 0 to n-1
    total_frames = end_frame - start_frame
    # One array per metric, holding the result for each frame
    tp = np.zeros(total_frames)
    fp = np.zeros(total_frames)
    fn = np.zeros(total_frames)
    tn = np.zeros(total_frames)
    # Compute True Positive, False Positive, False Negative, True Negative per frame
    for i in range(total_frames):
        tp[i], fp[i], fn[i], tn[i] = comparator(result[i + start_frame], groundTruth[i + start_frame])
    # Recall, Precision and F-measure per frame
    recall_frame = recall(tp=tp, fn=fn)
    precision_frame = precision(tp=tp, fp=fp)
    fMeasure_frame = f_measure(precision=precision_frame, recall=recall_frame)
    # The same metrics over the whole sequence
    tp_secuencia = tp.sum()
    fp_secuencia = fp.sum()
    fn_secuencia = fn.sum()
    tn_secuencia = tn.sum()
    recall_secuencia = recall(tp=tp_secuencia, fn=fn_secuencia)
    precision_secuencia = precision(tp=tp_secuencia, fp=fp_secuencia)
    fMeasure_secuencia = f_measure(precision=precision_secuencia, recall=recall_secuencia)
    cm_frames = np.array([tp, fp, fn, tn])
    cm_secuencia = np.array([tp_secuencia, fp_secuencia, fn_secuencia, tn_secuencia])
    r_p_fm_frames = np.array([recall_frame, precision_frame, fMeasure_frame])
    r_p_fm_secuencia = np.array([recall_secuencia, precision_secuencia, fMeasure_secuencia])
    return cm_frames, cm_secuencia, r_p_fm_frames, r_p_fm_secuencia
```
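For orientation, here is a hypothetical call (the paths and frame range follow the highway sequence used later in the notebook) together with the shapes of the four return values:

```python
# Hypothetical usage; paths and frame range match the highway sequence below
cm_frames, cm_seq, r_p_fm_frames, r_p_fm_seq = all_metrics(
    'DATA/baseline/results/highway/expFilter/*.png',
    'DATA/baseline/baseline/highway/groundtruth/*.png',
    start_frame=469, end_frame=1699)
# cm_frames     : shape (4, total_frames), per-frame TP, FP, FN, TN
# cm_seq        : the same four counts summed over the whole sequence
# r_p_fm_frames : shape (3, total_frames), per-frame recall, precision, F-measure
# r_p_fm_seq    : the three scores computed from the sequence totals
```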
### Exponential filter for _background subtraction_
It starts from the most basic background-removal idea: subtract the **_background_** frame from the current frame to obtain the **_foreground_**.
The first problem, or task, in this algorithm is obtaining that **_background_** frame. We need an image showing the background with no other objects in it, but that is not always available, so other solutions are needed. One of them is to estimate the background as the mean or the median of the first _n_ frames.
The second problem is that in practical settings the background changes over time, so the background frame we estimated may no longer be valid later on. The proposed improvement is therefore to estimate the background recursively from the previous frame with the following formula:
$$
B_t = (1 - \alpha)\,B_{t-1} + \alpha I_t
$$
where $B_t$ is the background estimate for the current frame, $B_{t-1}$ is the background of the previous frame, $I_t$ is the current frame, and $\alpha$ is the learning rate, which controls how much of the current frame is incorporated into the background used for the next frame.
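The actual `exponentialFilter` used below lives in _utils.py_ and is not reproduced in the notebook. The following is a minimal sketch of the idea under stated assumptions: grayscale frames, a background seeded with the median of the first `n_init` frames, and a fixed `threshold` for the foreground decision (both parameter names are illustrative, not the real signature):

```python
import numpy as np

def exponential_filter_sketch(frames, alpha=0.3, n_init=20, threshold=30):
    """Minimal sketch of exponential-filter background subtraction.

    frames: list of grayscale uint8 frames.
    Returns one binary mask (0/255) per frame after the initialisation window.
    """
    # Seed the background with the median of the first n_init frames
    background = np.median(np.stack(frames[:n_init]).astype(np.float32), axis=0)
    masks = []
    for frame in frames[n_init:]:
        f = frame.astype(np.float32)
        # Pixels far from the current background estimate are foreground
        masks.append((np.abs(f - background) > threshold).astype(np.uint8) * 255)
        # Recursive update: B_t = (1 - alpha) * B_{t-1} + alpha * I_t
        background = (1 - alpha) * background + alpha * f
    return masks
```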
```python
from utils import *
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
```
To keep the _notebook_ clean, the functions used for the analysis are defined in the file _utils.py_.
The functions it provides are the following:
- **comparator**: compares a _frame_ produced by a _background subtraction_ model against the expected result (_groundtruth_); a minimal sketch is given below
- **loadImages**: returns a list with the images found in the given folder
- **exponentialFilter**: implementation of the exponential-filter-based algorithm
- **MOG**: wrapper around the MOG implementation available [here](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html)
- **MOG2**: wrapper around the MOG2 implementation available [here](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html)
- **im2vid**: builds a video from the images located in a folder
- **showVideo**: shows the video in a floating window, to inspect the model output more easily
Note that, although these algorithms work on video, the dataset provided for the experiments consists of images representing the frames of a sequence. The functions were therefore written to work with images rather than with video.
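The real `comparator` is defined in _utils.py_ and not shown here; the sketch below is one plausible per-frame implementation, assuming binary prediction masks (0/255) and the changedetection.net label convention for the groundtruth (0 static, 50 hard shadow, 170 unknown motion, 255 motion), where shadow pixels count as background and unknown pixels are left out of the evaluation:

```python
import numpy as np

def comparator_sketch(result, groundtruth):
    """Plausible per-frame confusion-matrix computation (hypothetical helper).

    result      : binary foreground mask (0 background, 255 foreground).
    groundtruth : CDnet label image; 0/50 count as background,
                  255 as foreground, 170 is excluded from the evaluation.
    """
    pred_fg = result == 255
    gt_fg = groundtruth == 255
    gt_bg = (groundtruth == 0) | (groundtruth == 50)
    tp = int(np.sum(pred_fg & gt_fg))
    fp = int(np.sum(pred_fg & gt_bg))
    fn = int(np.sum(~pred_fg & gt_fg))
    tn = int(np.sum(~pred_fg & gt_bg))
    return tp, fp, fn, tn
```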
Steps to follow for each sequence:
1. Load the images into a variable (path of the images)
2. Apply the desired filter, EF, MOG or MOG2 (path where the filtered images will be saved)
3. Compute the metrics with comparator (loop over the result images -> path of the groundtruth)
4. Show the results and the videos
5. Compare the results (plot the metrics across the frames)
```python
# dynamicBackground canoe dataset output paths
expFilter_canoe_path = 'DATA/dynamicBackground/results/canoe/expFilter/'
MOG_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG/'
MOG2_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG2'
# intermittentObject sofa dataset output paths
expFilter_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/expFilter/'
MOG_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG'
MOG2_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG2'
# shadow copyMachine dataset output paths
expFilter_copyMachine_path = 'DATA/shadow/results/copyMachine/expFilter/'
MOG_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG'
MOG2_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG2'
```
# 1. Sequence _highway_ from the _baseline_ dataset
We load the input images and the ground truth against which the metrics will be computed.
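`loadImages` also comes from _utils.py_; a minimal sketch, assuming it globs the given pattern and reads each file with OpenCV while showing the `Loading images` progress bar seen in the outputs below:

```python
import glob
import cv2
from tqdm import tqdm

def load_images_sketch(pattern):
    """Read every image matching the glob pattern, in sorted order."""
    return [cv2.imread(p) for p in tqdm(sorted(glob.glob(pattern)), desc='Loading images')]
```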
```python
# baseline highway dataset groundtruth and input paths
gt_highway_path = 'DATA/baseline/baseline/highway/groundtruth/*.png'
in_highway_path = 'DATA/baseline/baseline/highway/input/*.jpg'
groundTruth = loadImages(gt_highway_path)
frames = loadImages(in_highway_path)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2667.40it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:01<00:00, 1208.82it/s]
## 1.1. Exponential-filter algorithm
We apply the exponential-filter-based algorithm that we implemented.
```python
# baseline highway dataset output path
expFilter_highway_path = 'DATA/baseline/results/highway/expFilter/'
exponentialFilter(frames, 0.3, expFilter_highway_path, 20, bgFilter ='MEDIAN')
```
Algorithm application: 100%|█████████████████████████████████████████████████████| 1680/1680 [00:01<00:00, 1032.18it/s]
We convert the image sequence into a video to inspect the result more easily.
```python
#im2vid(expFilter_highway_path+'*.png','testvid.mp4')
```
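`im2vid` is only called here (commented out); a minimal sketch of such a helper with `cv2.VideoWriter`, assuming mp4 output and a fixed frame rate (the `fps` default is an assumption):

```python
import glob
import cv2

def im2vid_sketch(pattern, out_path, fps=30):
    """Hypothetical stand-in for utils.im2vid: pack matching images into a video."""
    paths = sorted(glob.glob(pattern))
    h, w = cv2.imread(paths[0]).shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for p in paths:
        writer.write(cv2.imread(p))
    writer.release()
```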
We show the video in a floating window.
```python
#showVideo('testvid.mp4')
```
### 1.1.1. Metric computation
We compute the metrics against the provided _ground truth_. Each sequence comes with a ROI (Region Of Interest) over which the groundtruth is defined; to simplify the computation, we selected sequences whose ROI is the whole image. The dataset also specifies the first and last _frame_ between which the algorithm has to be verified, so the metrics are only computed inside that range.
```python
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 262.06it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2594.43it/s]
## 1.2. MOG algorithm
We apply the MOG (Mixture Of Gaussians) algorithm.
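The notebook's `MOG` helper wraps OpenCV's implementation; a sketch assuming the `opencv-contrib` build (which provides `cv2.bgsegm`) and numbered PNG output (the output naming and progress-bar text are assumptions):

```python
import os
import cv2
from tqdm import tqdm

def mog_sketch(frames, out_dir):
    """Apply OpenCV's MOG background subtractor and save one mask per frame."""
    os.makedirs(out_dir, exist_ok=True)
    subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    for i, frame in enumerate(tqdm(frames, desc='Algorithm application')):
        mask = subtractor.apply(frame)
        cv2.imwrite(os.path.join(out_dir, f'{i + 1:06d}.png'), mask)
```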
```python
# baseline highway dataset output path
MOG_highway_path = 'DATA/baseline/results/highway/MOG/'
MOG(frames,MOG_highway_path)
```
Algorithm application2: 100%|█████████████████████████████████████████████████████| 1700/1700 [00:03<00:00, 496.46it/s]
```python
#im2vid(MOG_highway_path+'*.png','testvid.mp4')
```
```python
#showVideo('testvid.mp4')
```
### 1.2.1. Metric computation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 275.06it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2675.90it/s]
## 1.3. MOG2 algorithm
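MOG2 ships with the main OpenCV build; a sketch analogous to the MOG one. The `detectShadows` flag is worth noting: when enabled, MOG2 marks shadow pixels as grey (127) instead of white in its output mask (the helper name and saving scheme are again assumptions):

```python
import os
import cv2
from tqdm import tqdm

def mog2_sketch(frames, out_dir, detect_shadows=True):
    """Apply OpenCV's MOG2 subtractor; shadows come out as 127 when detected."""
    os.makedirs(out_dir, exist_ok=True)
    subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=detect_shadows)
    for i, frame in enumerate(tqdm(frames, desc='Algorithm application')):
        mask = subtractor.apply(frame)
        cv2.imwrite(os.path.join(out_dir, f'{i + 1:06d}.png'), mask)
```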
```python
# baseline highway dataset output path
MOG2_highway_path = 'DATA/baseline/results/highway/MOG2/'
MOG2(frames,MOG2_highway_path)
```
Algorithm application: 100%|██████████████████████████████████████████████████████| 1700/1700 [00:03<00:00, 448.95it/s]
### 1.3.1. Metric computation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 276.62it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2637.81it/s]
## Result comparison
```python
cm_frames_gt,\
cm_secuencia_gt,\
r_p_fm_frames_gt,\
r_p_fm_secuencia_gt = all_metrics (gt_highway_path, gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2642.47it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2634.41it/s]
```python
fig, ax = plt.subplots(3,3, sharex=True, sharey=True)
ax[0,0].plot(r_p_fm_frames_EF[0])
ax[0,0].set_title("recall-EF")
ax[1,0].plot(r_p_fm_frames_EF[1])
ax[1,0].set_title("precision-EF")
ax[2,0].plot(r_p_fm_frames_EF[2])
ax[2,0].set_title("f-measure-EF")
ax[0,1].plot(r_p_fm_frames_MOG[0])
ax[0,1].set_title("recall-MOG")
ax[1,1].plot(r_p_fm_frames_MOG[1])
ax[1,1].set_title("precision-MOG")
ax[2,1].plot(r_p_fm_frames_MOG[2])
ax[2,1].set_title("f-measure-MOG")
ax[0,2].plot(r_p_fm_frames_MOG2[0])
ax[0,2].set_title("recall-MOG2")
ax[1,2].plot(r_p_fm_frames_MOG2[1])
ax[1,2].set_title("precision-MOG2")
ax[2,2].plot(r_p_fm_frames_MOG2[2])
ax[2,2].set_title("f-measure-MOG2")
plt.ylim([0,1])
fig.tight_layout()
plt.show()
```
```python
import pandas as pd
```
```python
r_p_fm_secuencia_EF
```
array([0.60293747, 0.5 , 0.54666514])
```python
r_p_fm_secuencia_MOG
```
array([0.50234751, 0.5 , 0.501171 ])
```python
r_p_fm_secuencia_MOG2
```
array([0.92913693, 0.5 , 0.65013849])
```python
cm_secuencia_gt
```
array([ 5448641., 0., 0., 86228220.])
```python
cm_secuencia_MOG-cm_secuencia_gt
```
array([-3168986., 68877., 2258349., -14606.])
```python
np.concatenate((r_p_fm_secuencia_EF,cm_secuencia_EF))
```
array([6.02937474e-01, 5.00000000e-01, 5.46665145e-01, 2.95299300e+06,
3.57410400e+06, 1.94468400e+06, 6.80320250e+07])
```python
cm_secuencia_EF.sum()
```
94464000.0
```python
cm_secuencia_MOG.sum()
```
94464000.0
```python
cm_secuencia_MOG2.sum()
```
94464000.0
```python
cm_secuencia_gt.sum()
```
91676861.0
```python
gt_result = loadImages(gt_highway_path)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2606.23it/s]
```python
(gt_result[0].shape[0])
```
240
```python
pixels = 0
for i in range(469, len(gt_result)):
    gt_result[i] = cv2.cvtColor(gt_result[i], cv2.COLOR_BGR2GRAY)
    # changedetection.net groundtruth grey levels: 0 static, 50 hard shadow,
    # 170 unknown motion, 255 motion
    print(np.unique(gt_result[i]))
```
[ 0 170 255]
[ 0 170 255]
[ 0 170 255]
...
[ 0 50 170 255]
[ 0 50 170 255]
[ 0 50 170 255]

(output truncated: one line per frame from 469 to 1699; every frame's unique grey levels are a subset of {0, 50, 170, 255})
```python
pixels
```
94540800
```python
mog_result = loadImages(MOG2_highway_path+'*.png')
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 1703.61it/s]
```python
mog_result[469].shape
```
(240, 320, 3)
```python
ef_result = loadImages(expFilter_highway_path+'*.png')
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:01<00:00, 1313.18it/s]
```python
ef_result[469] = cv2.cvtColor(ef_result[469], cv2.COLOR_BGR2GRAY)
```
```python
ef_result[469]
```
array([[ 0, 0, 1, ..., 254, 3, 0],
[ 2, 0, 0, ..., 255, 0, 0],
[254, 255, 0, ..., 0, 2, 1],
...,
[ 2, 0, 2, ..., 0, 0, 0],
[ 1, 0, 0, ..., 0, 1, 1],
[ 0, 1, 0, ..., 1, 1, 0]], dtype=uint8)
```python
df_highway = pd.DataFrame(np.array([np.concatenate((r_p_fm_secuencia_EF,cm_secuencia_EF)),
np.concatenate((r_p_fm_secuencia_MOG,cm_secuencia_MOG)),
np.concatenate((r_p_fm_secuencia_MOG2,cm_secuencia_MOG2)),
np.concatenate((r_p_fm_secuencia_gt,cm_secuencia_gt))]),
columns = ['Recall', 'Precision','F-measure','TP','FP','FN','TN'])
df_highway['Algorithm'] = ['Exponential Filter','MOG','MOG2','Ground truth']
df_highway.set_index('Algorithm')
```
| Algorithm | Recall | Precision | F-measure | TP | FP | FN | TN |
|---|---|---|---|---|---|---|---|
| Exponential Filter | 0.590599 | 0.434918 | 0.500942 | 4165508.0 | 5412167.0 | 2887516.0 | 81998809.0 |
| MOG | 0.500938 | 0.967187 | 0.660027 | 3156445.0 | 107085.0 | 3144618.0 | 88055852.0 |
| MOG2 | 0.933806 | 0.774233 | 0.846565 | 6843616.0 | 1995606.0 | 485117.0 | 85139661.0 |
| Ground truth | 1.000000 | 1.000000 | 1.000000 | 5448641.0 | 0.0 | 0.0 | 86228220.0 |
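As a sanity check, the metric columns follow directly from the confusion-matrix counts: recall = TP/(TP+FN), precision = TP/(TP+FP), and the F-measure is their harmonic mean. Recomputing the MOG2 row:
```python
# Sanity check on the MOG2 row: the metrics derive from the confusion-matrix counts.
tp, fp, fn = 6843616, 1995606, 485117
recall = tp / (tp + fn)                                    # ≈ 0.933806
precision = tp / (tp + fp)                                 # ≈ 0.774233
f_measure = 2 * precision * recall / (precision + recall)  # ≈ 0.846565
```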
```python
r_p_fm_secuencia_EF
```
array([0.60293747, 0.5 , 0.54666514])
```python
df_highway
```
| | Recall | Precision | F-measure | Algorithm |
|---|---|---|---|---|
| 0 | 0.602937 | 0.5 | 0.546665 | Exponential Filter |
| 1 | 0.502348 | 0.5 | 0.501171 | MOG |
| 2 | 0.929137 | 0.5 | 0.650138 | MOG2 |
```python
df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9],[10,11,12]]),
columns=['a', 'b', 'c'])
df2
```
| | a | b | c |
|---|---|---|---|
| 0 | 1 | 2 | 3 |
| 1 | 4 | 5 | 6 |
| 2 | 7 | 8 | 9 |
| 3 | 10 | 11 | 12 |
# 2. _badminton_ sequence from the _cameraJitter_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
#camaraJitter badminton dataset input
gt_badminton_path = 'DATA/cameraJitter/cameraJitter/badminton/groundtruth/*.png'
in_badminton_path = 'DATA/cameraJitter/cameraJitter/badminton/input/*.jpg'
groundTruth = loadImages(gt_badminton_path)
frames = loadImages(in_badminton_path)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1150/1150 [00:03<00:00, 296.71it/s]
Loading images: 100%|█████████████████████████████████████████████████████████████| 1150/1150 [00:08<00:00, 129.45it/s]
## 2.1. Exponential filter algorithm
```python
# camaraJitter badminton dataset output path
expFilter_badminton_path = 'DATA/cameraJitter/results/badminton/expFilter/'
```
### 2.1.1. Metric computation
```python
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_badminton_path+'*.jpg', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.2. MOG algorithm
```python
# camaraJitter badminton dataset output paths
MOG_badminton_path = 'DATA/cameraJitter/results/badminton/MOG/'
```
### 2.2.1. Metric computation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_badminton_path+'*.jpg', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.3. MOG2 algorithm
```python
# camaraJitter badminton dataset output paths
MOG2_badminton_path = 'DATA/cameraJitter/results/badminton/MOG2/'
```
### 2.3.1. Metric computation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_badminton_path+'*.jpg', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.4. Results comparison
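This comparison was left unfilled; following the same pattern used for the highway sequence (and assuming `numpy`/`pandas` are imported as `np`/`pd`, as earlier in the notebook), the table can be assembled from the per-algorithm sequence metrics computed above. A minimal sketch:
```python
df_badminton = pd.DataFrame(np.array([np.concatenate((r_p_fm_secuencia_EF, cm_secuencia_EF)),
                                      np.concatenate((r_p_fm_secuencia_MOG, cm_secuencia_MOG)),
                                      np.concatenate((r_p_fm_secuencia_MOG2, cm_secuencia_MOG2))]),
                            columns=['Recall', 'Precision', 'F-measure', 'TP', 'FP', 'FN', 'TN'])
df_badminton['Algorithm'] = ['Exponential Filter', 'MOG', 'MOG2']
df_badminton.set_index('Algorithm')
```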
# 3. _canoe_ sequence from the _dynamicBackground_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# dynamicBackground canoe dataset input
gt_canoe_path = 'DATA/dynamicBackground/dynamicBackground/canoe/groundtruth/*.png'
in_canoe_path = 'DATA/dynamicBackground/dynamicBackground/canoe/input/*.jpg'
groundTruth = loadImages(gt_canoe_path)
frames = loadImages(in_canoe_path)
```
## 3.1. Exponential filter algorithm
```python
# dynamicBackground canoe dataset output paths
expFilter_canoe_path = 'DATA/dynamicBackground/results/canoe/expFilter/'
```
### 3.1.1. Metric computation
```python
# NOTE: frame range (469-1699) mirrors the highway setup; verify it against this sequence's temporal ROI.
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_canoe_path+'*.jpg', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.2. MOG algorithm
```python
# dynamicBackground canoe dataset output path
MOG_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG/'
```
### 3.2.1. Metric computation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_canoe_path+'*.jpg', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.3. MOG2 algorithm
```python
# dynamicBackground canoe dataset output path
MOG2_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG2/'
```
### 3.3.1. Metric computation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_canoe_path+'*.jpg', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.4. Results comparison
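With the `_MOG` and `_MOG2` result variables computed above, the comparison table for _canoe_ can be assembled exactly as sketched in section 2.4.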
# 4. _sofa_ sequence from the _intermittentObjectMotion_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# intermittentObjectMotion sofa dataset input
gt_sofa_path = 'DATA/intermittentObjectMotion/intermittentObjectMotion/sofa/groundtruth/*.png'
in_sofa_path = 'DATA/intermittentObjectMotion/intermittentObjectMotion/sofa/input/*.jpg'
groundTruth = loadImages(gt_sofa_path)
frames = loadImages(in_sofa_path)
```
## 4.1. Exponential filter algorithm
### 4.1.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
expFilter_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/expFilter/'
# NOTE: frame range (469-1699) was copied from the highway setup; verify it against this sequence's temporal ROI.
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_sofa_path+'*.jpg', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.2. MOG algorithm
### 4.2.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
MOG_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG/'
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_sofa_path+'*.jpg', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.3. MOG2 algorithm
### 4.3.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
MOG2_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG2/'
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_sofa_path+'*.jpg', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.4. Results comparison
# 5. _copyMachine_ sequence from the _shadow_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# shadow copyMachine dataset input
gt_copyMachine_path = 'DATA/shadow/shadow/copyMachine/groundtruth/*.png'
in_copyMachine_path = 'DATA/shadow/shadow/copyMachine/input/*.jpg'
groundTruth = loadImages(gt_copyMachine_path)
frames = loadImages(in_copyMachine_path)
```
## 5.1. Exponential filter algorithm
### 5.1.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
expFilter_copyMachine_path = 'DATA/shadow/results/copyMachine/expFilter/'
# NOTE: frame range (469-1699) was copied from the highway setup; verify it against this sequence's temporal ROI.
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_copyMachine_path+'*.jpg', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.2. MOG algorithm
### 5.2.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
MOG_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG/'
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_copyMachine_path+'*.jpg', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.3. MOG2 algorithm
### 5.3.1. Metric computation
```python
# Output path assumed to follow the same layout as the other sequences (not defined in the original).
MOG2_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG2/'
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_copyMachine_path+'*.jpg', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.4. Results comparison
|
module Rationals
import ZZ
%access public export
Pair : Type
Pair = (Integer, Integer)
ZZPair : Type
ZZPair = (ZZ, ZZ)
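-- Conversions between ZZ and Double go via Integer; used by simplifyRational below.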
ZZtoDb : ZZ -> Double
ZZtoDb x = cast{from=Integer}{to=Double} (cast{from=ZZ}{to=Integer} x)
DbtoZZ : Double -> ZZ
DbtoZZ x = cast{from=Integer}{to=ZZ} (cast{from=Double}{to=Integer} x)
apZZ : (f: ZZ -> ZZ) -> (n: ZZ) -> (m: ZZ) -> n = m -> f n = f m
apZZ f m m Refl = Refl
isNotZero : Nat -> Bool
isNotZero Z = False
isNotZero (S k) = True
isFactorInt : Integer -> Integer -> Type --Needed for defining Integer division
isFactorInt m n = (k : Integer ** (m * k = n))
divides : (m: Integer) -> (n: Integer) -> (k: Integer ** (m * k = n)) -> Integer
divides m n k = (fst k)
--Integer implementation of gcd (Euclid's algorithm via Nat mod)
CalcGCD : (Integer, Integer) -> Integer
CalcGCD (a, b) = if (isNotZero (toNat b)) then next else a where
next = CalcGCD (b, toIntegerNat (modNat (toNat a) (toNat b)))
OnlyPositive : (x: Pair) -> Pair
OnlyPositive x = (if (fst x)>0 then fst x else (-1)*(fst x), if(snd x)>0 then (snd x) else (-1)*(snd x))
--gcd on Integers, taking absolute values first so the Nat-based algorithm is well-defined
gccd : (Integer, Integer) -> Integer
gccd x = CalcGCD (OnlyPositive x)
data NotZero : Integer -> Type where --Proof that a number is not zero, needed to construct Q
OneNotZero : NotZero 1
NegativeNotZero : ( n: Integer ) -> NotZero n -> NotZero (-n)
PositiveNotZero : ( m: Integer ) -> LTE 1 (fromIntegerNat m) -> NotZero m
data ZZNotZero : ZZ -> Type where
ZZOneNotZero : ZZNotZero 1
ZZNegativeNotZero : ( n: ZZ ) -> ZZNotZero n -> ZZNotZero (-n)
ZZPositiveNotZero : ( m: ZZ ) -> LTE 1 (fromIntegerNat (cast(m))) -> ZZNotZero m
--Type for equality of two Rationals
data EqRat : Pair -> Pair -> Type where
IdEq : (m : Pair) -> EqRat m m
MulEq : (c : Integer) -> EqRat n m -> EqRat ((fst n)*c,(snd n)*c) m
make_rational : (p: Nat) -> (q: Integer) -> NotZero q -> Pair
make_rational p q x = (toIntegerNat(p), q)
InclusionMap : (n : Nat) -> Pair --Includes the naturals in Q
InclusionMap n = make_rational n 1 OneNotZero
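--Addition via cross-multiplication: a/b + c/d = (a*d + b*c)/(b*d)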
AddRationals : (x: ZZPair) -> ZZNotZero (snd x) -> (y: ZZPair) -> ZZNotZero (snd y) -> ZZPair
AddRationals x a y b = ((fst x)*(snd y) + (snd x)*(fst y), (snd x)*(snd y))
MultiplyRationals : (x: ZZPair) -> ZZNotZero (snd x) -> (y: ZZPair) -> ZZNotZero (snd y) -> ZZPair
MultiplyRationals x a y b =((fst x)*(fst y), (snd x)*(snd y))
MultInverse : (x: ZZPair) -> ZZNotZero (fst x) -> ZZNotZero (snd x) -> ZZPair
MultInverse x y z = ((snd x), (fst x))
AddInverse : (x: ZZPair) -> ZZNotZero (snd x) -> ZZPair
AddInverse x a = (-(fst x), (snd x))
Subtraction : (x: ZZPair) -> ZZNotZero (snd x) -> (y: ZZPair) -> ZZNotZero (snd y) -> ZZPair
Subtraction x a y b = AddRationals x a (AddInverse y b) b
Division : (x: ZZPair) -> ZZNotZero (snd x) -> (y: ZZPair) -> ZZNotZero (fst y) -> ZZNotZero (snd y) -> ZZPair
Division x a y b c = MultiplyRationals x a (MultInverse y b c) b
--SimplifyRational : (x: Pair) -> NotZero (snd x) -> Pair
--SimplifyRational x a = (divides (gcdab fromIntegerNat((fst x)) fromIntegerNat((snd x))) ___ (fst x), divides (gcdab fromIntegerNat((fst x)) fromIntegerNat((snd x)) __ (snd x))
--To prove that the SimplifyRational works, we can just check if the output is equal to the input
-- To be done
simplifyRational : (x : ZZPair) -> ZZPair
simplifyRational (a, b) = (sa, sb) where
sa = DbtoZZ (da / g) where
da = ZZtoDb a
g = cast {from=Integer} {to=Double}
(gccd (cast {from=ZZ} {to=Integer}a,cast {from=ZZ} {to=Integer}b))
sb = DbtoZZ (db / g) where
db = ZZtoDb b
g = cast {from=Integer} {to=Double}
(gccd (cast {from=ZZ} {to=Integer}a,cast {from=ZZ} {to=Integer}b))
--Above, I will need to supply a proof that the GCD divides the two numbers. Then, the function defined above will produce the rational in simplified form.
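--Sanity check (assuming integer literals for ZZ): simplifyRational (6, 4) should reduce to (3, 2).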
xAndInverseNotZero : (x: ZZPair) -> (k: ZZNotZero (snd x)) -> ZZNotZero (snd (AddInverse x k))
xAndInverseNotZero x (ZZPositiveNotZero (snd x) y) = (ZZPositiveNotZero (snd x) y)
FirstIsInverted : (x: ZZPair) -> (k: ZZNotZero (snd x)) -> (a: ZZ) -> (a = (fst x)) -> ((-a) = fst (AddInverse x k))
FirstIsInverted x k a prf = (apZZ (\x => -x) a (fst x) prf)
SecondStaysSame : (x: ZZPair) -> (k: ZZNotZero (snd x)) -> (b: ZZ) -> (b = (snd x)) -> (b = (snd (AddInverse x k)))
SecondStaysSame x k b prf = (apZZ (\x => x) b (snd x) prf)
xplusinverse: (x: ZZPair) -> (k: ZZNotZero (snd x)) -> ZZPair
xplusinverse x k = AddRationals x k (AddInverse x k) (xAndInverseNotZero x k)
addinverselemma: (a: ZZ) -> (b: ZZ) -> ((-a)=b) -> (a + b = ((-a) + a)) -> ((-a)+a =0 ) -> (a + b = 0)
addinverselemma a b prf prf1 prf2 = trans prf1 prf2
addinverseFST: (x: ZZPair) -> (k: ZZNotZero (snd x)) -> (a: ZZ) -> (a = (fst x)) -> (fst (AddInverse x k) = b) -> ((-a) = b)
addinverseFST x k a prf prf1 = trans (FirstIsInverted x k a prf) (prf1)
addinverseSND: (x: ZZPair) -> (k: ZZNotZero (snd x)) -> (c: ZZ) -> (c = (snd x)) -> (snd (AddInverse x k) = b) -> (c = b)
addinverseSND x k c prf prf1 = trans (SecondStaysSame x k c prf) (prf1)
|
Formal statement is: lemma cone_hull_empty_iff: "S = {} \<longleftrightarrow> cone hull S = {}" Informal statement is: The cone hull of an empty set is empty. |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
# Here we define shorter print functions for common constructs.
# Loading this file will cause more compact printing of formulas,
# but the output cannot be pasted back into GAP.
#
Import(formgen, code);
Diag.print := (s,i,is) >> Print("D");
Blk.print := (s,i,is) >> Print("B");
Gath._sym := true;
Scat._sym := true;
Diag._sym := true;
Blk._sym := true;
HideRTWrap := function()
RTWrap._origprint := RTWrap.print;
RTWrap.print := (self,i,is) >> Print(self.rt.node);
end;
ShowRTWrap := function()
if IsBound(RTWrap._origprint)
then RTWrap.print := RTWrap._origprint;
fi;
end;
|
[STATEMENT]
lemma real_affinity_eq: "m \<noteq> 0 \<Longrightarrow> m * x + c = y \<longleftrightarrow> x = inverse m * y + - (c / m)"
for m :: "'a::linordered_field"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m \<noteq> (0::'a) \<Longrightarrow> (m * x + c = y) = (x = inverse m * y + - (c / m))
[PROOF STEP]
by (simp add: field_simps) |
section \<open>Records \label{sec:records}\<close>
(*<*)
theory Records imports Main begin
(*>*)
text \<open>
\index{records|(}%
Records are familiar from programming languages. A record of $n$
fields is essentially an $n$-tuple, but the record's components have
names, which can make expressions easier to read and reduces the
risk of confusing one field for another.
A record of Isabelle/HOL covers a collection of fields, with select
and update operations. Each field has a specified type, which may
be polymorphic. The field names are part of the record type, and
the order of the fields is significant --- as it is in Pascal but
not in Standard ML. If two different record types have field names
in common, then the ambiguity is resolved in the usual way, by
qualified names.
Record types can also be defined by extending other record types.
Extensible records make use of the reserved pseudo-field \cdx{more},
which is present in every record type. Generic record operations
work on all possible extensions of a given type scheme; polymorphism
takes care of structural sub-typing behind the scenes. There are
also explicit coercion functions between fixed record types.
\<close>
subsection \<open>Record Basics\<close>
text \<open>
Record types are not primitive in Isabelle and have a delicate
internal representation @{cite "NaraschewskiW-TPHOLs98"}, based on
nested copies of the primitive product type. A \commdx{record}
declaration introduces a new record type scheme by specifying its
fields, which are packaged internally to hold up the perception of
the record as a distinguished entity. Here is a simple example:
\<close>
record point =
Xcoord :: int
Ycoord :: int
text \<open>\noindent
Records of type \<^typ>\<open>point\<close> have two fields named \<^const>\<open>Xcoord\<close>
and \<^const>\<open>Ycoord\<close>, both of type~\<^typ>\<open>int\<close>. We now define a
constant of type \<^typ>\<open>point\<close>:
\<close>
definition pt1 :: point where
"pt1 \<equiv> (| Xcoord = 999, Ycoord = 23 |)"
text \<open>\noindent
We see above the ASCII notation for record brackets. You can also
use the symbolic brackets \<open>\<lparr>\<close> and \<open>\<rparr>\<close>. Record type
expressions can be also written directly with individual fields.
The type name above is merely an abbreviation.
\<close>
definition pt2 :: "\<lparr>Xcoord :: int, Ycoord :: int\<rparr>" where
"pt2 \<equiv> \<lparr>Xcoord = -45, Ycoord = 97\<rparr>"
text \<open>
For each field, there is a \emph{selector}\index{selector!record}
function of the same name. For example, if \<open>p\<close> has type \<^typ>\<open>point\<close> then \<open>Xcoord p\<close> denotes the value of the \<open>Xcoord\<close> field of~\<open>p\<close>. Expressions involving field selection
of explicit records are simplified automatically:
\<close>
lemma "Xcoord \<lparr>Xcoord = a, Ycoord = b\<rparr> = a"
by simp
text \<open>
The \emph{update}\index{update!record} operation is functional. For
example, \<^term>\<open>p\<lparr>Xcoord := 0\<rparr>\<close> is a record whose \<^const>\<open>Xcoord\<close>
value is zero and whose \<^const>\<open>Ycoord\<close> value is copied from~\<open>p\<close>. Updates of explicit records are also simplified automatically:
\<close>
lemma "\<lparr>Xcoord = a, Ycoord = b\<rparr>\<lparr>Xcoord := 0\<rparr> =
\<lparr>Xcoord = 0, Ycoord = b\<rparr>"
by simp
text \<open>
\begin{warn}
Field names are declared as constants and can no longer be used as
variables. It would be unwise, for example, to call the fields of
type \<^typ>\<open>point\<close> simply \<open>x\<close> and~\<open>y\<close>.
\end{warn}
\<close>
subsection \<open>Extensible Records and Generic Operations\<close>
text \<open>
\index{records!extensible|(}%
Now, let us define coloured points (type \<open>cpoint\<close>) to be
points extended with a field \<open>col\<close> of type \<open>colour\<close>:
\<close>
datatype colour = Red | Green | Blue
record cpoint = point +
col :: colour
text \<open>\noindent
The fields of this new type are \<^const>\<open>Xcoord\<close>, \<open>Ycoord\<close> and
\<open>col\<close>, in that order.
\<close>
definition cpt1 :: cpoint where
"cpt1 \<equiv> \<lparr>Xcoord = 999, Ycoord = 23, col = Green\<rparr>"
text \<open>
We can define generic operations that work on arbitrary
instances of a record scheme, e.g.\ covering \<^typ>\<open>point\<close>, \<^typ>\<open>cpoint\<close>, and any further extensions. Every record structure has an
implicit pseudo-field, \cdx{more}, that keeps the extension as an
explicit value. Its type is declared as completely
polymorphic:~\<^typ>\<open>'a\<close>. When a fixed record value is expressed
using just its standard fields, the value of \<open>more\<close> is
implicitly set to \<open>()\<close>, the empty tuple, which has type
\<^typ>\<open>unit\<close>. Within the record brackets, you can refer to the
\<open>more\<close> field by writing ``\<open>\<dots>\<close>'' (three dots):
\<close>
lemma "Xcoord \<lparr>Xcoord = a, Ycoord = b, \<dots> = p\<rparr> = a"
by simp
text \<open>
This lemma applies to any record whose first two fields are \<open>Xcoord\<close> and~\<^const>\<open>Ycoord\<close>. Note that \<open>\<lparr>Xcoord = a, Ycoord
= b, \<dots> = ()\<rparr>\<close> is exactly the same as \<open>\<lparr>Xcoord = a, Ycoord
= b\<rparr>\<close>. Selectors and updates are always polymorphic wrt.\ the
\<open>more\<close> part of a record scheme, its value is just ignored (for
select) or copied (for update).
The \<open>more\<close> pseudo-field may be manipulated directly as well,
but the identifier needs to be qualified:
\<close>
lemma "point.more cpt1 = \<lparr>col = Green\<rparr>"
by (simp add: cpt1_def)
text \<open>\noindent
We see that the colour part attached to this \<^typ>\<open>point\<close> is a
rudimentary record in its own right, namely \<open>\<lparr>col =
Green\<rparr>\<close>. In order to select or update \<open>col\<close>, this fragment
needs to be put back into the context of the parent type scheme, say
as \<open>more\<close> part of another \<^typ>\<open>point\<close>.
To define generic operations, we need to know a bit more about
records. Our definition of \<^typ>\<open>point\<close> above has generated two
type abbreviations:
\medskip
\begin{tabular}{l}
\<^typ>\<open>point\<close>~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int\<rparr>\<close> \\
\<^typ>\<open>'a point_scheme\<close>~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int, \<dots> :: 'a\<rparr>\<close> \\
\end{tabular}
\medskip
\noindent
Type \<^typ>\<open>point\<close> is for fixed records having exactly the two fields
\<^const>\<open>Xcoord\<close> and~\<open>Ycoord\<close>, while the polymorphic type \<^typ>\<open>'a point_scheme\<close> comprises all possible extensions to those two
fields. Note that \<^typ>\<open>unit point_scheme\<close> coincides with \<^typ>\<open>point\<close>, and \<^typ>\<open>\<lparr>col :: colour\<rparr> point_scheme\<close> with \<^typ>\<open>cpoint\<close>.
In the following example we define two operations --- methods, if we
regard records as objects --- to get and set any point's \<open>Xcoord\<close> field.
\<close>
definition getX :: "'a point_scheme \<Rightarrow> int" where
"getX r \<equiv> Xcoord r"
definition setX :: "'a point_scheme \<Rightarrow> int \<Rightarrow> 'a point_scheme" where
"setX r a \<equiv> r\<lparr>Xcoord := a\<rparr>"
text \<open>
Here is a generic method that modifies a point, incrementing its
\<^const>\<open>Xcoord\<close> field. The \<open>Ycoord\<close> and \<open>more\<close> fields
are copied across. It works for any record type scheme derived from
\<^typ>\<open>point\<close> (including \<^typ>\<open>cpoint\<close> etc.):
\<close>
definition incX :: "'a point_scheme \<Rightarrow> 'a point_scheme" where
"incX r \<equiv>
\<lparr>Xcoord = Xcoord r + 1, Ycoord = Ycoord r, \<dots> = point.more r\<rparr>"
text \<open>
Generic theorems can be proved about generic methods. This trivial
lemma relates \<^const>\<open>incX\<close> to \<open>getX\<close> and \<open>setX\<close>:
\<close>
lemma "incX r = setX r (getX r + 1)"
by (simp add: getX_def setX_def incX_def)
text \<open>
\begin{warn}
If you use the symbolic record brackets \<open>\<lparr>\<close> and \<open>\<rparr>\<close>,
then you must also use the symbolic ellipsis, ``\<open>\<dots>\<close>'', rather
than three consecutive periods, ``\<open>...\<close>''. Mixing the ASCII
and symbolic versions causes a syntax error. (The two versions are
more distinct on screen than they are on paper.)
\end{warn}%
\index{records!extensible|)}
\<close>
subsection \<open>Record Equality\<close>
text \<open>
Two records are equal\index{equality!of records} if all pairs of
corresponding fields are equal. Concrete record equalities are
simplified automatically:
\<close>
lemma "(\<lparr>Xcoord = a, Ycoord = b\<rparr> = \<lparr>Xcoord = a', Ycoord = b'\<rparr>) =
(a = a' \<and> b = b')"
by simp
text \<open>
The following equality is similar, but generic, in that \<open>r\<close>
can be any instance of \<^typ>\<open>'a point_scheme\<close>:
\<close>
lemma "r\<lparr>Xcoord := a, Ycoord := b\<rparr> = r\<lparr>Ycoord := b, Xcoord := a\<rparr>"
by simp
text \<open>\noindent
We see above the syntax for iterated updates. We could equivalently
have written the left-hand side as \<open>r\<lparr>Xcoord := a\<rparr>\<lparr>Ycoord :=
b\<rparr>\<close>.
Record equality is \emph{extensional}:
\index{extensionality!for records} a record is determined entirely
by the values of its fields.
\<close>
lemma "r = \<lparr>Xcoord = Xcoord r, Ycoord = Ycoord r\<rparr>"
by simp
text \<open>\noindent
The generic version of this equality includes the pseudo-field
\<open>more\<close>:
\<close>
lemma "r = \<lparr>Xcoord = Xcoord r, Ycoord = Ycoord r, \<dots> = point.more r\<rparr>"
by simp
text \<open>
The simplifier can prove many record equalities
automatically, but general equality reasoning can be tricky.
Consider proving this obvious fact:
\<close>
lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
apply simp?
oops
text \<open>\noindent
Here the simplifier can do nothing, since general record equality is
not eliminated automatically. One way to proceed is by an explicit
forward step that applies the selector \<^const>\<open>Xcoord\<close> to both sides
of the assumed record equality:
\<close>
lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
apply (drule_tac f = Xcoord in arg_cong)
txt \<open>@{subgoals [display, indent = 0, margin = 65]}
Now, \<open>simp\<close> will reduce the assumption to the desired
conclusion.\<close>
apply simp
done
text \<open>
The \<open>cases\<close> method is preferable to such a forward proof. We
state the desired lemma again:
\<close>
lemma "r\<lparr>Xcoord := a\<rparr> = r\<lparr>Xcoord := a'\<rparr> \<Longrightarrow> a = a'"
txt \<open>The \methdx{cases} method adds an equality to replace the
named record term by an explicit record expression, listing all
fields. It even includes the pseudo-field \<open>more\<close>, since the
record equality stated here is generic for all extensions.\<close>
apply (cases r)
txt \<open>@{subgoals [display, indent = 0, margin = 65]} Again, \<open>simp\<close> finishes the proof. Because \<open>r\<close> is now represented as
an explicit record construction, the updates can be applied and the
record equality can be replaced by equality of the corresponding
fields (due to injectivity).\<close>
apply simp
done
text \<open>
The generic cases method does not admit references to locally bound
parameters of a goal. In longer proof scripts one might have to
fall back on the primitive \<open>rule_tac\<close> used together with the
internal field representation rules of records. The above use of
\<open>(cases r)\<close> would become \<open>(rule_tac r = r in
point.cases_scheme)\<close>.
\<close>
subsection \<open>Extending and Truncating Records\<close>
text \<open>
Each record declaration introduces a number of derived operations to
refer collectively to a record's fields and to convert between fixed
record types. They can, for instance, convert between types \<^typ>\<open>point\<close> and \<^typ>\<open>cpoint\<close>. We can add a colour to a point or convert
a \<^typ>\<open>cpoint\<close> to a \<^typ>\<open>point\<close> by forgetting its colour.
\begin{itemize}
\item Function \cdx{make} takes as arguments all of the record's
fields (including those inherited from ancestors). It returns the
corresponding record.
\item Function \cdx{fields} takes the record's very own fields and
returns a record fragment consisting of just those fields. This may
be filled into the \<open>more\<close> part of the parent record scheme.
\item Function \cdx{extend} takes two arguments: a record to be
extended and a record containing the new fields.
\item Function \cdx{truncate} takes a record (possibly an extension
of the original record type) and returns a fixed record, removing
any additional fields.
\end{itemize}
These functions provide useful abbreviations for standard
record expressions involving constructors and selectors. The
definitions, which are \emph{not} unfolded by default, are made
available by the collective name of \<open>defs\<close> (\<open>point.defs\<close>, \<open>cpoint.defs\<close>, etc.).
For example, here are the versions of those functions generated for
record \<^typ>\<open>point\<close>. We omit \<open>point.fields\<close>, which happens to
be the same as \<open>point.make\<close>.
@{thm [display, indent = 0, margin = 65] point.make_def [no_vars]
point.extend_def [no_vars] point.truncate_def [no_vars]}
Contrast those with the corresponding functions for record \<^typ>\<open>cpoint\<close>. Observe \<open>cpoint.fields\<close> in particular.
@{thm [display, indent = 0, margin = 65] cpoint.make_def [no_vars]
cpoint.fields_def [no_vars] cpoint.extend_def [no_vars]
cpoint.truncate_def [no_vars]}
To demonstrate these functions, we declare a new coloured point by
extending an ordinary point. Function \<open>point.extend\<close> augments
\<open>pt1\<close> with a colour value, which is converted into an
appropriate record fragment by \<open>cpoint.fields\<close>.
\<close>
definition cpt2 :: cpoint where
"cpt2 \<equiv> point.extend pt1 (cpoint.fields Green)"
text \<open>
The coloured points \<^const>\<open>cpt1\<close> and \<open>cpt2\<close> are equal. The
proof is trivial, by unfolding all the definitions. We deliberately
omit the definition of~\<open>pt1\<close> in order to reveal the underlying
comparison on type \<^typ>\<open>point\<close>.
\<close>
lemma "cpt1 = cpt2"
apply (simp add: cpt1_def cpt2_def point.defs cpoint.defs)
txt \<open>@{subgoals [display, indent = 0, margin = 65]}\<close>
apply (simp add: pt1_def)
done
text \<open>
In the example below, a coloured point is truncated to leave a
point. We use the \<open>truncate\<close> function of the target record.
\<close>
lemma "point.truncate cpt2 = pt1"
by (simp add: pt1_def cpt2_def point.defs)
text \<open>
\begin{exercise}
Extend record \<^typ>\<open>cpoint\<close> to have a further field, \<open>intensity\<close>, of type~\<^typ>\<open>nat\<close>. Experiment with generic operations
(using polymorphic selectors and updates) and explicit coercions
(using \<open>extend\<close>, \<open>truncate\<close> etc.) among the three record
types.
\end{exercise}
\begin{exercise}
(For Java programmers.)
Model a small class hierarchy using records.
\end{exercise}
\index{records|)}
\<close>
(*<*)
end
(*>*)
|
// Example is based on num_list1.cpp from the boost.spirit repo:
// https://github.com/boostorg/spirit/blob/develop/example/x3/num_list/num_list1.cpp
#include <boost/spirit/home/x3.hpp>
#include <string>
namespace x3 = boost::spirit::x3;
int main()
{
std::string csl("1,2,3,4");
auto first = csl.begin();
auto last = csl.end();
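    // phrase_parse returns true on a successful match and advances 'first' past the consumed input.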
auto res = x3::phrase_parse(first, last, x3::double_ >> *(',' >> x3::double_), x3::ascii::space);
return res && (first == last) ? 0 : 1;
} |
function o2 = canlab_results_fmridisplay(input_activation, varargin)
% :Usage:
% ::
%
% canlab_results_fmridisplay(input_activation, [optional inputs])
%
% purpose: This function displays fMRI results.
%
% :Input:
%
% **input_activation:**
% nii, img,
%
% This image has the blobs you want to
% display. You can also enter a cl "clusters" structure or
% "region" object.
%
% you can also get a thresholded image like the examples used here
% from a number of places - by thresholding your results in SPM
% and using "write filtered" to save the image, by creating masks
% from meta-analysis or anatomical atlases, or by using
% mediation_brain_results, robust_results_threshold,
% robust_results_batch_script, threshold_imgs, or object
% oriented tools including fmri_data and statistic_image objects.
%
% :Optional Inputs:
%
% **'noblobs':**
% do not display blobs
%
% **'outline':**
% display blob outlines
%
% **'nooutline':**
% do not display blob outlines (default)
%
% **'addmontages':**
% when entering existing fmridisplay obj, add new montages
%
% **'noremove':**
% do not remove current blobs when adding new ones
%
% **'nofigure':**
% do not create a new figure (for selected montage sets only)
%
% **'outlinecolor:**
% followed by new outline color
%
% **'splitcolor':**
% followed by 4-cell new split colormap colors (help fmridisplay or edit code for defaults as example)
%
% **'montagetype':**
%
% 'full' Axial, coronal, and saggital slices, 4 cortical surfaces
% 'compact' Midline saggital and two rows of axial slices [the default]
% 'compact2' A single row showing midline saggital and axial slices
% 'multirow' A series of 'compact2' displays in one figure for comparing different images/maps side by side
% 'regioncenters' A series of separate axes, each focused on one region
%
% 'full' for full montages of axial and sagg slices.
%
% 'full hcp' for full montage, but with surfaces and volumes from
% HCP data
%
% 'compact' [default] for single-figure parasagittal and axials slices.
%
% 'compact2': like 'compact', but fewer axial slices.
%
% 'multirow': followed by number of rows
% e.g., o2 = canlab_results_fmridisplay([], 'multirow', 2);
%
% {'blobcenters', 'regioncenters'}: Slices for the center of each blob/region
% Note: this creates a new figure, tagged
% 'fmridisplay_regioncenters', and is not compatible with 'nofigure'
%
% **'noverbose':**
% suppress verbose output, good for scripts/publish to html, etc.
%
% **'overlay':**
% specify anatomical image for montage (not surfaces), followed by
% image name
% e.g., o2 = canlab_results_fmridisplay([], 'overlay', 'icbm152_2009_symmetric_for_underlay.img')';
%
% The default brain for overlays is based on Keuken et al. 2014
% For legacy SPM8 single subject, enter as arguments:
% 'overlay', which('SPM8_colin27T1_seg.img')
%
% Other inputs to addblobs (fmridisplay method) are allowed, e.g., 'cmaprange', [-2 2], 'trans'
%
% See help fmridisplay
% e.g., 'color', [1 0 0]
%
% You can also input an existing fmridisplay object, and it will use the
% one you have created rather than setting up the canonical slices.
%
% Try "brighten(.4)" to make the images brighter.
%
% :Example Script:
% ::
%
% input_activation = 'Pick_Atlas_PAL_large.nii';
%
% % set up the anatomical overlay and display blobs
% % (see the code of this function and help fmridisplay for more examples)
%
% o2 = canlab_results_fmridisplay(input_activation);
%
% %% ========== remove those blobs and change the color ==========
%
% cl = mask2clusters(input_activation);
% removeblobs(o2);
% o2 = addblobs(o2, cl, 'color', [0 0 1]);
%
% %% ========== OR
%
% r = region(input_activation);
% o2 = removeblobs(o2);
% o2 = addblobs(o2, r, 'color', [1 0 0]);
%
% %% ========== Create empty fmridisplay object on which to add blobs:
% o2 = canlab_results_fmridisplay
% o2 = canlab_results_fmridisplay([], 'compact2', 'noverbose');
%
% %% ========== If you want to start over with a new fmridisplay object,
% % make sure to clear o2, because it uses lots of memory
%
% % This image should be on your path in the "canlab_canonical_brains" subfolder:
%
% input_activation = 'pain-emotion_2s_z_val_FDR_05.img';
% clear o2
% close all
% o2 = canlab_results_fmridisplay(input_activation);
%
% %% ========== save PNGs of your images to insert into powerpoint, etc.
% % for your paper/presentation
%
% scn_export_papersetup(400);
% saveas(gcf, 'results_images/pain_meta_fmridisplay_example_sagittal.png');
%
% scn_export_papersetup(350);
% saveas(gcf, 'results_images/pain_meta_fmridisplay_example_sagittal.png');
%
% Change colors, removing old blobs and replacing with new ones:
% o2 = canlab_results_fmridisplay(d, o2, 'cmaprange', [.3 .45], 'splitcolor', {[0 0 1] [.3 0 .8] [.9 0 .5] [1 1 0]}, 'outlinecolor', [.5 0 .5]);
%
% ..
% Tor Wager
% 1/27/2012
% ..
if ~which('fmridisplay.m')
disp('fmridisplay is not on path. it is in canlab tools, which must be on your path!')
return
end
if nargin == 0
o2 = canlab_results_fmridisplay(region(), 'noblobs', 'nooutline');
return
end
if ischar(input_activation)
if strcmp(input_activation, 'compact') || strcmp(input_activation, 'compact2') || strcmp(input_activation, 'full') ...
|| strcmp(input_activation, 'multirow') || strcmp(input_activation, 'coronal') || strcmp(input_activation, 'saggital')
% Entered no data map; intention is not to plot blobs, just create underlay
varargin{end + 1} = 'noblobs';
varargin{end + 1} = 'nooutline';
varargin{end + 1} = input_activation; % so code below finds it later
% do nothing else for now - this is not an input image
else
% assume it is an input image
cl = region(fmri_data(input_activation)); % mask2clusters(input_activation);
end
elseif isstruct(input_activation) || isa(input_activation, 'region')
cl = input_activation;
if ~isa(input_activation, 'region'), cl = cluster2region(cl); end
elseif isa(input_activation, 'image_vector')
cl = region(input_activation);
elseif isempty(input_activation)
% do nothing for now
else
error('I don''t recognize the format of input_activation. It should be a thresholded mask, clusters, or region object');
end
% process input arguments
% --------------------------------------------
doblobs = true;
dooutline = false;
doaddmontages = false;
doremove = true;
outlinecolor = [0 0 0];
splitcolor = {[0 0 1] [0 .8 .8] [1 .4 .5] [1 1 0]}; % {[0 0 1] [.3 .6 .9] [.8 .3 0] [1 1 0]}; % more straight orange to yellow: {[0 0 1] [0 1 1] [1 .5 0] [1 1 0]}
montagetype = 'compact';
doverbose = true;
%overlay='SPM8_colin27T1_seg.img';
overlay = 'keuken_2014_enhanced_for_underlay.img';
dofigure = true;
wh = strcmp(varargin, 'overlay');
if any(wh), wh = find(wh); overlay = varargin{wh(1) + 1}; varargin([wh wh+1]) = []; end
wh = strcmp(varargin, 'noblobs');
if any(wh), doblobs = false; varargin(wh) = []; end
wh = strcmp(varargin, 'nooutline');
if any(wh), dooutline = false; varargin(wh) = []; end
wh = strcmp(varargin, 'outline');
if any(wh), dooutline = true; varargin(wh) = []; end
wh = strcmp(varargin, 'addmontages');
if any(wh), doaddmontages = true; varargin(wh) = []; end
wh = strcmp(varargin, 'outlinecolor');
if any(wh), wh = find(wh); outlinecolor = varargin{wh(1) + 1}; end
wh = strcmp(varargin, 'splitcolor');
if any(wh), wh = find(wh); splitcolor = varargin{wh(1) + 1}; end
wh = strcmp(varargin, 'noremove');
if any(wh), doremove = false; varargin(wh) = []; end
wh = strcmp(varargin, 'full');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'multirow');
if any(wh), montagetype = varargin{find(wh)};
nrows = varargin{find(wh) + 1};
varargin{find(wh) + 1} = [];
varargin(wh) = [];
end
wh = strcmp(varargin, 'blobcenters');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'regioncenters');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'full hcp');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'compact');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'compact2');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'coronal');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'saggital');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'allslices');
if any(wh), montagetype = varargin{find(wh)}; varargin(wh) = []; end
wh = strcmp(varargin, 'noverbose');
if any(wh), doverbose = false; end
wh = strcmp(varargin, 'nofigure');
if any(wh), dofigure = false; varargin(wh) = []; end
wh = false(1, length(varargin));
for i = 1:length(varargin)
wh(i) = isa(varargin{i}, 'fmridisplay');
if wh(i), o2 = varargin{wh}; end
end
varargin(wh) = [];
xyz = [-20 -10 -6 -2 0 2 6 10 20]';
xyz(:, 2:3) = 0;
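% xyz holds the default saggital slice x-coordinates (y and z zeroed), used as 'wh_slice' for the saggital montages below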
if isempty(input_activation)
% we will skip the blobs, but process other optional input args
doblobs = false;
dooutline = false;
end
if ~exist('o2', 'var')
% set up fmridisplay
% --------------------------------------------
% you only need to do this once
% then you can add montages, add and remove blobs, add and remove points (for
% meta-analysis), etc.
if doverbose
disp('Setting up fmridisplay objects');
% disp('This takes a lot of memory, and can hang if you have too little.');
end
[opath, ofname, oext] = fileparts(overlay);
if isempty(opath)
% check for file in matlab path
o2 = fmridisplay('overlay', which(overlay));
else
% complete path specified
o2 = fmridisplay('overlay',overlay);
end
% You can customize these and run them from the command line
switch montagetype
case {'blobcenters', 'regioncenters'}
%Make a series of montages at center of each region and add blobs to that:
if ~exist('cl', 'var'), error('Must enter region object to use blobcenters option.'); end
xyz = cat(1, cl.mm_center);
% onerowstr = [];
% if length(cl) < 20, onerowstr = 'onerow'; end
orientation = 'axial';
% Make a grid - determine subplots
nr = floor(sqrt(length(cl)));
nc = ceil(length(cl) ./ nr);
[~, axh] = create_figure('fmridisplay_regioncenters', nr, nc, false, true);
set(axh,'Visible','off'); % turn off axis grid for all axes
for i = 1:length(cl)
%axh(i) = subplot(nr, nc, i);
%axes(axh(i))
if i == 1
[o2, dat] = montage(o2, orientation, 'wh_slice', xyz(i, :), 'onerow', 'existing_axes', axh(i), 'existing_figure', 'noverbose');
else
o2 = montage(o2, 'volume_data', dat, orientation, 'wh_slice', xyz(i, :), 'onerow', 'existing_axes', axh(i), 'existing_figure', 'noverbose');
end
end
wh_montages = 1:length(cl);
case 'compact'
% The default
% saggital
axh1 = axes('Position', [-0.02 0.4 .17 .17]);
[o2, dat] = montage(o2, 'saggital', 'wh_slice', [-4 0 0], 'onerow', 'noverbose', 'existing_axes', axh1);
text(50, -50, 'left');
drawnow
axh2 = axes('Position', [-0.02 0.6 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'saggital', 'wh_slice', [4 0 0], 'onerow', 'noverbose', 'existing_axes', axh2);
text(50, -50, 'right');
drawnow
% sagg center
axh3 = axes('Position', [.08 0.5 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'saggital', 'wh_slice', [0 0 0], 'onerow', 'noverbose', 'existing_axes', axh3);
drawnow;
o2.montage{3}.slice_mm_coords;
% axial bottom
axh4 = axes('Position', [.10 0.4 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 12, 'noverbose', 'existing_axes', axh4);
% axial top
axh5 = axes('Position', [.12 0.54 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-46 50], 'onerow', 'spacing', 12, 'noverbose', 'existing_axes', axh5);
wh_montages = [1 2 4 5];
% Lines
axes(o2.montage{3}.axis_handles)
locs = [o2.montage{4}.slice_mm_coords; o2.montage{5}.slice_mm_coords];
for i = 1:length(locs)
%draw_vertical_line(locs(i));
hh(i) = plot( [-105 65], [locs(i) locs(i)], 'b', 'LineWidth', 1);
end
% brighten(.5)
sz = get(0, 'screensize');
set(gcf, 'Color', 'w', 'Position', [sz(3).*.1 sz(4).*.9 sz(3).*.6 sz(4).*.6]);
case 'multirow'
% Notes: for some reason, at least in Matlab 2017a, when you
% use the existing figure the slices scale with the fig
% position in size. When using montage to create a new figure,
% they don't seem to do this. could be enlarge_axes, or ???
if dofigure
slices_fig_h = figure('Color', 'w');
end
ss = get(0, 'ScreenSize');
myheightdivisor = 1.5; % 3/nrows; % controls figure aspect ratio
set(gcf, 'Position', [round(ss(3)/20) round(ss(4)*.5) round(ss(3)*.9) round(ss(4)/myheightdivisor) ])
%shiftvals = [0:.17:nrows]; % more than we need, but works
%shiftvals = [0:.24:nrows]; % more than we need, but works
shiftvals = repmat([0:.24:.75], 1, ceil(nrows/4)); % repeat positions every 4, for adding new figures
for i = 1:nrows
% Can only put 4 on one figure, so create additional
% figures as needed
if i > 4 && rem(i, 4) == 1
figure('Color', 'w');
set(gcf, 'Position', [round(i*ss(3)/20) round(ss(4)*.5) round(ss(3)*.9) round(ss(4)/myheightdivisor) ])
end
% saggital
axh = axes('Position', [-0.02 .75-shiftvals(i) .17 .17]); % [-0.02 0.15+shiftvals(i) .17 .17]);
axh(2) = axes('Position', [.022 .854-shiftvals(i) .17 .17]);
if i == 1
[o2, dat] = montage(o2, 'saggital', 'slice_range', [-2 2], 'spacing', 4, 'onerow', 'noverbose', 'existing_axes', axh);
else
o2 = montage(o2, 'volume_data', dat, 'saggital', 'slice_range', [-2 2], 'spacing', 4, 'onerow', 'noverbose', 'existing_axes', axh);
end
drawnow
% axial
axh = axes('Position', [.022 0.8-shiftvals(i) .17 .17]); % [.015 0.15+shiftvals(i) .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-32 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
drawnow
end
case 'full'
% saggital
[o2, dat] = montage(o2, 'saggital', 'wh_slice', xyz, 'onerow', 'noverbose');
shift_axes(-0.02, -0.04);
% coronal
axh = axes('Position', [-0.02 0.37 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'coronal', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
% axial
axh = axes('Position', [-0.02 0.19 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
axh = axes('Position', [-0.02 0.01 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-44 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
allaxh = findobj(gcf, 'Type', 'axes');
disp(length(allaxh));
for i = 1:(length(allaxh)-36)
pos1 = get(allaxh(i), 'Position');
pos1(1) = pos1(1) - 0.03;
set(allaxh(i), 'Position', pos1);
end
% surface
o2 = surface(o2, 'axes', [0.1 0.74 .25 .25], 'direction', 'hires left', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.3 0.74 .25 .25], 'direction', 'hires right', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.5 0.74 .25 .25], 'direction', 'hires left', 'orientation', 'lateral');
o2 = surface(o2, 'axes', [0.7 0.74 .25 .25], 'direction', 'hires right', 'orientation', 'lateral');
wh_montages = [1 2 3 4];
wh_surfaces = [1 2 3 4];
case 'compact2' % creates a new figure
%subplot(2, 1, 1);
[o2, dat] = montage(o2, 'axial', 'slice_range', [-32 50], 'onerow', 'spacing', 8, 'noverbose');
% shift all axes down and right
allaxh = o2.montage{1}.axis_handles;
for i = 1:length(allaxh)
pos1 = get(allaxh(i), 'Position');
pos1(2) = pos1(2) - 0.08;
pos1(1) = pos1(1) + 0.03;
% enlarge a bit
pos1(3:4) = pos1(3:4) + .02;
set(allaxh(i), 'Position', pos1);
end
enlarge_axes(gcf, 1);
%axh = axes('Position', [0.0 0.08 .15 1]);
axh = axes('Position', [-0.02 .75-.3 .17 .17]); % [-0.02 0.15+shiftvals(i) .17 .17]);
axh(2) = axes('Position', [.022 .854-.3 .17 .17]);
%o2 = montage(o2, 'saggital', 'wh_slice', [0 0 0], 'existing_axes', axh, 'noverbose');
o2 = montage(o2, 'volume_data', dat, 'saggital', 'slice_range', [-2 2], 'spacing', 4, 'onerow', 'noverbose', 'existing_axes', axh);
%ss = get(0, 'ScreenSize');
%set(gcf, 'Position', [round(ss(3)/12) round(ss(4)*.9) round(ss(3)*.8) round(ss(4)/5.5) ]) % this line messes up the
%images, making them too big and overlapping
wh_montages = [1 2];
brighten(.4)
case 'coronal'
% coronal
o2 = montage(o2, 'coronal', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose');
wh_montages = 1;
case 'saggital'
o2 = montage(o2, 'saggital', 'wh_slice', xyz, 'onerow', 'noverbose');
wh_montages = 1;
case 'allslices'
[o2, dat] = montage(o2, 'saggital', 'wh_slice', xyz, 'onerow', 'noverbose');
shift_axes(-0.02, -0.04);
axh = axes('Position', [-0.02 0.37 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'coronal', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
% axial
axh = axes('Position', [-0.02 0.19 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
wh_montages = [1 2 3];
case 'full hcp'
% saggital
[o2, dat] = montage(o2, 'saggital', 'wh_slice', xyz, 'onerow', 'noverbose');
shift_axes(-0.02, -0.04);
% coronal
axh = axes('Position', [-0.02 0.37 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'coronal', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
% axial
axh = axes('Position', [-0.02 0.19 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
axh = axes('Position', [-0.02 0.01 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-44 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
allaxh = findobj(gcf, 'Type', 'axes');
disp(length(allaxh));
for i = 1:(length(allaxh)-36)
pos1 = get(allaxh(i), 'Position');
pos1(1) = pos1(1) - 0.03;
set(allaxh(i), 'Position', pos1);
end
% surface
o2 = surface(o2, 'axes', [0.1 0.74 .25 .25], 'direction', 'surface left', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.3 0.74 .25 .25], 'direction', 'surface right', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.5 0.74 .25 .25], 'direction', 'surface left', 'orientation', 'lateral');
o2 = surface(o2, 'axes', [0.7 0.74 .25 .25], 'direction', 'surface right', 'orientation', 'lateral');
wh_montages = [1 2 3 4];
wh_surfaces = [1:8];
otherwise error('illegal montage type. choose full or compact.');
end
% wh_montages = [1 2];
else
if doverbose, disp('Using existing fmridisplay object'); end
% Other inputs will be passed into addblobs
existingmons = length(o2.montage);
existingsurs = length(o2.surface);
if doaddmontages
% use same o2, but add montages
switch montagetype
case 'full'
% saggital
[o2, dat] = montage(o2, 'saggital', 'wh_slice', xyz, 'onerow', 'noverbose');
shift_axes(-0.02, -0.04);
% coronal
axh = axes('Position', [-0.02 0.37 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'coronal', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
% axial
axh = axes('Position', [-0.02 0.19 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
axh = axes('Position', [-0.02 0.01 .17 .17]);
o2 = montage(o2, 'volume_data', dat, 'axial', 'slice_range', [-44 50], 'onerow', 'spacing', 8, 'noverbose', 'existing_axes', axh);
allaxh = findobj(gcf, 'Type', 'axes');
disp(length(allaxh));
for i = 1:(length(allaxh)-36)
pos1 = get(allaxh(i), 'Position');
pos1(1) = pos1(1) - 0.03;
set(allaxh(i), 'Position', pos1);
end
% surface
o2 = surface(o2, 'axes', [0.1 0.74 .25 .25], 'direction', 'hires left', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.3 0.74 .25 .25], 'direction', 'hires right', 'orientation', 'medial');
o2 = surface(o2, 'axes', [0.5 0.74 .25 .25], 'direction', 'hires left', 'orientation', 'lateral');
o2 = surface(o2, 'axes', [0.7 0.74 .25 .25], 'direction', 'hires right', 'orientation', 'lateral');
wh_surfaces = existingsurs + [1 2 3 4];
case 'compact'
[o2, dat] = montage(o2, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 6, 'noverbose');
axh = axes('Position', [0.05 0.4 .1 .5]);
o2 = montage(o2, 'volume_data', dat, 'saggital', 'wh_slice', [0 0 0], 'existing_axes', axh, 'noverbose');
case 'compact2'
[o2, dat] = montage(o2, 'axial', 'slice_range', [-40 50], 'onerow', 'spacing', 8, 'noverbose');
enlarge_axes(gcf, 1);
axh = axes('Position', [-0.03 0.15 .2 1]);
o2 = montage(o2, 'volume_data', dat, 'saggital', 'wh_slice', [0 0 0], 'existing_axes', axh, 'noverbose');
% shift all axes down and right
shift_axes(+0.03, -0.10);
%ss = get(0, 'ScreenSize');
%set(gcf, 'Position', [round(ss(3)/12) round(ss(4)*.9) round(ss(3)*.8) round(ss(4)/5.5) ])
otherwise error('illegal montage type. choose full or compact when adding to existing montage set.')
end
wh_montages = existingmons + [1 2];
else
if doremove
o2 = removeblobs(o2);
end
wh_montages = 1:existingmons;
wh_surfaces = 1:existingsurs;
end
end
% Now we can add blobs
% --------------------------------------------
% they are added to all montages by default, but you can specify selected
% montages if you want to as well.
% it's easy to remove them as well:
% o2 = removeblobs(o2);
if doblobs
if exist('wh_surfaces', 'var')
o2 = addblobs(o2, cl, 'splitcolor', splitcolor, 'wh_montages', wh_montages, 'wh_surfaces', wh_surfaces, 'nolegend', varargin{:});
else
o2 = addblobs(o2, cl, 'splitcolor', splitcolor, 'wh_montages', wh_montages, varargin{:});
end
end
if dooutline
o2 = addblobs(o2, cl, 'color', outlinecolor, 'outline', 'wh_montages', wh_montages, 'no_surface');
end
% ------------------------------------------------------
% INLINE FUNCTIONS
% ------------------------------------------------------
function shift_axes(x_offset, y_offset)
% usage: shift_axes(-0.03, -0.18);
% purpose: Shift all axes in the current figure by the given offsets.
%
% input: (x, y) offset relative to each axis's current position
% shift all axes according to offset values
allaxh = findobj(gcf, 'Type', 'axes');
for i = 1:length(allaxh)
pos1 = get(allaxh(i), 'Position');
pos1(2) = pos1(2) + y_offset;
pos1(1) = pos1(1) + x_offset;
set(allaxh(i), 'Position', pos1);
end
end % shift_axes
end % function
|
#include <memory>
#include <executor/configurator.hpp>
#include <executor/application.hpp>
#include <boost/program_options/variables_map.hpp>
namespace executor
{
configurator::configurator(const boost::program_options::variables_map&)
{
app = std::make_unique<executor::whole_application>();
}
configurator::~configurator()
{}
executor::application& configurator::get_application() const
{
return *app;
}
}
|
Harrison was pleasantly surprised when he got a call from Beyoncé, who was working on one of the most anticipated albums of the year. However, things did not turn out according to his plans the following day, as he was late and still suffering the effects of a hangover. When Harrison played the sample to Beyoncé in the studio, the singer initially had doubts about the "sound so full of blaring fanfare"; it seemed too retro, and according to her, no one used horn riffs in the 21st century. Nevertheless, Beyoncé warmed to the sample, much to Harrison's delight, and gave him two hours to write the song while she went out.
|
The normalization of a monomial is the monomial with the same exponent and the normalized coefficient. |
/-
Copyright (c) 2022 Antoine Labelle. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle
-/
import algebra.module.basic
import algebra.module.linear_map
import algebra.monoid_algebra.basic
import linear_algebra.trace
import linear_algebra.dual
import linear_algebra.free_module.basic
/-!
# Monoid representations
This file introduces monoid representations and their characters and defines a few ways to construct
representations.
## Main definitions
* representation.representation
* representation.character
* representation.tprod
* representation.lin_hom
* representation.dual
## Implementation notes
Representations of a monoid `G` on a `k`-module `V` are implemented as
homomorphisms `G →* (V →ₗ[k] V)`.
-/
open monoid_algebra (lift) (of)
open linear_map
section
variables (k G V : Type*) [comm_semiring k] [monoid G] [add_comm_monoid V] [module k V]
/--
A representation of `G` on the `k`-module `V` is an homomorphism `G →* (V →ₗ[k] V)`.
-/
abbreviation representation := G →* (V →ₗ[k] V)
end
namespace representation
section trivial
variables {k G V : Type*} [comm_semiring k] [monoid G] [add_comm_monoid V] [module k V]
/--
The trivial representation of `G` on the one-dimensional module `k`.
-/
def trivial : representation k G k := 1
@[simp]
lemma trivial_def (g : G) (v : k) : trivial g v = v := rfl
end trivial
section monoid_algebra
variables {k G V : Type*} [comm_semiring k] [monoid G] [add_comm_monoid V] [module k V]
variables (ρ : representation k G V)
/--
A `k`-linear representation of `G` on `V` can be thought of as
an algebra map from `monoid_algebra k G` into the `k`-linear endomorphisms of `V`.
-/
noncomputable def as_algebra_hom : monoid_algebra k G →ₐ[k] (module.End k V) :=
(lift k G _) ρ
lemma as_algebra_hom_def :
as_algebra_hom ρ = (lift k G _) ρ := rfl
@[simp]
lemma as_algebra_hom_single (g : G):
(as_algebra_hom ρ (finsupp.single g 1)) = ρ g :=
by simp only [as_algebra_hom_def, monoid_algebra.lift_single, one_smul]
lemma as_algebra_hom_of (g : G):
(as_algebra_hom ρ (of k G g)) = ρ g :=
by simp only [monoid_algebra.of_apply, as_algebra_hom_single]
/--
A `k`-linear representation of `G` on `V` can be thought of as
a module over `monoid_algebra k G`.
-/
noncomputable def as_module : module (monoid_algebra k G) V :=
module.comp_hom V (as_algebra_hom ρ).to_ring_hom
end monoid_algebra
section group
variables {k G V : Type*} [comm_semiring k] [group G] [add_comm_monoid V] [module k V]
variables (ρ : representation k G V)
/--
When `G` is a group, a `k`-linear representation of `G` on `V` can be thought of as
a group homomorphism from `G` into the invertible `k`-linear endomorphisms of `V`.
-/
def as_group_hom : G →* units (V →ₗ[k] V) :=
monoid_hom.to_hom_units ρ
end group
section tensor_product
variables {k G V W : Type*} [comm_semiring k] [monoid G]
variables [add_comm_monoid V] [module k V] [add_comm_monoid W] [module k W]
variables (ρV : representation k G V) (ρW : representation k G W)
open_locale tensor_product
/--
Given representations of `G` on `V` and `W`, there is a natural representation of `G` on their
tensor product `V ⊗[k] W`.
-/
def tprod : representation k G (V ⊗[k] W) :=
{ to_fun := λ g, tensor_product.map (ρV g) (ρW g),
map_one' := by simp only [map_one, tensor_product.map_one],
map_mul' := λ g h, by simp only [map_mul, tensor_product.map_mul] }
local notation ρV ` ⊗ ` ρW := tprod ρV ρW
@[simp]
lemma tprod_apply (g : G) : (ρV ⊗ ρW) g = tensor_product.map (ρV g) (ρW g) := rfl
end tensor_product
section linear_hom
variables {k G V W : Type*} [comm_semiring k] [group G]
variables [add_comm_monoid V] [module k V] [add_comm_monoid W] [module k W]
variables (ρV : representation k G V) (ρW : representation k G W)
/--
Given representations of `G` on `V` and `W`, there is a natural representation of `G` on the
module `V →ₗ[k] W`, where `G` acts by conjugation.
-/
def lin_hom : representation k G (V →ₗ[k] W) :=
{ to_fun := λ g,
{ to_fun := λ f, (ρW g) ∘ₗ f ∘ₗ (ρV g⁻¹),
map_add' := λ f₁ f₂, by simp_rw [add_comp, comp_add],
map_smul' := λ r f, by simp_rw [ring_hom.id_apply, smul_comp, comp_smul]},
map_one' := linear_map.ext $ λ x,
by simp_rw [coe_mk, inv_one, map_one, one_apply, one_eq_id, comp_id, id_comp],
map_mul' := λ g h, linear_map.ext $ λ x,
by simp_rw [coe_mul, coe_mk, function.comp_apply, mul_inv_rev, map_mul, mul_eq_comp,
comp_assoc ]}
@[simp]
lemma lin_hom_apply (g : G) (f : V →ₗ[k] W) : (lin_hom ρV ρW) g f = (ρW g) ∘ₗ f ∘ₗ (ρV g⁻¹) := rfl
/--
The dual of a representation `ρ` of `G` on a module `V`, given by `(dual ρ) g f = f ∘ₗ (ρ g⁻¹)`,
where `f : module.dual k V`.
-/
def dual : representation k G (module.dual k V) :=
{ to_fun := λ g,
{ to_fun := λ f, f ∘ₗ (ρV g⁻¹),
map_add' := λ f₁ f₂, by simp only [add_comp],
map_smul' := λ r f,
by {ext, simp only [coe_comp, function.comp_app, smul_apply, ring_hom.id_apply]} },
map_one' :=
by {ext, simp only [coe_comp, function.comp_app, map_one, inv_one, coe_mk, one_apply]},
map_mul' := λ g h,
by {ext, simp only [coe_comp, function.comp_app, mul_inv_rev, map_mul, coe_mk, mul_apply]}}
@[simp]
lemma dual_apply (g : G) : (dual ρV) g = module.dual.transpose (ρV g⁻¹) := rfl
lemma dual_tensor_hom_comm (g : G) :
(dual_tensor_hom k V W) ∘ₗ (tensor_product.map (ρV.dual g) (ρW g)) =
(lin_hom ρV ρW) g ∘ₗ (dual_tensor_hom k V W) :=
begin
ext, simp [module.dual.transpose_apply],
end
end linear_hom
end representation
|
lemma IVT: fixes f :: "'a::linear_continuum_topology \<Rightarrow> 'b::linorder_topology" shows "f a \<le> y \<Longrightarrow> y \<le> f b \<Longrightarrow> a \<le> b \<Longrightarrow> (\<forall>x. a \<le> x \<and> x \<le> b \<longrightarrow> isCont f x) \<Longrightarrow> \<exists>x. a \<le> x \<and> x \<le> b \<and> f x = y" |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import logic.basic
/-!
# Nonempty types
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file proves a few extra facts about `nonempty`, which is defined in core Lean.
## Main declarations
* `nonempty.some`: Extracts a witness of nonemptiness using choice. Takes `nonempty α` explicitly.
* `classical.arbitrary`: Extracts a witness of nonemptiness using choice. Takes `nonempty α` as an
instance.
-/
variables {α β : Type*} {γ : α → Type*}
attribute [simp] nonempty_of_inhabited
@[priority 20]
instance has_zero.nonempty [has_zero α] : nonempty α := ⟨0⟩
@[priority 20]
instance has_one.nonempty [has_one α] : nonempty α := ⟨1⟩
lemma exists_true_iff_nonempty {α : Sort*} : (∃a:α, true) ↔ nonempty α :=
iff.intro (λ⟨a, _⟩, ⟨a⟩) (λ⟨a⟩, ⟨a, trivial⟩)
@[simp] lemma nonempty_Prop {p : Prop} : nonempty p ↔ p :=
iff.intro (assume ⟨h⟩, h) (assume h, ⟨h⟩)
lemma not_nonempty_iff_imp_false {α : Sort*} : ¬ nonempty α ↔ α → false :=
⟨λ h a, h ⟨a⟩, λ h ⟨a⟩, h a⟩
@[simp] lemma nonempty_sigma : nonempty (Σa:α, γ a) ↔ (∃a:α, nonempty (γ a)) :=
iff.intro (assume ⟨⟨a, c⟩⟩, ⟨a, ⟨c⟩⟩) (assume ⟨a, ⟨c⟩⟩, ⟨⟨a, c⟩⟩)
@[simp] lemma nonempty_psigma {α} {β : α → Sort*} : nonempty (psigma β) ↔ (∃a:α, nonempty (β a)) :=
iff.intro (assume ⟨⟨a, c⟩⟩, ⟨a, ⟨c⟩⟩) (assume ⟨a, ⟨c⟩⟩, ⟨⟨a, c⟩⟩)
@[simp] lemma nonempty_subtype {α} {p : α → Prop} : nonempty (subtype p) ↔ (∃a:α, p a) :=
iff.intro (assume ⟨⟨a, h⟩⟩, ⟨a, h⟩) (assume ⟨a, h⟩, ⟨⟨a, h⟩⟩)
@[simp] lemma nonempty_prod : nonempty (α × β) ↔ (nonempty α ∧ nonempty β) :=
iff.intro (assume ⟨⟨a, b⟩⟩, ⟨⟨a⟩, ⟨b⟩⟩) (assume ⟨⟨a⟩, ⟨b⟩⟩, ⟨⟨a, b⟩⟩)
@[simp] lemma nonempty_pprod {α β} : nonempty (pprod α β) ↔ (nonempty α ∧ nonempty β) :=
iff.intro (assume ⟨⟨a, b⟩⟩, ⟨⟨a⟩, ⟨b⟩⟩) (assume ⟨⟨a⟩, ⟨b⟩⟩, ⟨⟨a, b⟩⟩)
@[simp] lemma nonempty_sum : nonempty (α ⊕ β) ↔ (nonempty α ∨ nonempty β) :=
iff.intro
(assume ⟨h⟩, match h with sum.inl a := or.inl ⟨a⟩ | sum.inr b := or.inr ⟨b⟩ end)
(assume h, match h with or.inl ⟨a⟩ := ⟨sum.inl a⟩ | or.inr ⟨b⟩ := ⟨sum.inr b⟩ end)
@[simp] lemma nonempty_psum {α β} : nonempty (psum α β) ↔ (nonempty α ∨ nonempty β) :=
iff.intro
(assume ⟨h⟩, match h with psum.inl a := or.inl ⟨a⟩ | psum.inr b := or.inr ⟨b⟩ end)
(assume h, match h with or.inl ⟨a⟩ := ⟨psum.inl a⟩ | or.inr ⟨b⟩ := ⟨psum.inr b⟩ end)
@[simp] lemma nonempty_empty : ¬ nonempty empty :=
assume ⟨h⟩, h.elim
@[simp] lemma nonempty_ulift : nonempty (ulift α) ↔ nonempty α :=
iff.intro (assume ⟨⟨a⟩⟩, ⟨a⟩) (assume ⟨a⟩, ⟨⟨a⟩⟩)
@[simp] lemma nonempty_plift {α} : nonempty (plift α) ↔ nonempty α :=
iff.intro (assume ⟨⟨a⟩⟩, ⟨a⟩) (assume ⟨a⟩, ⟨⟨a⟩⟩)
@[simp] lemma nonempty.forall {α} {p : nonempty α → Prop} : (∀h:nonempty α, p h) ↔ (∀a, p ⟨a⟩) :=
iff.intro (assume h a, h _) (assume h ⟨a⟩, h _)
@[simp] lemma nonempty.exists {α} {p : nonempty α → Prop} : (∃h:nonempty α, p h) ↔ (∃a, p ⟨a⟩) :=
iff.intro (assume ⟨⟨a⟩, h⟩, ⟨a, h⟩) (assume ⟨a, h⟩, ⟨⟨a⟩, h⟩)
/-- Using `classical.choice`, lifts a (`Prop`-valued) `nonempty` instance to a (`Type`-valued)
`inhabited` instance. `classical.inhabited_of_nonempty` already exists, in
`core/init/classical.lean`, but the assumption is not a type class argument,
which makes it unsuitable for some applications. -/
noncomputable def classical.inhabited_of_nonempty' {α} [h : nonempty α] : inhabited α :=
⟨classical.choice h⟩
/-- Using `classical.choice`, extracts a term from a `nonempty` type. -/
@[reducible] protected noncomputable def nonempty.some {α} (h : nonempty α) : α :=
classical.choice h
/-- Using `classical.choice`, extracts a term from a `nonempty` type. -/
@[reducible] protected noncomputable def classical.arbitrary (α) [h : nonempty α] : α :=
classical.choice h
/-- Given `f : α → β`, if `α` is nonempty then `β` is also nonempty.
`nonempty` cannot be a `functor`, because `functor` is restricted to `Type`. -/
lemma nonempty.map {α β} (f : α → β) : nonempty α → nonempty β
| ⟨h⟩ := ⟨f h⟩
protected lemma nonempty.map2 {α β γ : Sort*} (f : α → β → γ) : nonempty α → nonempty β → nonempty γ
| ⟨x⟩ ⟨y⟩ := ⟨f x y⟩
protected lemma nonempty.congr {α β} (f : α → β) (g : β → α) :
nonempty α ↔ nonempty β :=
⟨nonempty.map f, nonempty.map g⟩
lemma nonempty.elim_to_inhabited {α : Sort*} [h : nonempty α] {p : Prop}
(f : inhabited α → p) : p :=
h.elim $ f ∘ inhabited.mk
instance {α β} [h : nonempty α] [h2 : nonempty β] : nonempty (α × β) :=
h.elim $ λ g, h2.elim $ λ g2, ⟨⟨g, g2⟩⟩
instance {ι : Sort*} {α : ι → Sort*} [Π i, nonempty (α i)] : nonempty (Π i, α i) :=
⟨λ _, classical.arbitrary _⟩
lemma classical.nonempty_pi {ι} {α : ι → Sort*} : nonempty (Π i, α i) ↔ ∀ i, nonempty (α i) :=
⟨λ ⟨f⟩ a, ⟨f a⟩, @pi.nonempty _ _⟩
lemma subsingleton_of_not_nonempty {α : Sort*} (h : ¬ nonempty α) : subsingleton α :=
⟨λ x, false.elim $ not_nonempty_iff_imp_false.mp h x⟩
lemma function.surjective.nonempty {α β : Sort*} [h : nonempty β] {f : α → β}
(hf : function.surjective f) :
nonempty α :=
let ⟨y⟩ := h, ⟨x, hx⟩ := hf y in ⟨x⟩
|
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary using (Rel; Setoid; IsEquivalence)
module Magma.Structures
{a ℓ} {A : Set a} -- The underlying set
(_≈_ : Rel A ℓ) -- The underlying equality relation
where
open import Algebra.Core
open import Level using (_⊔_)
open import Data.Product using (_,_; proj₁; proj₂)
open import Algebra.Definitions _≈_
open import Algebra.Structures _≈_
open import Magma.Definitions _≈_
record IsIdempotentMagma (∙ : Op₂ A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
idem : Idempotent ∙
open IsMagma isMagma public
record IsAlternateMagma (∙ : Op₂ A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
alter : Alternative ∙
open IsMagma isMagma public
record IsFlexibleMagma (∙ : Op₂ A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
flex : Flexible ∙
open IsMagma isMagma public
record IsMedialMagma (∙ : Op₂ A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
medial : Medial ∙
open IsMagma isMagma public
record IsSemimedialMagma (∙ : Op₂ A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
semiMedial : Semimedial ∙
open IsMagma isMagma public
record IsLeftUnitalMagma (∙ : Op₂ A) (ε : A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
identity : LeftIdentity ε ∙
open IsMagma isMagma public
record IsRightUnitalMagma (∙ : Op₂ A) (ε : A) : Set (a ⊔ ℓ) where
field
isMagma : IsMagma ∙
identity : RightIdentity ε ∙
open IsMagma isMagma public
|
### University of Washington: Machine Learning and Statistics
# Lecture 1: Regression (linear, errors on variables, outliers)
Andrew Connolly and Stephen Portillo
##### Resources for this notebook include:
- [Textbook](https://press.princeton.edu/books/hardcover/9780691198309/statistics-data-mining-and-machine-learning-in-astronomy) Chapter 8.
- [astroML website](https://www.astroml.org/index.html)
This notebook is developed based on material from A. Connolly, Z. Ivezic, M. Juric, S. Portillo, G. Richards, B. Sipocz, J. VanderPlas, D. Hogg, and many others.
The notebook and associated material are available from [github](https://github.com/uw-astro/astr-598a-win22).
Make sure you are using the latest version of Jupyterlab (>3.0)
> pip install jupyterlab --upgrade
### Installing the latest (v1.0) of astroML from git
> pip install --pre -U astroml
<a id='toc'></a>
## This notebook includes:
[Ordinary least squares method](#ordinaryLSQ)
[Total least squares method](#totalLSQ)
[Dealing with Outliers](#outliers)
### Fitting a Line using a Maximum Likelihood Estimator
Assume the scatter in our measurements (the residuals) is generated by a Gaussian process, i.e.:
>$ y_i = m x_i + b + r_i $
where $r_i$ is drawn from $N(0, \sigma)$. Here, $\sigma$ is the measurement error.
Let us compute the likelihood. First, we ask what is the probability $p(y_i|x_i, M(m, b), \sigma)$ that a particular point $y_i$ would be measured ($M$ is our model). It is just the normal distribution:
>$ p(y_i|x_i, M(m, b), \sigma) = N(y_i - M(x_i)|\sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp \left( - \frac{(y_i - M(x_i))^2}{2 \sigma^2} \right) $.
We can write down the $\ln L$
>$ \ln L(m, b) = constant - \frac{1}{2 \sigma^2} \sum_{i=1}^N (y_i - M(x_i))^2 $
This is the expression that we now minimize with respect to $m$ and $b$ to find the ML estimators for those parameters. This is equivalent to minimizing the sum of the squared residuals, i.e., a _least-squares method_.
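Before using any library routines, we can check this equivalence directly. The sketch below is self-contained, with made-up "true" values for the demo (not the dataset used later): it minimizes the negative log-likelihood numerically and recovers the slope and intercept.
```python
import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
m_true, b_true, sigma = 2.0, 30.0, 5.0      # demo values, not from the dataset below
x_demo = rng.uniform(0, 100, 50)
y_demo = m_true * x_demo + b_true + rng.normal(0, sigma, size=x_demo.size)

def neg_log_L(params):
    m, b = params
    # up to an additive constant, -ln L is half the sum of squared scaled residuals
    return 0.5 * np.sum((y_demo - (m * x_demo + b)) ** 2) / sigma ** 2

res = optimize.minimize(neg_log_L, x0=[1.0, 0.0])
print(res.x)   # should be close to (2.0, 30.0)
```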
## Ordinary least squares method <a id='ordinaryLSQ'></a>
[Go to top](#toc)
### NOTE: We suppress warnings for the packages (this is not recommended)
```python
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
```
```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib
matplotlib.rc('text', usetex=False)
import seaborn as sns
from scipy import optimize
from astroML.linear_model import TLS_logL
from astroML.plotting import setup_text_plots
from astroML.plotting.mcmc import convert_to_stdev
setup_text_plots(fontsize=8, usetex=True)
# random seed
np.random.seed(42)
```
WARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.
```python
# We'll use the data from Table 1 of Hogg et al. 2010
from astroML.datasets import fetch_hogg2010test
data = fetch_hogg2010test()
data = data[5:] # no outliers (the first 5 points are outliers, discussed later)
x = data['x']
y = data['y']
sigma_x = data['sigma_x']
sigma_y = data['sigma_y']
rho_xy = data['rho_xy']
y_obs = y
```
```python
# Plot the data with y error bars
plt.errorbar(x, y, yerr=sigma_y, fmt=".k", capsize=0)
plt.xlabel('x', fontsize=18)
plt.ylabel('y', fontsize=18)
plt.xlim(0, 300)
plt.ylim(100, 600)
```
### We have data $y(x)$ and we want to fit this model (i.e. we want to obtain m and b):
$$\mathbf{y} = m \, \mathbf{x} + b$$
For this problem the maximum likelihood and full posterior probability distribution (under infinitely broad priors) for the slope and intercept of the line are known analytically. The analytic result for the posterior probability distribution is a 2-d Gaussian with mean
$$\mathbf{w} = \left(\begin{array}{c}
m \\ b
\end{array}\right) = (\mathbf{A}^\mathrm{T}\,C^{-1}\mathbf{A})^{-1} \, \mathbf{A}^\mathrm{T}\,C^{-1}\,\mathbf{y}$$
and covariance matrix
$$\mathbf{V} = (\mathbf{A}^\mathrm{T}\,C^{-1}\mathbf{A})^{-1}$$
where
$$\mathbf{y} = \left(\begin{array}{c}
y_1 \\ y_2 \\ \vdots \\ y_N
\end{array}\right) \quad , \quad \mathbf{A} = \left(\begin{array}{cc}
x_1 & 1 \\ x_2 & 1 \\ \vdots & \vdots \\ x_N & 1
\end{array}\right) \quad,\, \mathrm{and} \quad
\mathbf{C} = \left(\begin{array}{cccc}
\sigma_1^2 & 0 & \cdots & 0 \\
0 & \sigma_2^2 & \cdots & 0 \\
&&\ddots& \\
0 & 0 & \cdots & \sigma_N^2
\end{array}\right)$$
Sometimes we call $A$ the design matrix. There are various functions in Python (and astroML/scikit-learn) for computing this but let's do it explicitly and step by step. With numpy, it only takes a few lines of code - here it is:
```python
A = np.vander(x, 2) # Take a look at the documentation to see what this function does!
# https://numpy.org/doc/stable/reference/generated/numpy.vander.html
ATA = np.dot(A.T, A / sigma_y[:, None]**2)
w = np.linalg.solve(ATA, np.dot(A.T, y / sigma_y**2))
V = np.linalg.inv(ATA)
```
Let's take a look and see what this prediction looks like.
To do this, we'll sample 99 slopes and intercepts from this 2-D Gaussian and overplot them on the data.
```python
plt.errorbar(x, y, yerr=sigma_y, fmt=".k", capsize=0)
xGrid = np.linspace(40, 240)
# note that we are drawing 99 lines here, with m and b randomly sampled from w and V
for m, b in np.random.multivariate_normal(w, V, size=99):
plt.plot(xGrid, m*xGrid + b, "g", alpha=0.02)
plt.xlim(0, 250)
plt.ylim(100, 600)
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
```
```python
# let's visualize the covariance between m and b
a = np.random.multivariate_normal(w, V, size=2400)
plt.scatter(a[:,0], a[:,1], alpha=0.1)
plt.xlabel('m', fontsize=16)
plt.ylabel('b', fontsize=16)
```
### Another approach: the probabilistic model
In order to perform posterior inference on a model and dataset, we need a function that computes the value of the posterior probability given a proposed setting of the parameters of the model. For reasons that will become clear below, we actually only need to return a value that is *proportional* to the probability.
The posterior probability for parameters $\mathbf{w} = (m,\,b)$ conditioned on a dataset $\mathbf{y}$ is given by
$$p(\mathbf{w} \,|\, \mathbf{y}) = \frac{p(\mathbf{y} \,|\, \mathbf{w}) \, p(\mathbf{w})}{p(\mathbf{y})}$$
where $p(\mathbf{y} \,|\, \mathbf{w})$ is the *likelihood* and $p(\mathbf{w})$ is the *prior*. For this example, we're modeling the likelihood by assuming that the datapoints are independent with known Gaussian uncertainties $\sigma_n$. This specifies a likelihood function:
$$p(\mathbf{y} \,|\, \mathbf{w}) = \prod_{n=1}^N \frac{1}{\sqrt{2\,\pi\,\sigma_n^2}} \,
\exp \left(-\frac{[y_n - f_\mathbf{w}(x_n)]^2}{2\,\sigma_n^2}\right)$$
where $f_\mathbf{w}(x) = m\,x + b$ is the linear model.
For numerical reasons, we will actually want to compute the logarithm of the likelihood. In this case, this becomes:
$$\ln p(\mathbf{y} \,|\, \mathbf{w}) = -\frac{1}{2}\sum_{n=1}^N \frac{[y_n - f_\mathbf{w}(x_n)]^2}{\sigma_n^2} + \mathrm{constant} \quad.$$
By maximizing $p(\mathbf{y} \,|\, \mathbf{w})$, we obtain posterior probability distributions for $m$ and $b$.
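As a minimal sketch, this log-likelihood (and, under an improper flat prior, the log-posterior up to a constant) can be written directly in terms of the arrays `x`, `y`, `sigma_y` loaded above:
```python
def log_likelihood(w, x, y, sigma_y):
    m, b = w
    # Gaussian log-likelihood, dropping the additive constant
    return -0.5 * np.sum(((y - (m * x + b)) / sigma_y) ** 2)

def log_posterior(w, x, y, sigma_y):
    # with a flat prior, the posterior is proportional to the likelihood
    return log_likelihood(w, x, y, sigma_y)
```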
### Using astroML (or scikit-learn) to estimate the MLE for the parameters
We use a standardized form for the regression
- Define the regression model (clf = LinearRegression())
- Fit the model (clf.fit)
- Predict the values given the model (clf.predict)
```python
from astroML.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(x[:, None], y, sigma_y)
y_fit = clf.predict(x[:, None])
plt.errorbar(x, y, yerr=sigma_y, fmt=".k", capsize=0)
plt.plot(x, y_fit, "g", alpha=1)
plt.plot(xGrid, w[0]*xGrid + w[1], "r", alpha=0.5)
plt.xlim(0, 250)
plt.ylim(100, 600)
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
```
## Total Least Squares regression <a id='totalLSQ'></a>
[Go to top](#toc)
What do we do if we have errors on both the dependent and independent axes (or if the x-axis errors are larger)?
### Exercise - using the approaches given above estimate the slope and intercept when the errors in x dominate
```python
# read data and select training and cross validation sample
cal_x,cal_dx,cal_y,cal_dy = np.loadtxt("data/X_Y.clean.txt", skiprows=1, unpack=True)
cal_dxy=np.zeros(len(x))
# define classifiers
n_constraints = [2,]
# set plots
fig = plt.figure(figsize=(10, 20))
ax = fig.add_subplot(211)
plt.errorbar(cal_x, cal_y, yerr=cal_dy, xerr=cal_dx, fmt=".k", capsize=0)
ax.scatter(cal_x, cal_y,c='black')
ax.set_xlim(cal_x.min()/1.05,cal_x.max()*1.05)
ax.set_ylim(cal_y.min()*1.05,cal_y.max()/1.05)
ax.set_xlabel(r"$\log \Sigma_{H_2}\ (M_\odot\ pc^{-2})$")
ax.set_ylabel(r"$\log \Sigma_{SFR}\ (M_\odot\ yr^{-1}\ kpc^{-2})$")
```
### LSQ with uncertainties in both the dependent and independent axes
In almost all real-world applications, the assumption that one
variable (the independent variable) is essentially free
from any uncertainty is not valid. Both the dependent and independent
variables will have measurement uncertainties (e.g. Tully-Fisher relations).
The impact of errors on the "independent" variables is a bias in the
derived regression coefficients. This is straightforward to show if
we consider a linear model with a dependent and independent
variable, $y^*$ and $x^*$. We can write the objective function as
before,
\begin{equation}
y^*_i=\theta_0 + \theta_1x^*_{i}.
\end{equation}
Now let us assume that we observe
$y$ and $x$, which are noisy
representations of $y^*$ and $x^*$, i.e.,
\begin{eqnarray}
x_i&=&x^*_i + \delta_i,\\
y_i &=& y^*_i + \epsilon_i,
\end{eqnarray}
with $\delta$ and $\epsilon$ centered normal distributions.
Substituting for $x^*_i$ we get
\begin{equation}
y_i = \theta_0 + \theta_1 (x_i - \delta_i) + \epsilon_i.
\end{equation}
The uncertainty in $x$ is now part of the regression equation and
scales with the regression coefficients (biasing the regression
coefficient). This problem is known in the statistics literature
as *total least squares* and belongs to the class of "errors-in-variables"
problems.
For a detailed discussion of the solution to this problem, which is essentially
maximum likelihood estimation, please see
Chapter 8 in the reference book. Two other recommended references are
Hogg et al. (2010, astro-ph/1008.4686) and Kelly et al. (2011, astro-ph/1112.1745).
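The bias is easy to see in a small simulation (a toy example of ours, not the Hogg et al. data): when the measured $x$ values carry substantial noise, the ordinary LSQ slope is systematically biased low (the classical "attenuation" effect).
```python
rng = np.random.default_rng(1)
theta1_true = 2.0
x_star = rng.uniform(0, 10, 1000)                                # true (unobserved) x
y_sim = theta1_true * x_star + rng.normal(0, 0.5, x_star.size)   # small error in y
x_sim = x_star + rng.normal(0, 2.0, x_star.size)                 # large error in x

# ordinary LSQ slope = cov(x, y) / var(x); the extra variance in x_sim biases it low
slope_lsq = np.cov(x_sim, y_sim)[0, 1] / np.var(x_sim, ddof=1)
print(slope_lsq)   # noticeably below 2.0
```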
How can we account for the measurement uncertainties in both the
independent and dependent variables? Assuming they are Gaussian
> $ \Sigma_i = \left[
\begin{array}{cc}
\sigma_{x_i}^2 & \sigma_{xy_i} \\
\sigma_{xy_i} & \sigma_{y_i}^2
\end{array}
\right]
$
If we go back to the start of the lecture and write the equation for a line in terms of its normal vector
> $ {\bf n} = \left [
\begin{array}{c}
-\sin \alpha\\
\cos \alpha\\
\end{array}
\right ] $
with $\theta_1 = \tan(\alpha)$, where $\alpha$ is the angle
between the line and the $x$-axis.
The covariance matrix projects onto
this space as
>$ S_i^2 = {\bf n}^T \Sigma_i {\bf n} $
and the distance between a point and the line is
>$\Delta_i = {\bf n}^T z_i - \theta_0 \cos \alpha, $
where $z_i = (x_i, y_i)$ represents the data point.
The log-likelihood is then
>$ \ln L = - \sum_i \frac{\Delta_i^2}{2 S_i^2}$
and we can maximize the likelihood with a brute-force search or through MCMC.
<b> THINKING OF A PROJECT: IT WOULD BE A REALLY INTERESTING PROBLEM TO EXTEND AND GENERALIZE THIS - IT WOULD GET A LOT OF CITATIONS!</b>
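Before handing things over to astroML's `TLS_logL`, here is a direct (unoptimized) transcription of the log-likelihood just derived, as a sketch — it computes the same quantity from ${\bf n}$, $\Sigma_i$, $\Delta_i$ and $S_i$ explicitly:
```python
def tls_logL_direct(alpha, theta0, x, y, sigma_x, sigma_y, rho_xy):
    # normal vector to a line making angle alpha with the x-axis
    n = np.array([-np.sin(alpha), np.cos(alpha)])
    logL = 0.0
    for i in range(len(x)):
        Sigma_i = np.array(
            [[sigma_x[i] ** 2, rho_xy[i] * sigma_x[i] * sigma_y[i]],
             [rho_xy[i] * sigma_x[i] * sigma_y[i], sigma_y[i] ** 2]])
        S2 = n @ Sigma_i @ n                               # S_i^2 = n^T Sigma_i n
        delta = n @ np.array([x[i], y[i]]) - theta0 * np.cos(alpha)
        logL -= 0.5 * delta ** 2 / S2
    return logL
```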
```python
# Define some convenience functions
# translate between typical slope-intercept representation,
# and the normal vector representation
def get_m_b(beta):
b = np.dot(beta, beta) / beta[1]
m = -beta[0] / beta[1]
return m, b
def get_beta(m, b):
denom = (1 + m * m)
return np.array([-b * m / denom, b / denom])
# compute the ellipse principal axes and rotation from covariance
def get_principal(sigma_x, sigma_y, rho_xy):
sigma_xy2 = rho_xy * sigma_x * sigma_y
alpha = 0.5 * np.arctan2(2 * sigma_xy2,
(sigma_x ** 2 - sigma_y ** 2))
tmp1 = 0.5 * (sigma_x ** 2 + sigma_y ** 2)
tmp2 = np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 + sigma_xy2 ** 2)
return np.sqrt(tmp1 + tmp2), np.sqrt(tmp1 - tmp2), alpha
# plot ellipses
def plot_ellipses(x, y, sigma_x, sigma_y, rho_xy, factor=2, ax=None):
if ax is None:
ax = plt.gca()
sigma1, sigma2, alpha = get_principal(sigma_x, sigma_y, rho_xy)
for i in range(len(x)):
ax.add_patch(Ellipse((x[i], y[i]),
factor * sigma1[i], factor * sigma2[i],
alpha[i] * 180. / np.pi,
fc='none', ec='k'))
# Find best-fit parameters
def get_best_fit(x, y, sigma_x, sigma_y, rho_xy):
X = np.vstack((x, y)).T
dX = np.zeros((len(x), 2, 2))
dX[:, 0, 0] = sigma_x ** 2
dX[:, 1, 1] = sigma_y ** 2
dX[:, 0, 1] = dX[:, 1, 0] = rho_xy * sigma_x * sigma_y
# note: TLS_logL was imported from astroML.linear_model
min_func = lambda beta: -TLS_logL(beta, X, dX)
# this is optimization, not MCMC
return optimize.fmin(min_func, x0=[-1, 1])
# plot results
def plot_best_fit(x, y, sigma_x, sigma_y, rho_xy, beta_fit, mLSQ, bLSQ):
fig = plt.figure(figsize=(8, 5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.25, bottom=0.15, top=0.9)
ax = fig.add_subplot(121)
ax.scatter(x, y, c='k', s=9)
plot_ellipses(x, y, sigma_x, sigma_y, rho_xy, ax=ax)
# plot the best-fit line
m_fit, b_fit = get_m_b(beta_fit)
x_fit = np.linspace(0, 300, 10)
ax.plot(x_fit, m_fit * x_fit + b_fit, '-k')
ax.plot(x_fit, mLSQ * x_fit + bLSQ, '--', c='red')
ax.set_xlim(40, 250)
ax.set_ylim(100, 600)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
# plot the likelihood contour in m, b
ax = fig.add_subplot(122)
m = np.linspace(1.7, 2.8, 100)
b = np.linspace(-60, 110, 100)
logL = np.zeros((len(m), len(b)))
X = np.vstack((x, y)).T
dX = np.zeros((len(x), 2, 2))
dX[:, 0, 0] = sigma_x ** 2
dX[:, 1, 1] = sigma_y ** 2
dX[:, 0, 1] = dX[:, 1, 0] = rho_xy * sigma_x * sigma_y
for i in range(len(m)):
for j in range(len(b)):
logL[i, j] = TLS_logL(get_beta(m[i], b[j]), X, dX)
ax.contour(m, b, convert_to_stdev(logL.T),
levels=(0.683, 0.955, 0.997), colors='k')
ax.plot([-1000, 1000], [bLSQ, bLSQ], ':k', lw=1, c='red')
ax.plot([mLSQ, mLSQ], [-1000, 1000], ':k', lw=1, c='red')
ax.set_xlabel('slope')
ax.set_ylabel('intercept')
ax.set_xlim(1.7, 2.8)
ax.set_ylim(-60, 110)
plt.show()
```
```python
# for comparison, let's get the standard LSQ solution - no errors
mux = np.mean(x)
muy = np.mean(y)
mLSQ = np.sum(x*y-mux*muy)/np.sum((x-mux)**2)
bLSQ = muy - mLSQ*mux
print('mLSQ=', mLSQ)
print('bLSQ=', bLSQ)
```
mLSQ= 2.191027996426704
bLSQ= 32.00396939102313
```python
## let's do only errors in y - this is standard LSQ
# Find best-fit parameters
err_x = 0*sigma_x
err_y = sigma_y
rho = 0*rho_xy
best_fit1 = get_best_fit(x, y, err_x, err_y, rho)
m_fit1, b_fit1 = get_m_b(best_fit1)
print('m=', m_fit1)
print('b=', b_fit1)
# plot best fit
plot_best_fit(x, y, err_x, err_y, rho, best_fit1, mLSQ, bLSQ)
```
```python
## now only errors in x; note that we could switch the axes and use standard LSQ
# Find best-fit parameters
err_x = sigma_x
err_y = 0*sigma_y
rho = rho_xy
best_fit2 = get_best_fit(x, y, err_x, err_y, rho)
m_fit2, b_fit2 = get_m_b(best_fit2)
print('m=', m_fit2)
print('b=', b_fit2)
# plot best fit
plot_best_fit(x, y, err_x, err_y, rho, best_fit2, mLSQ, bLSQ)
```
```python
## errors in x and y, but without covariance
# Find best-fit parameters
err_x = sigma_x
err_y = sigma_y
rho = 0*rho_xy
best_fit3 = get_best_fit(x, y, err_x, err_y, rho)
m_fit3, b_fit3 = get_m_b(best_fit3)
print('m=', m_fit3)
print('b=', b_fit3)
# plot best fit
plot_best_fit(x, y, err_x, err_y, rho, best_fit3, mLSQ, bLSQ)
```
```python
## errors in x and y with covariance
# Find best-fit parameters
err_x = sigma_x
err_y = sigma_y
rho = rho_xy
best_fit4 = get_best_fit(x, y, err_x, err_y, rho)
m_fit4, b_fit4 = get_m_b(best_fit4)
print('m=', m_fit4)
print('b=', b_fit4)
# plot best fit
plot_best_fit(x, y, err_x, err_y, rho, best_fit4, mLSQ, bLSQ)
```
```python
# compare all 4 versions and LSQ
print('m=', m_fit1, m_fit2, m_fit3, m_fit4, mLSQ)
```
m= 2.299303276704902 2.596107117541477 2.3849344880407286 2.248785375229947 2.191027996426704
### Using astroML
## Data sets used in the examples below
Use simulation data from [Kelly 2007](https://iopscience.iop.org/article/10.1086/519947/pdf).
This simulator, called `simulation_kelly` is available from `astroML.datasets`.
The function returns the $\xi_i$, $\eta_i$, $x_i$, $y_i$, $\epsilon_{x,i}$, $\epsilon_{y,i}$ and
the input regression coefficients $\alpha$ and $\beta$ and intrinsic scatter $\epsilon$.
A total of ``size`` values are generated; measurement errors are scaled by the parameters ``scalex`` and
``scaley``, following section 7.1 in [Kelly 2007](https://iopscience.iop.org/article/10.1086/519947/pdf).
```python
from astroML.datasets import simulation_kelly
ksi, eta, xi, yi, xi_error, yi_error, alpha_in, beta_in = simulation_kelly(size=100, scalex=0.2, scaley=0.2,
alpha=2, beta=1, epsilon=(0, 0.75))
ksi_0 = np.arange(np.min(xi[0]) - 0.5, np.max(xi[0]) + 0.5)
eta_0 = alpha_in + ksi_0 * beta_in
figure = plt.figure(figsize=(10, 8))
figure.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.1, wspace=0.15)
ax = figure.add_subplot(111)
ax.scatter(xi[0], yi, alpha=0.5)
ax.errorbar(xi[0], yi, xerr=xi_error[0], yerr=yi_error, alpha=0.3, ls='')
ax.set_xlabel(r'$\xi$')
ax.set_ylabel(r'$\eta$')
ax.plot(ksi_0, eta_0, color='orange')
```
### Linear regression with uncertainties in both dependent and independent axes
The class ``LinearRegressionwithErrors`` can be used to take into account measurement errors in
both the dependent and independent variables.
The implementation relies on the [PyMC3](https://docs.pymc.io/) and [Theano](http://deeplearning.net/software/theano/)
packages.
Note: The first initialization of the fitter is expected to take a couple of minutes, as ``Theano``
performs some code compilation for the underlying model. Sampling for consecutive runs is expected
to start up significantly faster.
```python
from astroML.linear_model import LinearRegressionwithErrors
from astroML.plotting import plot_regressions, plot_regression_from_trace
```
```python
linreg_xy_err = LinearRegressionwithErrors()
linreg_xy_err.fit(xi, yi, yi_error, xi_error)
```
```python
plot_regressions(ksi, eta, xi[0], yi, xi_error[0], yi_error, add_regression_lines=True, alpha_in=alpha_in, beta_in=beta_in)
plot_regression_from_trace(linreg_xy_err, (xi, yi, xi_error, yi_error), ax=plt.gca(), chains=20)
```
## Multivariate regression
For multivariate data (where we fit a hyperplane rather than a straight line) we simply extend the description
of the regression function to multiple dimensions. The formalism used in the previous example becomes:
$$ \eta_i = \alpha + \beta^T \xi_i + \epsilon_i $$
where both $\beta^T$ and $\xi_i$ are now N-element vectors.
### Generate a dataset:
We use the same function as above to generate 100 datapoints in 2 dimensions. Note that the size of the ``beta``
parameter needs to match the dimensions.
```python
ksi2, eta2, xi2, yi2, xi_error2, yi_error2, alpha_in2, beta_in2 = simulation_kelly(size=100, scalex=0.2, scaley=0.2,
alpha=2, beta=[0.5, 1],
multidim=2)
```
The previously used ``LinearRegressionwithErrors`` class can be used with multidimensional data, so the fitting is done in exactly the same way as before:
```python
linreg_xy_err2 = LinearRegressionwithErrors()
linreg_xy_err2.fit(xi2, yi2, yi_error2, xi_error2)
```
There are several ways to explore the fits; in the following we show a few ways to plot this dataset. As the fitted hyperplane in this example was 2D, we can use a 3D plot to show both the fit and the underlying regression used to generate the data. In this 3D plot, the blue plane is the true regression, while the red plane is our fit, which takes into account the errors on the data points.
Other plotting libraries can also be used to e.g. create pairplots of the parameters (e.g. Arviz' ``plot_pair`` function, or Seaborn's ``jointplot``).
```python
x0 = np.linspace(np.min(xi2[0])-0.5, np.max(xi2[0])+0.5, 50)
x1 = np.linspace(np.min(xi2[1])-0.5, np.max(xi2[1])+0.5, 50)
x0, x1 = np.meshgrid(x0, x1)
y0 = alpha_in2 + x0 * beta_in2[0] + x1 * beta_in2[1]
y_fitted = linreg_xy_err2.coef_[0] + x0 * linreg_xy_err2.coef_[1] + x1 * linreg_xy_err2.coef_[2]
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('xi2[0] ')
ax.set_ylabel('xi2[1] ')
ax.set_zlabel('yi2 ')
ax.scatter(xi2[0], xi2[1], yi2, s=20)
ax.plot_surface(x0, x1, y0, alpha=0.2, facecolor='blue', label='True regression')
ax.plot_surface(x0, x1, y_fitted, alpha=0.2, facecolor='red', label='Fitted regression')
```
### Sampler statistics and traceplots
The PyMC3 trace is available in the ``.trace`` attribute of the class instances (e.g. ``linreg_xy_err2.trace`` in the previous example) once we have performed the fit. It can then be used to check for convergence and to generate statistics for the samples. We recommend the tools provided by PyMC3, e.g. ``traceplot()``, which takes the trace object as its input. Note that in the multidimensional case there will be multiple ``ksi`` and ``slope`` traces, matching the dimensionality of the input ``xi`` data; in the ``traceplot`` they are plotted in different colours.
```python
import pymc3 as pm
matplotlib.rc('text', usetex=False)
pm.traceplot(linreg_xy_err2.trace)
```
### CONCLUSION
Beware of your measurement uncertainties, especially if both variables have them!
## Dealing with outliers <a id='outliers'></a>
[Go to top](#toc)
The $L_2$ norm ($\sum_{i=1}^N (y_i - M(x_i))^2$) is sensitive to outliers (it squares the residuals). A number of approaches exist for handling outliers. These include "sigma-clipping", using interquartile ranges, taking the median of solutions of subsets of the data, and least trimmed squares (which searches for the subset of $K$ points that minimizes $\sum_{i=1}^K (y_i - M(x_i))^2$).
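As an illustration of the first of these approaches, here is a minimal sigma-clipping sketch (a helper of our own, not an astroML routine): fit, drop points more than `nsigma` scaled residuals from the fit, and repeat until the set of retained points stops changing.
```python
def sigma_clip_fit(x, y, dy, nsigma=3, max_iter=10):
    mask = np.ones(len(x), dtype=bool)
    for _ in range(max_iter):
        # weighted straight-line fit to the currently retained points
        m, b = np.polyfit(x[mask], y[mask], 1, w=1 / dy[mask])
        resid = np.abs(y - (m * x + b)) / dy
        new_mask = resid < nsigma
        if np.array_equal(new_mask, mask):
            break
        mask = new_mask
    return m, b, mask
```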
We can also change the **loss function** or **likelihood** to reduce the weight of outliers. An example of this is known as the _Huber loss function_
> $
\sum_{i=1}^N e(y_i|y),
$
where
>$
e(t) = \left\{
\begin{array}{ll}
\frac{1}{2} t^2 & \mbox{if} \; |t| \leq c, \\
c|t| - \frac{1}{2} c^2 & \mbox{if} \; |t| > c,
\end{array}
\right.
$
This is continuous and differentiable, and it transitions to an $L_1$ norm ($\sum_{i=1}^N |y_i - M(x_i)|$) for large excursions.
```python
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
#------------------------------------------------------------
# Get data: this includes outliers
data = fetch_hogg2010test()
x = data['x']
y = data['y']
dy = data['sigma_y']
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.errorbar(x[4:], y[4:], dy[4:], fmt='.k', lw=1, ecolor='gray')
ax.errorbar(x[:4], y[:4], dy[:4], fmt='.k', lw=1, ecolor='red')
ax.set_xlim(0, 350)
ax.set_ylim(100, 700)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
```
```python
# Define the standard squared-loss function
def squared_loss(m, b, x, y, dy):
y_fit = m * x + b
return np.sum(((y - y_fit) / dy) ** 2, -1)
# Define the log-likelihood via the Huber loss function
def huber_loss(m, b, x, y, dy, c=2):
y_fit = m * x + b
t = abs((y - y_fit) / dy)
flag = t > c
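    # below c the loss is quadratic (0.5*t**2); above it, -c*(0.5*c - t) = c*t - 0.5*c**2 grows only linearly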
return np.sum((~flag) * (0.5 * t ** 2) - (flag) * c * (0.5 * c - t), -1)
f_squared = lambda beta: squared_loss(beta[0], beta[1], x=x[4:], y=y[4:], dy=dy[4:])
f_squared_outlier = lambda beta: squared_loss(beta[0], beta[1], x=x, y=y, dy=dy)
f_huber = lambda beta: huber_loss(beta[0], beta[1], x=x, y=y, dy=dy, c=1)
#------------------------------------------------------------
# compute the maximum likelihood using the huber loss
beta0 = (2, 30)
beta_squared = optimize.fmin(f_squared, beta0)
beta_squared_outlier = optimize.fmin(f_squared_outlier, beta0)
beta_huber = optimize.fmin(f_huber, beta0)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
x_fit = np.linspace(0, 350, 10)
ax.plot(x_fit, beta_squared[0] * x_fit + beta_squared[1], '--k',
label="squared loss:\n $y=%.2fx + %.1f$" % tuple(beta_squared))
ax.plot(x_fit, beta_squared_outlier[0] * x_fit + beta_squared_outlier[1], '--k', color='red',
label="squared loss with outliers:\n $y=%.2fx + %.1f$" % tuple(beta_squared_outlier))
ax.plot(x_fit, beta_huber[0] * x_fit + beta_huber[1], '-k', color='blue',
label="huber loss:\n $y=%.2fx + %.1f$" % tuple(beta_huber))
ax.legend(loc=4, prop=dict(size=14))
ax.errorbar(x[4:], y[4:], dy[4:], fmt='.k', lw=1, ecolor='gray')
ax.errorbar(x[:4], y[:4], dy[:4], fmt='.k', lw=1, ecolor='red')
ax.set_xlim(0, 350)
ax.set_ylim(100, 700)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
```
### From a Bayesian Perspective
We can assume the data are drawn from two Gaussians error distribution (one for the function and the other for the outliers)
$\begin{eqnarray}
& p(\{y_i\}|\{x_i\}, \{\sigma_i\}, \theta_0, \theta_1, \mu_b, V_b, p_b)
\propto \nonumber\\
& \prod_{i=1}^{N} \bigg[
\frac{1-p_b}{\sqrt{2\pi\sigma_i^2}}
\exp\left(-\frac{(y_i - \theta_1 x_i - \theta_0)^2}
{2 \sigma_i^2}\right)
+ \frac{p_b}{\sqrt{2\pi(V_b + \sigma_i^2)}}
\exp\left(-\frac{(y_i - \mu_b)^2}{2(V_b + \sigma_i^2)}\right)
\bigg].
\end{eqnarray}
$
$V_b$ is the variance of the outlier distribution. If we use MCMC we can marginalize over the nuisance parameters $p_b$, $V_b$, $\mu_b$. We could also calculate the probability that a point is drawn from the outlier or "model" Gaussian.
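One way to use this in practice: after sampling the mixture-with-indicators model, the per-point outlier probability is one minus the posterior mean of its Bernoulli indicator. A sketch, assuming a PyMC3 trace containing `qi` such as `trace2` produced by the third model in the cell below:
```python
# posterior fraction of samples in which each point is "good"
p_good = trace2['qi'].mean(axis=0)
outliers = p_good < 0.32    # flag points that are inliers in fewer than ~1/3 of samples
```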
```python
import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from theano import shared as tshared
import theano.tensor as tt
from astroML.datasets import fetch_hogg2010test
from astroML.plotting.mcmc import convert_to_stdev
# ----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
np.random.seed(0)
# ------------------------------------------------------------
# Get data: this includes outliers. We need to convert them to Theano variables
data = fetch_hogg2010test()
xi = tshared(data['x'])
yi = tshared(data['y'])
dyi = tshared(data['sigma_y'])
size = len(data)
# ----------------------------------------------------------------------
# Define basic linear model
def model(xi, theta, intercept):
slope = np.tan(theta)
return slope * xi + intercept
# ----------------------------------------------------------------------
# First model: no outlier correction
with pm.Model():
# set priors on model gradient and y-intercept
inter = pm.Uniform('inter', -1000, 1000)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)
y = pm.Normal('y', mu=model(xi, theta, inter), sd=dyi, observed=yi)
trace0 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Second model: nuisance variables correcting for outliers
# This is the mixture model given in equation 17 in Hogg et al
def mixture_likelihood(yi, xi):
"""Equation 17 of Hogg 2010"""
sigmab = tt.exp(log_sigmab)
mu = model(xi, theta, inter)
Vi = dyi ** 2
Vb = sigmab ** 2
root2pi = np.sqrt(2 * np.pi)
L_in = (1. / root2pi / dyi * np.exp(-0.5 * (yi - mu) ** 2 / Vi))
L_out = (1. / root2pi / np.sqrt(Vi + Vb)
* np.exp(-0.5 * (yi - Yb) ** 2 / (Vi + Vb)))
return tt.sum(tt.log((1 - Pb) * L_in + Pb * L_out))
with pm.Model():
# uniform prior on Pb, the fraction of bad points
Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)
# uniform prior on Yb, the centroid of the outlier distribution
Yb = pm.Uniform('Yb', -10000, 10000, testval=0)
# uniform prior on log(sigmab), the spread of the outlier distribution
log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)
inter = pm.Uniform('inter', -200, 400)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2, testval=np.pi / 4)
y_mixture = pm.DensityDist('mixturenormal', logp=mixture_likelihood,
observed={'yi': yi, 'xi': xi})
trace1 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Third model: marginalizes over the probability that each point is an outlier.
# define priors on beta = (slope, intercept)
def outlier_likelihood(yi, xi):
"""likelihood for full outlier posterior"""
sigmab = tt.exp(log_sigmab)
mu = model(xi, theta, inter)
Vi = dyi ** 2
Vb = sigmab ** 2
logL_in = -0.5 * tt.sum(qi * (np.log(2 * np.pi * Vi)
+ (yi - mu) ** 2 / Vi))
logL_out = -0.5 * tt.sum((1 - qi) * (np.log(2 * np.pi * (Vi + Vb))
+ (yi - Yb) ** 2 / (Vi + Vb)))
return logL_out + logL_in
with pm.Model():
# uniform prior on Pb, the fraction of bad points
Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)
# uniform prior on Yb, the centroid of the outlier distribution
Yb = pm.Uniform('Yb', -10000, 10000, testval=0)
# uniform prior on log(sigmab), the spread of the outlier distribution
log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)
inter = pm.Uniform('inter', -1000, 1000)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)
# qi is bernoulli distributed
qi = pm.Bernoulli('qi', p=1 - Pb, shape=size)
y_outlier = pm.DensityDist('outliernormal', logp=outlier_likelihood,
observed={'yi': yi, 'xi': xi})
trace2 = pm.sample(draws=5000, tune=1000)
# ------------------------------------------------------------
# plot the data
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.25,
bottom=0.1, top=0.95, hspace=0.2)
# first axes: plot the data
ax1 = fig.add_subplot(221)
ax1.errorbar(data['x'], data['y'], data['sigma_y'], fmt='.k', ecolor='gray', lw=1)
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
#------------------------------------------------------------
# Go through models; compute and plot likelihoods
linestyles = [':', '--', '-']
labels = ['no outlier correction\n(dotted fit)',
'mixture model\n(dashed fit)',
'outlier rejection\n(solid fit)']
x = np.linspace(0, 350, 10)
bins = [(np.linspace(140, 300, 51), np.linspace(0.6, 1.6, 51)),
(np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51)),
(np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51))]
for i, trace in enumerate([trace0, trace1, trace2]):
H2D, bins1, bins2 = np.histogram2d(np.tan(trace['theta']),
trace['inter'], bins=50)
w = np.where(H2D == H2D.max())
# choose the maximum posterior slope and intercept
slope_best = bins1[w[0][0]]
intercept_best = bins2[w[1][0]]
# plot the best-fit line
ax1.plot(x, intercept_best + slope_best * x, linestyles[i], c='k')
# For the model which identifies bad points,
# plot circles around points identified as outliers.
if i == 2:
Pi = trace['qi'].mean(0)
outlier_x = data['x'][Pi < 0.32]
outlier_y = data['y'][Pi < 0.32]
ax1.scatter(outlier_x, outlier_y, lw=1, s=400, alpha=0.5,
facecolors='none', edgecolors='red')
# plot the likelihood contours
ax = plt.subplot(222 + i)
H, xbins, ybins = np.histogram2d(trace['inter'],
np.tan(trace['theta']), bins=bins[i])
H[H == 0] = 1E-16
Nsigma = convert_to_stdev(np.log(H))
ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
0.5 * (ybins[1:] + ybins[:-1]),
Nsigma.T, levels=[0.683, 0.955], colors='black')
ax.set_xlabel('intercept')
ax.set_ylabel('slope')
ax.grid(color='gray')
ax.xaxis.set_major_locator(plt.MultipleLocator(40))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.text(0.96, 0.96, labels[i], ha='right', va='top',
bbox=dict(fc='w', ec='none', alpha=0.5),
transform=ax.transAxes)
ax.set_xlim(bins[i][0][0], bins[i][0][-1])
ax.set_ylim(bins[i][1][0], bins[i][1][-1])
ax1.set_xlim(0, 350)
ax1.set_ylim(100, 700)
plt.show()
```
### Exercise - using the approaches given above estimate the slope and intercept when the errors are in both axes
|
(** Score: 10/10 *)
theory GabrielaLimonta
imports "~~/src/HOL/IMP/Hoare_Sound_Complete" "~~/src/HOL/IMP/VCG"
begin
(* Homework 11.1 *)
abbreviation "xx \<equiv> ''x''" abbreviation "yy \<equiv> ''y''"
abbreviation "aa \<equiv> ''a''" abbreviation "bb \<equiv> ''b''"
definition Cdiff :: com where "Cdiff \<equiv>
bb ::= N 0;;
WHILE (Less (V bb) (V xx)) DO
(yy ::= Plus (V yy) (N (-1));;
bb ::= Plus (V bb) (N 1))"
definition P_Cdiff :: "int \<Rightarrow> int \<Rightarrow> assn" where
"P_Cdiff x y \<equiv> \<lambda>s. s xx = x \<and> s yy = y \<and> 0 \<le> x"
definition Q_Cdiff :: "int \<Rightarrow> int \<Rightarrow> assn" where
"Q_Cdiff x y \<equiv> \<lambda>t. t yy = y - x"
definition iCdiff :: "int \<Rightarrow> int \<Rightarrow> assn" where
"iCdiff x y \<equiv> \<lambda>s. s xx = x \<and> y = s yy + s bb \<and> s bb \<le> x"
definition ACdiff :: "int \<Rightarrow> int \<Rightarrow> acom" where
"ACdiff x y \<equiv>
(bb ::= N 0) ;;
{iCdiff x y} WHILE (Less (V bb) (V xx)) DO
(yy ::= Plus (V yy) (N (-1));;
bb ::= Plus (V bb) (N 1))"
lemma strip_ACdiff: "strip (ACdiff x y) = Cdiff"
unfolding Cdiff_def P_Cdiff_def Q_Cdiff_def iCdiff_def ACdiff_def
by simp
lemma Cdiff_correct: "\<turnstile> {P_Cdiff x y} strip (ACdiff x y) {Q_Cdiff x y}"
unfolding strip_ACdiff[of x y, symmetric]
apply(rule vc_sound')
unfolding Cdiff_def P_Cdiff_def Q_Cdiff_def iCdiff_def ACdiff_def
by auto
end
|
{-# OPTIONS --prop --rewriting #-}
open import Examples.Sorting.Sequential.Comparable
module Examples.Sorting.Sequential.MergeSort.Merge (M : Comparable) where
open Comparable M
open import Examples.Sorting.Sequential.Core M
open import Calf costMonoid
open import Calf.Types.Bool
open import Calf.Types.Nat
open import Calf.Types.List
open import Calf.Types.Eq
open import Calf.Types.Bounded costMonoid
open import Relation.Nullary
open import Relation.Binary.PropositionalEquality as Eq using (_≡_; refl; module ≡-Reasoning)
open import Data.Product using (_×_; _,_; ∃; proj₁; proj₂)
open import Function
open import Data.Nat as Nat using (ℕ; zero; suc; z≤n; s≤s; _+_; _*_)
import Data.Nat.Properties as N
open import Examples.Sorting.Sequential.MergeSort.Split M
merge/clocked : cmp (Π nat λ _ → Π pair λ _ → F (list A))
merge/clocked zero (l₁ , l₂ ) = ret (l₁ ++ l₂)
merge/clocked (suc k) ([] , l₂ ) = ret l₂
merge/clocked (suc k) (x ∷ xs , [] ) = ret (x ∷ xs)
merge/clocked (suc k) (x ∷ xs , y ∷ ys) =
bind (F (list A)) (x ≤ᵇ y) λ b →
if b
then (bind (F (list A)) (merge/clocked k (xs , y ∷ ys)) (ret ∘ (x ∷_)))
else (bind (F (list A)) (merge/clocked k (x ∷ xs , ys)) (ret ∘ (y ∷_)))
merge/clocked/correct : ∀ k l₁ l₂ →
◯ (∃ λ l → merge/clocked k (l₁ , l₂) ≡ ret l × (length l₁ + length l₂ Nat.≤ k → Sorted l₁ → Sorted l₂ → SortedOf (l₁ ++ l₂) l))
merge/clocked/correct zero l₁ l₂ u = l₁ ++ l₂ , refl , λ { h [] [] → refl , [] }
merge/clocked/correct (suc k) [] l₂ u = l₂ , refl , λ { h [] sorted₂ → refl , sorted₂ }
merge/clocked/correct (suc k) (x ∷ xs) [] u = x ∷ xs , refl , λ { h sorted₁ [] → ++-identityʳ (x ∷ xs) , sorted₁ }
merge/clocked/correct (suc k) (x ∷ xs) (y ∷ ys) u with h-cost x y
merge/clocked/correct (suc k) (x ∷ xs) (y ∷ ys) u | ⇓ b withCost q [ _ , h-eq ] rewrite eq/ref h-eq
with ≤ᵇ-reflects-≤ u (Eq.trans (eq/ref h-eq) (step/ext (F bool) (ret b) q u))
merge/clocked/correct (suc k) (x ∷ xs) (y ∷ ys) u | ⇓ false withCost q [ _ , h-eq ] | ofⁿ ¬p =
let (l , ≡ , h-sorted) = merge/clocked/correct k (x ∷ xs) ys u in
y ∷ l , (
let open ≡-Reasoning in
begin
step (F (list A)) q (bind (F (list A)) (merge/clocked k (x ∷ xs , ys)) (ret ∘ (y ∷_)))
≡⟨ step/ext (F (list A)) (bind (F (list A)) (merge/clocked k _) _) q u ⟩
bind (F (list A)) (merge/clocked k (x ∷ xs , ys)) (ret ∘ (y ∷_))
≡⟨ Eq.cong (λ e → bind (F (list A)) e _) ≡ ⟩
ret (y ∷ l)
∎
) , (
λ { (s≤s h) (h₁ ∷ sorted₁) (h₂ ∷ sorted₂) →
let h = Eq.subst (Nat._≤ k) (N.+-suc (length xs) (length ys)) h in
let (↭ , sorted) = h-sorted h (h₁ ∷ sorted₁) sorted₂ in
(
let open PermutationReasoning in
begin
(x ∷ xs ++ y ∷ ys)
↭⟨ ++-comm-↭ (x ∷ xs) (y ∷ ys) ⟩
(y ∷ ys ++ x ∷ xs)
≡⟨⟩
y ∷ (ys ++ x ∷ xs)
<⟨ ++-comm-↭ ys (x ∷ xs) ⟩
y ∷ (x ∷ xs ++ ys)
<⟨ ↭ ⟩
y ∷ l
∎
) , (
let p = ≰⇒≥ ¬p in
All-resp-↭ (↭) (++⁺-All (p ∷ ≤-≤* p h₁) h₂) ∷ sorted
)
}
)
merge/clocked/correct (suc k) (x ∷ xs) (y ∷ ys) u | ⇓ true withCost q [ _ , h-eq ] | ofʸ p =
let (l , ≡ , h-sorted) = merge/clocked/correct k xs (y ∷ ys) u in
x ∷ l , (
let open ≡-Reasoning in
begin
step (F (list A)) q (bind (F (list A)) (merge/clocked k (xs , y ∷ ys)) (ret ∘ (x ∷_)))
≡⟨ step/ext (F (list A)) (bind (F (list A)) (merge/clocked k _) _) q u ⟩
bind (F (list A)) (merge/clocked k (xs , y ∷ ys)) (ret ∘ (x ∷_))
≡⟨ Eq.cong (λ e → bind (F (list A)) e _) ≡ ⟩
ret (x ∷ l)
∎
) , (
λ { (s≤s h) (h₁ ∷ sorted₁) (h₂ ∷ sorted₂) →
let (↭ , sorted) = h-sorted h sorted₁ (h₂ ∷ sorted₂) in
prep x ↭ , All-resp-↭ (↭) (++⁺-All h₁ (p ∷ ≤-≤* p h₂)) ∷ sorted
}
)
merge/clocked/cost : cmp (Π nat λ _ → Π pair λ _ → cost)
merge/clocked/cost zero (l₁ , l₂ ) = zero
merge/clocked/cost (suc k) ([] , l₂ ) = zero
merge/clocked/cost (suc k) (x ∷ xs , [] ) = zero
merge/clocked/cost (suc k) (x ∷ xs , y ∷ ys) =
bind cost (x ≤ᵇ y) λ b →
1 + (
if b
then (bind cost (merge/clocked k (xs , y ∷ ys)) (λ l → merge/clocked/cost k (xs , y ∷ ys) + 0))
else (bind cost (merge/clocked k (x ∷ xs , ys)) (λ l → merge/clocked/cost k (x ∷ xs , ys) + 0))
)
merge/clocked/cost/closed : cmp (Π nat λ _ → Π pair λ _ → cost)
merge/clocked/cost/closed k _ = k
merge/clocked/cost≤merge/clocked/cost/closed : ∀ k p → ◯ (merge/clocked/cost k p Nat.≤ merge/clocked/cost/closed k p)
merge/clocked/cost≤merge/clocked/cost/closed zero (l₁ , l₂ ) u = N.≤-refl
merge/clocked/cost≤merge/clocked/cost/closed (suc k) ([] , l₂ ) u = z≤n
merge/clocked/cost≤merge/clocked/cost/closed (suc k) (x ∷ xs , [] ) u = z≤n
merge/clocked/cost≤merge/clocked/cost/closed (suc k) (x ∷ xs , y ∷ ys) u with h-cost x y
... | ⇓ false withCost q [ _ , h-eq ] rewrite eq/ref h-eq =
let (l , ≡ , _) = merge/clocked/correct k (x ∷ xs) ys u in
begin
step cost q (1 + bind cost (merge/clocked k (x ∷ xs , ys)) (λ l → merge/clocked/cost k (x ∷ xs , ys) + 0))
≡⟨ step/ext cost _ q u ⟩
1 + bind cost (merge/clocked k (x ∷ xs , ys)) (λ l → merge/clocked/cost k (x ∷ xs , ys) + 0)
≡⟨⟩
suc (bind cost (merge/clocked k (x ∷ xs , ys)) (λ l → merge/clocked/cost k (x ∷ xs , ys) + 0))
≡⟨ Eq.cong (λ e → suc (bind cost e λ l → merge/clocked/cost k (x ∷ xs , ys) + 0)) (≡) ⟩
suc (merge/clocked/cost k (x ∷ xs , ys) + 0)
≡⟨ Eq.cong suc (N.+-identityʳ _) ⟩
suc (merge/clocked/cost k (x ∷ xs , ys))
≤⟨ s≤s (merge/clocked/cost≤merge/clocked/cost/closed k (x ∷ xs , ys) u) ⟩
suc (merge/clocked/cost/closed k (x ∷ xs , ys))
≡⟨⟩
suc k
∎
where open ≤-Reasoning
... | ⇓ true withCost q [ _ , h-eq ] rewrite eq/ref h-eq =
let (l , ≡ , _) = merge/clocked/correct k xs (y ∷ ys) u in
begin
step cost q (1 + bind cost (merge/clocked k (xs , y ∷ ys)) (λ l → merge/clocked/cost k (xs , y ∷ ys) + 0))
≡⟨ step/ext cost _ q u ⟩
1 + bind cost (merge/clocked k (xs , y ∷ ys)) (λ l → merge/clocked/cost k (xs , y ∷ ys) + 0)
≡⟨⟩
suc (bind cost (merge/clocked k (xs , y ∷ ys)) (λ l → merge/clocked/cost k (xs , y ∷ ys) + 0))
≡⟨ Eq.cong (λ e → suc (bind cost e λ l → merge/clocked/cost k (xs , y ∷ ys) + 0)) (≡) ⟩
suc (merge/clocked/cost k (xs , y ∷ ys) + 0)
≡⟨ Eq.cong suc (N.+-identityʳ _) ⟩
suc (merge/clocked/cost k (xs , y ∷ ys))
≤⟨ s≤s (merge/clocked/cost≤merge/clocked/cost/closed k (xs , y ∷ ys) u) ⟩
suc (merge/clocked/cost/closed k (xs , y ∷ ys))
≡⟨⟩
suc k
∎
where open ≤-Reasoning
merge/clocked≤merge/clocked/cost : ∀ k p → IsBounded (list A) (merge/clocked k p) (merge/clocked/cost k p)
merge/clocked≤merge/clocked/cost zero (l₁ , l₂ ) = bound/ret
merge/clocked≤merge/clocked/cost (suc k) ([] , l₂ ) = bound/ret
merge/clocked≤merge/clocked/cost (suc k) (x ∷ xs , [] ) = bound/ret
merge/clocked≤merge/clocked/cost (suc k) (x ∷ xs , y ∷ ys) =
bound/bind 1 _ (h-cost x y) λ b →
bound/bool {p = λ b → if_then_else_ b _ _} b
(bound/bind (merge/clocked/cost k (x ∷ xs , ys)) _ (merge/clocked≤merge/clocked/cost k (x ∷ xs , ys)) λ l → bound/ret)
(bound/bind (merge/clocked/cost k (xs , y ∷ ys)) _ (merge/clocked≤merge/clocked/cost k (xs , y ∷ ys)) λ l → bound/ret)
merge/clocked≤merge/clocked/cost/closed : ∀ k p → IsBounded (list A) (merge/clocked k p) (merge/clocked/cost/closed k p)
merge/clocked≤merge/clocked/cost/closed k p = bound/relax (merge/clocked/cost≤merge/clocked/cost/closed k p) (merge/clocked≤merge/clocked/cost k p)
merge : cmp (Π pair λ _ → F (list A))
merge (l₁ , l₂) = merge/clocked (length l₁ + length l₂) (l₁ , l₂)
merge/correct : ∀ l₁ l₂ →
◯ (∃ λ l → merge (l₁ , l₂) ≡ ret l × (Sorted l₁ → Sorted l₂ → SortedOf (l₁ ++ l₂) l))
merge/correct l₁ l₂ u =
let (l , ≡ , h-sorted) = merge/clocked/correct (length l₁ + length l₂) l₁ l₂ u in
l , ≡ , h-sorted N.≤-refl
merge/cost : cmp (Π pair λ _ → cost)
merge/cost (l₁ , l₂) = merge/clocked/cost (length l₁ + length l₂) (l₁ , l₂)
merge/cost/closed : cmp (Π pair λ _ → cost)
merge/cost/closed (l₁ , l₂) = merge/clocked/cost/closed (length l₁ + length l₂) (l₁ , l₂)
merge≤merge/cost : ∀ p → IsBounded (list A) (merge p) (merge/cost p)
merge≤merge/cost (l₁ , l₂) = merge/clocked≤merge/clocked/cost (length l₁ + length l₂) (l₁ , l₂)
merge≤merge/cost/closed : ∀ p → IsBounded (list A) (merge p) (merge/cost/closed p)
merge≤merge/cost/closed (l₁ , l₂) = merge/clocked≤merge/clocked/cost/closed (length l₁ + length l₂) (l₁ , l₂)
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommMonoid.CommMonoidProd where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Data.Sigma
open import Cubical.Algebra.CommMonoid.Base
open CommMonoidStr
private
variable
ℓ ℓ' : Level
CommMonoidProd : CommMonoid ℓ → CommMonoid ℓ' → CommMonoid (ℓ-max ℓ ℓ')
CommMonoidProd M N = makeCommMonoid ε× _·×_ is-set× assoc× ·IdR× comm×
where
ε× : (fst M) × (fst N)
ε× = (ε (snd M)) , (ε (snd N))
_·×_ : (fst M) × (fst N) → (fst M) × (fst N) → (fst M) × (fst N)
(x₁ , x₂) ·× (y₁ , y₂) = (_·_ (snd M) x₁ y₁) , (_·_ (snd N) x₂ y₂)
is-set× : isSet ((fst M) × (fst N))
is-set× = isSet× (is-set (snd M)) (is-set (snd N))
assoc× : ∀ x y z → x ·× (y ·× z) ≡ (x ·× y) ·× z
assoc× _ _ _ = cong₂ (_,_) (·Assoc (snd M) _ _ _) (·Assoc (snd N) _ _ _)
·IdR× : ∀ x → x ·× ε× ≡ x
·IdR× _ = cong₂ (_,_) (·IdR (snd M) _) (·IdR (snd N) _)
comm× : ∀ x y → x ·× y ≡ y ·× x
comm× _ _ = cong₂ (_,_) (·Comm (snd M) _ _) (·Comm (snd N) _ _)
|
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.finset.basic
import Mathlib.data.multiset.nat_antidiagonal
import Mathlib.PostPort
namespace Mathlib
/-!
# The "antidiagonal" {(0,n), (1,n-1), ..., (n,0)} as a finset.
-/
namespace finset
namespace nat
/-- The antidiagonal of a natural number `n` is
the finset of pairs `(i,j)` such that `i+j = n`. -/
def antidiagonal (n : ℕ) : finset (ℕ × ℕ) :=
mk (multiset.nat.antidiagonal n) (multiset.nat.nodup_antidiagonal n)
/-- A pair (i,j) is contained in the antidiagonal of `n` if and only if `i+j=n`. -/
@[simp] theorem mem_antidiagonal {n : ℕ} {x : ℕ × ℕ} : x ∈ antidiagonal n ↔ prod.fst x + prod.snd x = n := sorry
/-- The cardinality of the antidiagonal of `n` is `n+1`. -/
@[simp] theorem card_antidiagonal (n : ℕ) : card (antidiagonal n) = n + 1 := sorry
/-- The antidiagonal of `0` is the list `[(0,0)]` -/
@[simp] theorem antidiagonal_zero : antidiagonal 0 = singleton (0, 0) :=
rfl
theorem antidiagonal_succ {n : ℕ} : antidiagonal (n + 1) =
insert (0, n + 1)
(map (function.embedding.prod_map (function.embedding.mk Nat.succ nat.succ_injective) (function.embedding.refl ℕ))
(antidiagonal n)) := sorry
theorem map_swap_antidiagonal {n : ℕ} : map (function.embedding.mk prod.swap (function.right_inverse.injective prod.swap_right_inverse)) (antidiagonal n) =
antidiagonal n := sorry
|
State Before: l : Type ?u.76780
m : Type ?u.76783
n : Type u_1
o : Type ?u.76789
m' : o → Type ?u.76794
n' : o → Type ?u.76799
R : Type ?u.76802
S : Type ?u.76805
α : Type v
β : Type w
γ : Type ?u.76812
inst✝⁴ : DecidableEq n
inst✝³ : Zero α
inst✝² : One α
inst✝¹ : Zero β
inst✝ : One β
f : α → β
h₀ : f 0 = 0
h₁ : f 1 = 1
⊢ map 1 f = 1
State After: case a.h
l : Type ?u.76780
m : Type ?u.76783
n : Type u_1
o : Type ?u.76789
m' : o → Type ?u.76794
n' : o → Type ?u.76799
R : Type ?u.76802
S : Type ?u.76805
α : Type v
β : Type w
γ : Type ?u.76812
inst✝⁴ : DecidableEq n
inst✝³ : Zero α
inst✝² : One α
inst✝¹ : Zero β
inst✝ : One β
f : α → β
h₀ : f 0 = 0
h₁ : f 1 = 1
i✝ x✝ : n
⊢ map 1 f i✝ x✝ = OfNat.ofNat 1 i✝ x✝
Tactic: ext
State Before: case a.h
l : Type ?u.76780
m : Type ?u.76783
n : Type u_1
o : Type ?u.76789
m' : o → Type ?u.76794
n' : o → Type ?u.76799
R : Type ?u.76802
S : Type ?u.76805
α : Type v
β : Type w
γ : Type ?u.76812
inst✝⁴ : DecidableEq n
inst✝³ : Zero α
inst✝² : One α
inst✝¹ : Zero β
inst✝ : One β
f : α → β
h₀ : f 0 = 0
h₁ : f 1 = 1
i✝ x✝ : n
⊢ map 1 f i✝ x✝ = OfNat.ofNat 1 i✝ x✝
State After: case a.h
l : Type ?u.76780
m : Type ?u.76783
n : Type u_1
o : Type ?u.76789
m' : o → Type ?u.76794
n' : o → Type ?u.76799
R : Type ?u.76802
S : Type ?u.76805
α : Type v
β : Type w
γ : Type ?u.76812
inst✝⁴ : DecidableEq n
inst✝³ : Zero α
inst✝² : One α
inst✝¹ : Zero β
inst✝ : One β
f : α → β
h₀ : f 0 = 0
h₁ : f 1 = 1
i✝ x✝ : n
⊢ f (if i✝ = x✝ then 1 else 0) = if i✝ = x✝ then 1 else 0
Tactic: simp only [one_apply, map_apply]
State Before: case a.h
l : Type ?u.76780
m : Type ?u.76783
n : Type u_1
o : Type ?u.76789
m' : o → Type ?u.76794
n' : o → Type ?u.76799
R : Type ?u.76802
S : Type ?u.76805
α : Type v
β : Type w
γ : Type ?u.76812
inst✝⁴ : DecidableEq n
inst✝³ : Zero α
inst✝² : One α
inst✝¹ : Zero β
inst✝ : One β
f : α → β
h₀ : f 0 = 0
h₁ : f 1 = 1
i✝ x✝ : n
⊢ f (if i✝ = x✝ then 1 else 0) = if i✝ = x✝ then 1 else 0
State After: no goals
Tactic: split_ifs <;> simp [h₀, h₁]
(* Title: HOL/Auth/n_german_lemma_on_inv__47.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__47 imports n_german_base
begin
section{*All lemmas on causal relation between inv__47 and some rule r*}
lemma n_RecvReqVsinv__47:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReq N i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv3) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv3) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv3) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__47:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv3) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv3) ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__47:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__47:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__47:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__47:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqESVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntSVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvSVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvS i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvEVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvE i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntEVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__47:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__47:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__47 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
(* A few things for when CPDT's crush is overkill, also we export crush
because it's so useful with tedious simple proofs *)
Require Export CpdtTactics.
(* Our assumption is that what's used in an if is a sumbool, so we don't need to keep X *)
Ltac destruct_if := repeat match goal with
| [ H : context[if ?X then _ else _] |- _] => destruct X
| [ |- context[if ?X then _ else _]] => destruct X
end.
Ltac destruct_match := repeat match goal with
| [ H : context[match ?X with _ => _ end] |- _] => let H := fresh "H" in destruct X eqn:H
| [ |- context[match ?X with _ => _ end]] => let H := fresh "H" in destruct X eqn:H
end.
(* "Borrowed" from CPDT *)
Ltac notHyp P :=
match goal with
| [ _ : P |- _ ] => fail 1
| _ =>
match P with
| ?P1 /\ ?P2 => first [ notHyp P1 | notHyp P2 | fail 2 ]
| _ => idtac
end
end.
Ltac extend pf :=
let t := type of pf in
notHyp t; generalize pf; intro.
(* An imitation of Hol's RESTAC. Use for things in Prop only unless you want garbage code *)
Ltac restac := repeat match goal with
| [ F : (?A -> _), a : ?A |- _] => extend (F a)
| [ F : (?A <-> _), a : ?A |- _] => extend (proj1 F a)
| [ F : (_ <-> ?A), a : ?A |- _] => extend (proj2 F a)
| [ H : forall x, ?P x -> _, H' : ?P ?X |- _ ] => extend (H X H')
end; try solve [intuition].
Hint Extern 4 => discriminate. (* I don't know why this isn't part of auto *)
(* crush specialised for sumbool *)
Ltac crush_dec lemmas invOne :=
first [ solve [left; crush' lemmas invOne]
| solve [right; crush' lemmas invOne]
| crush' lemmas invOne]. (* if not solved, simplify anyway *)
(* Introduce a new term and the call inversion on it *)
Ltac introvert expr := let H := fresh "H" in set (H := expr); inversion H; try subst.
Definition f_nat_lt_wf : forall {A : Type} (f : A -> nat), well_founded (fun x y => f x < f y).
Proof.
intros A f; unfold well_founded; intro a; constructor; induction (f a); [crush | constructor; crush ].
Defined.
Hint Immediate f_nat_lt_wf.
|
(* Author: Tobias Nipkow
Copyright 1998 TUM
Is there an optimal order of arguments for `next'?
Currently we can have laws like `delta A (a#w) = delta A w o delta A a'
Otherwise we could have `acceps A == fin A o delta A (start A)'
and use foldl instead of foldl2.
*)
section "Projection functions for automata"
theory AutoProj
imports Main
begin
definition start :: "'a * 'b * 'c \<Rightarrow> 'a" where "start A = fst A"
definition "next" :: "'a * 'b * 'c \<Rightarrow> 'b" where "next A = fst(snd(A))"
definition fin :: "'a * 'b * 'c \<Rightarrow> 'c" where "fin A = snd(snd(A))"
lemma [simp]: "next(q,d,f) = d"
by(simp add:next_def)
lemma [simp]: "fin(q,d,f) = f"
by(simp add:fin_def)
end
|
{-
This is mostly for convenience, when working with ideals
(which are defined for general rings) in a commutative ring.
-}
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommRing.Ideal where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Powerset renaming ( _∈_ to _∈p_ ; _⊆_ to _⊆p_
; subst-∈ to subst-∈p )
open import Cubical.Functions.Logic
open import Cubical.Data.Nat using (ℕ ; zero ; suc ; tt)
renaming ( --zero to ℕzero ; suc to ℕsuc
_+_ to _+ℕ_ ; _·_ to _·ℕ_
; +-assoc to +ℕ-assoc ; +-comm to +ℕ-comm
; ·-assoc to ·ℕ-assoc ; ·-comm to ·ℕ-comm)
open import Cubical.Data.FinData hiding (rec ; elim)
open import Cubical.Data.Sigma
open import Cubical.HITs.PropositionalTruncation
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Ring
open import Cubical.Algebra.Ring.Ideal renaming (IdealsIn to IdealsInRing)
open import Cubical.Algebra.Ring.BigOps
open import Cubical.Algebra.RingSolver.ReflectionSolving
private
variable
ℓ : Level
IdealsIn : (R : CommRing ℓ) → Type _
IdealsIn R = IdealsInRing (CommRing→Ring R)
module _ (Ring@(R , str) : CommRing ℓ) where
open CommRingStr str
makeIdeal : (I : R → hProp ℓ)
→ (+-closed : {x y : R} → x ∈p I → y ∈p I → (x + y) ∈p I)
→ (0r-closed : 0r ∈p I)
→ (·-closedLeft : {x : R} → (r : R) → x ∈p I → r · x ∈p I)
→ IdealsIn (R , str)
makeIdeal I +-closed 0r-closed ·-closedLeft = I ,
(record
{ +-closed = +-closed
; -closed = λ x∈pI → subst-∈p I (useSolver _)
(·-closedLeft (- 1r) x∈pI)
; 0r-closed = 0r-closed
; ·-closedLeft = ·-closedLeft
; ·-closedRight = λ r x∈pI →
subst-∈p I
(·-comm r _)
(·-closedLeft r x∈pI)
})
where useSolver : (x : R) → - 1r · x ≡ - x
useSolver = solve Ring
-- better?
module CommIdeal (R' : CommRing ℓ) where
private R = fst R'
open CommRingStr (snd R')
open Sum (CommRing→Ring R')
open CommRingTheory R'
open RingTheory (CommRing→Ring R')
record isCommIdeal (I : ℙ R) : Type ℓ where
constructor
makeIsCommIdeal
field
+Closed : ∀ {x y : R} → x ∈p I → y ∈p I → (x + y) ∈p I
contains0 : 0r ∈p I
·Closed : ∀ {x : R} (r : R) → x ∈p I → r · x ∈p I
·RClosed : ∀ {x : R} (r : R) → x ∈p I → x · r ∈p I
·RClosed r x∈pI = subst-∈p I (·-comm _ _) (·Closed r x∈pI)
open isCommIdeal
isPropIsCommIdeal : (I : ℙ R) → isProp (isCommIdeal I)
+Closed (isPropIsCommIdeal I ici₁ ici₂ i) x∈pI y∈pI =
I _ .snd (ici₁ .+Closed x∈pI y∈pI) (ici₂ .+Closed x∈pI y∈pI) i
contains0 (isPropIsCommIdeal I ici₁ ici₂ i) = I 0r .snd (ici₁ .contains0) (ici₂ .contains0) i
·Closed (isPropIsCommIdeal I ici₁ ici₂ i) r x∈pI =
I _ .snd (ici₁ .·Closed r x∈pI) (ici₂ .·Closed r x∈pI) i
CommIdeal : Type (ℓ-suc ℓ)
CommIdeal = Σ[ I ∈ ℙ R ] isCommIdeal I
--inclusion and containment of ideals
_⊆_ : CommIdeal → CommIdeal → Type ℓ
I ⊆ J = I .fst ⊆p J .fst
infix 5 _∈_
_∈_ : R → CommIdeal → Type ℓ
x ∈ I = x ∈p I .fst
subst-∈ : (I : CommIdeal) {x y : R} → x ≡ y → x ∈ I → y ∈ I
subst-∈ I = subst-∈p (I .fst)
CommIdeal≡Char : {I J : CommIdeal} → I ⊆ J → J ⊆ I → I ≡ J
CommIdeal≡Char I⊆J J⊆I = Σ≡Prop isPropIsCommIdeal (⊆-extensionality _ _ (I⊆J , J⊆I))
∑Closed : (I : CommIdeal) {n : ℕ} (V : FinVec R n)
→ (∀ i → V i ∈ I) → ∑ V ∈ I
∑Closed I {n = zero} _ _ = I .snd .contains0
∑Closed I {n = suc n} V h = I .snd .+Closed (h zero) (∑Closed I (V ∘ suc) (h ∘ suc))
0Ideal : CommIdeal
fst 0Ideal x = (x ≡ 0r) , is-set _ _
+Closed (snd 0Ideal) x≡0 y≡0 = cong₂ (_+_) x≡0 y≡0 ∙ +Rid _
contains0 (snd 0Ideal) = refl
·Closed (snd 0Ideal) r x≡0 = cong (r ·_) x≡0 ∙ 0RightAnnihilates _
1Ideal : CommIdeal
fst 1Ideal x = ⊤
+Closed (snd 1Ideal) _ _ = lift tt
contains0 (snd 1Ideal) = lift tt
·Closed (snd 1Ideal) _ _ = lift tt
contains1Is1 : (I : CommIdeal) → 1r ∈ I → I ≡ 1Ideal
contains1Is1 I 1∈I = CommIdeal≡Char (λ _ _ → lift tt)
λ x _ → subst-∈ I (·Rid _) (I .snd .·Closed x 1∈I) -- x≡x·1 ∈ I
_+i_ : CommIdeal → CommIdeal → CommIdeal
fst (I +i J) x =
(∃[ (y , z) ∈ (R × R) ] ((y ∈ I) × (z ∈ J) × (x ≡ y + z))) --have a record for this?
, isPropPropTrunc
+Closed (snd (I +i J)) {x = x₁} {y = x₂} = map2 +ClosedΣ
where
+ClosedΣ : Σ[ (y₁ , z₁) ∈ (R × R) ] ((y₁ ∈ I) × (z₁ ∈ J) × (x₁ ≡ y₁ + z₁))
→ Σ[ (y₂ , z₂) ∈ (R × R) ] ((y₂ ∈ I) × (z₂ ∈ J) × (x₂ ≡ y₂ + z₂))
→ Σ[ (y₃ , z₃) ∈ (R × R) ] ((y₃ ∈ I) × (z₃ ∈ J) × (x₁ + x₂ ≡ y₃ + z₃))
+ClosedΣ ((y₁ , z₁) , y₁∈I , z₁∈J , x₁≡y₁+z₁) ((y₂ , z₂) , y₂∈I , z₂∈J , x₂≡y₂+z₂) =
(y₁ + y₂ , z₁ + z₂) , +Closed (snd I) y₁∈I y₂∈I , +Closed (snd J) z₁∈J z₂∈J
, cong₂ (_+_) x₁≡y₁+z₁ x₂≡y₂+z₂ ∙ +ShufflePairs _ _ _ _
contains0 (snd (I +i J)) = ∣ (0r , 0r) , contains0 (snd I) , contains0 (snd J) , sym (+Rid _) ∣
·Closed (snd (I +i J)) {x = x} r = map ·ClosedΣ
where
·ClosedΣ : Σ[ (y₁ , z₁) ∈ (R × R) ] ((y₁ ∈ I) × (z₁ ∈ J) × (x ≡ y₁ + z₁))
→ Σ[ (y₂ , z₂) ∈ (R × R) ] ((y₂ ∈ I) × (z₂ ∈ J) × (r · x ≡ y₂ + z₂))
·ClosedΣ ((y₁ , z₁) , y₁∈I , z₁∈J , x≡y₁+z₁) =
(r · y₁ , r · z₁) , ·Closed (snd I) r y₁∈I , ·Closed (snd J) r z₁∈J
, cong (r ·_) x≡y₁+z₁ ∙ ·Rdist+ _ _ _
infixl 6 _+i_
+iComm⊆ : ∀ (I J : CommIdeal) → (I +i J) ⊆ (J +i I)
+iComm⊆ I J x = map λ ((y , z) , y∈I , z∈J , x≡y+z) → (z , y) , z∈J , y∈I , x≡y+z ∙ +Comm _ _
+iComm : ∀ (I J : CommIdeal) → I +i J ≡ J +i I
+iComm I J = CommIdeal≡Char (+iComm⊆ I J) (+iComm⊆ J I)
+iLidLIncl : ∀ (I : CommIdeal) → (0Ideal +i I) ⊆ I
+iLidLIncl I x = rec (I .fst x .snd) λ ((y , z) , y≡0 , z∈I , x≡y+z)
→ subst-∈ I (sym (x≡y+z ∙∙ cong (_+ z) y≡0 ∙∙ +Lid z)) z∈I
+iLidRIncl : ∀ (I : CommIdeal) → I ⊆ (0Ideal +i I)
+iLidRIncl I x x∈I = ∣ (0r , x) , refl , x∈I , sym (+Lid _) ∣
+iLid : ∀ (I : CommIdeal) → 0Ideal +i I ≡ I
+iLid I = CommIdeal≡Char (+iLidLIncl I) (+iLidRIncl I)
+iLincl : ∀ (I J : CommIdeal) → I ⊆ (I +i J)
+iLincl I J x x∈I = ∣ (x , 0r) , x∈I , J .snd .contains0 , sym (+Rid x) ∣
+iRincl : ∀ (I J : CommIdeal) → J ⊆ (I +i J)
+iRincl I J x x∈J = ∣ (0r , x) , I .snd .contains0 , x∈J , sym (+Lid x) ∣
+iRespLincl : ∀ (I J K : CommIdeal) → I ⊆ J → (I +i K) ⊆ (J +i K)
+iRespLincl I J K I⊆J x = map λ ((y , z) , y∈I , z∈K , x≡y+z) → ((y , z) , I⊆J y y∈I , z∈K , x≡y+z)
+iAssocLIncl : ∀ (I J K : CommIdeal) → (I +i (J +i K)) ⊆ ((I +i J) +i K)
+iAssocLIncl I J K x = elim (λ _ → ((I +i J) +i K) .fst x .snd) (uncurry3
λ (y , z) y∈I → elim (λ _ → isPropΠ λ _ → ((I +i J) +i K) .fst x .snd)
λ ((u , v) , u∈J , v∈K , z≡u+v) x≡y+z
→ ∣ (y + u , v) , ∣ _ , y∈I , u∈J , refl ∣ , v∈K
, x≡y+z ∙∙ cong (y +_) z≡u+v ∙∙ +Assoc _ _ _ ∣)
+iAssocRIncl : ∀ (I J K : CommIdeal) → ((I +i J) +i K) ⊆ (I +i (J +i K))
+iAssocRIncl I J K x = elim (λ _ → (I +i (J +i K)) .fst x .snd) (uncurry3
λ (y , z) → elim (λ _ → isPropΠ2 λ _ _ → (I +i (J +i K)) .fst x .snd)
λ ((u , v) , u∈I , v∈J , y≡u+v) z∈K x≡y+z
→ ∣ (u , v + z) , u∈I , ∣ _ , v∈J , z∈K , refl ∣
, x≡y+z ∙∙ cong (_+ z) y≡u+v ∙∙ sym (+Assoc _ _ _) ∣)
+iAssoc : ∀ (I J K : CommIdeal) → I +i (J +i K) ≡ (I +i J) +i K
+iAssoc I J K = CommIdeal≡Char (+iAssocLIncl I J K) (+iAssocRIncl I J K)
+iIdemLIncl : ∀ (I : CommIdeal) → (I +i I) ⊆ I
+iIdemLIncl I x = rec (I .fst x .snd) λ ((y , z) , y∈I , z∈I , x≡y+z)
→ subst-∈ I (sym x≡y+z) (I .snd .+Closed y∈I z∈I)
+iIdemRIncl : ∀ (I : CommIdeal) → I ⊆ (I +i I)
+iIdemRIncl I x x∈I = ∣ (0r , x) , I .snd .contains0 , x∈I , sym (+Lid _) ∣
+iIdem : ∀ (I : CommIdeal) → I +i I ≡ I
+iIdem I = CommIdeal≡Char (+iIdemLIncl I) (+iIdemRIncl I)
-- where to put this?
mul++dist : ∀ {n m : ℕ} (α U : FinVec R n) (β V : FinVec R m) (j : Fin (n +ℕ m))
→ ((λ i → α i · U i) ++Fin (λ i → β i · V i)) j ≡ (α ++Fin β) j · (U ++Fin V) j
mul++dist {n = zero} α U β V j = refl
mul++dist {n = suc n} α U β V zero = refl
mul++dist {n = suc n} α U β V (suc j) = mul++dist (α ∘ suc) (U ∘ suc) β V j
-- define multiplication of ideals
_·i_ : CommIdeal → CommIdeal → CommIdeal
fst (I ·i J) x = (∃[ n ∈ ℕ ] Σ[ (α , β) ∈ (FinVec R n × FinVec R n) ]
(∀ i → α i ∈ I) × (∀ i → β i ∈ J) × (x ≡ ∑ λ i → α i · β i))
, isPropPropTrunc
+Closed (snd (I ·i J)) = map2
λ (n , (α , β) , ∀αi∈I , ∀βi∈J , x≡∑αβ) (m , (γ , δ) , ∀γi∈I , ∀δi∈J , y≡∑γδ)
→ n +ℕ m , (α ++Fin γ , β ++Fin δ) , ++FinPres∈ (I .fst) ∀αi∈I ∀γi∈I
, ++FinPres∈ (J .fst) ∀βi∈J ∀δi∈J
, cong₂ (_+_) x≡∑αβ y≡∑γδ ∙∙ sym (∑Split++ (λ i → α i · β i) (λ i → γ i · δ i))
∙∙ ∑Ext (mul++dist α β γ δ)
contains0 (snd (I ·i J)) = ∣ 0 , ((λ ()) , (λ ())) , (λ ()) , (λ ()) , refl ∣
·Closed (snd (I ·i J)) r = map
λ (n , (α , β) , ∀αi∈I , ∀βi∈J , x≡∑αβ)
→ n , ((λ i → r · α i) , β) , (λ i → I .snd .·Closed r (∀αi∈I i)) , ∀βi∈J
, cong (r ·_) x≡∑αβ ∙ ∑Mulrdist r (λ i → α i · β i) ∙ ∑Ext λ i → ·Assoc r (α i) (β i)
infixl 7 _·i_
prodInProd : ∀ (I J : CommIdeal) (x y : R) → x ∈ I → y ∈ J → (x · y) ∈ (I ·i J)
prodInProd _ _ x y x∈I y∈J =
∣ 1 , ((λ _ → x) , λ _ → y) , (λ _ → x∈I) , (λ _ → y∈J) , sym (+Rid _) ∣
·iLincl : ∀ (I J : CommIdeal) → (I ·i J) ⊆ I
·iLincl I J x = elim (λ _ → I .fst x .snd)
λ (_ , (α , β) , α∈I , _ , x≡∑αβ) → subst-∈ I (sym x≡∑αβ)
(∑Closed I (λ i → α i · β i) λ i → ·RClosed (I .snd) _ (α∈I i))
·iComm⊆ : ∀ (I J : CommIdeal) → (I ·i J) ⊆ (J ·i I)
·iComm⊆ I J x = map λ (n , (α , β) , ∀αi∈I , ∀βi∈J , x≡∑αβ)
→ (n , (β , α) , ∀βi∈J , ∀αi∈I , x≡∑αβ ∙ ∑Ext (λ i → ·-comm (α i) (β i)))
·iComm : ∀ (I J : CommIdeal) → I ·i J ≡ J ·i I
·iComm I J = CommIdeal≡Char (·iComm⊆ I J) (·iComm⊆ J I)
I⊆I1 : ∀ (I : CommIdeal) → I ⊆ (I ·i 1Ideal)
I⊆I1 I x x∈I = ∣ 1 , ((λ _ → x) , λ _ → 1r) , (λ _ → x∈I) , (λ _ → lift tt) , useSolver x ∣
where
useSolver : ∀ x → x ≡ x · 1r + 0r
useSolver = solve R'
·iRid : ∀ (I : CommIdeal) → I ·i 1Ideal ≡ I
·iRid I = CommIdeal≡Char (·iLincl I 1Ideal) (I⊆I1 I)
-- a useful corollary
·iRContains1id : ∀ (I J : CommIdeal) → 1r ∈ J → I ·i J ≡ I
·iRContains1id I J 1∈J = cong (I ·i_) (contains1Is1 J 1∈J) ∙ ·iRid I
·iAssocLIncl : ∀ (I J K : CommIdeal) → (I ·i (J ·i K)) ⊆ ((I ·i J) ·i K)
·iAssocLIncl I J K x = rec isPropPropTrunc
λ (_ , (α , β) , α∈I , β∈JK , x≡∑αβ)
→ subst-∈ ((I ·i J) ·i K) (sym x≡∑αβ)
(∑Closed ((I ·i J) ·i K) (λ i → α i · β i)
λ i → rec isPropPropTrunc
(λ (_ , (γ , δ) , γ∈J , δ∈K , βi≡∑γδ)
→ subst-∈ ((I ·i J) ·i K) -- each αᵢβᵢ ≡...≡ ∑αᵢγⱼδⱼ ∈IJK
(sym (cong (α i ·_) βi≡∑γδ ∙∙ ∑Mulrdist (α i) (λ j → γ j · δ j)
∙∙ ∑Ext (λ j → ·Assoc (α i) (γ j) (δ j))))
(∑Closed ((I ·i J) ·i K) (λ j → α i · γ j · δ j) -- each αᵢγⱼδⱼ∈IJK
λ j → prodInProd (I ·i J) K _ _
(prodInProd I J _ _ (α∈I i) (γ∈J j)) (δ∈K j)))
(β∈JK i))
·iAssocRIncl : ∀ (I J K : CommIdeal) → ((I ·i J) ·i K) ⊆ (I ·i (J ·i K))
·iAssocRIncl I J K x = rec isPropPropTrunc
λ (_ , (α , β) , α∈IJ , β∈K , x≡∑αβ)
→ subst-∈ (I ·i (J ·i K)) (sym x≡∑αβ)
(∑Closed (I ·i (J ·i K)) (λ i → α i · β i)
λ i → rec isPropPropTrunc
(λ (_ , (γ , δ) , γ∈I , δ∈J , αi≡∑γδ)
→ subst-∈ (I ·i (J ·i K))
(sym (cong (_· β i) αi≡∑γδ ∙∙ ∑Mulldist (β i) (λ j → γ j · δ j)
∙∙ ∑Ext (λ j → sym (·Assoc (γ j) (δ j) (β i)))))
(∑Closed (I ·i (J ·i K)) (λ j → γ j · (δ j · β i))
λ j → prodInProd I (J ·i K) _ _ (γ∈I j)
(prodInProd J K _ _ (δ∈J j) (β∈K i))))
(α∈IJ i))
·iAssoc : ∀ (I J K : CommIdeal) → I ·i (J ·i K) ≡ (I ·i J) ·i K
·iAssoc I J K = CommIdeal≡Char (·iAssocLIncl I J K) (·iAssocRIncl I J K)
·iRdist+iLIncl : ∀ (I J K : CommIdeal) → (I ·i (J +i K)) ⊆ (I ·i J +i I ·i K)
·iRdist+iLIncl I J K x = rec isPropPropTrunc
λ (n , (α , β) , α∈I , β∈J+K , x≡∑αβ) → subst-∈ ((I ·i J) +i (I ·i K)) (sym x≡∑αβ)
(∑Closed ((I ·i J) +i (I ·i K)) (λ i → α i · β i) -- each αi·βi ∈ IJ+IK
λ i → rec isPropPropTrunc
(λ ((γi , δi) , γi∈J , δi∈K , βi≡γi+δi) →
∣ (α i · γi , α i · δi) , prodInProd I J _ _ (α∈I i) γi∈J
, prodInProd I K _ _ (α∈I i) δi∈K
, cong (α i ·_) βi≡γi+δi ∙ ·Rdist+ _ _ _ ∣)
(β∈J+K i))
·iRdist+iRIncl : ∀ (I J K : CommIdeal) → ((I ·i J) +i (I ·i K)) ⊆ (I ·i (J +i K))
·iRdist+iRIncl I J K x = rec isPropPropTrunc λ ((y , z) , y∈IJ , z∈IK , x≡y+z)
→ subst-∈ (I ·i (J +i K)) (sym x≡y+z)
((I ·i (J +i K)) .snd .+Closed (inclHelperLeft _ y∈IJ) (inclHelperRight _ z∈IK))
where
inclHelperLeft : (I ·i J) ⊆ (I ·i (J +i K))
inclHelperLeft x' = map (λ (n , (α , β) , α∈I , β∈J , x'≡∑αβ)
→ n , (α , β) , α∈I , (λ i → +iLincl J K _ (β∈J i)) , x'≡∑αβ)
inclHelperRight : (I ·i K) ⊆ (I ·i (J +i K))
inclHelperRight x' = map (λ (n , (α , β) , α∈I , β∈K , x'≡∑αβ)
→ n , (α , β) , α∈I , (λ i → +iRincl J K _ (β∈K i)) , x'≡∑αβ)
·iRdist+i : ∀ (I J K : CommIdeal) → I ·i (J +i K) ≡ I ·i J +i I ·i K
·iRdist+i I J K = CommIdeal≡Char (·iRdist+iLIncl I J K) (·iRdist+iRIncl I J K)
-- only one absorption law, i.e. CommIdeal , +i , ·i does not form a dist. lattice
·iAbsorb+iLIncl : ∀ (I J : CommIdeal) → (I +i (I ·i J)) ⊆ I
·iAbsorb+iLIncl I J x = rec (I .fst x .snd) λ ((y , z) , y∈I , z∈IJ , x≡y+z)
→ subst-∈ I (sym x≡y+z) (I .snd .+Closed y∈I (·iLincl I J _ z∈IJ))
·iAbsorb+iRIncl : ∀ (I J : CommIdeal) → I ⊆ (I +i (I ·i J))
·iAbsorb+iRIncl I J = +iLincl I (I ·i J)
·iAbsorb+i : ∀ (I J : CommIdeal) → I +i (I ·i J) ≡ I
·iAbsorb+i I J = CommIdeal≡Char (·iAbsorb+iLIncl I J) (·iAbsorb+iRIncl I J)
|
lemma open_closed: "open S \<longleftrightarrow> closed (- S)" |
lemma compact_negations: fixes s :: "'a::real_normed_vector set" assumes "compact s" shows "compact ((\<lambda>x. - x) ` s)" |
{-# OPTIONS --cubical --no-import-sorts --safe #-}
open import Cubical.Core.Everything
open import Cubical.Foundations.HLevels
module Cubical.Algebra.Semigroup.Construct.Right {ℓ} (Aˢ : hSet ℓ) where
open import Cubical.Foundations.Prelude
open import Cubical.Algebra.Semigroup
import Cubical.Algebra.Magma.Construct.Right Aˢ as RMagma
open RMagma public hiding (Right-isMagma; RightMagma)
private
A = ⟨ Aˢ ⟩
isSetA = Aˢ .snd
▸-assoc : Associative _▸_
▸-assoc _ _ _ = refl
Right-isSemigroup : IsSemigroup A _▸_
Right-isSemigroup = record
{ isMagma = RMagma.Right-isMagma
; assoc = ▸-assoc
}
RightSemigroup : Semigroup ℓ
RightSemigroup = record { isSemigroup = Right-isSemigroup }
|
{-# OPTIONS --without-K #-}
module sets.list.core where
open import sum
import sets.vec.core as V
open import sets.nat.core
List : ∀ {i} → Set i → Set i
List A = Σ ℕ (V.Vec A)
module _ {i}{A : Set i} where
vec-to-list : ∀ {n} → V.Vec A n → List A
vec-to-list {n} xs = n , xs
[] : List A
[] = 0 , V.[]
infixr 4 _∷_
_∷_ : A → List A → List A
x ∷ (n , xs) = suc n , x V.∷ xs
|
rm(list=ls(all=TRUE))
# Beta prior pseudo-counts and observed coin-flip counts
prior_heads = 10
prior_tails = 10
heads = 3
tails = 7
# Beta-Binomial conjugate update: posterior counts = prior counts + data
post_heads = prior_heads + heads
post_tails = prior_tails + tails
# Variant posterior with the observed data counted twice
epost_heads = prior_heads + (2 * heads)
epost_tails = prior_tails + (2 * tails)
# Densities evaluated on a grid over [0, 1]
q = seq(0, 1, 0.005)
prior = dbeta(q, prior_heads, prior_tails)
post = dbeta(q, post_heads, post_tails)
epost = dbeta(q, epost_heads, epost_tails)
ymax = max(epost) + (0.02 * max(epost))
# P(theta > 0.5): posterior probability the coin is biased towards heads
post_prob_unfair_heads = pbeta(0.5, post_heads, post_tails,
    lower.tail = F)
epost_prob_unfair_heads = pbeta(0.5, epost_heads, epost_tails,
    lower.tail = F)
# Mode of a Beta(a, b) distribution: (a - 1) / (a + b - 2)
post_mode = (post_heads - 1) / (post_heads + post_tails - 2)
epost_mode = (epost_heads - 1) / (epost_heads + epost_tails - 2)
cat(paste('post prob unfair heads: ', post_prob_unfair_heads, '\n'))
cat(paste('emp post prob unfair heads: ', epost_prob_unfair_heads, '\n'))
cat(paste('post mode: ', post_mode, '\n'))
cat(paste('emp post mode: ', epost_mode, '\n'))
pdf('coin_flip.pdf', width=7.087, height=4.5)
par(yaxs="i", xaxs="i", cex.lab=1.2, cex.axis=1.0, mgp=c(2.2, 0.7, 0),
oma=c(0, 0, 0.2, 0.5), mar=c(3.2, 3.2, 0.5, 0.1))
plot(x=c(), y = c(), xlab=bquote(theta), ylab='Density', xlim=c(0,1),
ylim=c(0, ymax))
lines(q, prior)
lines(q, post, col='blue')
lines(q, epost, col='red')
dev.off()
|
function dd=tritrig(a,b,c,A,B,C)
if A==90 | B==90
'error, right angle can only be C'
end
x=0;
if C==90
if a~=0 & b~=0
c=sqrt(a^2+b^2);
A=asind(a/c);
B=asind(b/c);
x=1;
end
if a~=0 & c~=0 & x==0
b=sqrt(c^2-a^2);
A=asind(a/c);
B=asind(b/c);
x=1;
end
if b~=0 & c~=0 & x==0
a=sqrt(c^2-b^2);
A=asind(a/c);
B=asind(b/c);
x=1;
end
if A~=0 & x==0
B=90-A;
if a~=0
b=a/tand(A);
c=a/sind(A);
x=1;
end
if b~=0 & x==0
a=tand(A)*b;
c=b/cosd(A);
x=1;
end
if c~=0 & x==0
a=sind(A)*c;
b=cosd(A)*c;
x=1;
end
end
if B~=0 & x==0
A=90-B;
if a~=0
b=a/tand(A);
c=a/sind(A);
x=1;
end
if b~=0 & x==0
a=tand(A)*b;
c=b/cosd(A);
x=1;
end
if c~=0 & x==0
a=sind(A)*c;
b=cosd(A)*c;
x=1;
end
end
end
if a~=0 & b~=0 & c~=0 & x==0
A=acosd((b^2+c^2-a^2)/2/b/c);
B=asind(sind(A)*b/a);
C=180-A-B;
x=1;
end
if a~=0 & b~=0 & C~=0 & x==0
c=sqrt(a^2+b^2-2*a*b*cosd(C));
A=asind(sind(C)*a/c);
B=180-A-C;
x=1;
end
if a~=0 & c~=0 & B~=0 & x==0
b=sqrt(a^2+c^2-2*a*c*cosd(B));
A=asind(sind(B)*a/b);
C=180-B-A;
x=1;
end
if b~=0 & c~=0 & A~=0 & x==0
a=sqrt(b^2+c^2-2*b*c*cosd(A));
B=asind(sind(A)*b/a);
C=180-A-B;
x=1;
end
if A~=0 & B~=0 & x==0
C=180-A-B;
if a~=0
b=sind(B)*a/sind(A);
c=sind(C)*a/sind(A);
x=1;
end
if b~=0 & x==0
a=sind(A)*b/sind(B);
c=sind(C)*b/sind(B);
x=1;
end
if c~=0 & x==0
a=sind(A)*c/sind(C);
b=sind(B)*c/sind(C);
x=1;
end
end
if A~=0 & C~=0 & x==0
B=180-A-C;
if a~=0
b=sind(B)*a/sind(A);
c=sind(C)*a/sind(A);
x=1;
end
if b~=0 & x==0
a=sind(A)*b/sind(B);
c=sind(C)*b/sind(B);
x=1;
end
if c~=0 & x==0
a=sind(A)*c/sind(C);
b=sind(B)*c/sind(C);
x=1;
end
end
if B~=0 & C~=0 & x==0
A=180-B-C;
if a~=0
b=sind(B)*a/sind(A);
c=sind(C)*a/sind(A);
x=1;
end
if b~=0 & x==0
a=sind(A)*b/sind(B);
c=sind(C)*b/sind(B);
x=1;
end
if c~=0 & x==0
a=sind(A)*c/sind(C);
b=sind(B)*c/sind(C);
x=1;
end
end
if x==0
'not enough information was given'
else
a
b
c
A
B
C
end
%How to use tritrig:
%tritrig is in the form 'tritrig(a,b,c,A,B,C)' where a, b, c are the side
%lengths and A, B, C are the angles of a triangle. If your triangle has a
%right angle in it, it must be 'C' and the hypotenuse must be 'c'. The
%angles and lengths that you do not know must be typed as '0'.
%Eg.
%tritrig(3,4,5,0,0,0)
%A =
% 36.8699
%B=
% 53.1301
%C=
% 90.0000 |
State Before: l : Type ?u.903237
m : Type u_1
n : Type ?u.903243
o : Type ?u.903246
m' : o → Type ?u.903251
n' : o → Type ?u.903256
R : Type ?u.903259
S : Type ?u.903262
α : Type v
β : Type w
γ : Type ?u.903269
inst✝³ : NonAssocSemiring α
inst✝² : Fintype m
inst✝¹ : Fintype n
inst✝ : DecidableEq m
v : m → α
⊢ mulVec 1 v = v State After: case h
l : Type ?u.903237
m : Type u_1
n : Type ?u.903243
o : Type ?u.903246
m' : o → Type ?u.903251
n' : o → Type ?u.903256
R : Type ?u.903259
S : Type ?u.903262
α : Type v
β : Type w
γ : Type ?u.903269
inst✝³ : NonAssocSemiring α
inst✝² : Fintype m
inst✝¹ : Fintype n
inst✝ : DecidableEq m
v : m → α
x✝ : m
⊢ mulVec 1 v x✝ = v x✝ Tactic: ext State Before: case h
l : Type ?u.903237
m : Type u_1
n : Type ?u.903243
o : Type ?u.903246
m' : o → Type ?u.903251
n' : o → Type ?u.903256
R : Type ?u.903259
S : Type ?u.903262
α : Type v
β : Type w
γ : Type ?u.903269
inst✝³ : NonAssocSemiring α
inst✝² : Fintype m
inst✝¹ : Fintype n
inst✝ : DecidableEq m
v : m → α
x✝ : m
⊢ mulVec 1 v x✝ = v x✝ State After: no goals Tactic: rw [← diagonal_one, mulVec_diagonal, one_mul] |
lemma Bseq_eq_bounded: fixes f :: "nat \<Rightarrow> 'a::real_normed_vector" shows "Bseq f \<longleftrightarrow> bounded (range f)" |
From CoqAlgs Require Export Base.
Set Implicit Arguments.
(* Commutative rings with unit. *)
Class UCRing : Type :=
{
carrier : Type;
add : carrier -> carrier -> carrier;
mul : carrier -> carrier -> carrier;
zero : carrier;
one : carrier;
neg : carrier -> carrier;
add_assoc: forall x y z : carrier, add (add x y) z = add x (add y z);
add_comm : forall x y : carrier, add x y = add y x;
zero_l : forall x : carrier, add zero x = x;
zero_r : forall x : carrier, add x zero = x;
neg_l : forall x : carrier, add (neg x) x = zero;
neg_r : forall x : carrier, add x (neg x) = zero;
mul_assoc: forall x y z : carrier, mul (mul x y) z = mul x (mul y z);
mul_comm : forall x y : carrier, mul x y = mul y x;
one_l : forall x : carrier, mul one x = x;
one_r : forall x : carrier, mul x one = x;
distr_l : forall x y z : carrier,
mul x (add y z) = add (mul x y) (mul x z);
distr_r : forall x y z : carrier,
mul (add x y) z = add (mul x z) (mul y z);
}.
Notation "x + y" := (add x y).
Notation "x - y" := (add x (neg y)).
Notation "x * y" := (mul x y).
Notation "0" := zero.
Notation "1" := one.
Notation "- x" := (neg x).
Coercion carrier : UCRing >-> Sortclass.
(* Basic tactics for rewriting UCRing axioms. *)
#[global] Hint Rewrite @zero_l @zero_r @one_l @one_r @neg_l @neg_r : units.
#[global] Hint Rewrite @add_assoc @mul_assoc : assoc.
#[global] Hint Rewrite <- @add_assoc @mul_assoc : assoc'.
#[global] Hint Rewrite @add_comm @mul_comm : comm.
#[global] Hint Rewrite @distr_l @distr_r : distr.
#[global] Hint Rewrite <- @distr_l @distr_r : distr'.
Ltac rng := cbn; intros; autorewrite with units assoc distr; try congruence.
Ltac rng' := cbn; intros; autorewrite with units assoc' distr'; try congruence.
(* Basic lemmas. *)
Lemma add_cancel_l :
forall (X : UCRing) (a b b' : X), a + b = a + b' -> b = b'.
Proof.
intros.
assert (-a + (a + b) = b); rng'.
assert (-a + (a + b') = b'); rng'.
Qed.
Lemma add_cancel_r :
forall (X : UCRing) (a a' b : X), a + b = a' + b -> a = a'.
Proof.
intros. rewrite (add_comm a), (add_comm a') in H.
eapply add_cancel_l. exact H.
Qed.
Lemma neg_neg :
forall (X : UCRing) (x : X), --x = x.
Proof.
intros.
assert (-x + x = 0); rng.
assert (-x + --x = 0); rng.
eapply add_cancel_l. rewrite H0. rng.
Qed.
Lemma minus_a_a :
forall (X : UCRing) (a : X), a - a = 0.
Proof. rng. Qed.
Lemma mul_0_l :
forall (X : UCRing) (x : X), 0 * x = 0.
Proof.
intros.
assert (0 * x = (0 + 0) * x); rng.
rewrite distr_r in H.
assert (0 * x - 0 * x = 0 * x + (0 * x - 0 * x)).
rewrite <- add_assoc, <- H. trivial.
rewrite (minus_a_a X (0 * x)) in H0. rewrite zero_r in H0. rng.
Qed.
Lemma mul_0_r :
forall (X : UCRing) (x : X), x * 0 = 0.
Proof.
intros. rewrite mul_comm. apply mul_0_l.
Qed.
Lemma minus_zero :
forall X : UCRing, -0 = 0.
Proof.
intro.
rewrite <- (neg_l zero) at 2.
rewrite zero_r.
reflexivity.
Qed.
Lemma minus_one_l :
forall (X : UCRing) (a : X), -(1) * a = -a.
Proof.
intros.
apply (add_cancel_l X (1 * a)).
rewrite <- distr_r, one_l, 2!minus_a_a, mul_0_l.
reflexivity.
Qed.
Lemma minus_one_r :
forall (X : UCRing) (a : X), a * -(1) = -a.
Proof.
intros. rewrite mul_comm. apply minus_one_l.
Qed.
Lemma minus_one_x2 :
forall X : UCRing, -(1) * -(1) = 1.
Proof.
intros. rewrite minus_one_l, neg_neg. trivial.
Qed.
Lemma mul_minus_minus :
forall (X : UCRing) (a b : X), -a * -b = a * b.
Proof.
intros. rewrite <- (minus_one_l X a), <- (minus_one_l X b).
rewrite <- mul_assoc, (mul_comm (-(1))).
rewrite 2!mul_assoc, <- (mul_assoc (-(1))), minus_one_x2. rng.
Qed.
Lemma neg_add :
forall (X : UCRing) (a b : X), -(a + b) = -a + -b.
Proof.
intros.
assert (-(a + b) + (a + b) = 0); rng.
assert ((-a - b) + (a + b) = 0); rng.
rewrite (add_comm a). rewrite <- (add_assoc (-b)). rewrite neg_l. rng.
rewrite <- H0 in H. apply add_cancel_r in H. assumption.
Qed.
Lemma neg_mul :
forall (X : UCRing) (a b : X), -(a * b) = (-a) * b.
Proof.
intros. rewrite <- (minus_one_l X (a * b)), <- (minus_one_l X a).
rewrite ?mul_assoc. trivial.
Qed.
Lemma neg_eq :
forall (X : UCRing) (a b : X), -a = -b -> a = b.
Proof.
intros. rewrite <- (neg_neg X a), <- (neg_neg X b).
rewrite H. trivial.
Qed.
(* Hint base for lemma rewriting. *)
#[global] Hint Rewrite
add_cancel_l add_cancel_r
neg_neg neg_add neg_mul neg_eq
mul_0_l mul_0_r mul_minus_minus
minus_zero minus_one_l minus_one_r minus_one_x2
: lemmas. |
(* Property from Case-Analysis for Rippling and Inductive Proof,
Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_prop_43
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun takeWhile :: "('a => bool) => 'a list => 'a list" where
"takeWhile y (nil2) = nil2"
| "takeWhile y (cons2 z2 xs) =
(if y z2 then cons2 z2 (takeWhile y xs) else nil2)"
fun dropWhile :: "('a => bool) => 'a list => 'a list" where
"dropWhile y (nil2) = nil2"
| "dropWhile y (cons2 z2 xs) =
(if y z2 then dropWhile y xs else cons2 z2 xs)"
theorem property0 :
"((x (takeWhile p xs) (dropWhile p xs)) = xs)"
oops
end
|
= = = Artist Residency & Artist Housing = = =
|
/-
Copyright (c) 2022 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
Ported by: Joël Riou
! This file was ported from Lean 3 source module data.set.opposite
! leanprover-community/mathlib commit fc2ed6f838ce7c9b7c7171e58d78eaf7b438fb0e
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Opposite
import Mathlib.Data.Set.Image
/-!
# The opposite of a set
The opposite of a set `s` is simply the set obtained by taking the opposite of each member of `s`.
-/
variable {α : Type _}
open Opposite
namespace Set
/-- The opposite of a set `s` is the set obtained by taking the opposite of each member of `s`. -/
protected def op (s : Set α) : Set αᵒᵖ :=
unop ⁻¹' s
#align set.op Set.op
/-- The unop of a set `s` is the set obtained by taking the unop of each member of `s`. -/
protected def unop (s : Set αᵒᵖ) : Set α :=
op ⁻¹' s
#align set.unop Set.unop
@[simp]
theorem mem_op {s : Set α} {a : αᵒᵖ} : a ∈ s.op ↔ unop a ∈ s :=
Iff.rfl
#align set.mem_op Set.mem_op
@[simp 1100]
theorem op_mem_op {s : Set α} {a : α} : op a ∈ s.op ↔ a ∈ s := by rfl
#align set.op_mem_op Set.op_mem_op
@[simp]
theorem mem_unop {s : Set αᵒᵖ} {a : α} : a ∈ s.unop ↔ op a ∈ s :=
Iff.rfl
#align set.mem_unop Set.mem_unop
@[simp 1100]
theorem unop_mem_unop {s : Set αᵒᵖ} {a : αᵒᵖ} : unop a ∈ s.unop ↔ a ∈ s := by rfl
#align set.unop_mem_unop Set.unop_mem_unop
@[simp]
theorem op_unop (s : Set α) : s.op.unop = s := rfl
#align set.op_unop Set.op_unop
@[simp]
theorem unop_op (s : Set αᵒᵖ) : s.unop.op = s := rfl
#align set.unop_op Set.unop_op
/-- The members of the opposite of a set are in bijection with the members of the set itself. -/
@[simps]
def opEquiv_self (s : Set α) : s.op ≃ s :=
⟨fun x ↦ ⟨unop x, x.2⟩, fun x ↦ ⟨op x, x.2⟩, fun _ ↦ rfl, fun _ ↦ rfl⟩
#align set.op_equiv_self Set.opEquiv_self
#align set.op_equiv_self_apply_coe Set.opEquiv_self_apply_coe
#align set.op_equiv_self_symm_apply_coe Set.opEquiv_self_symm_apply_coe
/-- Taking opposites as an equivalence of powersets. -/
@[simps]
def opEquiv : Set α ≃ Set αᵒᵖ :=
⟨Set.op, Set.unop, op_unop, unop_op⟩
#align set.op_equiv Set.opEquiv
#align set.op_equiv_symm_apply Set.opEquiv_symm_apply
#align set.op_equiv_apply Set.opEquiv_apply
@[simp]
theorem singleton_op (x : α) : ({x} : Set α).op = {op x} := rfl
#align set.singleton_op Set.singleton_op
@[simp]
theorem singleton_unop (x : αᵒᵖ) : ({x} : Set αᵒᵖ).unop = {unop x} := rfl
#align set.singleton_unop Set.singleton_unop
@[simp]
theorem singleton_op_unop (x : α) : ({op x} : Set αᵒᵖ).unop = {x} := rfl
#align set.singleton_op_unop Set.singleton_op_unop
@[simp]
theorem singleton_unop_op (x : αᵒᵖ) : ({unop x} : Set α).op = {x} := rfl
#align set.singleton_unop_op Set.singleton_unop_op
end Set
|
/-
Copyright (c) Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import data.complex.determinant
import data.complex.is_R_or_C
/-!
# Normed space structure on `ℂ`.
This file gathers basic facts on complex numbers of an analytic nature.
## Main results
This file registers `ℂ` as a normed field, expresses basic properties of the norm, and gives
tools on the real vector space structure of `ℂ`. Notably, in the namespace `complex`,
it defines functions:
* `re_clm`
* `im_clm`
* `of_real_clm`
* `conj_cle`
They are bundled versions of the real part, the imaginary part, the embedding of `ℝ` in `ℂ`, and
the complex conjugate as continuous `ℝ`-linear maps. The last two are also bundled as linear
isometries in `of_real_li` and `conj_lie`.
We also register the fact that `ℂ` is an `is_R_or_C` field.
-/
noncomputable theory
namespace complex
open_locale complex_conjugate
instance : has_norm ℂ := ⟨abs⟩
instance : normed_group ℂ :=
normed_group.of_core ℂ
{ norm_eq_zero_iff := λ z, abs_eq_zero,
triangle := abs_add,
norm_neg := abs_neg }
instance : normed_field ℂ :=
{ norm := abs,
dist_eq := λ _ _, rfl,
norm_mul' := abs_mul,
.. complex.field }
instance : nondiscrete_normed_field ℂ :=
{ non_trivial := ⟨2, by simp [norm]; norm_num⟩ }
instance {R : Type*} [normed_field R] [normed_algebra R ℝ] : normed_algebra R ℂ :=
{ norm_algebra_map_eq := λ x, (abs_of_real $ algebra_map R ℝ x).trans (norm_algebra_map_eq ℝ x),
to_algebra := complex.algebra }
/-- The module structure from `module.complex_to_real` is a normed space. -/
@[priority 900] -- see Note [lower instance priority]
instance _root_.normed_space.complex_to_real {E : Type*} [normed_group E] [normed_space ℂ E] :
normed_space ℝ E :=
normed_space.restrict_scalars ℝ ℂ E
@[simp] lemma norm_eq_abs (z : ℂ) : ∥z∥ = abs z := rfl
lemma dist_eq (z w : ℂ) : dist z w = abs (z - w) := rfl
lemma dist_self_conj (z : ℂ) : dist z (conj z) = 2 * |z.im| :=
by simp only [dist_eq, sub_conj, of_real_mul, of_real_bit0, of_real_one, abs_mul, abs_two,
abs_of_real, abs_I, mul_one]
lemma dist_conj_self (z : ℂ) : dist (conj z) z = 2 * |z.im| :=
by rw [dist_comm, dist_self_conj]
@[simp] lemma norm_real (r : ℝ) : ∥(r : ℂ)∥ = ∥r∥ := abs_of_real _
@[simp] lemma norm_rat (r : ℚ) : ∥(r : ℂ)∥ = |(r : ℝ)| :=
by { rw ← of_real_rat_cast, exact norm_real _ }
@[simp] lemma norm_nat (n : ℕ) : ∥(n : ℂ)∥ = n := abs_of_nat _
@[simp] lemma norm_int {n : ℤ} : ∥(n : ℂ)∥ = |n| :=
by simp [← rat.cast_coe_int] {single_pass := tt}
lemma norm_int_of_nonneg {n : ℤ} (hn : 0 ≤ n) : ∥(n : ℂ)∥ = n :=
by simp [hn]
@[continuity] lemma continuous_abs : continuous abs := continuous_norm
@[continuity] lemma continuous_norm_sq : continuous norm_sq :=
by simpa [← norm_sq_eq_abs] using continuous_abs.pow 2
@[simp, norm_cast] lemma nnnorm_real (r : ℝ) : ∥(r : ℂ)∥₊ = ∥r∥₊ :=
subtype.ext $ norm_real r
@[simp, norm_cast] lemma nnnorm_nat (n : ℕ) : ∥(n : ℂ)∥₊ = n :=
subtype.ext $ by simp
@[simp, norm_cast] lemma nnnorm_int (n : ℤ) : ∥(n : ℂ)∥₊ = ∥n∥₊ :=
subtype.ext $ by simp only [coe_nnnorm, norm_int, int.norm_eq_abs]
lemma nnnorm_eq_one_of_pow_eq_one {ζ : ℂ} {n : ℕ} (h : ζ ^ n = 1) (hn : n ≠ 0) :
∥ζ∥₊ = 1 :=
begin
refine (@pow_left_inj nnreal _ _ _ _ zero_le' zero_le' hn.bot_lt).mp _,
rw [←nnnorm_pow, h, nnnorm_one, one_pow],
end
lemma norm_eq_one_of_pow_eq_one {ζ : ℂ} {n : ℕ} (h : ζ ^ n = 1) (hn : n ≠ 0) :
∥ζ∥ = 1 :=
congr_arg coe (nnnorm_eq_one_of_pow_eq_one h hn)
/-- The `abs` function on `ℂ` is proper. -/
lemma tendsto_abs_cocompact_at_top : filter.tendsto abs (filter.cocompact ℂ) filter.at_top :=
tendsto_norm_cocompact_at_top
/-- The `norm_sq` function on `ℂ` is proper. -/
lemma tendsto_norm_sq_cocompact_at_top :
filter.tendsto norm_sq (filter.cocompact ℂ) filter.at_top :=
by simpa [mul_self_abs] using
tendsto_abs_cocompact_at_top.at_top_mul_at_top tendsto_abs_cocompact_at_top
open continuous_linear_map
/-- Continuous linear map version of the real part function, from `ℂ` to `ℝ`. -/
def re_clm : ℂ →L[ℝ] ℝ := re_lm.mk_continuous 1 (λ x, by simp [real.norm_eq_abs, abs_re_le_abs])
@[continuity] lemma continuous_re : continuous re := re_clm.continuous
@[simp] lemma re_clm_coe : (coe (re_clm) : ℂ →ₗ[ℝ] ℝ) = re_lm := rfl
@[simp] lemma re_clm_apply (z : ℂ) : (re_clm : ℂ → ℝ) z = z.re := rfl
@[simp] lemma re_clm_norm : ∥re_clm∥ = 1 :=
le_antisymm (linear_map.mk_continuous_norm_le _ zero_le_one _) $
calc 1 = ∥re_clm 1∥ : by simp
... ≤ ∥re_clm∥ : unit_le_op_norm _ _ (by simp)
/-- Continuous linear map version of the imaginary part function, from `ℂ` to `ℝ`. -/
def im_clm : ℂ →L[ℝ] ℝ := im_lm.mk_continuous 1 (λ x, by simp [real.norm_eq_abs, abs_im_le_abs])
@[continuity] lemma continuous_im : continuous im := im_clm.continuous
@[simp] lemma im_clm_coe : (coe (im_clm) : ℂ →ₗ[ℝ] ℝ) = im_lm := rfl
@[simp] lemma im_clm_apply (z : ℂ) : (im_clm : ℂ → ℝ) z = z.im := rfl
@[simp] lemma im_clm_norm : ∥im_clm∥ = 1 :=
le_antisymm (linear_map.mk_continuous_norm_le _ zero_le_one _) $
calc 1 = ∥im_clm I∥ : by simp
... ≤ ∥im_clm∥ : unit_le_op_norm _ _ (by simp)
lemma restrict_scalars_one_smul_right' {E : Type*} [normed_group E] [normed_space ℂ E] (x : E) :
continuous_linear_map.restrict_scalars ℝ ((1 : ℂ →L[ℂ] ℂ).smul_right x : ℂ →L[ℂ] E) =
re_clm.smul_right x + I • im_clm.smul_right x :=
by { ext ⟨a, b⟩, simp [mk_eq_add_mul_I, add_smul, mul_smul, smul_comm I] }
lemma restrict_scalars_one_smul_right (x : ℂ) :
continuous_linear_map.restrict_scalars ℝ ((1 : ℂ →L[ℂ] ℂ).smul_right x : ℂ →L[ℂ] ℂ) = x • 1 :=
by { ext1 z, dsimp, apply mul_comm }
/-- The complex-conjugation function from `ℂ` to itself is an isometric linear equivalence. -/
def conj_lie : ℂ ≃ₗᵢ[ℝ] ℂ := ⟨conj_ae.to_linear_equiv, abs_conj⟩
@[simp] lemma conj_lie_apply (z : ℂ) : conj_lie z = conj z := rfl
@[simp] lemma conj_lie_symm : conj_lie.symm = conj_lie := rfl
lemma isometry_conj : isometry (conj : ℂ → ℂ) := conj_lie.isometry
@[simp] lemma dist_conj_conj (z w : ℂ) : dist (conj z) (conj w) = dist z w :=
isometry_conj.dist_eq z w
lemma dist_conj_comm (z w : ℂ) : dist (conj z) w = dist z (conj w) :=
by rw [← dist_conj_conj, conj_conj]
/-- The determinant of `conj_lie`, as a linear map. -/
@[simp] lemma det_conj_lie : (conj_lie.to_linear_equiv : ℂ →ₗ[ℝ] ℂ).det = -1 :=
det_conj_ae
/-- The determinant of `conj_lie`, as a linear equiv. -/
@[simp] lemma linear_equiv_det_conj_lie : conj_lie.to_linear_equiv.det = -1 :=
linear_equiv_det_conj_ae
@[continuity] lemma continuous_conj : continuous (conj : ℂ → ℂ) := conj_lie.continuous
/-- Continuous linear equiv version of the conj function, from `ℂ` to `ℂ`. -/
def conj_cle : ℂ ≃L[ℝ] ℂ := conj_lie
@[simp] lemma conj_cle_coe : conj_cle.to_linear_equiv = conj_ae.to_linear_equiv := rfl
@[simp] lemma conj_cle_apply (z : ℂ) : conj_cle z = conj z := rfl
@[simp] lemma conj_cle_norm : ∥(conj_cle : ℂ →L[ℝ] ℂ)∥ = 1 :=
conj_lie.to_linear_isometry.norm_to_continuous_linear_map
/-- Linear isometry version of the canonical embedding of `ℝ` in `ℂ`. -/
def of_real_li : ℝ →ₗᵢ[ℝ] ℂ := ⟨of_real_am.to_linear_map, norm_real⟩
lemma isometry_of_real : isometry (coe : ℝ → ℂ) := of_real_li.isometry
@[continuity] lemma continuous_of_real : continuous (coe : ℝ → ℂ) := of_real_li.continuous
/-- Continuous linear map version of the canonical embedding of `ℝ` in `ℂ`. -/
def of_real_clm : ℝ →L[ℝ] ℂ := of_real_li.to_continuous_linear_map
@[simp] lemma of_real_clm_coe : (of_real_clm : ℝ →ₗ[ℝ] ℂ) = of_real_am.to_linear_map := rfl
@[simp] lemma of_real_clm_apply (x : ℝ) : of_real_clm x = x := rfl
@[simp] lemma of_real_clm_norm : ∥of_real_clm∥ = 1 := of_real_li.norm_to_continuous_linear_map
noncomputable instance : is_R_or_C ℂ :=
{ re := ⟨complex.re, complex.zero_re, complex.add_re⟩,
im := ⟨complex.im, complex.zero_im, complex.add_im⟩,
I := complex.I,
I_re_ax := by simp only [add_monoid_hom.coe_mk, complex.I_re],
I_mul_I_ax := by simp only [complex.I_mul_I, eq_self_iff_true, or_true],
re_add_im_ax := λ z, by simp only [add_monoid_hom.coe_mk, complex.re_add_im,
complex.coe_algebra_map, complex.of_real_eq_coe],
of_real_re_ax := λ r, by simp only [add_monoid_hom.coe_mk, complex.of_real_re,
complex.coe_algebra_map, complex.of_real_eq_coe],
of_real_im_ax := λ r, by simp only [add_monoid_hom.coe_mk, complex.of_real_im,
complex.coe_algebra_map, complex.of_real_eq_coe],
mul_re_ax := λ z w, by simp only [complex.mul_re, add_monoid_hom.coe_mk],
mul_im_ax := λ z w, by simp only [add_monoid_hom.coe_mk, complex.mul_im],
conj_re_ax := λ z, rfl,
conj_im_ax := λ z, rfl,
conj_I_ax := by simp only [complex.conj_I, ring_hom.coe_mk],
norm_sq_eq_def_ax := λ z, by simp only [←complex.norm_sq_eq_abs, ←complex.norm_sq_apply,
add_monoid_hom.coe_mk, complex.norm_eq_abs],
mul_im_I_ax := λ z, by simp only [mul_one, add_monoid_hom.coe_mk, complex.I_im],
inv_def_ax := λ z, by simp only [complex.inv_def, complex.norm_sq_eq_abs, complex.coe_algebra_map,
complex.of_real_eq_coe, complex.norm_eq_abs],
div_I_ax := complex.div_I }
lemma _root_.is_R_or_C.re_eq_complex_re : ⇑(is_R_or_C.re : ℂ →+ ℝ) = complex.re := rfl
lemma _root_.is_R_or_C.im_eq_complex_im : ⇑(is_R_or_C.im : ℂ →+ ℝ) = complex.im := rfl
section
variables {α β γ : Type*}
[add_comm_monoid α] [topological_space α] [add_comm_monoid γ] [topological_space γ]
/-- The natural `add_equiv` from `ℂ` to `ℝ × ℝ`. -/
@[simps apply symm_apply_re symm_apply_im { simp_rhs := tt }]
def equiv_real_prod_add_hom : ℂ ≃+ ℝ × ℝ :=
{ map_add' := by simp, .. equiv_real_prod }
/-- The natural `linear_equiv` from `ℂ` to `ℝ × ℝ`. -/
@[simps apply symm_apply_re symm_apply_im { simp_rhs := tt }]
def equiv_real_prod_add_hom_lm : ℂ ≃ₗ[ℝ] ℝ × ℝ :=
{ map_smul' := by simp [equiv_real_prod_add_hom], .. equiv_real_prod_add_hom }
/-- The natural `continuous_linear_equiv` from `ℂ` to `ℝ × ℝ`. -/
@[simps apply symm_apply_re symm_apply_im { simp_rhs := tt }]
def equiv_real_prodₗ : ℂ ≃L[ℝ] ℝ × ℝ :=
equiv_real_prod_add_hom_lm.to_continuous_linear_equiv
end
lemma has_sum_iff {α} (f : α → ℂ) (c : ℂ) :
has_sum f c ↔ has_sum (λ x, (f x).re) c.re ∧ has_sum (λ x, (f x).im) c.im :=
begin
-- For some reason, `continuous_linear_map.has_sum` is orders of magnitude faster than
-- `has_sum.mapL` here:
refine ⟨λ h, ⟨re_clm.has_sum h, im_clm.has_sum h⟩, _⟩,
rintro ⟨h₁, h₂⟩,
convert (h₁.prod_mk h₂).mapL equiv_real_prodₗ.symm.to_continuous_linear_map,
{ ext x; refl },
{ cases c, refl }
end
end complex
namespace is_R_or_C
local notation `reC` := @is_R_or_C.re ℂ _
local notation `imC` := @is_R_or_C.im ℂ _
local notation `IC` := @is_R_or_C.I ℂ _
local notation `absC` := @is_R_or_C.abs ℂ _
local notation `norm_sqC` := @is_R_or_C.norm_sq ℂ _
@[simp] lemma re_to_complex {x : ℂ} : reC x = x.re := rfl
@[simp] lemma im_to_complex {x : ℂ} : imC x = x.im := rfl
@[simp] lemma I_to_complex : IC = complex.I := rfl
@[simp] lemma norm_sq_to_complex {x : ℂ} : norm_sqC x = complex.norm_sq x :=
by simp [is_R_or_C.norm_sq, complex.norm_sq]
@[simp] lemma abs_to_complex {x : ℂ} : absC x = complex.abs x :=
by simp [is_R_or_C.abs, complex.abs]
end is_R_or_C
|
iGlobal is a completely free online directory that aims to provide information and contact details for businesses, services, trades, and professionals, so users can find everything they are looking for in one place.
This general disclaimer is intended to regulate access and use of this information guide and to define the relationship between users and the guide.
All of the content of this website is copyrighted by iGlobal, with the exception of images, sounds and computer graphics used under license, and any media extracted via automatic methods of tracking from public sources. All rights are reserved.
iGlobal is not responsible for the consequences or damages arising from access to this website, the use of information contained therein, or access to other materials on the Internet accessed through links.
iGlobal also reserves the right to make modifications to the website and information contained within the website at any time and without prior notice.
For general inquiries about iGlobal services, please refer to our Suggestions form.
You may request to cancel/delete your listing by filling out the proper form found in our directory and clicking on Cancel. |
= Old Baltimore Pike =
|
# Background Subtraction Assignment
In this second assignment of the sequence-analysis block we will work with 3 background subtraction techniques.
First, you must implement your own background subtraction method based on the exponential filter idea that we explained. You may make whatever adaptations to the general idea of the method you find necessary to improve its performance.
Second, you must use the MOG and MOG2 implementations found in the
[OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html) library.
Third, you must compare your method against the two Mixture-of-Gaussians approaches using the dataset of the 2012 edition of the [changedetection.net](http://changedetection.net/) competition.
Specifically, you must use one sequence from each category except *Thermal*.
The metrics to use for each sequence are:
* **TP** : True Positive
* **FP** : False Positive
* **FN** : False Negative
* **TN** : True Negative
* **Re (Recall)** : TP / (TP + FN)
* **Precision** : TP / (TP + FP)
* **F-Measure** : (2 * Precision * Recall) / (Precision + Recall)
Finally, you are asked to analyse the results obtained for each sequence: problems, shortcomings, and strong points of the algorithms...
**Deliverables:**
* Explanation of the implemented algorithm and source code.
* Results of the sequences as images or video.
* Results of the sequences as a summary table of the metrics.
* Analysis of the results.
Sequences to use:
- baseline: all
- cameraJitter: badminton, traffic
- dynamicBackground: canoe, fall, fountain02
- intermittentObjectMotion: sofa
- shadow: busStation, copyMachine, cubicle, peopleInShade
### Metrics
```python
def recall(tp, fn):
    return (tp/(tp+fn))

def precision(tp, fp):
    return (tp/(tp+fp))

def f_measure(precision, recall):
    return ((2*precision*recall)/(precision+recall))

def all_metrics (resultPath, groundtruthPath, start_frame, end_frame):
    # Load the results of the given filter and the groundtruth
    result = loadImages(resultPath)
    groundTruth = loadImages(groundtruthPath)
    # The groundtruth starts at start_frame; frames are numbered 1 to n, but the frame array goes from 0 to n-1
    total_frames = end_frame-start_frame
    # Initialise one array per metric holding the per-frame result
    tp = np.zeros(total_frames)
    fp = np.zeros(total_frames)
    fn = np.zeros(total_frames)
    tn = np.zeros(total_frames)
    # Compute True Positive, False Positive, False Negative, True Negative for each frame
    for i in range(total_frames):
        tp[i], fp[i], fn[i], tn[i] = comparator(result[i+start_frame], groundTruth[i+start_frame])
    # Compute Recall, Precision and F-measure for each frame
    recall_frame = recall(tp=tp, fn=fn)
    precision_frame = precision(tp=tp, fp=fp)
    fMeasure_frame = f_measure(precision=precision_frame, recall=recall_frame)
    # Compute the same metrics over the whole sequence
    tp_secuencia = tp.sum()
    fp_secuencia = fp.sum()
    fn_secuencia = fn.sum()
    tn_secuencia = tn.sum()
    recall_secuencia = recall(tp=tp_secuencia, fn=fn_secuencia)
    precision_secuencia = precision(tp=tp_secuencia, fp=fp_secuencia)
    fMeasure_secuencia = f_measure(precision=precision_secuencia, recall=recall_secuencia)
    cm_frames = np.array([tp,fp,fn,tn])
    cm_secuencia = np.array ([tp_secuencia, fp_secuencia, fn_secuencia, tn_secuencia])
    r_p_fm_frames = np.array([recall_frame, precision_frame, fMeasure_frame])
    r_p_fm_secuencia = np.array([recall_secuencia, precision_secuencia, fMeasure_secuencia])
    return cm_frames, cm_secuencia, r_p_fm_frames, r_p_fm_secuencia
```
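`all_metrics` relies on `comparator`, which is defined in _utils.py_ and not shown here. A minimal sketch consistent with the call above might look as follows (it assumes binary masks where foreground is labelled 255, as in the changedetection.net groundtruth, and ignores the intermediate gray labels the dataset uses for shadows and unknown regions):
```python
import numpy as np

def comparator_sketch(result, ground_truth):
    # Binarise both masks: foreground pixels are labelled 255
    fg_pred = result > 127
    fg_true = ground_truth > 127
    tp = np.sum(fg_pred & fg_true)     # foreground predicted as foreground
    fp = np.sum(fg_pred & ~fg_true)    # background predicted as foreground
    fn = np.sum(~fg_pred & fg_true)    # foreground predicted as background
    tn = np.sum(~fg_pred & ~fg_true)   # background predicted as background
    return tp, fp, fn, tn
```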
### Exponential filter for _background subtraction_
It starts from the most basic background-removal idea: subtract the **_background_** frame from the current frame to obtain the **_foreground_**.
The first problem, or task, in implementing this algorithm is obtaining that **_background_** frame. Ideally we would have an image showing the background with no other object in it, but this is not always possible, so other solutions are needed. One of them is to estimate the background as the mean or the median of the first _n_ frames.
The second problem is that in practical cases the background changes over time, so the _background_ frame we estimated may no longer be valid later on. The proposed improvement is therefore to estimate the background recursively from the previous frame using the following formula:
$$
\begin{align}
B_t=(1-{\alpha})B_{t-1}+{\alpha}I_t
\end{align}
$$
Where $B_t$ is the background estimate for the current frame, $B_{t-1}$ is the background for the previous frame, $I_t$ is the current frame, and $\alpha$ is the learning rate indicating how much of the current frame is absorbed into the background used for the next frame.
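As a reference, below is a minimal sketch of this update rule (it assumes the frames are NumPy arrays and uses a fixed threshold and a median initialisation; the actual `exponentialFilter` in _utils.py_, used later in this notebook, takes additional parameters such as the output path and the `bgFilter` mode, and may differ in details):
```python
import numpy as np

def exponential_filter_sketch(frames, alpha=0.3, n_init=20, threshold=30):
    # Estimate the initial background B_0 as the median of the first n_init frames
    background = np.median(np.stack(frames[:n_init]).astype(np.float64), axis=0)
    masks = []
    for frame in frames[n_init:]:
        current = frame.astype(np.float64)
        # Foreground = pixels that deviate enough from the background estimate
        masks.append((np.abs(current - background) > threshold).astype(np.uint8) * 255)
        # B_t = (1 - alpha) * B_{t-1} + alpha * I_t
        background = (1 - alpha) * background + alpha * current
    return masks
```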
```python
from utils import *
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
```
To keep the _notebook_ clean, the functions used for the analysis have been defined in the file _utils.py_.
The functions found there are the following:
- **comparator**: compares a _frame_ produced by a background subtraction model with the expected result (_groundtruth_)
- **loadImages**: returns a list with the images inside the specified folder
- **exponentialFilter**: implementation of the algorithm based on the exponential filter
- **MOG**: implementation of the MOG algorithm available [here](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html)
- **MOG2**: implementation of the MOG2 algorithm available [here](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html) (a sketch of such a wrapper is given below)
- **im2vid**: generates a video from the images located in a folder
- **showVideo**: shows the video in a floating window to better visualise the model's result
Note that, even though these algorithms work with video, the _dataset_ provided for the tests consists of images representing the frames of a sequence. The functions written here were therefore designed to work with images instead of video.
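For reference, a minimal wrapper along the lines of the `MOG`/`MOG2` functions in _utils.py_ could look like this (the OpenCV calls are standard, but the parameters and the way masks are saved in the real functions are assumptions; the original MOG requires the opencv-contrib package):
```python
import cv2

def mog_masks_sketch(frames, use_mog2=True):
    if use_mog2:
        subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    else:
        # The original MOG lives in the contrib module
        subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    # Feed the frames in order; each call updates the model and returns a mask
    return [subtractor.apply(frame) for frame in frames]
```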
Steps to follow for each sequence:
1. Load the images into a variable (path of the images)
2. Apply the desired filter: EF, MOG and MOG2 (path where the filtered images will be saved)
3. Obtain the metrics with comparator (loop over the result images -> path of the groundtruth)
4. Show the results and videos
5. Compare the results (plot the metrics along the sequence)
```python
# dynamicBackground canoe dataset output paths
expFilter_canoe_path = 'DATA/dynamicBackground/results/canoe/expFilter/'
MOG_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG/'
MOG2_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG2'
# intermittentObject sofa dataset output paths
expFilter_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/expFilter/'
MOG_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG'
MOG2_sofa_path = 'DATA/intermittentObjectMotion/results/sofa/MOG2'
# shadow copyMachine dataset output paths
expFilter_copyMachine_path = 'DATA/shadow/results/copyMachine/expFilter/'
MOG_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG'
MOG2_copyMachine_path = 'DATA/shadow/results/copyMachine/MOG2'
```
# 1. _highway_ sequence from the _baseline_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# baseline highway dataset groundtruth y input paths
gt_highway_path = 'DATA/baseline/baseline/highway/groundtruth/*.png'
in_highway_path = 'DATA/baseline/baseline/highway/input/*.jpg'
groundTruth = loadImages(gt_highway_path)
frames = loadImages(in_highway_path)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2667.40it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:01<00:00, 1208.82it/s]
## 1.1. Exponential filter algorithm
We apply the exponential-filter-based algorithm we implemented.
```python
# baseline highway dataset output path
expFilter_highway_path = 'DATA/baseline/results/highway/expFilter/'
exponentialFilter(frames, 0.3, expFilter_highway_path, 20, bgFilter ='MEDIAN')
```
Algorithm application: 100%|█████████████████████████████████████████████████████| 1680/1680 [00:01<00:00, 1032.18it/s]
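In this call, `0.3` is presumably the learning factor $\alpha$, `20` the number of initial frames used to build the first background estimate, and `bgFilter='MEDIAN'` the statistic (median) applied to those frames to obtain that estimate.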
We convert the image sequence into a video to inspect the result more easily.
```python
#im2vid(expFilter_highway_path+'*.png','testvid.mp4')
```
We play the video in a floating window.
```python
#showVideo('testvid.mp4')
```
### 1.1.1. Metric calculation
We compute the metrics against the provided _ground truth_. For each sequence a ROI (Region Of Interest) is supplied over which the groundtruth is defined; to simplify the computation we selected sequences whose ROI is the whole image. The start and end _frames_ over which the algorithm has to be evaluated are also specified, so the metrics are only computed within that range.
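For reference, the per-sequence metrics derived from the confusion matrix are presumably the standard ones:
$$
\begin{align}
\text{recall}=\frac{TP}{TP+FN},\qquad
\text{precision}=\frac{TP}{TP+FP},\qquad
\text{F-measure}=\frac{2\cdot\text{precision}\cdot\text{recall}}{\text{precision}+\text{recall}}
\end{align}
$$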
```python
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 262.06it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2594.43it/s]
```python
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
ax1.plot(r_p_fm_frames_EF[0])
ax1.set_title("recall")
ax2.plot(r_p_fm_frames_EF[1])
ax2.set_title("precision")
ax3.plot(r_p_fm_frames_EF[2])
ax3.set_title("f-measure")
plt.ylim([0,1])
fig.tight_layout()
plt.show()
```
```python
# `precision_expFilter` comes from an earlier run of this notebook; it holds
# the per-frame precision, i.e. the same data as r_p_fm_frames_EF[1] above
precision_expFilter
```
array([0.05653631, 0.04029126, 0.01318373, ..., 0.62876909, 0.61493709,
0.6044432 ])
```python
# `tp_expFilter` (per-frame true positives) also comes from an earlier run
plt.plot(tp_expFilter)
plt.show()
```
## 1.2. MOG algorithm
We apply the MOG (Mixture Of Gaussians) algorithm.
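The `MOG` wrapper in _utils.py_ presumably applies OpenCV's estimator frame by frame, along the lines of this sketch (the `bgsegm` constructor requires the `opencv-contrib-python` package):
```python
import cv2

def mog_sketch(frames):
    # Gaussian-mixture background/foreground segmentation (MOG)
    subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    return [subtractor.apply(frame) for frame in frames]
```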
```python
# baseline highway dataset output path
MOG_highway_path = 'DATA/baseline/results/highway/MOG/'
MOG(frames,MOG_highway_path)
```
Algorithm application2: 100%|█████████████████████████████████████████████████████| 1700/1700 [00:03<00:00, 498.40it/s]
```python
im2vid(MOG_highway_path+'*.png','testvid.mp4')
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 260.80it/s]
Converting to video: 100%|███████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 3580.98it/s]
```python
showVideo('testvid.mp4')
```
### 1.2.1. Metric calculation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2133.43it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2555.43it/s]
```python
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
ax1.plot(r_p_fm_frames_MOG[0])
ax2.plot(r_p_fm_frames_MOG[1])
ax3.plot(r_p_fm_frames_MOG[2])
plt.ylim([0,1])
plt.show()
```
```python
plt.plot(r_p_fm_frames_MOG[0])
plt.show()
```
## 1.3. MOG2 algorithm
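We apply the MOG2 algorithm. Unlike MOG, OpenCV's MOG2 selects the number of Gaussian components per pixel automatically and can optionally mark shadows; a minimal sketch of how the _utils.py_ wrapper might drive it:
```python
import cv2

def mog2_sketch(frames):
    # detectShadows=True marks shadow pixels in gray (value 127)
    subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    return [subtractor.apply(frame) for frame in frames]
```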
```python
# baseline highway dataset output path
MOG2_highway_path = 'DATA/baseline/results/highway/MOG2/'
MOG2(frames,MOG2_highway_path)
```
Algorithm application: 100%|██████████████████████████████████████████████████████| 1700/1700 [00:04<00:00, 407.06it/s]
### 1.3.1. Metric calculation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_highway_path+'*.png', gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1700/1700 [00:06<00:00, 255.43it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2536.53it/s]
## 1.4. Results comparison
```python
cm_frames_gt,\
cm_secuencia_gt,\
r_p_fm_frames_gt,\
r_p_fm_secuencia_gt = all_metrics (gt_highway_path, gt_highway_path, start_frame=469, end_frame=1699)
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2622.26it/s]
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 2626.55it/s]
```python
fig, ax = plt.subplots(3,3, sharex=True, sharey=True)
ax[0,0].plot(r_p_fm_frames_EF[0])
ax[0,0].set_title("recall-EF")
ax[1,0].plot(r_p_fm_frames_EF[1])
ax[1,0].set_title("precision-EF")
ax[2,0].plot(r_p_fm_frames_EF[2])
ax[2,0].set_title("f-measure-EF")
ax[0,1].plot(r_p_fm_frames_MOG[0])
ax[0,1].set_title("recall-MOG")
ax[1,1].plot(r_p_fm_frames_MOG[1])
ax[1,1].set_title("precision-MOG")
ax[2,1].plot(r_p_fm_frames_MOG[2])
ax[2,1].set_title("f-measure-MOG")
ax[0,2].plot(r_p_fm_frames_MOG2[0])
ax[0,2].set_title("recall-MOG2")
ax[1,2].plot(r_p_fm_frames_MOG2[1])
ax[1,2].set_title("precision-MOG2")
ax[2,2].plot(r_p_fm_frames_MOG2[2])
ax[2,2].set_title("f-measure-MOG2")
plt.ylim([0,1])
fig.tight_layout()
plt.show()
```
```python
import pandas as pd
```
```python
r_p_fm_secuencia_EF
```
array([0.60293747, 0.5 , 0.54666514])
```python
r_p_fm_secuencia_MOG
```
array([0.50234751, 0.5 , 0.501171 ])
```python
r_p_fm_secuencia_MOG2
```
array([0.92913693, 0.5 , 0.65013849])
```python
cm_secuencia_gt
```
array([ 5448641., 0., 0., 86228220.])
```python
cm_secuencia_MOG-cm_secuencia_gt
```
array([-3168986., 68877., 2258349., -14606.])
```python
np.concatenate((r_p_fm_secuencia_EF,cm_secuencia_EF))
```
array([6.02937474e-01, 5.00000000e-01, 5.46665145e-01, 2.95299300e+06,
3.57410400e+06, 1.94468400e+06, 6.80320250e+07])
```python
cm_secuencia_EF.sum()
```
94464000.0
```python
cm_secuencia_MOG.sum()
```
94464000.0
```python
cm_secuencia_MOG2.sum()
```
92811260.0
```python
cm_secuencia_gt.sum()
```
91676861.0
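In principle the four totals should match (number of evaluated frames × pixels per frame); the lower sums for MOG2 and the ground truth presumably reflect pixels that the groundtruth labels as unknown or outside the ROI, which are excluded from those confusion matrices.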
```python
mog2_result = loadImages(MOG2_highway_path+'*.png')
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:00<00:00, 1703.61it/s]
```python
mog2_result[469].shape
```
(240, 320, 3)
```python
ef_result = loadImages(expFilter_highway_path+'*.png')
```
Loading images: 100%|████████████████████████████████████████████████████████████| 1700/1700 [00:01<00:00, 1313.18it/s]
```python
ef_result[469] = cv2.cvtColor(ef_result[469], cv2.COLOR_BGR2GRAY)
```
```python
ef_result[469]
```
array([[ 0, 0, 1, ..., 254, 3, 0],
[ 2, 0, 0, ..., 255, 0, 0],
[254, 255, 0, ..., 0, 2, 1],
...,
[ 2, 0, 2, ..., 0, 0, 0],
[ 1, 0, 0, ..., 0, 1, 1],
[ 0, 1, 0, ..., 1, 1, 0]], dtype=uint8)
```python
df_highway = pd.DataFrame(np.array([np.concatenate((r_p_fm_secuencia_EF,cm_secuencia_EF)),
np.concatenate((r_p_fm_secuencia_MOG,cm_secuencia_MOG)),
np.concatenate((r_p_fm_secuencia_MOG2,cm_secuencia_MOG2)),
np.concatenate((r_p_fm_secuencia_gt,cm_secuencia_gt))]),
columns = ['Recall', 'Precision','F-measure','TP','FP','FN','TN'])
df_highway['Algorithm'] = ['Exponential Filter','MOG','MOG2','Ground truth']
df_highway.set_index('Algorithm')
```
| Algorithm | Recall | Precision | F-measure | TP | FP | FN | TN |
|---|---|---|---|---|---|---|---|
| Exponential Filter | 0.602937 | 0.5 | 0.546665 | 2952993.0 | 3574104.0 | 1944684.0 | 68032025.0 |
| MOG | 0.502348 | 0.5 | 0.501171 | 2279655.0 | 68877.0 | 2258349.0 | 86213614.0 |
| MOG2 | 0.929137 | 0.5 | 0.650138 | 4012554.0 | 1135816.0 | 306028.0 | 78852773.0 |
| Ground truth | 1.000000 | 1.0 | 1.000000 | 5448641.0 | 0.0 | 0.0 | 86228220.0 |
```python
df_highway
```
| | Recall | Precision | F-measure | Algorithm |
|---|---|---|---|---|
| 0 | 0.602937 | 0.5 | 0.546665 | Exponential Filter |
| 1 | 0.502348 | 0.5 | 0.501171 | MOG |
| 2 | 0.929137 | 0.5 | 0.650138 | MOG2 |
```python
df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9],[10,11,12]]),
columns=['a', 'b', 'c'])
df2
```
| | a | b | c |
|---|---|---|---|
| 0 | 1 | 2 | 3 |
| 1 | 4 | 5 | 6 |
| 2 | 7 | 8 | 9 |
| 3 | 10 | 11 | 12 |
# 2. _badminton_ sequence from the _cameraJitter_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# cameraJitter badminton dataset input
gt_badminton_path = 'DATA/cameraJitter/cameraJitter/badminton/groundtruth/*.png'
in_badminton_path = 'DATA/cameraJitter/cameraJitter/badminton/input/*.jpg'
groundTruth = loadImages(gt_badminton_path)
frames = loadImages(in_badminton_path)
```
Loading images: 100%|█████████████████████████████████████████████████████████████| 1150/1150 [00:03<00:00, 296.71it/s]
Loading images: 100%|█████████████████████████████████████████████████████████████| 1150/1150 [00:08<00:00, 129.45it/s]
## 2.1. Exponential filter algorithm
```python
# cameraJitter badminton dataset output path
expFilter_badminton_path = 'DATA/cameraJitter/results/badminton/expFilter/'
exponentialFilter(frames, 0.3, expFilter_badminton_path, 20, bgFilter='MEDIAN')
```
### 2.1.1. Metric calculation
```python
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_badminton_path+'*.png', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.2. MOG algorithm
```python
# cameraJitter badminton dataset output path
MOG_badminton_path = 'DATA/cameraJitter/results/badminton/MOG/'
MOG(frames, MOG_badminton_path)
```
### 2.2.1. Metric calculation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_badminton_path+'*.png', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.3. MOG2 algorithm
```python
# cameraJitter badminton dataset output path
MOG2_badminton_path = 'DATA/cameraJitter/results/badminton/MOG2/'
MOG2(frames, MOG2_badminton_path)
```
### 2.3.1. Metric calculation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_badminton_path+'*.png', gt_badminton_path, start_frame=800, end_frame=1150)
```
## 2.4. Results comparison
# 3. _canoe_ sequence from the _dynamicBackground_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# dynamicBackground canoe dataset input
gt_canoe_path = 'DATA/dynamicBackground/dynamicBackground/canoe/groundtruth/*.png'
in_canoe_path = 'DATA/dynamicBackground/dynamicBackground/canoe/input/*.jpg'
groundTruth = loadImages(gt_canoe_path)
frames = loadImages(in_canoe_path)
```
```python
# dynamicBackground canoe dataset output path
expFilter_canoe_path = 'DATA/dynamicBackground/results/canoe/expFilter/'
MOG_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG/'
MOG2_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG2/'
```
## 3.1. Exponential filter algorithm
```python
# dynamicBackground canoe dataset output path
expFilter_canoe_path = 'DATA/dynamicBackground/results/canoe/expFilter/'
exponentialFilter(frames, 0.3, expFilter_canoe_path, 20, bgFilter='MEDIAN')
```
### 3.1.1. Metric calculation
```python
# NOTE: the frame range below was kept from the highway sequence;
# it should match the canoe sequence's temporal ROI
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_canoe_path+'*.png', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.2. MOG algorithm
```python
# dynamicBackground canoe dataset output path
MOG_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG/'
MOG(frames, MOG_canoe_path)
```
### 3.2.1. Metric calculation
```python
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_canoe_path+'*.png', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.3. MOG2 algorithm
```python
# dynamicBackground canoe dataset output path
MOG2_canoe_path = 'DATA/dynamicBackground/results/canoe/MOG2/'
MOG2(frames, MOG2_canoe_path)
```
### 3.3.1. Metric calculation
```python
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_canoe_path+'*.png', gt_canoe_path, start_frame=469, end_frame=1699)
```
## 3.4. Results comparison
# 4. _sofa_ sequence from the _intermittentObjectMotion_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# intermittentObjectMotion sofa dataset input
gt_sofa_path = 'DATA/intermittentObjectMotion/intermittentObjectMotion/sofa/groundtruth/*.png'
in_sofa_path = 'DATA/intermittentObjectMotion/intermittentObjectMotion/sofa/input/*.jpg'
groundTruth = loadImages(gt_sofa_path)
frames = loadImages(in_sofa_path)
```
## 4.1. Exponential filter algorithm
### 4.1.1. Metric calculation
```python
# apply the filter to the sofa sequence before computing its metrics
# NOTE: the frame range should match the sofa sequence's temporal ROI
exponentialFilter(frames, 0.3, expFilter_sofa_path, 20, bgFilter='MEDIAN')
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_sofa_path+'*.png', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.2. MOG algorithm
### 4.2.1. Metric calculation
```python
MOG(frames, MOG_sofa_path)
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_sofa_path+'*.png', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.3. MOG2 algorithm
### 4.3.1. Metric calculation
```python
MOG2(frames, MOG2_sofa_path)
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_sofa_path+'*.png', gt_sofa_path, start_frame=469, end_frame=1699)
```
## 4.4. Results comparison
# 5. _copyMachine_ sequence from the _shadow_ dataset
We load the input images and the ground truth against which the metrics will be computed.
```python
# shadow copyMachine dataset input
gt_copyMachine_path = 'DATA/shadow/shadow/copyMachine/groundtruth/*.png'
in_copyMachine_path = 'DATA/shadow/shadow/copyMachine/input/*.jpg'
groundTruth = loadImages(gt_copyMachine_path)
frames = loadImages(in_copyMachine_path)
```
## 5.1. Exponential filter algorithm
### 5.1.1. Metric calculation
```python
# apply the filter to the copyMachine sequence before computing its metrics
# NOTE: the frame range should match the copyMachine sequence's temporal ROI
exponentialFilter(frames, 0.3, expFilter_copyMachine_path, 20, bgFilter='MEDIAN')
cm_frames_EF,\
cm_secuencia_EF,\
r_p_fm_frames_EF,\
r_p_fm_secuencia_EF = all_metrics (expFilter_copyMachine_path+'*.png', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.2. MOG algorithm
### 5.2.1. Metric calculation
```python
MOG(frames, MOG_copyMachine_path)
cm_frames_MOG,\
cm_secuencia_MOG,\
r_p_fm_frames_MOG,\
r_p_fm_secuencia_MOG = all_metrics (MOG_copyMachine_path+'*.png', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.3. MOG2 algorithm
### 5.3.1. Metric calculation
```python
MOG2(frames, MOG2_copyMachine_path)
cm_frames_MOG2,\
cm_secuencia_MOG2,\
r_p_fm_frames_MOG2,\
r_p_fm_secuencia_MOG2 = all_metrics (MOG2_copyMachine_path+'*.png', gt_copyMachine_path, start_frame=469, end_frame=1699)
```
## 5.4. Results comparison
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
CSpace invariants
*)
theory ArchCSpaceInv_AI
imports "../CSpaceInv_AI"
begin
context Arch begin global_naming ARM
definition
safe_ioport_insert :: "cap \<Rightarrow> cap \<Rightarrow> 'a::state_ext state \<Rightarrow> bool"
where
"safe_ioport_insert newcap oldcap s \<equiv> True"
declare safe_ioport_insert_def[simp]
lemma safe_ioport_insert_triv:
"\<not>is_arch_cap newcap \<Longrightarrow> safe_ioport_insert newcap oldcap s"
by (clarsimp simp: safe_ioport_insert_def)
lemma set_cap_ioports':
"\<lbrace>\<lambda>s. valid_ioports s
\<and> cte_wp_at (\<lambda>cap'. safe_ioport_insert cap cap' s) ptr s\<rbrace>
set_cap cap ptr
\<lbrace>\<lambda>rv. valid_ioports\<rbrace>"
by wpsimp
lemma unique_table_refs_no_cap_asidE:
"\<lbrakk>caps_of_state s p = Some cap;
unique_table_refs (caps_of_state s)\<rbrakk>
\<Longrightarrow> no_cap_to_obj_with_diff_ref cap S s"
apply (clarsimp simp: no_cap_to_obj_with_diff_ref_def
cte_wp_at_caps_of_state)
apply (unfold unique_table_refs_def)
apply (drule_tac x=p in spec, drule_tac x="(a,b)" in spec)
apply (drule spec)+
apply (erule impE, assumption)+
apply (clarsimp simp: is_cap_simps)
done
lemmas unique_table_refs_no_cap_asidD
= unique_table_refs_no_cap_asidE[where S="{}"]
lemma replace_cap_invs:
"\<lbrace>\<lambda>s. invs s \<and> cte_wp_at (replaceable s p cap) p s
\<and> cap \<noteq> cap.NullCap
\<and> ex_cte_cap_wp_to (appropriate_cte_cap cap) p s
\<and> s \<turnstile> cap\<rbrace>
set_cap cap p
\<lbrace>\<lambda>rv s. invs s\<rbrace>"
apply (simp add: invs_def valid_state_def valid_mdb_def2)
apply (rule hoare_pre)
apply (wp replace_cap_valid_pspace
set_cap_caps_of_state2 set_cap_idle
replace_cap_ifunsafe valid_irq_node_typ
set_cap_typ_at set_cap_irq_handlers
set_cap_valid_arch_caps
set_cap_cap_refs_respects_device_region_replaceable)
apply (clarsimp simp: valid_pspace_def cte_wp_at_caps_of_state replaceable_def)
apply (rule conjI)
apply (fastforce simp: tcb_cap_valid_def
dest!: cte_wp_tcb_cap_valid [OF caps_of_state_cteD])
apply (rule conjI)
apply (erule_tac P="\<lambda>cps. mdb_cte_at cps (cdt s)" in rsubst)
apply (rule ext)
apply (safe del: disjE)[1]
apply (simp add: gen_obj_refs_empty final_NullCap)+
apply (rule conjI)
apply (simp add: untyped_mdb_def is_cap_simps)
apply (erule disjE)
apply (clarsimp, rule conjI, clarsimp+)[1]
apply (erule allEI, erule allEI)
apply (drule_tac x="fst p" in spec, drule_tac x="snd p" in spec)
apply (clarsimp simp: gen_obj_refs_subset)
apply (drule(1) disjoint_subset, simp)
apply (rule conjI)
apply (erule descendants_inc_minor)
apply simp
apply (elim disjE)
apply clarsimp
apply clarsimp
apply (rule conjI)
apply (erule disjE)
apply (simp add: fun_upd_def[symmetric] fun_upd_idem)
apply (simp add: untyped_inc_def not_is_untyped_no_range)
apply (rule conjI)
apply (erule disjE)
apply (simp add: fun_upd_def[symmetric] fun_upd_idem)
apply (simp add: ut_revocable_def)
apply (rule conjI)
apply (erule disjE)
apply (clarsimp simp: irq_revocable_def)
apply clarsimp
apply (clarsimp simp: irq_revocable_def)
apply (rule conjI)
apply (erule disjE)
apply (simp add: fun_upd_def[symmetric] fun_upd_idem)
apply (simp add: reply_master_revocable_def)
apply (rule conjI)
apply (erule disjE)
apply (simp add: fun_upd_def[symmetric] fun_upd_idem)
apply (clarsimp simp add: reply_mdb_def)
apply (thin_tac "\<forall>a b. (a, b) \<in> cte_refs cp nd \<and> Q a b\<longrightarrow> R a b" for cp nd Q R)
apply (thin_tac "is_pt_cap cap \<longrightarrow> P" for cap P)+
apply (thin_tac "is_pd_cap cap \<longrightarrow> P" for cap P)+
apply (rule conjI)
apply (unfold reply_caps_mdb_def)[1]
apply (erule allEI, erule allEI)
apply (clarsimp split: if_split simp add: is_cap_simps
simp del: split_paired_Ex split_paired_All)
apply (rename_tac ptra ptrb rights')
apply (rule_tac x="(ptra,ptrb)" in exI)
apply fastforce
apply (unfold reply_masters_mdb_def)[1]
apply (erule allEI, erule allEI)
apply (fastforce split: if_split_asm simp: is_cap_simps)
apply (rule conjI)
apply (erule disjE)
apply (clarsimp simp add: is_reply_cap_to_def)
apply (drule caps_of_state_cteD)
apply (subgoal_tac "cte_wp_at (is_reply_cap_to t) p s")
apply (erule(1) valid_reply_capsD [OF has_reply_cap_cte_wpD])
apply (erule cte_wp_at_lift)
apply(fastforce simp add:is_reply_cap_to_def)
apply (simp add: is_cap_simps)
apply (frule(1) valid_global_refsD2)
apply (frule(1) cap_refs_in_kernel_windowD)
apply (rule conjI)
apply (erule disjE)
apply (clarsimp simp: valid_reply_masters_def cte_wp_at_caps_of_state)
apply (cases p,fastforce simp:is_master_reply_cap_to_def)
apply (simp add: is_cap_simps)
apply (elim disjE)
apply simp
apply (clarsimp simp: valid_table_capsD[OF caps_of_state_cteD]
valid_arch_caps_def unique_table_refs_no_cap_asidE)
apply simp
apply (rule Ball_emptyI, simp add: gen_obj_refs_subset)
done
definition
"is_simple_cap_arch cap \<equiv> \<not>is_pt_cap cap \<and> \<not> is_pd_cap cap"
declare is_simple_cap_arch_def[simp]
lemma is_simple_cap_arch:
"\<not>is_arch_cap cap \<Longrightarrow> is_simple_cap_arch cap"
by (simp add: is_cap_simps)
definition
"is_derived_arch cap cap' \<equiv>
((is_pt_cap cap \<or> is_pd_cap cap) \<longrightarrow>
cap_asid cap = cap_asid cap' \<and> cap_asid cap \<noteq> None) \<and>
(vs_cap_ref cap = vs_cap_ref cap' \<or> is_pg_cap cap')"
lemma is_derived_arch_non_arch:
"\<not>is_arch_cap cap \<Longrightarrow> \<not> is_arch_cap cap' \<Longrightarrow>
is_derived_arch cap cap'"
unfolding is_derived_arch_def is_pg_cap_def is_pt_cap_def is_pd_cap_def
vs_cap_ref_def is_arch_cap_def
by (auto split: cap.splits)
lemma
cap_master_cap_arch_simps:
"cap_master_arch_cap ((arch_cap.PageCap dev ref rghts sz mapdata)) =
(arch_cap.PageCap dev ref UNIV sz None)"
"cap_master_arch_cap ( (arch_cap.ASIDPoolCap pool asid)) =
(arch_cap.ASIDPoolCap pool 0)"
"cap_master_arch_cap ( (arch_cap.PageTableCap ptr x)) =
(arch_cap.PageTableCap ptr None)"
"cap_master_arch_cap ( (arch_cap.PageDirectoryCap ptr y)) =
(arch_cap.PageDirectoryCap ptr None)"
"cap_master_arch_cap ( arch_cap.ASIDControlCap) =
arch_cap.ASIDControlCap"
by (simp add: cap_master_arch_cap_def)+
lemmas cap_master_cap_def = cap_master_cap_def[simplified cap_master_arch_cap_def]
lemma same_master_cap_same_types:
"cap_master_cap cap = cap_master_cap cap' \<Longrightarrow>
(is_pt_cap cap = is_pt_cap cap') \<and> (is_pd_cap cap = is_pd_cap cap')"
by (clarsimp simp: cap_master_cap_def is_cap_simps
split: cap.splits arch_cap.splits)
lemma is_derived_cap_arch_asid_issues:
"is_derived_arch cap cap' \<Longrightarrow>
cap_master_cap cap = cap_master_cap cap'
\<Longrightarrow> ((is_pt_cap cap \<or> is_pd_cap cap) \<longrightarrow> cap_asid cap \<noteq> None)
\<and> (is_pg_cap cap \<or> (vs_cap_ref cap = vs_cap_ref cap'))"
apply (simp add: is_derived_arch_def)
by (auto simp: cap_master_cap_def is_cap_simps
cap_asid_def
split: cap.splits arch_cap.splits option.splits)
lemma is_derived_cap_arch_asid:
"is_derived_arch cap cap' \<Longrightarrow> cap_master_cap cap = cap_master_cap cap' \<Longrightarrow>
is_pt_cap cap' \<or> is_pd_cap cap' \<Longrightarrow> cap_asid cap = cap_asid cap'"
unfolding is_derived_arch_def
apply (cases cap; cases cap'; simp)
by (auto simp: is_cap_simps cap_master_cap_def split: arch_cap.splits)
definition
safe_parent_for_arch :: "cap \<Rightarrow> cap \<Rightarrow> bool"
where
"safe_parent_for_arch cap parent \<equiv> False"
declare safe_parent_for_arch_def[simp]
lemma safe_parent_for_arch_not_arch:
"\<not>is_arch_cap cap \<Longrightarrow> \<not>safe_parent_for_arch cap p"
by (clarsimp simp: safe_parent_for_arch_def is_cap_simps)
lemma safe_parent_cap_range_arch:
"\<And>cap pcap. safe_parent_for_arch cap pcap \<Longrightarrow> cap_range cap \<subseteq> cap_range pcap"
by (clarsimp simp: safe_parent_for_arch_def cap_range_def)
definition
"cap_asid_base_arch cap \<equiv> case cap of
ASIDPoolCap _ asid \<Rightarrow> Some asid
| _ \<Rightarrow> None"
declare cap_asid_base_arch_def[abs_def, simp]
definition cap_asid_base :: "cap \<Rightarrow> asid option" where
"cap_asid_base cap \<equiv> arch_cap_fun_lift cap_asid_base_arch None cap"
lemmas cap_asid_base_simps [simp] =
cap_asid_base_def [simplified, split_simps cap.split arch_cap.split]
definition
"cap_vptr_arch acap \<equiv> case acap of
(PageCap _ _ _ _ (Some (_, vptr))) \<Rightarrow> Some vptr
| (PageTableCap _ (Some (_, vptr))) \<Rightarrow> Some vptr
| _ \<Rightarrow> None"
definition
"cap_vptr cap \<equiv> arch_cap_fun_lift cap_vptr_arch None cap"
declare cap_vptr_arch_def[abs_def, simp]
lemmas cap_vptr_simps [simp] =
cap_vptr_def [simplified, split_simps cap.split arch_cap.split option.split prod.split]
end
context begin interpretation Arch .
requalify_facts replace_cap_invs
end
end
|
module _ where
open import Data.Nat using (_+_ ; _≤′_ ; suc)
open import Induction.Nat using (<-rec)
open import Esterel.Lang.CanFunction
open import Function using (_∋_ ; _∘_ ; id ; _$_)
open import Data.Nat.Properties.Simple using ( +-comm ; +-assoc)
open import utility
open import noetherian using (noetherian ; ∥_∥s)
open import Esterel.Lang
open import Esterel.Lang.Properties
open import Esterel.Environment as Env
open import Esterel.Context
open import Data.Product
open import Data.Sum
open import Data.Bool
open import Data.List using ([] ; [_] ; _∷_ ; List ; _++_)
open import Relation.Nullary
open import Relation.Binary.PropositionalEquality using (_≡_ ; refl ; sym ; subst ; cong ; trans ; module ≡-Reasoning ; cong₂ ; subst₂ ; inspect)
open import Data.Empty
open import sn-calculus
open import context-properties -- get view, E-views
open import Esterel.Lang.Binding
open import Data.Maybe using ( just )
-- open import coherence
open import Data.List.Any
open import Data.List.Any.Properties
open import Esterel.Lang.CanFunction.Base
open import eval
open import blocked
open import Data.List.All
open import eval
open ≡-Reasoning using (_≡⟨_⟩_ ; _≡⟨⟩_ ; _∎)
open import Relation.Nullary.Decidable
using (⌊_⌋)
open import Data.FiniteMap
import Data.OrderedListMap as OMap
open import Data.Nat as Nat using (ℕ)
open import Esterel.Variable.Signal as Signal
using (Signal ; _ₛ)
open import Esterel.Variable.Shared as SharedVar
using (SharedVar ; _ₛₕ)
open import Esterel.Variable.Sequential as SeqVar
using (SeqVar ; _ᵥ)
open import Esterel.CompletionCode as Code
using () renaming (CompletionCode to Code)
open import sn-calculus-compatconf using (1-step)
open import Data.OrderedListMap Signal Signal.unwrap Signal.Status as SigM
open import Data.OrderedListMap SharedVar SharedVar.unwrap (Σ SharedVar.Status (λ _ → ℕ)) as ShrM
open import Data.OrderedListMap SeqVar SeqVar.unwrap ℕ as SeqM
open import binding-preserve
open import sn-calculus-props
open import par-swap
open import par-swap.union-properties
open import calculus
open import calculus.properties
evalsn≡ₑ-consistent : ∀{output output'} θ p → CB (ρ⟨ θ , GO ⟩· p) → evalsn≡ₑ p θ output → evalsn≡ₑ p θ output' → output ≡ output'
evalsn≡ₑ-consistent θ p ⊢cb-p (evalsn-complete ρθ·p≡q complete-q) (evalsn-complete ρθ·p≡r complete-r)
with sn≡ₑ-consistent (proj₂ (sn≡ₑ-preserve-cb ⊢cb-p ρθ·p≡q)) (rtrn (rsym ρθ·p≡q ⊢cb-p) ρθ·p≡r)
... | (s , qsn⟶*s , rsn⟶*s)
with inescapability-of-complete-sn complete-q qsn⟶*s
... | complete-s
with ρ-stays-ρ-sn⟶* qsn⟶*s | ρ-stays-ρ-sn⟶* rsn⟶*s
... | θq , _ , _ , refl | θr , _ , _ , refl
with equality-of-complete-sn⟶* complete-q qsn⟶*s
| equality-of-complete-sn⟶* complete-r rsn⟶*s
... | refl , refl | refl , refl = refl
eval∥R∪sn≡ₑ-consistent : ∀ {output output'} θ p →
CB (ρ⟨ θ , GO ⟩· p) →
eval∥R∪sn≡ₑ p θ output →
eval∥R∪sn≡ₑ p θ output' →
output ≡ output'
eval∥R∪sn≡ₑ-consistent θ p ⊢cb-p (eval∥R∪sn-complete ρθ·p≡q complete-q) (eval∥R∪sn-complete ρθ·p≡r complete-r)
with ∥R∪sn≡ₑ-consistent (proj₂ (∥R∪sn≡ₑ-preserve-cb ⊢cb-p ρθ·p≡q)) (∪trn (∪sym ρθ·p≡q ⊢cb-p) ρθ·p≡r)
... | (s , qsn⟶*s , rsn⟶*s)
with inescapability-of-complete-∪ complete-q qsn⟶*s
... | complete-s
with ρ-stays-ρ-∪ qsn⟶*s | ρ-stays-ρ-∪ rsn⟶*s
... | θq , _ , _ , refl | θr , _ , _ , refl
with equality-of-complete-∪ complete-q qsn⟶*s
| equality-of-complete-∪ complete-r rsn⟶*s
... | refl , refl | refl , refl = refl
eval≡ₑ->eval∥R∪sn≡ : ∀ {p θ output} ->
eval≡ₑ p θ output →
eval∥R∪sn≡ₑ p θ output
eval≡ₑ->eval∥R∪sn≡ (eval-complete ρθ·p≡q complete-q) =
eval∥R∪sn-complete (≡ₑ-to-∥R∪sn≡ₑ ρθ·p≡q) complete-q
eval≡ₑ-consistent : ∀ {output output'} θ p →
CB (ρ⟨ θ , GO ⟩· p) →
eval≡ₑ p θ output →
eval≡ₑ p θ output' →
output ≡ output'
eval≡ₑ-consistent θ p CBρp eval≡₁ eval≡₂
= eval∥R∪sn≡ₑ-consistent θ p CBρp
(eval≡ₑ->eval∥R∪sn≡ eval≡₁)
(eval≡ₑ->eval∥R∪sn≡ eval≡₂)
sn≡ₑ=>eval : ∀ p θp outputp q θq outputq → CB (ρ⟨ θp , GO ⟩· p) → (ρ⟨ θp , GO ⟩· p) sn≡ₑ (ρ⟨ θq , GO ⟩· q) → evalsn≡ₑ p θp outputp → evalsn≡ₑ q θq outputq → outputp ≡ outputq
sn≡ₑ=>eval _ _ _ _ _ _ CB eq (evalsn-complete ρθ·p≡q complete-q) (evalsn-complete ρθ·p≡q₁ complete-q₁)
with sn≡ₑ-consistent (proj₂ (sn≡ₑ-preserve-cb CB ρθ·p≡q)) (rtrn (rsym ρθ·p≡q CB) (rtrn eq ρθ·p≡q₁))
... | (s , qsn⟶*s , rsn⟶*s)
with inescapability-of-complete-sn complete-q qsn⟶*s
... | complete-s
with ρ-stays-ρ-sn⟶* qsn⟶*s | ρ-stays-ρ-sn⟶* rsn⟶*s
... | θq , _ , _ , refl | θr , _ , _ , refl
with equality-of-complete-sn⟶* complete-q qsn⟶*s
| equality-of-complete-sn⟶* complete-q₁ rsn⟶*s
... | refl , refl | refl , refl = refl
∥R∪sn≡ₑ=>eval : ∀ p θp outputp q θq outputq →
CB (ρ⟨ θp , GO ⟩· p) →
(ρ⟨ θp , GO ⟩· p) ∥R∪sn≡ₑ (ρ⟨ θq , GO ⟩· q) →
eval∥R∪sn≡ₑ p θp outputp →
eval∥R∪sn≡ₑ q θq outputq →
outputp ≡ outputq
∥R∪sn≡ₑ=>eval _ _ _ _ _ _ CB eq (eval∥R∪sn-complete ρθ·p≡q complete-q) (eval∥R∪sn-complete ρθ·p≡q₁ complete-q₁)
with ∥R∪sn≡ₑ-consistent (proj₂ (∥R∪sn≡ₑ-preserve-cb CB ρθ·p≡q)) (∪trn (∪sym ρθ·p≡q CB) (∪trn eq ρθ·p≡q₁))
... | (s , qsn⟶*s , rsn⟶*s)
with inescapability-of-complete-∪ complete-q qsn⟶*s
... | complete-s
with ρ-stays-ρ-∪ qsn⟶*s | ρ-stays-ρ-∪ rsn⟶*s
... | θq , _ , _ , refl | θr , _ , _ , refl
with equality-of-complete-∪ complete-q qsn⟶*s
| equality-of-complete-∪ complete-q₁ rsn⟶*s
... | refl , refl | refl , refl = refl
≡ₑ=>eval : ∀ p θp outputp q θq outputq →
CB (ρ⟨ θp , GO ⟩· p) →
(ρ⟨ θp , GO ⟩· p) ≡ₑ (ρ⟨ θq , GO ⟩· q) # [] →
eval≡ₑ p θp outputp →
eval≡ₑ q θq outputq →
outputp ≡ outputq
≡ₑ=>eval p θp outputp q θq outputq CBρp ρp≡ₑρq eval≡₁ eval≡₂
= ∥R∪sn≡ₑ=>eval p θp outputp q θq outputq CBρp
(≡ₑ-to-∥R∪sn≡ₑ ρp≡ₑρq)
(eval≡ₑ->eval∥R∪sn≡ eval≡₁)
(eval≡ₑ->eval∥R∪sn≡ eval≡₂)
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj3synthconj3 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus lv0 lv1) (plus lv2 lv0)).
Admitted.
QuickChick conj3synthconj3.
|
-- data construction, disproving
module Auto-DataConstruction where
open import Auto.Prelude
module Disproving where
h0 : {X : Set} → (xs ys : List X) → (xs ++ ys) ≡ (ys ++ xs)
h0 = {!-d Fin!}
|
/-
Copyright (c) 2020 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
-/
import data.finset.fold
import data.multiset.gcd
/-!
# GCD and LCM operations on finsets
## Main definitions
- `finset.gcd` - the greatest common divisor of a `finset` of elements of a `gcd_monoid`
- `finset.lcm` - the least common multiple of a `finset` of elements of a `gcd_monoid`
## Implementation notes
Many of the proofs use the lemmas `gcd.def` and `lcm.def`, which relate `finset.gcd`
and `finset.lcm` to `multiset.gcd` and `multiset.lcm`.
TODO: simplify with a tactic and `data.finset.lattice`
## Tags
finset, gcd
-/
variables {α β γ : Type*}
namespace finset
open multiset
variables [comm_cancel_monoid_with_zero α] [nontrivial α] [gcd_monoid α]
/-! ### lcm -/
section lcm
/-- Least common multiple of a finite set -/
def lcm (s : finset β) (f : β → α) : α := s.fold gcd_monoid.lcm 1 f
variables {s s₁ s₂ : finset β} {f : β → α}
lemma lcm_def : s.lcm f = (s.1.map f).lcm := rfl
@[simp] lemma lcm_empty : (∅ : finset β).lcm f = 1 :=
fold_empty
@[simp] lemma lcm_dvd_iff {a : α} : s.lcm f ∣ a ↔ (∀b ∈ s, f b ∣ a) :=
begin
apply iff.trans multiset.lcm_dvd,
simp only [multiset.mem_map, and_imp, exists_imp_distrib],
exact ⟨λ k b hb, k _ _ hb rfl, λ k a' b hb h, h ▸ k _ hb⟩,
end
lemma lcm_dvd {a : α} : (∀b ∈ s, f b ∣ a) → s.lcm f ∣ a :=
lcm_dvd_iff.2
lemma dvd_lcm {b : β} (hb : b ∈ s) : f b ∣ s.lcm f :=
lcm_dvd_iff.1 (dvd_refl _) _ hb
@[simp] lemma lcm_insert [decidable_eq β] {b : β} :
(insert b s : finset β).lcm f = gcd_monoid.lcm (f b) (s.lcm f) :=
begin
by_cases h : b ∈ s,
{ rw [insert_eq_of_mem h,
(lcm_eq_right_iff (f b) (s.lcm f) (multiset.normalize_lcm (s.1.map f))).2 (dvd_lcm h)] },
apply fold_insert h,
end
@[simp] lemma lcm_singleton {b : β} : ({b} : finset β).lcm f = normalize (f b) :=
multiset.lcm_singleton
@[simp] lemma normalize_lcm : normalize (s.lcm f) = s.lcm f := by simp [lcm_def]
lemma lcm_union [decidable_eq β] : (s₁ ∪ s₂).lcm f = gcd_monoid.lcm (s₁.lcm f) (s₂.lcm f) :=
finset.induction_on s₁ (by rw [empty_union, lcm_empty, lcm_one_left, normalize_lcm]) $ λ a s has ih,
by rw [insert_union, lcm_insert, lcm_insert, ih, lcm_assoc]
theorem lcm_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a ∈ s₂, f a = g a) :
s₁.lcm f = s₂.lcm g :=
by { subst hs, exact finset.fold_congr hfg }
lemma lcm_mono_fun {g : β → α} (h : ∀ b ∈ s, f b ∣ g b) : s.lcm f ∣ s.lcm g :=
lcm_dvd (λ b hb, dvd_trans (h b hb) (dvd_lcm hb))
lemma lcm_mono (h : s₁ ⊆ s₂) : s₁.lcm f ∣ s₂.lcm f :=
lcm_dvd $ assume b hb, dvd_lcm (h hb)
end lcm
/-! ### gcd -/
section gcd
/-- Greatest common divisor of a finite set -/
def gcd (s : finset β) (f : β → α) : α := s.fold gcd_monoid.gcd 0 f
variables {s s₁ s₂ : finset β} {f : β → α}
lemma gcd_def : s.gcd f = (s.1.map f).gcd := rfl
@[simp] lemma gcd_empty : (∅ : finset β).gcd f = 0 :=
fold_empty
lemma dvd_gcd_iff {a : α} : a ∣ s.gcd f ↔ ∀b ∈ s, a ∣ f b :=
begin
apply iff.trans multiset.dvd_gcd,
simp only [multiset.mem_map, and_imp, exists_imp_distrib],
exact ⟨λ k b hb, k _ _ hb rfl, λ k a' b hb h, h ▸ k _ hb⟩,
end
lemma gcd_dvd {b : β} (hb : b ∈ s) : s.gcd f ∣ f b :=
dvd_gcd_iff.1 (dvd_refl _) _ hb
lemma dvd_gcd {a : α} : (∀b ∈ s, a ∣ f b) → a ∣ s.gcd f :=
dvd_gcd_iff.2
@[simp] lemma gcd_insert [decidable_eq β] {b : β} :
(insert b s : finset β).gcd f = gcd_monoid.gcd (f b) (s.gcd f) :=
begin
by_cases h : b ∈ s,
{ rw [insert_eq_of_mem h,
(gcd_eq_right_iff (f b) (s.gcd f) (multiset.normalize_gcd (s.1.map f))).2 (gcd_dvd h)] ,},
apply fold_insert h,
end
@[simp] lemma gcd_singleton {b : β} : ({b} : finset β).gcd f = normalize (f b) :=
multiset.gcd_singleton
@[simp] lemma normalize_gcd : normalize (s.gcd f) = s.gcd f := by simp [gcd_def]
lemma gcd_union [decidable_eq β] : (s₁ ∪ s₂).gcd f = gcd_monoid.gcd (s₁.gcd f) (s₂.gcd f) :=
finset.induction_on s₁ (by rw [empty_union, gcd_empty, gcd_zero_left, normalize_gcd]) $
λ a s has ih, by rw [insert_union, gcd_insert, gcd_insert, ih, gcd_assoc]
theorem gcd_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a ∈ s₂, f a = g a) :
s₁.gcd f = s₂.gcd g :=
by { subst hs, exact finset.fold_congr hfg }
lemma gcd_mono_fun {g : β → α} (h : ∀ b ∈ s, f b ∣ g b) : s.gcd f ∣ s.gcd g :=
dvd_gcd (λ b hb, dvd_trans (gcd_dvd hb) (h b hb))
lemma gcd_mono (h : s₁ ⊆ s₂) : s₂.gcd f ∣ s₁.gcd f :=
dvd_gcd $ assume b hb, gcd_dvd (h hb)
theorem gcd_eq_zero_iff : s.gcd f = 0 ↔ ∀ (x : β), x ∈ s → f x = 0 :=
begin
rw [gcd_def, multiset.gcd_eq_zero_iff],
split; intro h,
{ intros b bs,
apply h (f b),
simp only [multiset.mem_map, mem_def.1 bs],
use b,
simp [mem_def.1 bs] },
{ intros a as,
rw multiset.mem_map at as,
rcases as with ⟨b, ⟨bs, rfl⟩⟩,
apply h b (mem_def.1 bs) }
end
lemma gcd_eq_gcd_filter_ne_zero [decidable_pred (λ (x : β), f x = 0)] :
s.gcd f = (s.filter (λ x, f x ≠ 0)).gcd f :=
begin
classical,
transitivity ((s.filter (λ x, f x = 0)) ∪ (s.filter (λ x, f x ≠ 0))).gcd f,
{ rw filter_union_filter_neg_eq },
rw gcd_union,
transitivity gcd_monoid.gcd (0 : α) _,
{ refine congr (congr rfl _) rfl,
apply s.induction_on, { simp },
intros a s has h,
rw filter_insert,
split_ifs with h1; simp [h, h1], },
simp [gcd_zero_left, normalize_gcd],
end
lemma gcd_mul_left {a : α} : s.gcd (λ x, a * f x) = normalize a * s.gcd f :=
begin
classical,
apply s.induction_on,
{ simp },
intros b t hbt h,
rw [gcd_insert, gcd_insert, h, ← gcd_mul_left],
apply gcd_eq_of_associated_right,
apply associated_mul_mul _ (associated.refl _),
apply normalize_associated,
end
lemma gcd_mul_right {a : α} : s.gcd (λ x, f x * a) = s.gcd f * normalize a :=
begin
classical,
apply s.induction_on,
{ simp },
intros b t hbt h,
rw [gcd_insert, gcd_insert, h, ← gcd_mul_right],
apply gcd_eq_of_associated_right,
apply associated_mul_mul (associated.refl _),
apply normalize_associated,
end
end gcd
end finset
namespace finset
section integral_domain
variables [nontrivial β] [integral_domain α] [gcd_monoid α]
lemma gcd_eq_of_dvd_sub {s : finset β} {f g : β → α} {a : α}
(h : ∀ x : β, x ∈ s → a ∣ f x - g x) :
gcd_monoid.gcd a (s.gcd f) = gcd_monoid.gcd a (s.gcd g) :=
begin
classical,
revert h,
apply s.induction_on,
{ simp },
intros b s bs hi h,
rw [gcd_insert, gcd_insert, gcd_comm (f b), ← gcd_assoc, hi (λ x hx, h _ (mem_insert_of_mem hx)),
gcd_comm a, gcd_assoc, gcd_comm a (gcd_monoid.gcd _ _),
gcd_comm (g b), gcd_assoc _ _ a, gcd_comm _ a],
refine congr rfl _,
apply gcd_eq_of_dvd_sub_right (h _ (mem_insert_self _ _)),
end
end integral_domain
end finset
|
[STATEMENT]
lemma card1_eE: "finite S \<Longrightarrow> \<exists>y. y \<in> S \<Longrightarrow> 1 \<le> card S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>finite S; \<exists>y. y \<in> S\<rbrakk> \<Longrightarrow> 1 \<le> card S
[PROOF STEP]
using card_0_eq
[PROOF STATE]
proof (prove)
using this:
finite ?A \<Longrightarrow> (card ?A = 0) = (?A = {})
goal (1 subgoal):
1. \<lbrakk>finite S; \<exists>y. y \<in> S\<rbrakk> \<Longrightarrow> 1 \<le> card S
[PROOF STEP]
by fastforce |
Formal statement is: lemma (in metric_space) eventually_nhds_metric: "eventually P (nhds a) \<longleftrightarrow> (\<exists>d>0. \<forall>x. dist x a < d \<longrightarrow> P x)" Informal statement is: In a metric space, a point $a$ has a neighborhood $U$ such that $P$ holds for all points in $U$ if and only if there exists a positive real number $d$ such that $P$ holds for all points $x$ with $d(x,a) < d$. |
#ifndef PYTHONIC_TYPES_NUMPY_BROADCAST_HPP
#define PYTHONIC_TYPES_NUMPY_BROADCAST_HPP
#include <array>
#ifdef USE_BOOST_SIMD
#include <boost/simd/sdk/simd/native.hpp>
#include <boost/simd/include/functions/load.hpp>
#include <boost/simd/include/functions/store.hpp>
#endif
//#include "pythonic/types/tuple.hpp"
#include "pythonic/types/vectorizable_type.hpp"
namespace pythonic {
namespace types {
/* Type adaptor for broadcasted array values
*
* Used when the args of a binary operator do not have the same dimensions:
* in that case their first dimension always yields a copy
*/
template<class T>
struct broadcasted {
static const bool is_vectorizable = false;
static const bool is_strided = false;
typedef typename T::dtype dtype;
typedef typename T::value_type value_type;
static constexpr size_t value = T::value + 1;
T const & ref;
array<long, value> shape;
broadcasted(T const& ref) : ref(ref), shape() {
shape[0] = 1;
std::copy(ref.shape.begin(), ref.shape.end(), shape.begin() + 1);
}
T const & operator[](long i) const { return ref;}
T const & fast(long i) const { return ref;}
#ifdef USE_BOOST_SIMD
template<class I> // template to prevent automatic instantiation, but the declaration is still needed
void load(I) const {
static_assert(I::this_should_never_happen, "this is *not* vectorizable");
}
#endif
long flat_size() const { return 0;}
};
/* Type adaptor for scalar values
*
* Have them behave like infinite arrays of that value
*
* B is the original type of the broadcast value, and T is the type of the expression it is combined with
* if both B and T are integer types, we choose T instead of B to prevent automatic conversion into larger types
*
* That way, np.ones(10, dtype=np.uint8) + 1 yields an array of np.uint8, although 1 is of type long
*/
template<class dtype, bool is_vectorizable> struct broadcast_base {
dtype _value;
template<class V> broadcast_base(V v) : _value(v) {}
template<class I> void load(I) const { static_assert(sizeof(I) != sizeof(I), "this method should never be instantiated");}
};
#ifdef USE_BOOST_SIMD
template<class dtype> struct broadcast_base<dtype, true> {
dtype _value;
boost::simd::native<dtype, BOOST_SIMD_DEFAULT_EXTENSION> _splated ;
template<class V> broadcast_base(V v) : _value(v), _splated(boost::simd::splat<boost::simd::native<dtype, BOOST_SIMD_DEFAULT_EXTENSION>>(_value))
{}
template<class I> auto load(I) const -> decltype(this -> _splated) { return _splated; }
};
#endif
template<class T, class B>
struct broadcast {
// Perform the type conversion here if it seems valid (although it is not always)
typedef typename std::conditional<std::is_integral<T>::value and std::is_integral<B>::value,
T,
typename __combined<T, B>::type>::type dtype;
static const bool is_vectorizable = types::is_vectorizable<dtype>::value;
static const bool is_strided = false;
typedef dtype value_type;
static constexpr size_t value = 0;
broadcast_base<dtype, is_vectorizable> _base;
broadcast() = default;
template<class V>
broadcast(V v) : _base(v) {}
dtype operator[](long ) const {
return _base._value;
}
dtype fast(long ) const {
return _base._value;
}
template<class I>
auto load(I i) const -> decltype(this -> _base.load(i)) { return _base.load(i); }
long flat_size() const { return 0; }
};
}
}
#endif
|
function spm_slice_timing(P, sliceorder, refslice, timing, prefix)
% Correct differences in slice acquisition times
% FORMAT spm_slice_timing(P, sliceorder, refslice, timing, prefix)
% P - char array of image filenames
% can also be a cell array of the above (multiple subjects).
% sliceorder - slice acquisition order, a vector of integers, each
% integer referring the slice number in the image file
% (1=first), and the order of integers representing their
% temporal acquisition order
%                       OR vector containing the acquisition time for each slice
% in milliseconds
% refslice - slice for time 0
% OR time in milliseconds for the reference slice
% timing - additional information for sequence timing
% timing(1) = time between slices
% = TA / (nslices - 1)
% timing(2) = time between last slices and next volume
% = TR - TA
% OR timing = [0 TR] when previous inputs are specified in
% milliseconds
% prefix - filename prefix for corrected image files, defaults to 'a'
%__________________________________________________________________________
%
% Note: The sliceorder arg that specifies slice acquisition order is
% a vector of N numbers, where N is the number of slices per volume.
% Each number refers to the position of a slice within the image file.
% The order of numbers within the vector is the temporal order in which
% those slices were acquired.
%
% To check the order of slices within an image file, use the SPM Display
% option and move the crosshairs to a voxel co-ordinate of z=1. This
% corresponds to a point in the first slice of the volume.
%
% The function corrects differences in slice acquisition times.
% This routine is intended to correct for the staggered order of
% slice acquisition that is used during echoplanar scanning. The
% correction is necessary to make the data on each slice correspond
% to the same point in time. Without correction, the data on one
% slice will represent a point in time as far removed as 1/2 the TR
% from an adjacent slice (in the case of an interleaved sequence).
%
% This routine "shifts" a signal in time to provide an output
% vector that represents the same (continuous) signal sampled
% starting either later or earlier. This is accomplished by a simple
% shift of the phase of the sines that make up the signal.
%
% Recall that a Fourier transform allows for a representation of any
% signal as the linear combination of sinusoids of different
% frequencies and phases. Effectively, we will add a constant
% to the phase of every frequency, shifting the data in time.
%
% Shifter - This is the filter by which the signal will be convolved
% to introduce the phase shift. It is constructed explicitly in
% the Fourier domain. In the time domain, it may be described as
% an impulse (delta function) that has been shifted in time the
% amount described by TimeShift.
%
% The correction works by lagging (shifting forward) the time-series
% data on each slice using sinc-interpolation. This results in each
% time series having the values that would have been obtained had
% the slice been acquired at the same time as the reference slice.
%
% To make this clear, consider a neural event (and ensuing hemodynamic
% response) that occurs simultaneously on two adjacent slices. Values
% from slice "A" are acquired starting at time zero, simultaneous to
% the neural event, while values from slice "B" are acquired one
% second later. Without correction, the "B" values will describe a
% hemodynamic response that will appear to have begun one second
% EARLIER on the "B" slice than on slice "A". To correct for this,
% the "B" values need to be shifted towards the Right, i.e., towards
% the last value.
%
% Written by Darren Gitelman at Northwestern U., 1998
%
% Based (in large part) on ACQCORRECT.PRO from G. Aguirre and E. Zarahn
% at U. Penn.
%
% Modified by R. Henson, C. Buechel and J. Ashburner, FIL, to
% handle different reference slices and memory mapping.
%
% Modified by M. Erb, at U. Tuebingen, 1999, to ask for non-continuous
% slice timing and number of sessions.
%
% Modified by R. Henson for more general slice order and SPM2.
%
% Modified by A. Hoffmann, M. Woletz and C. Windischberger from Medical
% University of Vienna, Austria, to handle multi-band EPI sequences.
%__________________________________________________________________________
% Copyright (C) 1998-2014 Wellcome Trust Centre for Neuroimaging
% Darren Gitelman et al.
% $Id: spm_slice_timing.m 6130 2014-08-01 17:41:18Z guillaume $
SVNid = '$Rev: 6130 $';
%-Say hello
%--------------------------------------------------------------------------
SPMid = spm('FnBanner',mfilename,SVNid);
%-Parameters & Arguments
%==========================================================================
if nargin < 4, error('Not enough input arguments.'); end
if nargin < 5, prefix = 'a'; end
if ~iscell(P), P = {P}; end
nsubjects = numel(P);
% Acquisition order: 1=first slice in image
% Reference slice: 1=first slice in image, in Analyze format, slice 1 = bottom
% TR: Interscan interval (TR) {secs}
% TA: Acquisition Time (TA) {secs} [Def: TR-TR/nslices], TA <= TR
% timing(2) = TR - TA, time between last slices and next volume
% timing(1) = TA / (nslices -1), time between slices
Vin = spm_vol(P{1}(1,:));
nslices = Vin(1).dim(3);
TR = (nslices-1)*timing(1)+timing(2);
fprintf('%-40s: %30s\n','Number of slices is...',num2str(nslices)) %-#
fprintf('%-40s: %30s\n','Time to Repeat (TR) is...',num2str(TR)) %-#
if ~isequal(1:nslices,sort(sliceorder))
if ~all(sliceorder >= 0 & sliceorder <= TR*1000)
error('Input is neither slice indices nor slice times.');
end
unit = 'slice times (ms)';
else
if ~ismember(refslice,sliceorder)
error('Reference slice should contain a slice index.');
end
unit = 'slice indices';
end
fprintf('%-40s: %30s\n','Parameters are specified as...',unit) %-#
if nslices ~= numel(sliceorder)
error('Mismatch between number of slices and length of ''sliceorder'' vector.');
end
%-Slice timing correction
%==========================================================================
for subj = 1:nsubjects
Vin = spm_vol(P{subj});
nimgo = numel(Vin);
nimg = 2^(floor(log2(nimgo))+1);
if Vin(1).dim(3) ~= nslices
error('Number of slices differ: %d vs %d.', nslices, Vin(1).dim(3));
end
% Create new header files
Vout = Vin;
for k=1:nimgo
Vout(k).fname = spm_file(Vin(k).fname, 'prefix', prefix);
if isfield(Vout(k),'descrip')
desc = [Vout(k).descrip ' '];
else
desc = '';
end
Vout(k).descrip = [desc 'acq-fix ref-slice ' num2str(refslice)];
end
Vout = spm_create_vol(Vout);
% Set up [time x voxels] matrix for holding image info
slices = zeros([Vout(1).dim(1:2) nimgo]);
stack = zeros([nimg Vout(1).dim(1)]);
task = sprintf('Correcting acquisition delay: session %d', subj);
spm_progress_bar('Init',nslices,task,'planes complete');
% Compute shifting amount from reference slice and slice order
if isequal(unit,'slice times (ms)')
% Compute time difference between the acquisition time of the
% reference slice and the current slice by using slice times
% supplied in sliceorder vector
shiftamount = (sliceorder - refslice) / (1000 * TR);
else
rslice = find(sliceorder==refslice);
[Y, I] = sort(sliceorder);
shiftamount = (I - rslice) * timing(1) / TR;
end
% For loop to perform correction slice by slice
for k = 1:nslices
% Read in slice data
B = spm_matrix([0 0 k]);
for m=1:nimgo
slices(:,:,m) = spm_slice_vol(Vin(m),B,Vin(1).dim(1:2),1);
end
% Set up shifting variables
len = size(stack,1);
phi = zeros(1,len);
% Check if signal is odd or even -- impacts how Phi is reflected
% across the Nyquist frequency. Opposite to use in pvwave.
OffSet = 0;
if rem(len,2) ~= 0, OffSet = 1; end
% Phi represents a range of phases up to the Nyquist frequency
% Shifted phi 1 to right.
for f = 1:len/2
phi(f+1) = -1*shiftamount(k)*2*pi/(len/f);
end
% Mirror phi about the center
% 1 is added on both sides to reflect Matlab's 1 based indices
% Offset is opposite to program in pvwave again because indices are 1 based
phi(len/2+1+1-OffSet:len) = -fliplr(phi(1+1:len/2+OffSet));
% Transform phi to the frequency domain and take the complex transpose
shifter = [cos(phi) + sin(phi)*sqrt(-1)].';
shifter = shifter(:,ones(size(stack,2),1)); % Tony's trick
% Loop over columns
for i=1:Vout(1).dim(2)
% Extract columns from slices
stack(1:nimgo,:) = reshape(slices(:,i,:),[Vout(1).dim(1) nimgo])';
% Fill in continuous function to avoid edge effects
for g=1:size(stack,2)
stack(nimgo+1:end,g) = linspace(stack(nimgo,g),...
stack(1,g),nimg-nimgo)';
end
% Shift the columns
stack = real(ifft(fft(stack,[],1).*shifter,[],1));
% Re-insert shifted columns
slices(:,i,:) = reshape(stack(1:nimgo,:)',[Vout(1).dim(1) 1 nimgo]);
end
% Write out the slice for all volumes
for p = 1:nimgo
Vout(p) = spm_write_plane(Vout(p),slices(:,:,p),k);
end
spm_progress_bar('Set',k);
end
spm_progress_bar('Clear');
end
fprintf('%-40s: %30s\n','Completed',spm('time')) %-#
|
-- Conmutatividad_del_maximo.lean
-- If a, b ∈ ℝ, then max(a,b) = max(b,a)
-- José A. Alonso Jiménez <https://jaalonso.github.io>
-- Sevilla, 30-septiembre-2022
-- ---------------------------------------------------------------------
-- ---------------------------------------------------------------------
-- Prove that if a, b ∈ ℝ, then max(a,b) = max(b,a)
-- ---------------------------------------------------------------------
import data.real.basic
variables a b : ℝ
-- 1st proof
-- ===============
example : max a b = max b a :=
begin
apply le_antisymm,
{ show max a b ≤ max b a,
apply max_le,
{ apply le_max_right },
{ apply le_max_left }},
{ show max b a ≤ max a b,
apply max_le,
{ apply le_max_right },
{ apply le_max_left }},
end
-- 2nd proof
-- ===============
example : max a b = max b a :=
begin
have h : ∀ x y : ℝ, max x y ≤ max y x,
{ intros x y,
apply max_le,
{ apply le_max_right },
{ apply le_max_left }},
apply le_antisymm,
apply h,
apply h,
end
-- 3rd proof
-- ===============
example : max a b = max b a :=
begin
have h : ∀ {x y : ℝ}, max x y ≤ max y x,
{ intros x y,
exact max_le (le_max_right y x) (le_max_left y x),},
exact le_antisymm h h,
end
-- 4th proof
-- ===============
example : max a b = max b a :=
begin
apply le_antisymm,
repeat {
apply max_le,
apply le_max_right,
apply le_max_left },
end
|
Archive from category "Wills, Trusts & Taxation"
Your executor carries out (or executes) the wishes set out in your will, and choosing the right person or persons is an important decision. It should be somebody you trust to do this job. Ideally, it should be a job given to two people to act as co-executors. So what does an executor actually do? |
Quested V-Series monitors date back over 30 years and are trusted by a myriad of professional engineers and producers world-wide. Chosen by both professionals and passionate producers of music, the V Series sets a benchmark, encompassing the true standards of a studio monitor.
Quested’s no-compromise approach to the development and evolution of the V Series means that the V2108, our longest running product, has built and maintained a lifetime of trust with some of the most respected artists and studios around the globe, yet it is still very much young at heart due to the evolved transducer and amplification designs.
As with all Quested products, the V-Series is hand built by a small dedicated team in England. V Series products come with a five-year manufacturer's warranty. Get in touch to arrange a demo. |
/-
Copyright (c) 2020 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Yaël Dillies
-/
import data.finset.n_ary
import data.finset.preimage
import data.set.pointwise
/-!
# Pointwise operations of finsets
This file defines pointwise algebraic operations on finsets.
## Main declarations
For finsets `s` and `t`:
* `0` (`finset.has_zero`): The singleton `{0}`.
* `1` (`finset.has_one`): The singleton `{1}`.
* `-s` (`finset.has_neg`): Negation, finset of all `-x` where `x ∈ s`.
* `s⁻¹` (`finset.has_inv`): Inversion, finset of all `x⁻¹` where `x ∈ s`.
* `s + t` (`finset.has_add`): Addition, finset of all `x + y` where `x ∈ s` and `y ∈ t`.
* `s * t` (`finset.has_mul`): Multiplication, finset of all `x * y` where `x ∈ s` and `y ∈ t`.
* `s - t` (`finset.has_sub`): Subtraction, finset of all `x - y` where `x ∈ s` and `y ∈ t`.
* `s / t` (`finset.has_div`): Division, finset of all `x / y` where `x ∈ s` and `y ∈ t`.
* `s +ᵥ t` (`finset.has_vadd`): Scalar addition, finset of all `x +ᵥ y` where `x ∈ s` and `y ∈ t`.
* `s • t` (`finset.has_scalar`): Scalar multiplication, finset of all `x • y` where `x ∈ s` and
`y ∈ t`.
* `s -ᵥ t` (`finset.has_vsub`): Scalar subtraction, finset of all `x -ᵥ y` where `x ∈ s` and
`y ∈ t`.
* `a • s` (`finset.has_scalar_finset`): Scaling, finset of all `a • x` where `x ∈ s`.
* `a +ᵥ s` (`finset.has_vadd_finset`): Translation, finset of all `a +ᵥ x` where `x ∈ s`.
For `α` a semigroup/monoid, `finset α` is a semigroup/monoid.
As an unfortunate side effect, this means that `n • s`, where `n : ℕ`, is ambiguous between
pointwise scaling and repeated pointwise addition; the former has `(2 : ℕ) • {1, 2} = {2, 4}`, while
the latter has `(2 : ℕ) • {1, 2} = {2, 3, 4}`. See note [pointwise nat action].
## Implementation notes
We put all instances in the locale `pointwise`, so that these instances are not available by
default. Note that we do not mark them as reducible (as argued by note [reducible non-instances])
since we expect the locale to be open whenever the instances are actually used (and making the
instances reducible changes the behavior of `simp`).
## Tags
finset multiplication, finset addition, pointwise addition, pointwise multiplication,
pointwise subtraction
-/
open function
open_locale pointwise
variables {F α β γ : Type*}
namespace finset
/-! ### `0`/`1` as finsets -/
section has_one
variables [has_one α] {s : finset α} {a : α}
/-- The finset `1 : finset α` is defined as `{1}` in locale `pointwise`. -/
@[to_additive "The finset `0 : finset α` is defined as `{0}` in locale `pointwise`."]
protected def has_one : has_one (finset α) := ⟨{1}⟩
localized "attribute [instance] finset.has_one finset.has_zero" in pointwise
@[simp, to_additive] lemma mem_one : a ∈ (1 : finset α) ↔ a = 1 := mem_singleton
@[simp, to_additive] lemma coe_one : ↑(1 : finset α) = (1 : set α) := coe_singleton 1
@[simp, to_additive] lemma one_subset : (1 : finset α) ⊆ s ↔ (1 : α) ∈ s := singleton_subset_iff
@[to_additive] lemma singleton_one : ({1} : finset α) = 1 := rfl
@[to_additive] lemma one_mem_one : (1 : α) ∈ (1 : finset α) := mem_singleton_self _
@[to_additive] lemma one_nonempty : (1 : finset α).nonempty := ⟨1, one_mem_one⟩
@[simp, to_additive] protected lemma map_one {f : α ↪ β} : map f 1 = {f 1} := map_singleton f 1
@[simp, to_additive] lemma image_one [decidable_eq β] {f : α → β} : image f 1 = {f 1} :=
image_singleton _ _
@[to_additive] lemma subset_one_iff_eq : s ⊆ 1 ↔ s = ∅ ∨ s = 1 := subset_singleton_iff
@[to_additive] lemma nonempty.subset_one_iff (h : s.nonempty) : s ⊆ 1 ↔ s = 1 :=
h.subset_singleton_iff
/-- The singleton operation as a `one_hom`. -/
@[to_additive "The singleton operation as a `zero_hom`."]
def singleton_one_hom : one_hom α (finset α) := ⟨singleton, singleton_one⟩
@[simp, to_additive] lemma coe_singleton_one_hom : (singleton_one_hom : α → finset α) = singleton :=
rfl
@[simp, to_additive] lemma singleton_one_hom_apply (a : α) : singleton_one_hom a = {a} := rfl
end has_one
/-! ### Finset negation/inversion -/
section has_inv
variables [decidable_eq α] [has_inv α] {s s₁ s₂ t t₁ t₂ u : finset α} {a b : α}
/-- The pointwise inversion of finset `s⁻¹` is defined as `{x⁻¹ | x ∈ s}` in locale `pointwise`. -/
@[to_additive "The pointwise negation of finset `-s` is defined as `{-x | x ∈ s}` in locale
`pointwise`."]
protected def has_inv : has_inv (finset α) := ⟨image has_inv.inv⟩
localized "attribute [instance] finset.has_inv finset.has_neg" in pointwise
@[to_additive] lemma inv_def : s⁻¹ = s.image (λ x, x⁻¹) := rfl
@[to_additive] lemma image_inv : s.image (λ x, x⁻¹) = s⁻¹ := rfl
@[to_additive] lemma mem_inv {x : α} : x ∈ s⁻¹ ↔ ∃ y ∈ s, y⁻¹ = x := mem_image
@[to_additive] lemma inv_mem_inv (ha : a ∈ s) : a⁻¹ ∈ s⁻¹ := mem_image_of_mem _ ha
@[to_additive] lemma card_inv_le : s⁻¹.card ≤ s.card := card_image_le
@[simp, to_additive] lemma inv_empty : (∅ : finset α)⁻¹ = ∅ := image_empty _
@[simp, to_additive] lemma inv_nonempty_iff : s⁻¹.nonempty ↔ s.nonempty := nonempty.image_iff _
alias inv_nonempty_iff ↔ finset.nonempty.inv finset.nonempty.of_inv
@[to_additive, mono] lemma inv_subset_inv (h : s ⊆ t) : s⁻¹ ⊆ t⁻¹ := image_subset_image h
attribute [mono] neg_subset_neg
@[simp, to_additive] lemma inv_singleton (a : α) : ({a} : finset α)⁻¹ = {a⁻¹} := image_singleton _ _
end has_inv
open_locale pointwise
section has_involutive_inv
variables [decidable_eq α] [has_involutive_inv α] {s t : finset α}
@[simp, norm_cast, to_additive]
lemma coe_inv (s : finset α) : ↑(s⁻¹) = (s : set α)⁻¹ := coe_image.trans set.image_inv
@[simp, to_additive] lemma card_inv : s⁻¹.card = s.card := card_image_of_injective _ inv_injective
@[simp, to_additive] lemma preimage_inv : s.preimage has_inv.inv (inv_injective.inj_on _) = s⁻¹ :=
coe_injective $ by rw [coe_preimage, set.inv_preimage, coe_inv]
end has_involutive_inv
/-! ### Finset addition/multiplication -/
section has_mul
variables [decidable_eq α] [decidable_eq β] [has_mul α] [has_mul β] [mul_hom_class F α β] (m : F)
{s s₁ s₂ t t₁ t₂ u : finset α} {a b : α}
/-- The pointwise multiplication of finsets `s * t` is defined as `{x * y | x ∈ s, y ∈ t}`
in locale `pointwise`. -/
@[to_additive "The pointwise addition of finsets `s + t` is defined as `{x + y | x ∈ s, y ∈ t}` in
locale `pointwise`."]
protected def has_mul : has_mul (finset α) := ⟨image₂ (*)⟩
localized "attribute [instance] finset.has_mul finset.has_add" in pointwise
@[to_additive]
lemma mul_def : s * t = (s.product t).image (λ p : α × α, p.1 * p.2) := rfl
@[to_additive]
lemma image_mul_product : (s.product t).image (λ x : α × α, x.fst * x.snd) = s * t := rfl
@[to_additive]
lemma mem_mul {x : α} : x ∈ s * t ↔ ∃ y z, y ∈ s ∧ z ∈ t ∧ y * z = x := mem_image₂
@[simp, norm_cast, to_additive]
lemma coe_mul (s t : finset α) : (↑(s * t) : set α) = ↑s * ↑t := coe_image₂ _ _ _
@[to_additive] lemma mul_mem_mul : a ∈ s → b ∈ t → a * b ∈ s * t := mem_image₂_of_mem
@[to_additive] lemma card_mul_le : (s * t).card ≤ s.card * t.card := card_image₂_le _ _ _
@[to_additive] lemma card_mul_iff :
(s * t).card = s.card * t.card ↔
((s : set α) ×ˢ (t : set α) : set (α × α)).inj_on (λ p, p.1 * p.2) := card_image₂_iff
@[simp, to_additive] lemma empty_mul (s : finset α) : ∅ * s = ∅ := image₂_empty_left
@[simp, to_additive] lemma mul_empty (s : finset α) : s * ∅ = ∅ := image₂_empty_right
@[simp, to_additive] lemma mul_eq_empty : s * t = ∅ ↔ s = ∅ ∨ t = ∅ := image₂_eq_empty_iff
@[simp, to_additive] lemma mul_nonempty : (s * t).nonempty ↔ s.nonempty ∧ t.nonempty :=
image₂_nonempty_iff
@[to_additive] lemma nonempty.mul : s.nonempty → t.nonempty → (s * t).nonempty := nonempty.image₂
@[to_additive] lemma nonempty.of_mul_left : (s * t).nonempty → s.nonempty := nonempty.of_image₂_left
@[to_additive] lemma nonempty.of_mul_right : (s * t).nonempty → t.nonempty :=
nonempty.of_image₂_right
@[simp, to_additive] lemma mul_singleton (a : α) : s * {a} = s.image (* a) := image₂_singleton_right
@[simp, to_additive] lemma singleton_mul (a : α) : {a} * s = s.image ((*) a) :=
image₂_singleton_left
@[simp, to_additive] lemma singleton_mul_singleton (a b : α) : ({a} : finset α) * {b} = {a * b} :=
image₂_singleton
@[to_additive, mono] lemma mul_subset_mul : s₁ ⊆ s₂ → t₁ ⊆ t₂ → s₁ * t₁ ⊆ s₂ * t₂ := image₂_subset
@[to_additive] lemma mul_subset_mul_left : t₁ ⊆ t₂ → s * t₁ ⊆ s * t₂ := image₂_subset_left
@[to_additive] lemma mul_subset_mul_right : s₁ ⊆ s₂ → s₁ * t ⊆ s₂ * t := image₂_subset_right
@[to_additive] lemma mul_subset_iff : s * t ⊆ u ↔ ∀ (x ∈ s) (y ∈ t), x * y ∈ u := image₂_subset_iff
attribute [mono] add_subset_add
@[to_additive] lemma union_mul : (s₁ ∪ s₂) * t = s₁ * t ∪ s₂ * t := image₂_union_left
@[to_additive] lemma mul_union : s * (t₁ ∪ t₂) = s * t₁ ∪ s * t₂ := image₂_union_right
@[to_additive] lemma inter_mul_subset : (s₁ ∩ s₂) * t ⊆ s₁ * t ∩ (s₂ * t) :=
image₂_inter_subset_left
@[to_additive] lemma mul_inter_subset : s * (t₁ ∩ t₂) ⊆ s * t₁ ∩ (s * t₂) :=
image₂_inter_subset_right
/-- If a finset `u` is contained in the product of two sets `s * t`, we can find two finsets `s'`,
`t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' * t'`. -/
@[to_additive "If a finset `u` is contained in the sum of two sets `s + t`, we can find two finsets
`s'`, `t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' + t'`."]
lemma subset_mul {s t : set α} : ↑u ⊆ s * t → ∃ s' t' : finset α, ↑s' ⊆ s ∧ ↑t' ⊆ t ∧ u ⊆ s' * t' :=
subset_image₂
@[to_additive] lemma image_mul : (s * t).image (m : α → β) = s.image m * t.image m :=
image_image₂_distrib $ map_mul m
/-- The singleton operation as a `mul_hom`. -/
@[to_additive "The singleton operation as an `add_hom`."]
def singleton_mul_hom : α →ₙ* finset α := ⟨singleton, λ a b, (singleton_mul_singleton _ _).symm⟩
@[simp, to_additive] lemma coe_singleton_mul_hom : (singleton_mul_hom : α → finset α) = singleton :=
rfl
@[simp, to_additive] lemma singleton_mul_hom_apply (a : α) : singleton_mul_hom a = {a} := rfl
end has_mul
/-! ### Finset subtraction/division -/
section has_div
variables [decidable_eq α] [has_div α] {s s₁ s₂ t t₁ t₂ u : finset α} {a b : α}
/-- The pointwise division of finsets `s / t` is defined as `{x / y | x ∈ s, y ∈ t}` in locale
`pointwise`. -/
@[to_additive "The pointwise subtraction of finsets `s - t` is defined as `{x - y | x ∈ s, y ∈ t}`
in locale `pointwise`."]
protected def has_div : has_div (finset α) := ⟨image₂ (/)⟩
localized "attribute [instance] finset.has_div finset.has_sub" in pointwise
@[to_additive]
lemma div_def : s / t = (s.product t).image (λ p : α × α, p.1 / p.2) := rfl
@[to_additive add_image_prod]
lemma image_div_prod : (s.product t).image (λ x : α × α, x.fst / x.snd) = s / t := rfl
@[to_additive] lemma mem_div : a ∈ s / t ↔ ∃ b c, b ∈ s ∧ c ∈ t ∧ b / c = a := mem_image₂
@[simp, norm_cast, to_additive]
lemma coe_div (s t : finset α) : (↑(s / t) : set α) = ↑s / ↑t := coe_image₂ _ _ _
@[to_additive] lemma div_mem_div : a ∈ s → b ∈ t → a / b ∈ s / t := mem_image₂_of_mem
@[to_additive] lemma div_card_le : (s / t).card ≤ s.card * t.card := card_image₂_le _ _ _
@[simp, to_additive] lemma empty_div (s : finset α) : ∅ / s = ∅ := image₂_empty_left
@[simp, to_additive] lemma div_empty (s : finset α) : s / ∅ = ∅ := image₂_empty_right
@[simp, to_additive] lemma div_eq_empty : s / t = ∅ ↔ s = ∅ ∨ t = ∅ := image₂_eq_empty_iff
@[simp, to_additive] lemma div_nonempty : (s / t).nonempty ↔ s.nonempty ∧ t.nonempty :=
image₂_nonempty_iff
@[to_additive] lemma nonempty.div : s.nonempty → t.nonempty → (s / t).nonempty := nonempty.image₂
@[to_additive] lemma nonempty.of_div_left : (s / t).nonempty → s.nonempty := nonempty.of_image₂_left
@[to_additive] lemma nonempty.of_div_right : (s / t).nonempty → t.nonempty :=
nonempty.of_image₂_right
@[simp, to_additive] lemma div_singleton (a : α) : s / {a} = s.image (/ a) := image₂_singleton_right
@[simp, to_additive] lemma singleton_div (a : α) : {a} / s = s.image ((/) a) :=
image₂_singleton_left
@[simp, to_additive] lemma singleton_div_singleton (a b : α) : ({a} : finset α) / {b} = {a / b} :=
image₂_singleton
@[to_additive, mono] lemma div_subset_div : s₁ ⊆ s₂ → t₁ ⊆ t₂ → s₁ / t₁ ⊆ s₂ / t₂ := image₂_subset
@[to_additive] lemma div_subset_div_left : t₁ ⊆ t₂ → s / t₁ ⊆ s / t₂ := image₂_subset_left
@[to_additive] lemma div_subset_div_right : s₁ ⊆ s₂ → s₁ / t ⊆ s₂ / t := image₂_subset_right
@[to_additive] lemma div_subset_iff : s / t ⊆ u ↔ ∀ (x ∈ s) (y ∈ t), x / y ∈ u := image₂_subset_iff
attribute [mono] sub_subset_sub
@[to_additive] lemma union_div : (s₁ ∪ s₂) / t = s₁ / t ∪ s₂ / t := image₂_union_left
@[to_additive] lemma div_union : s / (t₁ ∪ t₂) = s / t₁ ∪ s / t₂ := image₂_union_right
@[to_additive] lemma inter_div_subset : (s₁ ∩ s₂) / t ⊆ s₁ / t ∩ (s₂ / t) :=
image₂_inter_subset_left
@[to_additive] lemma div_inter_subset : s / (t₁ ∩ t₂) ⊆ s / t₁ ∩ (s / t₂) :=
image₂_inter_subset_right
/-- If a finset `u` is contained in the quotient of two sets `s / t`, we can find two finsets `s'`,
`t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' / t'`. -/
@[to_additive "If a finset `u` is contained in the difference of two sets `s - t`, we can find two
finsets `s'`, `t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' - t'`."]
lemma subset_div {s t : set α} : ↑u ⊆ s / t → ∃ s' t' : finset α, ↑s' ⊆ s ∧ ↑t' ⊆ t ∧ u ⊆ s' / t' :=
subset_image₂
end has_div
/-! ### Instances -/
open_locale pointwise
section instances
variables [decidable_eq α] [decidable_eq β]
/-- Repeated pointwise addition (not the same as pointwise repeated addition!) of a `finset`. See
note [pointwise nat action]. -/
protected def has_nsmul [has_zero α] [has_add α] : has_scalar ℕ (finset α) := ⟨nsmul_rec⟩
/-- Repeated pointwise multiplication (not the same as pointwise repeated multiplication!) of a
`finset`. See note [pointwise nat action]. -/
@[to_additive]
protected def has_npow [has_one α] [has_mul α] : has_pow (finset α) ℕ := ⟨λ s n, npow_rec n s⟩
/-- Repeated pointwise addition/subtraction (not the same as pointwise repeated
addition/subtraction!) of a `finset`. See note [pointwise nat action]. -/
protected def has_zsmul [has_zero α] [has_add α] [has_neg α] : has_scalar ℤ (finset α) :=
⟨zsmul_rec⟩
/-- Repeated pointwise multiplication/division (not the same as pointwise repeated
multiplication/division!) of a `finset`. See note [pointwise nat action]. -/
@[to_additive] protected def has_zpow [has_one α] [has_mul α] [has_inv α] : has_pow (finset α) ℤ :=
⟨λ s n, zpow_rec n s⟩
localized "attribute [instance] finset.has_nsmul finset.has_npow finset.has_zsmul finset.has_zpow"
in pointwise
/-- `finset α` is a `semigroup` under pointwise operations if `α` is. -/
@[to_additive "`finset α` is an `add_semigroup` under pointwise operations if `α` is. "]
protected def semigroup [semigroup α] : semigroup (finset α) :=
coe_injective.semigroup _ coe_mul
/-- `finset α` is a `comm_semigroup` under pointwise operations if `α` is. -/
@[to_additive "`finset α` is an `add_comm_semigroup` under pointwise operations if `α` is. "]
protected def comm_semigroup [comm_semigroup α] : comm_semigroup (finset α) :=
coe_injective.comm_semigroup _ coe_mul
section mul_one_class
variables [mul_one_class α]
/-- `finset α` is a `mul_one_class` under pointwise operations if `α` is. -/
@[to_additive "`finset α` is an `add_zero_class` under pointwise operations if `α` is."]
protected def mul_one_class : mul_one_class (finset α) :=
coe_injective.mul_one_class _ (coe_singleton 1) coe_mul
localized "attribute [instance] finset.semigroup finset.add_semigroup finset.comm_semigroup
finset.add_comm_semigroup finset.mul_one_class finset.add_zero_class" in pointwise
@[to_additive] lemma subset_mul_left (s : finset α) {t : finset α} (ht : (1 : α) ∈ t) : s ⊆ s * t :=
λ a ha, mem_mul.2 ⟨a, 1, ha, ht, mul_one _⟩
@[to_additive] lemma subset_mul_right {s : finset α} (t : finset α) (hs : (1 : α) ∈ s) :
t ⊆ s * t :=
λ a ha, mem_mul.2 ⟨1, a, hs, ha, one_mul _⟩
/-- The singleton operation as a `monoid_hom`. -/
@[to_additive "The singleton operation as an `add_monoid_hom`."]
def singleton_monoid_hom : α →* finset α := { ..singleton_mul_hom, ..singleton_one_hom }
@[simp, to_additive] lemma coe_singleton_monoid_hom :
(singleton_monoid_hom : α → finset α) = singleton := rfl
@[simp, to_additive] lemma singleton_monoid_hom_apply (a : α) : singleton_monoid_hom a = {a} := rfl
end mul_one_class
section monoid
variables [monoid α] {s t : finset α} {a : α} {m n : ℕ}
@[simp, to_additive]
lemma coe_pow (s : finset α) (n : ℕ) : ↑(s ^ n) = (s ^ n : set α) :=
begin
change ↑(npow_rec n s) = _,
induction n with n ih,
{ rw [npow_rec, pow_zero, coe_one] },
{ rw [npow_rec, pow_succ, coe_mul, ih] }
end
/-- `finset α` is a `monoid` under pointwise operations if `α` is. -/
@[to_additive "`finset α` is an `add_monoid` under pointwise operations if `α` is. "]
protected def monoid : monoid (finset α) := coe_injective.monoid _ coe_one coe_mul coe_pow
localized "attribute [instance] finset.monoid finset.add_monoid" in pointwise
@[to_additive] lemma pow_mem_pow (ha : a ∈ s) : ∀ n : ℕ, a ^ n ∈ s ^ n
| 0 := by { rw pow_zero, exact one_mem_one }
| (n + 1) := by { rw pow_succ, exact mul_mem_mul ha (pow_mem_pow _) }
@[to_additive] lemma pow_subset_pow (hst : s ⊆ t) : ∀ n : ℕ, s ^ n ⊆ t ^ n
| 0 := by { rw pow_zero, exact subset.rfl }
| (n + 1) := by { rw pow_succ, exact mul_subset_mul hst (pow_subset_pow _) }
@[to_additive] lemma pow_subset_pow_of_one_mem (hs : (1 : α) ∈ s) : m ≤ n → s ^ m ⊆ s ^ n :=
begin
refine nat.le_induction _ (λ n h ih, _) _,
{ exact subset.rfl },
{ rw pow_succ,
exact ih.trans (subset_mul_right _ hs) }
end
@[simp, to_additive] lemma empty_pow (hn : n ≠ 0) : (∅ : finset α) ^ n = ∅ :=
by rw [←tsub_add_cancel_of_le (nat.succ_le_of_lt $ nat.pos_of_ne_zero hn), pow_succ, empty_mul]
@[to_additive] lemma mul_univ_of_one_mem [fintype α] (hs : (1 : α) ∈ s) : s * univ = univ :=
eq_univ_iff_forall.2 $ λ a, mem_mul.2 ⟨_, _, hs, mem_univ _, one_mul _⟩
@[to_additive] lemma univ_mul_of_one_mem [fintype α] (ht : (1 : α) ∈ t) : univ * t = univ :=
eq_univ_iff_forall.2 $ λ a, mem_mul.2 ⟨_, _, mem_univ _, ht, mul_one _⟩
@[simp, to_additive] lemma univ_mul_univ [fintype α] : (univ : finset α) * univ = univ :=
mul_univ_of_one_mem $ mem_univ _
@[simp, to_additive nsmul_univ] lemma univ_pow [fintype α] (hn : n ≠ 0) :
(univ : finset α) ^ n = univ :=
coe_injective $ by rw [coe_pow, coe_univ, set.univ_pow hn]
@[to_additive] protected lemma _root_.is_unit.finset : is_unit a → is_unit ({a} : finset α) :=
is_unit.map (singleton_monoid_hom : α →* finset α)
end monoid
/-- `finset α` is a `comm_monoid` under pointwise operations if `α` is. -/
@[to_additive "`finset α` is an `add_comm_monoid` under pointwise operations if `α` is. "]
protected def comm_monoid [comm_monoid α] : comm_monoid (finset α) :=
coe_injective.comm_monoid _ coe_one coe_mul coe_pow
open_locale pointwise
section division_monoid
variables [division_monoid α] {s t : finset α}
@[simp, to_additive] lemma coe_zpow (s : finset α) : ∀ n : ℤ, ↑(s ^ n) = (s ^ n : set α)
| (int.of_nat n) := coe_pow _ _
| (int.neg_succ_of_nat n) :=
by { refine (coe_inv _).trans _, convert congr_arg has_inv.inv (coe_pow _ _) }
@[to_additive] protected lemma mul_eq_one_iff : s * t = 1 ↔ ∃ a b, s = {a} ∧ t = {b} ∧ a * b = 1 :=
by simp_rw [←coe_inj, coe_mul, coe_one, set.mul_eq_one_iff, coe_singleton]
/-- `finset α` is a division monoid under pointwise operations if `α` is. -/
@[to_additive subtraction_monoid "`finset α` is a subtraction monoid under pointwise operations if
`α` is."]
protected def division_monoid : division_monoid (finset α) :=
coe_injective.division_monoid _ coe_one coe_mul coe_inv coe_div coe_pow coe_zpow
@[simp, to_additive] lemma is_unit_iff : is_unit s ↔ ∃ a, s = {a} ∧ is_unit a :=
begin
split,
{ rintro ⟨u, rfl⟩,
obtain ⟨a, b, ha, hb, h⟩ := finset.mul_eq_one_iff.1 u.mul_inv,
refine ⟨a, ha, ⟨a, b, h, singleton_injective _⟩, rfl⟩,
rw [←singleton_mul_singleton, ←ha, ←hb],
exact u.inv_mul },
{ rintro ⟨a, rfl, ha⟩,
exact ha.finset }
end
@[simp, to_additive] lemma is_unit_coe : is_unit (s : set α) ↔ is_unit s :=
by simp_rw [is_unit_iff, set.is_unit_iff, coe_eq_singleton]
end division_monoid
/-- `finset α` is a commutative division monoid under pointwise operations if `α` is. -/
@[to_additive subtraction_comm_monoid "`finset α` is a commutative subtraction monoid under
pointwise operations if `α` is."]
protected def division_comm_monoid [division_comm_monoid α] : division_comm_monoid (finset α) :=
coe_injective.division_comm_monoid _ coe_one coe_mul coe_inv coe_div coe_pow coe_zpow
/-- `finset α` has distributive negation if `α` has. -/
protected def has_distrib_neg [has_mul α] [has_distrib_neg α] : has_distrib_neg (finset α) :=
coe_injective.has_distrib_neg _ coe_neg coe_mul
localized "attribute [instance] finset.comm_monoid finset.add_comm_monoid finset.division_monoid
finset.subtraction_monoid finset.division_comm_monoid finset.subtraction_comm_monoid
finset.has_distrib_neg" in pointwise
section distrib
variables [distrib α] (s t u : finset α)
/-!
Note that `finset α` is not a `distrib` because `s * t + s * u` has cross terms that `s * (t + u)`
lacks.
```lean
-- {10, 16, 18, 20, 8, 9}
#eval {1, 2} * ({3, 4} + {5, 6} : finset ℕ)
-- {10, 11, 12, 13, 14, 15, 16, 18, 20, 8, 9}
#eval ({1, 2} : finset ℕ) * {3, 4} + {1, 2} * {5, 6}
```
-/
lemma mul_add_subset : s * (t + u) ⊆ s * t + s * u := image₂_distrib_subset_left mul_add
lemma add_mul_subset : (s + t) * u ⊆ s * u + t * u := image₂_distrib_subset_right add_mul
end distrib
section mul_zero_class
variables [mul_zero_class α] {s t : finset α}
/-! Note that `finset` is not a `mul_zero_class` because `0 * ∅ ≠ 0`. -/
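/- A sketch of the failure: with `open_locale pointwise`, `(0 : finset α)` is the
singleton `{0}`, so `0 * ∅ = image₂ (*) {0} ∅ = ∅ ≠ {0} = 0`; only the subset and
`nonempty` versions below hold. -/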
lemma mul_zero_subset (s : finset α) : s * 0 ⊆ 0 := by simp [subset_iff, mem_mul]
lemma zero_mul_subset (s : finset α) : 0 * s ⊆ 0 := by simp [subset_iff, mem_mul]
lemma nonempty.mul_zero (hs : s.nonempty) : s * 0 = 0 :=
s.mul_zero_subset.antisymm $ by simpa [mem_mul] using hs
lemma nonempty.zero_mul (hs : s.nonempty) : 0 * s = 0 :=
s.zero_mul_subset.antisymm $ by simpa [mem_mul] using hs
end mul_zero_class
section group
variables [group α] [division_monoid β] [monoid_hom_class F α β] (m : F) {s t : finset α} {a b : α}
/-! Note that `finset` is not a `group` because `s / s ≠ 1` in general. -/
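/- A sketch of the failure: if `a, b ∈ s` with `a ≠ b`, then `a / b ≠ 1` is an
element of `s / s`, so `s / s ≠ 1`; what does survive is `nonempty.one_mem_div`
below, giving `(1 : α) ∈ s / s`. -/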
@[simp, to_additive] lemma one_mem_div_iff : (1 : α) ∈ s / t ↔ ¬ disjoint s t :=
by rw [←mem_coe, ←disjoint_coe, coe_div, set.one_mem_div_iff]
@[to_additive] lemma not_one_mem_div_iff : (1 : α) ∉ s / t ↔ disjoint s t :=
one_mem_div_iff.not_left
@[to_additive] lemma nonempty.one_mem_div (h : s.nonempty) : (1 : α) ∈ s / s :=
let ⟨a, ha⟩ := h in mem_div.2 ⟨a, a, ha, ha, div_self' _⟩
@[simp, to_additive]
lemma is_unit_iff_singleton : is_unit s ↔ ∃ a, s = {a} :=
by simp only [is_unit_iff, group.is_unit, and_true]
@[simp, to_additive]
lemma image_mul_left :
image (λ b, a * b) t = preimage t (λ b, a⁻¹ * b) ((mul_right_injective _).inj_on _) :=
coe_injective $ by simp
@[simp, to_additive]
lemma image_mul_right : image (* b) t = preimage t (* b⁻¹) ((mul_left_injective _).inj_on _) :=
coe_injective $ by simp
@[to_additive]
lemma image_mul_left' :
image (λ b, a⁻¹ * b) t = preimage t (λ b, a * b) ((mul_right_injective _).inj_on _) :=
by simp
@[to_additive]
lemma image_mul_right' : image (* b⁻¹) t = preimage t (* b) ((mul_left_injective _).inj_on _) :=
by simp
lemma image_div : (s / t).image (m : α → β) = s.image m / t.image m :=
image_image₂_distrib $ map_div m
end group
section group_with_zero
variables [group_with_zero α] {s t : finset α}
lemma div_zero_subset (s : finset α) : s / 0 ⊆ 0 := by simp [subset_iff, mem_div]
lemma zero_div_subset (s : finset α) : 0 / s ⊆ 0 := by simp [subset_iff, mem_div]
lemma nonempty.div_zero (hs : s.nonempty) : s / 0 = 0 :=
s.div_zero_subset.antisymm $ by simpa [mem_div] using hs
lemma nonempty.zero_div (hs : s.nonempty) : 0 / s = 0 :=
s.zero_div_subset.antisymm $ by simpa [mem_div] using hs
end group_with_zero
end instances
section group
variables [group α] {s t : finset α} {a b : α}
@[simp, to_additive]
lemma preimage_mul_left_singleton :
preimage {b} ((*) a) ((mul_right_injective _).inj_on _) = {a⁻¹ * b} :=
by { classical, rw [← image_mul_left', image_singleton] }
@[simp, to_additive]
lemma preimage_mul_right_singleton :
preimage {b} (* a) ((mul_left_injective _).inj_on _) = {b * a⁻¹} :=
by { classical, rw [← image_mul_right', image_singleton] }
@[simp, to_additive]
lemma preimage_mul_left_one : preimage 1 ((*) a) ((mul_right_injective _).inj_on _) = {a⁻¹} :=
by { classical, rw [← image_mul_left', image_one, mul_one] }
@[simp, to_additive]
lemma preimage_mul_right_one : preimage 1 (* b) ((mul_left_injective _).inj_on _) = {b⁻¹} :=
by { classical, rw [← image_mul_right', image_one, one_mul] }
@[to_additive]
lemma preimage_mul_left_one' : preimage 1 ((*) a⁻¹) ((mul_right_injective _).inj_on _) = {a} :=
by rw [preimage_mul_left_one, inv_inv]
@[to_additive]
lemma preimage_mul_right_one' : preimage 1 (* b⁻¹) ((mul_left_injective _).inj_on _) = {b} :=
by rw [preimage_mul_right_one, inv_inv]
end group
/-! ### Scalar addition/multiplication of finsets -/
section has_scalar
variables [decidable_eq β] [has_scalar α β] {s s₁ s₂ : finset α} {t t₁ t₂ u : finset β} {a : α}
{b : β}
/-- The pointwise product of two finsets `s` and `t`: `s • t = {x • y | x ∈ s, y ∈ t}`. -/
@[to_additive has_vadd "The pointwise sum of two finsets `s` and
`t`: `s +ᵥ t = {x +ᵥ y | x ∈ s, y ∈ t}`."]
protected def has_scalar : has_scalar (finset α) (finset β) := ⟨image₂ (•)⟩
localized "attribute [instance] finset.has_scalar finset.has_vadd" in pointwise
@[to_additive] lemma smul_def : s • t = (s.product t).image (λ p : α × β, p.1 • p.2) := rfl
@[to_additive]
lemma image_smul_product : (s.product t).image (λ x : α × β, x.fst • x.snd) = s • t := rfl
@[to_additive] lemma mem_smul {x : β} : x ∈ s • t ↔ ∃ y z, y ∈ s ∧ z ∈ t ∧ y • z = x := mem_image₂
@[simp, norm_cast, to_additive]
lemma coe_smul (s : finset α) (t : finset β) : (↑(s • t) : set β) = (s : set α) • t :=
coe_image₂ _ _ _
@[to_additive] lemma smul_mem_smul : a ∈ s → b ∈ t → a • b ∈ s • t := mem_image₂_of_mem
@[to_additive] lemma smul_card_le : (s • t).card ≤ s.card • t.card := card_image₂_le _ _ _
@[simp, to_additive] lemma empty_smul (t : finset β) : (∅ : finset α) • t = ∅ := image₂_empty_left
@[simp, to_additive] lemma smul_empty (s : finset α) : s • (∅ : finset β) = ∅ := image₂_empty_right
@[simp, to_additive] lemma smul_eq_empty : s • t = ∅ ↔ s = ∅ ∨ t = ∅ := image₂_eq_empty_iff
@[simp, to_additive] lemma smul_nonempty_iff : (s • t).nonempty ↔ s.nonempty ∧ t.nonempty :=
image₂_nonempty_iff
@[to_additive] lemma nonempty.smul : s.nonempty → t.nonempty → (s • t).nonempty := nonempty.image₂
@[to_additive] lemma nonempty.of_smul_left : (s • t).nonempty → s.nonempty :=
nonempty.of_image₂_left
@[to_additive] lemma nonempty.of_smul_right : (s • t).nonempty → t.nonempty :=
nonempty.of_image₂_right
@[simp, to_additive] lemma smul_singleton (b : β) : s • ({b} : finset β) = s.image (• b) :=
image₂_singleton_right
@[simp, to_additive] lemma singleton_smul (a : α) : ({a} : finset α) • t = t.image ((•) a) :=
image₂_singleton_left
@[simp, to_additive] lemma singleton_smul_singleton (a : α) (b : β) :
({a} : finset α) • ({b} : finset β) = {a • b} :=
image₂_singleton
@[to_additive, mono] lemma smul_subset_smul : s₁ ⊆ s₂ → t₁ ⊆ t₂ → s₁ • t₁ ⊆ s₂ • t₂ := image₂_subset
@[to_additive] lemma smul_subset_smul_left : t₁ ⊆ t₂ → s • t₁ ⊆ s • t₂ := image₂_subset_left
@[to_additive] lemma smul_subset_smul_right : s₁ ⊆ s₂ → s₁ • t ⊆ s₂ • t := image₂_subset_right
@[to_additive] lemma smul_subset_iff : s • t ⊆ u ↔ ∀ (a ∈ s) (b ∈ t), a • b ∈ u := image₂_subset_iff
attribute [mono] vadd_subset_vadd
@[to_additive] lemma union_smul [decidable_eq α] : (s₁ ∪ s₂) • t = s₁ • t ∪ s₂ • t :=
image₂_union_left
@[to_additive] lemma smul_union : s • (t₁ ∪ t₂) = s • t₁ ∪ s • t₂ := image₂_union_right
@[to_additive] lemma inter_smul_subset [decidable_eq α] : (s₁ ∩ s₂) • t ⊆ s₁ • t ∩ s₂ • t :=
image₂_inter_subset_left
@[to_additive] lemma smul_inter_subset : s • (t₁ ∩ t₂) ⊆ s • t₁ ∩ s • t₂ :=
image₂_inter_subset_right
/-- If a finset `u` is contained in the scalar product of two sets `s • t`, we can find two finsets
`s'`, `t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' • t'`. -/
@[to_additive "If a finset `u` is contained in the scalar sum of two sets `s +ᵥ t`, we can find two
finsets `s'`, `t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' +ᵥ t'`."]
lemma subset_smul {s : set α} {t : set β} :
↑u ⊆ s • t → ∃ (s' : finset α) (t' : finset β), ↑s' ⊆ s ∧ ↑t' ⊆ t ∧ u ⊆ s' • t' :=
subset_image₂
end has_scalar
/-! ### Scalar subtraction of finsets -/
section has_vsub
variables [decidable_eq α] [has_vsub α β] {s s₁ s₂ t t₁ t₂ : finset β} {u : finset α} {a : α}
{b c : β}
include α
/-- The pointwise subtraction of two finsets `s` and `t`: `s -ᵥ t = {x -ᵥ y | x ∈ s, y ∈ t}`. -/
protected def has_vsub : has_vsub (finset α) (finset β) := ⟨image₂ (-ᵥ)⟩
localized "attribute [instance] finset.has_vsub" in pointwise
lemma vsub_def : s -ᵥ t = image₂ (-ᵥ) s t := rfl
@[simp] lemma image_vsub_product : image₂ (-ᵥ) s t = s -ᵥ t := rfl
lemma mem_vsub : a ∈ s -ᵥ t ↔ ∃ b c, b ∈ s ∧ c ∈ t ∧ b -ᵥ c = a := mem_image₂
@[simp, norm_cast]
lemma coe_vsub (s t : finset β) : (↑(s -ᵥ t) : set α) = (s : set β) -ᵥ t := coe_image₂ _ _ _
lemma vsub_mem_vsub : b ∈ s → c ∈ t → b -ᵥ c ∈ s -ᵥ t := mem_image₂_of_mem
lemma vsub_card_le : (s -ᵥ t : finset α).card ≤ s.card * t.card := card_image₂_le _ _ _
@[simp] lemma empty_vsub (t : finset β) : (∅ : finset β) -ᵥ t = ∅ := image₂_empty_left
@[simp] lemma vsub_empty (s : finset β) : s -ᵥ (∅ : finset β) = ∅ := image₂_empty_right
@[simp] lemma vsub_eq_empty : s -ᵥ t = ∅ ↔ s = ∅ ∨ t = ∅ := image₂_eq_empty_iff
@[simp] lemma vsub_nonempty : (s -ᵥ t : finset α).nonempty ↔ s.nonempty ∧ t.nonempty :=
image₂_nonempty_iff
lemma nonempty.vsub : s.nonempty → t.nonempty → (s -ᵥ t : finset α).nonempty := nonempty.image₂
lemma nonempty.of_vsub_left : (s -ᵥ t : finset α).nonempty → s.nonempty := nonempty.of_image₂_left
lemma nonempty.of_vsub_right : (s -ᵥ t : finset α).nonempty → t.nonempty := nonempty.of_image₂_right
@[simp] lemma vsub_singleton (b : β) : s -ᵥ ({b} : finset β) = s.image (-ᵥ b) :=
image₂_singleton_right
@[simp] lemma singleton_vsub (a : β) : ({a} : finset β) -ᵥ t = t.image ((-ᵥ) a) :=
image₂_singleton_left
@[simp] lemma singleton_vsub_singleton (a b : β) : ({a} : finset β) -ᵥ {b} = {a -ᵥ b} :=
image₂_singleton
@[mono] lemma vsub_subset_vsub : s₁ ⊆ s₂ → t₁ ⊆ t₂ → s₁ -ᵥ t₁ ⊆ s₂ -ᵥ t₂ := image₂_subset
lemma vsub_subset_vsub_left : t₁ ⊆ t₂ → s -ᵥ t₁ ⊆ s -ᵥ t₂ := image₂_subset_left
lemma vsub_subset_vsub_right : s₁ ⊆ s₂ → s₁ -ᵥ t ⊆ s₂ -ᵥ t := image₂_subset_right
lemma vsub_subset_iff : s -ᵥ t ⊆ u ↔ ∀ (x ∈ s) (y ∈ t), x -ᵥ y ∈ u := image₂_subset_iff
section
variables [decidable_eq β]
lemma union_vsub : (s₁ ∪ s₂) -ᵥ t = (s₁ -ᵥ t) ∪ (s₂ -ᵥ t) := image₂_union_left
lemma vsub_union : s -ᵥ (t₁ ∪ t₂) = (s -ᵥ t₁) ∪ (s -ᵥ t₂) := image₂_union_right
lemma inter_vsub_subset : (s₁ ∩ s₂) -ᵥ t ⊆ (s₁ -ᵥ t) ∩ (s₂ -ᵥ t) := image₂_inter_subset_left
lemma vsub_inter_subset : s -ᵥ (t₁ ∩ t₂) ⊆ (s -ᵥ t₁) ∩ (s -ᵥ t₂) := image₂_inter_subset_right
end
/-- If a finset `u` is contained in the pointwise subtraction of two sets `s -ᵥ t`, we can find two
finsets `s'`, `t'` such that `s' ⊆ s`, `t' ⊆ t` and `u ⊆ s' -ᵥ t'`. -/
lemma subset_vsub {s t : set β} :
↑u ⊆ s -ᵥ t → ∃ s' t' : finset β, ↑s' ⊆ s ∧ ↑t' ⊆ t ∧ u ⊆ s' -ᵥ t' :=
subset_image₂
end has_vsub
open_locale pointwise
/-! ### Translation/scaling of finsets -/
section has_scalar
variables [decidable_eq β] [has_scalar α β] {s s₁ s₂ t u : finset β} {a : α} {b : β}
/-- The scaling of a finset `s` by a scalar `a`: `a • s = {a • x | x ∈ s}`. -/
@[to_additive has_vadd_finset "The translation of a finset `s` by a vector `a`:
`a +ᵥ s = {a +ᵥ x | x ∈ s}`."]
protected def has_scalar_finset : has_scalar α (finset β) := ⟨λ a, image $ (•) a⟩
localized "attribute [instance] finset.has_scalar_finset finset.has_vadd_finset" in pointwise
@[to_additive] lemma smul_finset_def : a • s = s.image ((•) a) := rfl
@[to_additive] lemma image_smul : s.image (λ x, a • x) = a • s := rfl
@[to_additive]
lemma mem_smul_finset {x : β} : x ∈ a • s ↔ ∃ y, y ∈ s ∧ a • y = x :=
by simp only [finset.smul_finset_def, and.assoc, mem_image, exists_prop, prod.exists, mem_product]
@[simp, norm_cast, to_additive]
lemma coe_smul_finset (a : α) (s : finset β) : (↑(a • s) : set β) = a • s := coe_image
@[to_additive] lemma smul_finset_mem_smul_finset : b ∈ s → a • b ∈ a • s := mem_image_of_mem _
@[to_additive] lemma smul_finset_card_le : (a • s).card ≤ s.card := card_image_le
@[simp, to_additive] lemma smul_finset_empty (a : α) : a • (∅ : finset β) = ∅ := image_empty _
@[simp, to_additive] lemma smul_finset_eq_empty : a • s = ∅ ↔ s = ∅ := image_eq_empty
@[simp, to_additive] lemma smul_finset_nonempty : (a • s).nonempty ↔ s.nonempty :=
nonempty.image_iff _
@[to_additive] lemma nonempty.smul_finset (hs : s.nonempty) : (a • s).nonempty := hs.image _
@[to_additive, mono]
lemma smul_finset_subset_smul_finset : s ⊆ t → a • s ⊆ a • t := image_subset_image
attribute [mono] vadd_finset_subset_vadd_finset
@[simp, to_additive]
lemma smul_finset_singleton (b : β) : a • ({b} : finset β) = {a • b} := image_singleton _ _
@[to_additive] lemma smul_finset_union : a • (s₁ ∪ s₂) = a • s₁ ∪ a • s₂ := image_union _ _
@[to_additive] lemma smul_finset_inter_subset : a • (s₁ ∩ s₂) ⊆ a • s₁ ∩ (a • s₂) :=
image_inter_subset _ _ _
@[simp] lemma bUnion_smul_finset (s : finset α) (t : finset β) : s.bUnion (• t) = s • t :=
bUnion_image_left
end has_scalar
open_locale pointwise
section instances
variables [decidable_eq γ]
@[to_additive]
instance smul_comm_class_finset [has_scalar α γ] [has_scalar β γ] [smul_comm_class α β γ] :
smul_comm_class α β (finset γ) :=
⟨λ _ _ _, image_comm $ smul_comm _ _⟩
@[to_additive]
instance smul_comm_class_finset' [has_scalar α γ] [has_scalar β γ] [smul_comm_class α β γ] :
smul_comm_class α (finset β) (finset γ) :=
⟨λ a s t, coe_injective $ by simp only [coe_smul_finset, coe_smul, smul_comm]⟩
@[to_additive]
instance smul_comm_class_finset'' [has_scalar α γ] [has_scalar β γ] [smul_comm_class α β γ] :
smul_comm_class (finset α) β (finset γ) :=
by haveI := smul_comm_class.symm α β γ; exact smul_comm_class.symm _ _ _
@[to_additive]
instance smul_comm_class [has_scalar α γ] [has_scalar β γ] [smul_comm_class α β γ] :
smul_comm_class (finset α) (finset β) (finset γ) :=
⟨λ s t u, coe_injective $ by simp_rw [coe_smul, smul_comm]⟩
instance is_scalar_tower [has_scalar α β] [has_scalar α γ] [has_scalar β γ]
[is_scalar_tower α β γ] :
is_scalar_tower α β (finset γ) :=
⟨λ a b s, by simp only [←image_smul, image_image, smul_assoc]⟩
variables [decidable_eq β]
instance is_scalar_tower' [has_scalar α β] [has_scalar α γ] [has_scalar β γ]
[is_scalar_tower α β γ] :
is_scalar_tower α (finset β) (finset γ) :=
⟨λ a s t, coe_injective $ by simp only [coe_smul_finset, coe_smul, smul_assoc]⟩
instance is_scalar_tower'' [has_scalar α β] [has_scalar α γ] [has_scalar β γ]
[is_scalar_tower α β γ] :
is_scalar_tower (finset α) (finset β) (finset γ) :=
⟨λ a s t, coe_injective $ by simp only [coe_smul_finset, coe_smul, smul_assoc]⟩
instance is_central_scalar [has_scalar α β] [has_scalar αᵐᵒᵖ β] [is_central_scalar α β] :
is_central_scalar α (finset β) :=
⟨λ a s, coe_injective $ by simp only [coe_smul_finset, coe_smul, op_smul_eq_smul]⟩
/-- A multiplicative action of a monoid `α` on a type `β` gives a multiplicative action of
`finset α` on `finset β`. -/
@[to_additive "An additive action of an additive monoid `α` on a type `β` gives an additive action
of `finset α` on `finset β`"]
protected def mul_action [decidable_eq α] [monoid α] [mul_action α β] :
mul_action (finset α) (finset β) :=
{ mul_smul := λ _ _ _, image₂_assoc mul_smul,
one_smul := λ s, image₂_singleton_left.trans $ by simp_rw [one_smul, image_id'] }
/-- A multiplicative action of a monoid on a type `β` gives a multiplicative action on `finset β`.
-/
@[to_additive "An additive action of an additive monoid on a type `β` gives an additive action
on `finset β`."]
protected def mul_action_finset [monoid α] [mul_action α β] : mul_action α (finset β) :=
coe_injective.mul_action _ coe_smul_finset
localized "attribute [instance] finset.mul_action_finset finset.add_action_finset
finset.mul_action finset.add_action" in pointwise
/-- A distributive multiplicative action of a monoid on an additive monoid `β` gives a distributive
multiplicative action on `finset β`. -/
protected def distrib_mul_action_finset [monoid α] [add_monoid β] [distrib_mul_action α β] :
distrib_mul_action α (finset β) :=
function.injective.distrib_mul_action ⟨coe, coe_zero, coe_add⟩ coe_injective coe_smul_finset
/-- A multiplicative action of a monoid on a monoid `β` gives a multiplicative action on `set β`. -/
protected def mul_distrib_mul_action_finset [monoid α] [monoid β] [mul_distrib_mul_action α β] :
mul_distrib_mul_action α (finset β) :=
function.injective.mul_distrib_mul_action ⟨coe, coe_one, coe_mul⟩ coe_injective coe_smul_finset
localized "attribute [instance] finset.distrib_mul_action_finset
finset.mul_distrib_mul_action_finset" in pointwise
instance [decidable_eq α] [has_zero α] [has_mul α] [no_zero_divisors α] :
no_zero_divisors (finset α) :=
coe_injective.no_zero_divisors _ coe_zero coe_mul
instance [has_zero α] [has_zero β] [has_scalar α β] [no_zero_smul_divisors α β] :
no_zero_smul_divisors (finset α) (finset β) :=
⟨λ s t h, begin
by_contra' H,
have hst : (s • t).nonempty := h.symm.subst zero_nonempty,
simp_rw [←hst.of_smul_left.subset_zero_iff, ←hst.of_smul_right.subset_zero_iff, not_subset,
mem_zero] at H,
obtain ⟨⟨a, hs, ha⟩, b, ht, hb⟩ := H,
have := subset_of_eq h,
exact (eq_zero_or_eq_zero_of_smul_eq_zero $ mem_zero.1 $ this $ smul_mem_smul hs ht).elim ha hb,
end⟩
instance no_zero_smul_divisors_finset [has_zero α] [has_zero β] [has_scalar α β]
[no_zero_smul_divisors α β] : no_zero_smul_divisors α (finset β) :=
coe_injective.no_zero_smul_divisors _ coe_zero coe_smul_finset
end instances
lemma pairwise_disjoint_smul_iff [decidable_eq α] [left_cancel_semigroup α] {s : set α}
{t : finset α} :
s.pairwise_disjoint (• t) ↔ ((s : set α) ×ˢ (t : set α) : set (α × α)).inj_on (λ p, p.1 * p.2) :=
by simp_rw [←pairwise_disjoint_coe, coe_smul_finset, set.pairwise_disjoint_smul_iff]
end finset
|
-- Record projections should be positive in their argument
module Issue602-2 where
record A : Set₁ where
constructor mkA
field
f : Set
unA : A → Set
unA (mkA x) = x
data B (a : A) : Set where
mkB : unA a → B a
data D : Set where
d : B (mkA D) → D
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
! This file was ported from Lean 3 source module tactic.mk_iff_of_inductive_prop
! leanprover-community/mathlib commit 4f8c490fa3c3086f55427f664db7742ecf88b852
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Tactic.Core
import Mathbin.Tactic.Lint.Default
/-!
# mk_iff_of_inductive_prop
This file defines a tactic `tactic.mk_iff_of_inductive_prop` that generates `iff` rules for
inductive `Prop`s. For example, when applied to `list.chain`, it creates a declaration with
the following type:
```lean
∀{α : Type*} (R : α → α → Prop) (a : α) (l : list α),
chain R a l ↔ l = [] ∨ ∃{b : α} {l' : list α}, R a b ∧ chain R b l ∧ l = b :: l'
```
This tactic can be called using either the `mk_iff_of_inductive_prop` user command or
the `mk_iff` attribute.
-/
open Tactic Expr
namespace MkIff
/-- `select m n` runs `tactic.right` `m` times, and then `tactic.left` once if `m < n`
(thus selecting the `m`-th of `n+1` disjuncts in a right-nested disjunction).
Fails if `n < m`. -/
unsafe def select : ℕ → ℕ → tactic Unit
| 0, 0 => skip
| 0, n + 1 => left >> skip
| m + 1, n + 1 => right >> select m n
| n + 1, 0 => failure
#align mk_iff.select mk_iff.select
/-- `compact_relation bs as_ps`: Produce a relation of the form:
```lean
R as := ∃ bs, Λ_i a_i = p_i[bs]
```
This relation is user-visible, so we compact it by removing each `b_j` where a `p_i = b_j`, and
hence `a_i = b_j`. We need to take care when there are `p_i` and `p_j` with `p_i = p_j = b_k`.
TODO: this is a variant of `compact_relation` in `coinductive_predicates.lean`, export it there.
-/
unsafe def compact_relation :
List expr → List (expr × expr) → List (Option expr) × List (expr × expr)
| [], ps => ([], ps)
| b :: bs, ps =>
match ps.spanₓ fun ap : expr × expr => ¬ap.2 == b with
| (_, []) =>
let (bs, ps) := compact_relation bs ps
(b :: bs, ps)
| (ps₁, (a, _) :: ps₂) =>
let i := a.instantiate_local b.local_uniq_name
let (bs, ps) := compact_relation (bs.map i) ((ps₁ ++ ps₂).map fun ⟨a, p⟩ => (a, i p))
(none :: bs, ps)
#align mk_iff.compact_relation mk_iff.compact_relation
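/- For instance (an informal sketch): compacting the relation
`R a₁ a₂ := ∃ b₁ b₂, a₁ = b₁ ∧ a₂ = b₁ + b₂` substitutes `a₁` for `b₁` and drops
the now-trivial first equation, leaving `R a₁ a₂ := ∃ b₂, a₂ = a₁ + b₂`. -/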
-- TODO: document
@[nolint doc_blame]
unsafe def constr_to_prop (univs : List level) (g : List expr) (idxs : List expr) (c : Name) :
tactic ((List (Option expr) × Sum expr ℕ) × expr) := do
let e ← get_env
let decl ← get_decl c
let some type' ← return <| decl.instantiate_type_univ_params univs
let type ← drop_pis g type'
let (args, res) ← open_pis type
let idxs_inst := res.get_app_args.drop g.length
let (bs, eqs) := compact_relation args (idxs.zip idxs_inst)
let bs' := bs.filterMap id
let eqs ←
eqs.mapM fun ⟨idx, inst⟩ => do
let ty := idx.local_type
let inst_ty ← infer_type inst
let sort u ← infer_type ty
is_def_eq ty inst_ty >> return ((const `eq [u] : expr) ty idx inst) <|>
return ((const `heq [u] : expr) ty idx inst_ty inst)
let (n, r) ←
match bs', eqs with
| [], [] => return (Sum.inr 0, mk_true)
| _, [] => do
let t : expr := bs'.getLastI.local_type
let sort l ← infer_type t
if l = level.zero then do
let r ← mk_exists_lst bs' t
return (Sum.inl bs', r)
else do
let r ← mk_exists_lst bs' mk_true
return (Sum.inr 0, r)
| _, _ => do
let r ← mk_exists_lst bs' (mk_and_lst eqs)
return (Sum.inr eqs, r)
return ((bs, n), r)
#align mk_iff.constr_to_prop mk_iff.constr_to_prop
-- TODO: document
@[nolint doc_blame]
unsafe def to_cases (s : List <| List (Option expr) × Sum expr ℕ) : tactic Unit := do
let h ← intro1
let i ← induction h
focus
((s i).enum.map fun ⟨p, (shape, t), _, vars, _⟩ => do
let si := (shape vars).filterMap fun ⟨c, v⟩ => c >>= fun _ => some v
select p (s - 1)
match t with
| Sum.inl e => do
si existsi
let some v ← return <| vars (shape - 1)
exact v
| Sum.inr n => do
si existsi
(iterate_exactly (n - 1) ((split >> constructor) >> skip) >> constructor) >> skip
done)
done
#align mk_iff.to_cases mk_iff.to_cases
/-- Iterate over two lists, if the first element of the first list is `none`, insert `none` into the
result and continue with the tail of first list. Otherwise, wrap the first element of the second
list with `some` and continue with the tails of both lists. Return when either list is empty.
Example:
```
list_option_merge [none, some (), none, some ()] [0, 1, 2, 3, 4] = [none, (some 0), none, (some 1)]
```
-/
def listOptionMerge {α : Type _} {β : Type _} : List (Option α) → List β → List (Option β)
| [], _ => []
| none :: xs, ys => none :: listOptionMerge xs ys
| some _ :: xs, y :: ys => some y :: listOptionMerge xs ys
| some _ :: xs, [] => []
#align mk_iff.list_option_merge MkIff.listOptionMerge
-- TODO: document
@[nolint doc_blame]
unsafe def to_inductive (cs : List Name) (gs : List expr)
(s : List (List (Option expr) × Sum expr ℕ)) (h : expr) : tactic Unit :=
match s.length with
| 0 => induction h >> skip
| n + 1 => do
let r ← elim_gen_sum n h
focus
((cs (r s)).map fun ⟨constr_name, h, bs, e⟩ => do
let n := (bs id).length
match e with
| Sum.inl e => elim_gen_prod (n - 1) h [] [] >> skip
| Sum.inr 0 => do
let (hs, h, _) ← elim_gen_prod n h [] []
clear h
| Sum.inr (e + 1) => do
let (hs, h, _) ← elim_gen_prod n h [] []
let (es, Eq, _) ← elim_gen_prod e h [] []
let es := es ++ [Eq]
/- `es.mmap' subst`: fails when we have dependent equalities (`heq`). `subst` will change the
dependent hypotheses, so that the `uniq` local names in `es` are wrong afterwards. Instead
we revert them and pull them out one-by-one. -/
revert_lst
es
es fun _ => intro1 >>= subst
let ctxt ← local_context
let gs := ctxt gs
let hs := (ctxt n).reverse
let m := gs some ++ list_option_merge bs hs
let args ←
m fun a =>
match a with
| some v => return v
| none => mk_mvar
let c ← mk_const constr_name
exact (c args)
done)
done
#align mk_iff.to_inductive mk_iff.to_inductive
end MkIff
namespace Tactic
open MkIff
/-- `mk_iff_of_inductive_prop i r` makes an `iff` rule for the inductively-defined proposition `i`.
The new rule `r` has the shape `∀ps is, i as ↔ ⋁_j, ∃cs, is = cs`, where `ps` are the type
parameters, `is` are the indices, `j` ranges over all possible constructors, the `cs` are the
parameters for each of the constructors, and the equalities `is = cs` are the instantiations for
each constructor for each of the indices to the inductive type `i`.
In each case, we remove constructor parameters (i.e. `cs`) when the corresponding equality would
be just `c = i` for some index `i`.
For example, `mk_iff_of_inductive_prop` on `list.chain` produces:
```lean
∀ {α : Type*} (R : α → α → Prop) (a : α) (l : list α),
chain R a l ↔ l = [] ∨ ∃{b : α} {l' : list α}, R a b ∧ chain R b l ∧ l = b :: l'
```
-/
unsafe def mk_iff_of_inductive_prop (i : Name) (r : Name) : tactic Unit := do
let e ← get_env
guard (e i)
let constrs := e.constructors_of i
let params := e.inductive_num_params i
let indices := e.inductive_num_indices i
let rec :=
match e.recursor_of i with
| some rec => rec
| none => i.append `rec
let decl ← get_decl i
let type := decl.type
let univ_names := decl.univ_params
let univs := univ_names.map level.param
/- we use these names for our universe parameters, maybe we should construct a copy of them
   using `uniq_name` -/
let (g, q(Prop))
    ← open_pis type |
      fail "Inductive type is not a proposition"
let lhs := (const i univs).mk_app g
let shape_rhss ← constrs.mapM (constr_to_prop univs (g.take params) (g.drop params))
let shape := shape_rhss.map Prod.fst
let rhss := shape_rhss.map Prod.snd
add_theorem_by r univ_names ((mk_iff lhs (mk_or_lst rhss)).pis g) do
let gs ← intro_lst (g local_pp_name)
split
focus' [to_cases shape, intro1 >>= to_inductive constrs (gs params) shape]
skip
#align tactic.mk_iff_of_inductive_prop tactic.mk_iff_of_inductive_prop
end Tactic
section
/- ./././Mathport/Syntax/Translate/Tactic/Mathlib/Core.lean:38:34: unsupported: setup_tactic_parser -/
/-- `mk_iff_of_inductive_prop i r` makes an `iff` rule for the inductively-defined proposition `i`.
The new rule `r` has the shape `∀ps is, i as ↔ ⋁_j, ∃cs, is = cs`, where `ps` are the type
parameters, `is` are the indices, `j` ranges over all possible constructors, the `cs` are the
parameters for each of the constructors, and the equalities `is = cs` are the instantiations for
each constructor for each of the indices to the inductive type `i`.
In each case, we remove constructor parameters (i.e. `cs`) when the corresponding equality would
be just `c = i` for some index `i`.
For example, `mk_iff_of_inductive_prop` on `list.chain` produces:
```lean
∀ {α : Type*} (R : α → α → Prop) (a : α) (l : list α),
chain R a l ↔ l = [] ∨ ∃{b : α} {l' : list α}, R a b ∧ chain R b l ∧ l = b :: l'
```
See also the `mk_iff` user attribute.
-/
@[user_command]
unsafe def mk_iff_of_inductive_prop_cmd (_ : parse (tk "mk_iff_of_inductive_prop")) : parser Unit :=
do
let i ← ident
let r ← ident
tactic.mk_iff_of_inductive_prop i r
#align mk_iff_of_inductive_prop_cmd mk_iff_of_inductive_prop_cmd
add_tactic_doc
{ Name := "mk_iff_of_inductive_prop"
category := DocCategory.cmd
declNames := [`` mk_iff_of_inductive_prop_cmd]
tags := ["logic", "environment"] }
/- ./././Mathport/Syntax/Translate/Expr.lean:207:4: warning: unsupported notation `parser.optional -/
/--
Applying the `mk_iff` attribute to an inductively-defined proposition `mk_iff` makes an `iff` rule
`r` with the shape `∀ps is, i as ↔ ⋁_j, ∃cs, is = cs`, where `ps` are the type parameters, `is` are
the indices, `j` ranges over all possible constructors, the `cs` are the parameters for each of the
constructors, and the equalities `is = cs` are the instantiations for each constructor for each of
the indices to the inductive type `i`.
In each case, we remove constructor parameters (i.e. `cs`) when the corresponding equality would
be just `c = i` for some index `i`.
For example, if we try the following:
```lean
@[mk_iff] structure foo (m n : ℕ) : Prop :=
(equal : m = n)
(sum_eq_two : m + n = 2)
```
Then `#check foo_iff` returns:
```lean
foo_iff : ∀ (m n : ℕ), foo m n ↔ m = n ∧ m + n = 2
```
You can add an optional string after `mk_iff` to change the name of the generated lemma.
For example, if we try the following:
```lean
@[mk_iff bar] structure foo (m n : ℕ) : Prop :=
(equal : m = n)
(sum_eq_two : m + n = 2)
```
Then `#check bar` returns:
```lean
bar : ∀ (m n : ℕ), foo m n ↔ m = n ∧ m + n = 2
```
See also the user command `mk_iff_of_inductive_prop`.
-/
@[user_attribute]
unsafe def mk_iff_attr : user_attribute Unit (Option Name)
where
Name := `mk_iff
descr := "Generate an `iff` lemma for an inductive `Prop`."
parser := parser.optional ident
after_set :=
some fun n _ _ => do
let tgt ← mk_iff_attr.get_param n
tactic.mk_iff_of_inductive_prop n (tgt (n "_iff"))
#align mk_iff_attr mk_iff_attr
add_tactic_doc
{ Name := "mk_iff"
category := DocCategory.attr
declNames := [`mk_iff_attr]
tags := ["logic", "environment"] }
end
|
[STATEMENT]
lemma underS_underS_trans:
assumes TRANS: "trans r" and ANTISYM: "antisym r" and
IN1: "a \<in> underS r b" and IN2: "b \<in> underS r c"
shows "a \<in> underS r c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> underS r c
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a \<in> underS r c
[PROOF STEP]
have "a \<in> under r b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> under r b
[PROOF STEP]
using IN1 underS_subset_under
[PROOF STATE]
proof (prove)
using this:
a \<in> underS r b
underS ?r ?a \<subseteq> under ?r ?a
goal (1 subgoal):
1. a \<in> under r b
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
a \<in> under r b
goal (1 subgoal):
1. a \<in> underS r c
[PROOF STEP]
with assms under_underS_trans
[PROOF STATE]
proof (chain)
picking this:
trans r
antisym r
a \<in> underS r b
b \<in> underS r c
\<lbrakk>trans ?r; antisym ?r; ?a \<in> under ?r ?b; ?b \<in> underS ?r ?c\<rbrakk> \<Longrightarrow> ?a \<in> underS ?r ?c
a \<in> under r b
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
trans r
antisym r
a \<in> underS r b
b \<in> underS r c
\<lbrakk>trans ?r; antisym ?r; ?a \<in> under ?r ?b; ?b \<in> underS ?r ?c\<rbrakk> \<Longrightarrow> ?a \<in> underS ?r ?c
a \<in> under r b
goal (1 subgoal):
1. a \<in> underS r c
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
a \<in> underS r c
goal:
No subgoals!
[PROOF STEP]
qed |
module Svc {
@ A component for getting time
passive component Time {
@ Port to retrieve time
sync input port timeGetPort: Fw.Time
}
}
|
[STATEMENT]
lemma exec_move_FAcc:
"pc < length (compE2 e) \<Longrightarrow> exec_move ci P t (e\<bullet>F{D}) h (stk, loc, pc, xcp) = exec_move ci P t e h (stk, loc, pc, xcp)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pc < length (compE2 e) \<Longrightarrow> exec_move ci P t (e\<bullet>F{D}) h (stk, loc, pc, xcp) = exec_move ci P t e h (stk, loc, pc, xcp)
[PROOF STEP]
unfolding exec_move_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pc < length (compE2 e) \<Longrightarrow> exec_meth ci (compP2 P) (compE2 (e\<bullet>F{D})) (compxE2 (e\<bullet>F{D}) 0 0) t h (stk, loc, pc, xcp) = exec_meth ci (compP2 P) (compE2 e) (compxE2 e 0 0) t h (stk, loc, pc, xcp)
[PROOF STEP]
by(auto intro!: ext intro: exec_meth_take) |
module KubeMeta
include("client.jl")
include("informer.jl")
export TaskListWatcher, TaskInformer, run, K8sClient, request, client
include("controllers/Controllers.jl")
using .Controllers
export Controller, DifferentiableController, DifferentiableAgent, add_task, update_task, delete_task
end # module
|
proposition connected_Times: assumes S: "connected S" and T: "connected T" shows "connected (S \<times> T)" |
{-# OPTIONS --cubical #-}
module _ where
open import Agda.Primitive.Cubical
open import Agda.Builtin.Cubical.Path
data ⊤ : Set where
tt : ⊤
data S : Set where
base : S
loop : base ≡ base
postulate
P' : base ≡ base → Set
pl : P' loop
foo : P' loop
foo with tt
... | w = pl
|
Formal statement is: lemma decseq_imp_monoseq: "decseq X \<Longrightarrow> monoseq X" Informal statement is: If a sequence is decreasing, then it is monotone. |
Jacqueline Jossa has hit back at her haters with a series of fiery videos after coming under fire for her latest parenting choices.
The mum-of-two – who shares four-year-old Ella and two-month-old Mia with husband Dan Osborne – took to Instagram yesterday after having enough of nasty comments about her skills as a mum.
The former EastEnders actress then went on to defend her decision to post photos of her two daughters online, as she continued, “People say ‘don't put pictures of your kids out there’. No, I'm putting pictures of my life, my kids are my life.”
After urging her fans to just be nice, the telly star went on to blast claims that she's a bad mum.
“I don't believe what they're saying, I know I'm a good parent,” she said, before adding, “It just annoys me that people feel like they have a right to do it.” Too right!
Clearly not done with her epic rant, Jac – who faced rumours she'd split with TOWIE hubby Dan earlier this year – also touched on her right to want to look good as a new mum.
“There shouldn't be anything wrong with trying to get in shape after having a baby and wanting to do it at the same time as being a mum”, she said.
Following another inspirational message about building each other up, not tearing each other down, and loving yourselves and your babies, Jacqueline finished her social media tirade with one final selfie.
In the snap, the telly star can be seen staring into the camera with her brunette hair tumbling over her face as she detailed the struggles new parents face.
“keep doing what you're doing”.
|
(************************************************************************)
(* * The Coq Proof Assistant / The Coq Development Team *)
(* v * INRIA, CNRS and contributors - Copyright 1999-2018 *)
(* <O___,, * (see CREDITS file for the list of authors) *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(* * (see LICENSE file for the text of the license) *)
(************************************************************************)
Require Ring.
Import Ring_polynom Ring_tac Ring_theory InitialRing Setoid List Morphisms.
Require Import ZArith_base.
Set Implicit Arguments.
(* Set Universe Polymorphism. *)
Section MakeFieldPol.
(* Field elements : R *)
Variable R:Type.
Bind Scope R_scope with R.
Delimit Scope R_scope with ring.
Local Open Scope R_scope.
Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R->R).
Variable (rdiv : R->R->R) (rinv : R->R).
Variable req : R -> R -> Prop.
Notation "0" := rO : R_scope.
Notation "1" := rI : R_scope.
Infix "+" := radd : R_scope.
Infix "-" := rsub : R_scope.
Infix "*" := rmul : R_scope.
Infix "/" := rdiv : R_scope.
Notation "- x" := (ropp x) : R_scope.
Notation "/ x" := (rinv x) : R_scope.
Infix "==" := req (at level 70, no associativity) : R_scope.
(* Equality properties *)
Variable Rsth : Equivalence req.
Variable Reqe : ring_eq_ext radd rmul ropp req.
Variable SRinv_ext : forall p q, p == q -> / p == / q.
(* Field properties *)
Record almost_field_theory : Prop := mk_afield {
AF_AR : almost_ring_theory rO rI radd rmul rsub ropp req;
AF_1_neq_0 : ~ 1 == 0;
AFdiv_def : forall p q, p / q == p * / q;
AFinv_l : forall p, ~ p == 0 -> / p * p == 1
}.
Section AlmostField.
Variable AFth : almost_field_theory.
Let ARth := AFth.(AF_AR).
Let rI_neq_rO := AFth.(AF_1_neq_0).
Let rdiv_def := AFth.(AFdiv_def).
Let rinv_l := AFth.(AFinv_l).
Add Morphism radd with signature (req ==> req ==> req) as radd_ext.
Proof. exact (Radd_ext Reqe). Qed.
Add Morphism rmul with signature (req ==> req ==> req) as rmul_ext.
Proof. exact (Rmul_ext Reqe). Qed.
Add Morphism ropp with signature (req ==> req) as ropp_ext.
Proof. exact (Ropp_ext Reqe). Qed.
Add Morphism rsub with signature (req ==> req ==> req) as rsub_ext.
Proof. exact (ARsub_ext Rsth Reqe ARth). Qed.
Add Morphism rinv with signature (req ==> req) as rinv_ext.
Proof. exact SRinv_ext. Qed.
Let eq_trans := Setoid.Seq_trans _ _ Rsth.
Let eq_sym := Setoid.Seq_sym _ _ Rsth.
Let eq_refl := Setoid.Seq_refl _ _ Rsth.
Let radd_0_l := ARadd_0_l ARth.
Let radd_comm := ARadd_comm ARth.
Let radd_assoc := ARadd_assoc ARth.
Let rmul_1_l := ARmul_1_l ARth.
Let rmul_0_l := ARmul_0_l ARth.
Let rmul_comm := ARmul_comm ARth.
Let rmul_assoc := ARmul_assoc ARth.
Let rdistr_l := ARdistr_l ARth.
Let ropp_mul_l := ARopp_mul_l ARth.
Let ropp_add := ARopp_add ARth.
Let rsub_def := ARsub_def ARth.
Let radd_0_r := ARadd_0_r Rsth ARth.
Let rmul_0_r := ARmul_0_r Rsth ARth.
Let rmul_1_r := ARmul_1_r Rsth ARth.
Let ropp_0 := ARopp_zero Rsth Reqe ARth.
Let rdistr_r := ARdistr_r Rsth Reqe ARth.
(* Coefficients : C *)
Variable C: Type.
Bind Scope C_scope with C.
Delimit Scope C_scope with coef.
Variable (cO cI: C) (cadd cmul csub : C->C->C) (copp : C->C).
Variable ceqb : C->C->bool.
Variable phi : C -> R.
Variable CRmorph : ring_morph rO rI radd rmul rsub ropp req
cO cI cadd cmul csub copp ceqb phi.
Notation "0" := cO : C_scope.
Notation "1" := cI : C_scope.
Infix "+" := cadd : C_scope.
Infix "-" := csub : C_scope.
Infix "*" := cmul : C_scope.
Notation "- x" := (copp x) : C_scope.
Infix "=?" := ceqb : C_scope.
Notation "[ x ]" := (phi x) (at level 0).
Let phi_0 := CRmorph.(morph0).
Let phi_1 := CRmorph.(morph1).
Lemma ceqb_spec c c' : BoolSpec ([c] == [c']) True (c =? c')%coef.
Proof.
generalize (CRmorph.(morph_eq) c c').
destruct (c =? c')%coef; auto.
Qed.
(* Power coefficients : Cpow *)
Variable Cpow : Type.
Variable Cp_phi : N -> Cpow.
Variable rpow : R -> Cpow -> R.
Variable pow_th : power_theory rI rmul req Cp_phi rpow.
(* sign function *)
Variable get_sign : C -> option C.
Variable get_sign_spec : sign_theory copp ceqb get_sign.
Variable cdiv:C -> C -> C*C.
Variable cdiv_th : div_theory req cadd cmul phi cdiv.
Let rpow_pow := pow_th.(rpow_pow_N).
(* Polynomial expressions : (PExpr C) *)
Bind Scope PE_scope with PExpr.
Delimit Scope PE_scope with poly.
Notation NPEeval := (PEeval rO rI radd rmul rsub ropp phi Cp_phi rpow).
Notation "P @ l" := (NPEeval l P) (at level 10, no associativity).
Arguments PEc _ _%coef.
Notation "0" := (PEc 0) : PE_scope.
Notation "1" := (PEc 1) : PE_scope.
Infix "+" := PEadd : PE_scope.
Infix "-" := PEsub : PE_scope.
Infix "*" := PEmul : PE_scope.
Notation "- e" := (PEopp e) : PE_scope.
Infix "^" := PEpow : PE_scope.
Definition NPEequiv e e' := forall l, e@l == e'@l.
Infix "===" := NPEequiv (at level 70, no associativity) : PE_scope.
Instance NPEequiv_eq : Equivalence NPEequiv.
Proof.
split; red; unfold NPEequiv; intros; [reflexivity|symmetry|etransitivity];
eauto.
Qed.
Instance NPEeval_ext : Proper (eq ==> NPEequiv ==> req) NPEeval.
Proof.
intros l l' <- e e' He. now rewrite (He l).
Qed.
Notation Nnorm :=
(norm_subst cO cI cadd cmul csub copp ceqb cdiv).
Notation NPphi_dev :=
(Pphi_dev rO rI radd rmul rsub ropp cO cI ceqb phi get_sign).
Notation NPphi_pow :=
(Pphi_pow rO rI radd rmul rsub ropp cO cI ceqb phi Cp_phi rpow get_sign).
(* add abstract semi-ring to help with some proofs *)
Add Ring Rring : (ARth_SRth ARth).
(* additional ring properties *)
Lemma rsub_0_l r : 0 - r == - r.
Proof.
rewrite rsub_def; ring.
Qed.
Lemma rsub_0_r r : r - 0 == r.
Proof.
rewrite rsub_def, ropp_0; ring.
Qed.
(***************************************************************************
Properties of division
***************************************************************************)
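(* In fraction notation, the lemmas below state the familiar identities (with the
   nonzeroness side conditions spelled out in each statement):
     q * (p / q) == p             a / b + c / d == (a * d + c * b) / (b * d)
     - (a / b) == - a / b         (a / b) * (c / d) == (a * c) / (b * d)
     / (a / b) == b / a                                                      *)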
Theorem rdiv_simpl p q : ~ q == 0 -> q * (p / q) == p.
Proof.
intros.
rewrite rdiv_def.
transitivity (/ q * q * p); [ ring | ].
now rewrite rinv_l.
Qed.
Instance rdiv_ext: Proper (req ==> req ==> req) rdiv.
Proof.
intros p1 p2 Ep q1 q2 Eq. now rewrite !rdiv_def, Ep, Eq.
Qed.
Lemma rmul_reg_l p q1 q2 :
~ p == 0 -> p * q1 == p * q2 -> q1 == q2.
Proof.
intros H EQ.
assert (H' : p * (q1 / p) == p * (q2 / p)).
{ now rewrite !rdiv_def, !rmul_assoc, EQ. }
now rewrite !rdiv_simpl in H'.
Qed.
Theorem field_is_integral_domain r1 r2 :
~ r1 == 0 -> ~ r2 == 0 -> ~ r1 * r2 == 0.
Proof.
intros H1 H2. contradict H2.
transitivity (/r1 * r1 * r2).
- now rewrite rinv_l.
- now rewrite <- rmul_assoc, H2.
Qed.
Theorem ropp_neq_0 r :
~ -(1) == 0 -> ~ r == 0 -> ~ -r == 0.
Proof.
intros.
setoid_replace (- r) with (- (1) * r).
- apply field_is_integral_domain; trivial.
- now rewrite <- ropp_mul_l, rmul_1_l.
Qed.
Theorem rdiv_r_r r : ~ r == 0 -> r / r == 1.
Proof.
intros. rewrite rdiv_def, rmul_comm. now apply rinv_l.
Qed.
Theorem rdiv1 r : r == r / 1.
Proof.
transitivity (1 * (r / 1)).
- symmetry; apply rdiv_simpl. apply rI_neq_rO.
- apply rmul_1_l.
Qed.
Theorem rdiv2 a b c d :
~ b == 0 ->
~ d == 0 ->
a / b + c / d == (a * d + c * b) / (b * d).
Proof.
intros H H0.
assert (~ b * d == 0) by now apply field_is_integral_domain.
apply rmul_reg_l with (b * d); trivial.
rewrite rdiv_simpl; trivial.
rewrite rdistr_r.
apply radd_ext.
- now rewrite <- rmul_assoc, (rmul_comm d), rmul_assoc, rdiv_simpl.
- now rewrite (rmul_comm c), <- rmul_assoc, rdiv_simpl.
Qed.
Theorem rdiv2b a b c d e :
~ (b*e) == 0 ->
~ (d*e) == 0 ->
a / (b*e) + c / (d*e) == (a * d + c * b) / (b * (d * e)).
Proof.
intros H H0.
assert (~ b == 0) by (contradict H; rewrite H; ring).
assert (~ e == 0) by (contradict H; rewrite H; ring).
assert (~ d == 0) by (contradict H0; rewrite H0; ring).
assert (~ b * (d * e) == 0)
by (repeat apply field_is_integral_domain; trivial).
apply rmul_reg_l with (b * (d * e)); trivial.
rewrite rdiv_simpl; trivial.
rewrite rdistr_r.
apply radd_ext.
- transitivity ((b * e) * (a / (b * e)) * d);
[ ring | now rewrite rdiv_simpl ].
- transitivity ((d * e) * (c / (d * e)) * b);
[ ring | now rewrite rdiv_simpl ].
Qed.
Theorem rdiv5 a b : - (a / b) == - a / b.
Proof.
now rewrite !rdiv_def, ropp_mul_l.
Qed.
Theorem rdiv3b a b c d e :
~ (b * e) == 0 ->
~ (d * e) == 0 ->
a / (b*e) - c / (d*e) == (a * d - c * b) / (b * (d * e)).
Proof.
intros H H0.
rewrite !rsub_def, rdiv5, ropp_mul_l.
now apply rdiv2b.
Qed.
Theorem rdiv6 a b :
~ a == 0 -> ~ b == 0 -> / (a / b) == b / a.
Proof.
intros H H0.
assert (Hk : ~ a / b == 0).
{ contradict H.
transitivity (b * (a / b)).
- now rewrite rdiv_simpl.
- rewrite H. apply rmul_0_r. }
apply rmul_reg_l with (a / b); trivial.
rewrite (rmul_comm (a / b)), rinv_l; trivial.
rewrite !rdiv_def.
transitivity (/ a * a * (/ b * b)); [ | ring ].
now rewrite !rinv_l, rmul_1_l.
Qed.
Theorem rdiv4 a b c d :
~ b == 0 ->
~ d == 0 ->
(a / b) * (c / d) == (a * c) / (b * d).
Proof.
intros H H0.
assert (~ b * d == 0) by now apply field_is_integral_domain.
apply rmul_reg_l with (b * d); trivial.
rewrite rdiv_simpl; trivial.
transitivity (b * (a / b) * (d * (c / d))); [ ring | ].
rewrite !rdiv_simpl; trivial.
Qed.
Theorem rdiv4b a b c d e f :
~ b * e == 0 ->
~ d * f == 0 ->
((a * f) / (b * e)) * ((c * e) / (d * f)) == (a * c) / (b * d).
Proof.
intros H H0.
assert (~ b == 0) by (contradict H; rewrite H; ring).
assert (~ e == 0) by (contradict H; rewrite H; ring).
assert (~ d == 0) by (contradict H0; rewrite H0; ring).
assert (~ f == 0) by (contradict H0; rewrite H0; ring).
assert (~ b*d == 0) by now apply field_is_integral_domain.
assert (~ e*f == 0) by now apply field_is_integral_domain.
rewrite rdiv4; trivial.
transitivity ((e * f) * (a * c) / ((e * f) * (b * d))).
- apply rdiv_ext; ring.
- rewrite <- rdiv4, rdiv_r_r; trivial.
Qed.
Theorem rdiv7 a b c d :
~ b == 0 ->
~ c == 0 ->
~ d == 0 ->
(a / b) / (c / d) == (a * d) / (b * c).
Proof.
intros.
rewrite (rdiv_def (a / b)).
rewrite rdiv6; trivial.
apply rdiv4; trivial.
Qed.
Theorem rdiv7b a b c d e f :
~ b * f == 0 ->
~ c * e == 0 ->
~ d * f == 0 ->
((a * e) / (b * f)) / ((c * e) / (d * f)) == (a * d) / (b * c).
Proof.
intros Hbf Hce Hdf.
assert (~ c==0) by (contradict Hce; rewrite Hce; ring).
assert (~ e==0) by (contradict Hce; rewrite Hce; ring).
assert (~ b==0) by (contradict Hbf; rewrite Hbf; ring).
assert (~ f==0) by (contradict Hbf; rewrite Hbf; ring).
assert (~ b*c==0) by now apply field_is_integral_domain.
assert (~ e*f==0) by now apply field_is_integral_domain.
rewrite rdiv7; trivial.
transitivity ((e * f) * (a * d) / ((e * f) * (b * c))).
- apply rdiv_ext; ring.
- now rewrite <- rdiv4, rdiv_r_r.
Qed.
Theorem rinv_nz a : ~ a == 0 -> ~ /a == 0.
Proof.
intros H H0. apply rI_neq_rO.
rewrite <- (rdiv_r_r H), rdiv_def, H0. apply rmul_0_r.
Qed.
Theorem rdiv8 a b : ~ b == 0 -> a == 0 -> a / b == 0.
Proof.
intros H H0.
now rewrite rdiv_def, H0, rmul_0_l.
Qed.
Theorem cross_product_eq a b c d :
~ b == 0 -> ~ d == 0 -> a * d == c * b -> a / b == c / d.
Proof.
intros.
transitivity (a / b * (d / d)).
- now rewrite rdiv_r_r, rmul_1_r.
- now rewrite rdiv4, H1, (rmul_comm b d), <- rdiv4, rdiv_r_r.
Qed.
(* Results about [pow_pos] and [pow_N] *)
Instance pow_ext : Proper (req ==> eq ==> req) (pow_pos rmul).
Proof.
intros x y H p p' <-.
induction p as [p IH| p IH|];simpl; trivial; now rewrite !IH, ?H.
Qed.
Instance pow_N_ext : Proper (req ==> eq ==> req) (pow_N rI rmul).
Proof.
intros x y H n n' <-. destruct n; simpl; trivial. now apply pow_ext.
Qed.
Lemma pow_pos_0 p : pow_pos rmul 0 p == 0.
Proof.
induction p;simpl;trivial; now rewrite !IHp.
Qed.
Lemma pow_pos_1 p : pow_pos rmul 1 p == 1.
Proof.
induction p;simpl;trivial; ring [IHp].
Qed.
Lemma pow_pos_cst c p : pow_pos rmul [c] p == [pow_pos cmul c p].
Proof.
induction p;simpl;trivial; now rewrite !CRmorph.(morph_mul), !IHp.
Qed.
Lemma pow_pos_mul_l x y p :
pow_pos rmul (x * y) p == pow_pos rmul x p * pow_pos rmul y p.
Proof.
induction p;simpl;trivial; ring [IHp].
Qed.
Lemma pow_pos_add_r x p1 p2 :
pow_pos rmul x (p1+p2) == pow_pos rmul x p1 * pow_pos rmul x p2.
Proof.
exact (Ring_theory.pow_pos_add Rsth rmul_ext rmul_assoc x p1 p2).
Qed.
Lemma pow_pos_mul_r x p1 p2 :
pow_pos rmul x (p1*p2) == pow_pos rmul (pow_pos rmul x p1) p2.
Proof.
induction p1;simpl;intros; rewrite ?pow_pos_mul_l, ?pow_pos_add_r;
simpl; trivial; ring [IHp1].
Qed.
Lemma pow_pos_nz x p : ~x==0 -> ~pow_pos rmul x p == 0.
Proof.
intros Hx. induction p;simpl;trivial;
repeat (apply field_is_integral_domain; trivial).
Qed.
Lemma pow_pos_div a b p : ~ b == 0 ->
pow_pos rmul (a / b) p == pow_pos rmul a p / pow_pos rmul b p.
Proof.
intros.
induction p; simpl; trivial.
- rewrite IHp.
assert (nz := pow_pos_nz p H).
rewrite !rdiv4; trivial.
apply field_is_integral_domain; trivial.
- rewrite IHp.
assert (nz := pow_pos_nz p H).
rewrite !rdiv4; trivial.
Qed.
(* === is a morphism *)
Instance PEadd_ext : Proper (NPEequiv ==> NPEequiv ==> NPEequiv) (@PEadd C).
Proof. intros ? ? E ? ? E' l. simpl. now rewrite E, E'. Qed.
Instance PEsub_ext : Proper (NPEequiv ==> NPEequiv ==> NPEequiv) (@PEsub C).
Proof. intros ? ? E ? ? E' l. simpl. now rewrite E, E'. Qed.
Instance PEmul_ext : Proper (NPEequiv ==> NPEequiv ==> NPEequiv) (@PEmul C).
Proof. intros ? ? E ? ? E' l. simpl. now rewrite E, E'. Qed.
Instance PEopp_ext : Proper (NPEequiv ==> NPEequiv) (@PEopp C).
Proof. intros ? ? E l. simpl. now rewrite E. Qed.
Instance PEpow_ext : Proper (NPEequiv ==> eq ==> NPEequiv) (@PEpow C).
Proof.
intros ? ? E ? ? <- l. simpl. rewrite !rpow_pow. apply pow_N_ext; trivial.
Qed.
Lemma PE_1_l (e : PExpr C) : (1 * e === e)%poly.
Proof.
intros l. simpl. rewrite phi_1. apply rmul_1_l.
Qed.
Lemma PE_1_r (e : PExpr C) : (e * 1 === e)%poly.
Proof.
intros l. simpl. rewrite phi_1. apply rmul_1_r.
Qed.
Lemma PEpow_0_r (e : PExpr C) : (e ^ 0 === 1)%poly.
Proof.
intros l. simpl. now rewrite !rpow_pow.
Qed.
Lemma PEpow_1_r (e : PExpr C) : (e ^ 1 === e)%poly.
Proof.
intros l. simpl. now rewrite !rpow_pow.
Qed.
Lemma PEpow_1_l n : (1 ^ n === 1)%poly.
Proof.
intros l. simpl. rewrite rpow_pow. destruct n; simpl.
- now rewrite phi_1.
- now rewrite phi_1, pow_pos_1.
Qed.
Lemma PEpow_add_r (e : PExpr C) n n' :
(e ^ (n+n') === e ^ n * e ^ n')%poly.
Proof.
intros l. simpl. rewrite !rpow_pow.
destruct n; simpl.
- rewrite rmul_1_l. trivial.
- destruct n'; simpl.
+ rewrite rmul_1_r. trivial.
+ apply pow_pos_add_r.
Qed.
Lemma PEpow_mul_l (e e' : PExpr C) n :
((e * e') ^ n === e ^ n * e' ^ n)%poly.
Proof.
intros l. simpl. rewrite !rpow_pow. destruct n; simpl; trivial.
- symmetry; apply rmul_1_l.
- apply pow_pos_mul_l.
Qed.
Lemma PEpow_mul_r (e : PExpr C) n n' :
(e ^ (n * n') === (e ^ n) ^ n')%poly.
Proof.
intros l. simpl. rewrite !rpow_pow.
destruct n, n'; simpl; trivial.
- now rewrite pow_pos_1.
- apply pow_pos_mul_r.
Qed.
Lemma PEpow_nz l e n : ~ e @ l == 0 -> ~ (e^n) @ l == 0.
Proof.
intros. simpl. rewrite rpow_pow. destruct n; simpl.
- apply rI_neq_rO.
- now apply pow_pos_nz.
Qed.
(***************************************************************************
Some equality test
***************************************************************************)
Local Notation "a &&& b" := (if a then b else false)
(at level 40, left associativity).
(* equality test *)
Fixpoint PExpr_eq (e e' : PExpr C) {struct e} : bool :=
match e, e' with
| PEc c, PEc c' => ceqb c c'
| PEX _ p, PEX _ p' => Pos.eqb p p'
| e1 + e2, e1' + e2' => PExpr_eq e1 e1' &&& PExpr_eq e2 e2'
| e1 - e2, e1' - e2' => PExpr_eq e1 e1' &&& PExpr_eq e2 e2'
| e1 * e2, e1' * e2' => PExpr_eq e1 e1' &&& PExpr_eq e2 e2'
| - e, - e' => PExpr_eq e e'
| e ^ n, e' ^ n' => N.eqb n n' &&& PExpr_eq e e'
| _, _ => false
end%poly.
Lemma if_true (a b : bool) : a &&& b = true -> a = true /\ b = true.
Proof.
destruct a, b; split; trivial.
Qed.
Theorem PExpr_eq_semi_ok e e' :
PExpr_eq e e' = true -> (e === e')%poly.
Proof.
revert e'; induction e; destruct e'; simpl; try discriminate.
- intros H l. now apply (morph_eq CRmorph).
- case Pos.eqb_spec; intros; now subst.
- intros H; destruct (if_true _ _ H). now rewrite IHe1, IHe2.
- intros H; destruct (if_true _ _ H). now rewrite IHe1, IHe2.
- intros H; destruct (if_true _ _ H). now rewrite IHe1, IHe2.
- intros H. now rewrite IHe.
- intros H. destruct (if_true _ _ H).
apply N.eqb_eq in H0. now rewrite IHe, H0.
Qed.
Lemma PExpr_eq_spec e e' : BoolSpec (e === e')%poly True (PExpr_eq e e').
Proof.
assert (H := PExpr_eq_semi_ok e e').
destruct PExpr_eq; constructor; intros; trivial. now apply H.
Qed.
(** Smart constructors for polynomial expression,
with reduction of constants *)
Definition NPEadd e1 e2 :=
match e1, e2 with
| PEc c1, PEc c2 => PEc (c1 + c2)
| PEc c, _ => if (c =? 0)%coef then e2 else e1 + e2
| _, PEc c => if (c =? 0)%coef then e1 else e1 + e2
(* Can we factor these cases out here? *)
| _, _ => (e1 + e2)
end%poly.
Infix "++" := NPEadd (at level 60, right associativity).
Theorem NPEadd_ok e1 e2 : (e1 ++ e2 === e1 + e2)%poly.
Proof.
intros l.
destruct e1, e2; simpl; try reflexivity; try (case ceqb_spec);
try intro H; try rewrite H; simpl;
try apply eq_refl; try (ring [phi_0]).
apply (morph_add CRmorph).
Qed.
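(* An informal illustration, assuming a concrete coefficient ring such
   as Z where the tests [(0 =? 0)%coef] and constant addition compute:
   [NPEadd (PEc 0) e] reduces to [e], [NPEadd (PEc 2) (PEc 3)] folds to
   [PEc 5], while two non-constant summands are kept as [(e1 + e2)%poly];
   [NPEadd_ok] above guarantees all these reductions preserve evaluation. *)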
Definition NPEsub e1 e2 :=
match e1, e2 with
| PEc c1, PEc c2 => PEc (c1 - c2)
| PEc c, _ => if (c =? 0)%coef then - e2 else e1 - e2
| _, PEc c => if (c =? 0)%coef then e1 else e1 - e2
(* Can we factor these cases out here? *)
| _, _ => e1 - e2
end%poly.
Infix "--" := NPEsub (at level 50, left associativity).
Theorem NPEsub_ok e1 e2: (e1 -- e2 === e1 - e2)%poly.
Proof.
intros l.
destruct e1, e2; simpl; try reflexivity; try case ceqb_spec;
try intro H; try rewrite H; simpl;
try rewrite phi_0; try reflexivity;
try (symmetry; apply rsub_0_l); try (symmetry; apply rsub_0_r).
apply (morph_sub CRmorph).
Qed.
Definition NPEopp e1 :=
match e1 with PEc c1 => PEc (- c1) | _ => - e1 end%poly.
Theorem NPEopp_ok e : (NPEopp e === -e)%poly.
Proof.
intros l. destruct e; simpl; trivial. apply (morph_opp CRmorph).
Qed.
Definition NPEpow x n :=
match n with
| N0 => 1
| Npos p =>
if (p =? 1)%positive then x else
match x with
| PEc c =>
if (c =? 1)%coef then 1
else if (c =? 0)%coef then 0
else PEc (pow_pos cmul c p)
| _ => x ^ n
end
end%poly.
Infix "^^" := NPEpow (at level 35, right associativity).
Theorem NPEpow_ok e n : (e ^^ n === e ^ n)%poly.
Proof.
intros l. unfold NPEpow; destruct n.
- simpl; now rewrite rpow_pow.
- case Pos.eqb_spec; [intro; subst | intros _].
+ simpl. now rewrite rpow_pow.
+ destruct e;simpl;trivial.
repeat case ceqb_spec; intros; rewrite ?rpow_pow, ?H; simpl.
* now rewrite phi_1, pow_pos_1.
* now rewrite phi_0, pow_pos_0.
* now rewrite pow_pos_cst.
Qed.
Fixpoint NPEmul (x y : PExpr C) {struct x} : PExpr C :=
match x, y with
| PEc c1, PEc c2 => PEc (c1 * c2)
| PEc c, _ => if (c =? 1)%coef then y else if (c =? 0)%coef then 0 else x * y
| _, PEc c => if (c =? 1)%coef then x else if (c =? 0)%coef then 0 else x * y
| e1 ^ n1, e2 ^ n2 => if (n1 =? n2)%N then (NPEmul e1 e2)^^n1 else x * y
| _, _ => x * y
end%poly.
Infix "**" := NPEmul (at level 40, left associativity).
Theorem NPEmul_ok e1 e2 : (e1 ** e2 === e1 * e2)%poly.
Proof.
intros l.
revert e2; induction e1;destruct e2; simpl;try reflexivity;
repeat (case ceqb_spec; intro H; try rewrite H; clear H);
simpl; try reflexivity; try ring [phi_0 phi_1].
apply (morph_mul CRmorph).
case N.eqb_spec; [intros <- | reflexivity].
rewrite NPEpow_ok. simpl.
rewrite !rpow_pow. rewrite IHe1.
destruct n; simpl; [ ring | apply pow_pos_mul_l ].
Qed.
(* simplification *)
Fixpoint PEsimp (e : PExpr C) : PExpr C :=
match e with
| e1 + e2 => (PEsimp e1) ++ (PEsimp e2)
| e1 * e2 => (PEsimp e1) ** (PEsimp e2)
| e1 - e2 => (PEsimp e1) -- (PEsimp e2)
| - e1 => NPEopp (PEsimp e1)
| e1 ^ n1 => (PEsimp e1) ^^ n1
| _ => e
end%poly.
Theorem PEsimp_ok e : (PEsimp e === e)%poly.
Proof.
induction e; simpl.
- reflexivity.
- reflexivity.
- intro l; trivial.
- intro l; trivial.
- rewrite NPEadd_ok. now f_equiv.
- rewrite NPEsub_ok. now f_equiv.
- rewrite NPEmul_ok. now f_equiv.
- rewrite NPEopp_ok. now f_equiv.
- rewrite NPEpow_ok. now f_equiv.
Qed.
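(* An informal illustration, assuming the coefficient tests
   [(1 =? 1)%coef] and [(0 =? 0)%coef] succeed:
   [PEsimp ((1 * PEX C xH) + 0)%poly] reduces through [NPEmul] and
   [NPEadd] to [PEX C xH], and [PEsimp_ok] ensures both sides evaluate
   to the same value in any environment. *)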
(****************************************************************************
Datastructure
***************************************************************************)
(* The input: syntax of a field expression *)
Inductive FExpr : Type :=
| FEO : FExpr
| FEI : FExpr
| FEc: C -> FExpr
| FEX: positive -> FExpr
| FEadd: FExpr -> FExpr -> FExpr
| FEsub: FExpr -> FExpr -> FExpr
| FEmul: FExpr -> FExpr -> FExpr
| FEopp: FExpr -> FExpr
| FEinv: FExpr -> FExpr
| FEdiv: FExpr -> FExpr -> FExpr
| FEpow: FExpr -> N -> FExpr .
Fixpoint FEeval (l : list R) (pe : FExpr) {struct pe} : R :=
match pe with
| FEO => rO
| FEI => rI
| FEc c => phi c
| FEX x => BinList.nth 0 x l
| FEadd x y => FEeval l x + FEeval l y
| FEsub x y => FEeval l x - FEeval l y
| FEmul x y => FEeval l x * FEeval l y
| FEopp x => - FEeval l x
| FEinv x => / FEeval l x
| FEdiv x y => FEeval l x / FEeval l y
| FEpow x n => rpow (FEeval l x) (Cp_phi n)
end.
Strategy expand [FEeval].
(* The result of the normalisation *)
Record linear : Type := mk_linear {
num : PExpr C;
denum : PExpr C;
condition : list (PExpr C) }.
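(* Informally: a [linear] value stands for the fraction [num / denum],
   valid under the side conditions [condition] (each listed expression
   must evaluate to a non-zero value); this reading is made precise by
   [Fnorm_FEeval_PEeval] below. *)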
(***************************************************************************
Semantics and properties of side condition
***************************************************************************)
Fixpoint PCond (l : list R) (le : list (PExpr C)) {struct le} : Prop :=
match le with
| nil => True
| e1 :: nil => ~ req (e1 @ l) rO
| e1 :: l1 => ~ req (e1 @ l) rO /\ PCond l l1
end.
Theorem PCond_cons l a l1 :
PCond l (a :: l1) <-> ~ a @ l == 0 /\ PCond l l1.
Proof.
destruct l1.
- simpl. split; [split|destruct 1]; trivial.
- reflexivity.
Qed.
Theorem PCond_cons_inv_l l a l1 : PCond l (a::l1) -> ~ a @ l == 0.
Proof.
rewrite PCond_cons. now destruct 1.
Qed.
Theorem PCond_cons_inv_r l a l1 : PCond l (a :: l1) -> PCond l l1.
Proof.
rewrite PCond_cons. now destruct 1.
Qed.
Theorem PCond_app l l1 l2 :
PCond l (l1 ++ l2) <-> PCond l l1 /\ PCond l l2.
Proof.
induction l1.
- simpl. split; [split|destruct 1]; trivial.
- simpl app. rewrite !PCond_cons, IHl1. symmetry; apply and_assoc.
Qed.
(* An unsatisfiable condition: issued when a division by zero is detected *)
Definition absurd_PCond := cons 0%poly nil.
Lemma absurd_PCond_bottom : forall l, ~ PCond l absurd_PCond.
Proof.
unfold absurd_PCond; simpl.
red; intros.
apply H.
apply phi_0.
Qed.
(***************************************************************************
Normalisation
***************************************************************************)
Definition default_isIn e1 p1 e2 p2 :=
if PExpr_eq e1 e2 then
match Z.pos_sub p1 p2 with
| Zpos p => Some (Npos p, 1%poly)
| Z0 => Some (N0, 1%poly)
| Zneg p => Some (N0, e2 ^^ Npos p)
end
else None.
Fixpoint isIn e1 p1 e2 p2 {struct e2}: option (N * PExpr C) :=
match e2 with
| e3 * e4 =>
match isIn e1 p1 e3 p2 with
| Some (N0, e5) => Some (N0, e5 ** (e4 ^^ Npos p2))
| Some (Npos p, e5) =>
match isIn e1 p e4 p2 with
| Some (n, e6) => Some (n, e5 ** e6)
| None => Some (Npos p, e5 ** (e4 ^^ Npos p2))
end
| None =>
match isIn e1 p1 e4 p2 with
| Some (n, e5) => Some (n, (e3 ^^ Npos p2) ** e5)
| None => None
end
end
| e3 ^ N0 => None
| e3 ^ Npos p3 => isIn e1 p1 e3 (Pos.mul p3 p2)
| _ => default_isIn e1 p1 e2 p2
end%poly.
Definition ZtoN z := match z with Zpos p => Npos p | _ => N0 end.
Definition NtoZ n := match n with Npos p => Zpos p | _ => Z0 end.
Lemma Z_pos_sub_gt p q : (p > q)%positive ->
Z.pos_sub p q = Zpos (p - q).
Proof. intros; now apply Z.pos_sub_gt, Pos.gt_lt. Qed.
Ltac simpl_pos_sub := rewrite ?Z_pos_sub_gt in * by assumption.
Lemma default_isIn_ok e1 e2 p1 p2 :
match default_isIn e1 p1 e2 p2 with
| Some(n, e3) =>
let n' := ZtoN (Zpos p1 - NtoZ n) in
(e2 ^ N.pos p2 === e1 ^ n' * e3)%poly
/\ (Zpos p1 > NtoZ n)%Z
| _ => True
end.
Proof.
unfold default_isIn.
case PExpr_eq_spec; trivial. intros EQ.
rewrite Z.pos_sub_spec.
case Pos.compare_spec;intros H; split; try reflexivity.
- simpl. now rewrite PE_1_r, H, EQ.
- rewrite NPEpow_ok, EQ, <- PEpow_add_r. f_equiv.
simpl. f_equiv. now rewrite Pos.add_comm, Pos.sub_add.
- simpl. rewrite PE_1_r, EQ. f_equiv.
rewrite Z.pos_sub_gt by now apply Pos.sub_decr. simpl. f_equiv.
rewrite Pos.sub_sub_distr, Pos.add_comm; trivial.
rewrite Pos.add_sub; trivial.
apply Pos.sub_decr; trivial.
- simpl. now apply Z.lt_gt, Pos.sub_decr.
Qed.
Ltac npe_simpl := rewrite ?NPEmul_ok, ?NPEpow_ok, ?PEpow_mul_l.
Ltac npe_ring := intro l; simpl; ring.
Theorem isIn_ok e1 p1 e2 p2 :
match isIn e1 p1 e2 p2 with
| Some(n, e3) =>
let n' := ZtoN (Zpos p1 - NtoZ n) in
(e2 ^ N.pos p2 === e1 ^ n' * e3)%poly
/\ (Zpos p1 > NtoZ n)%Z
| _ => True
end.
Proof.
Opaque NPEpow.
revert p1 p2.
induction e2; intros p1 p2;
try refine (default_isIn_ok e1 _ p1 p2); simpl isIn.
- specialize (IHe2_1 p1 p2).
destruct isIn as [([|p],e)|].
+ split; [|reflexivity].
clear IHe2_2.
destruct IHe2_1 as (IH,_).
npe_simpl. rewrite IH. npe_ring.
+ specialize (IHe2_2 p p2).
destruct isIn as [([|p'],e')|].
* destruct IHe2_1 as (IH1,GT1).
destruct IHe2_2 as (IH2,GT2).
split; [|simpl; apply Zgt_trans with (Z.pos p); trivial].
npe_simpl. rewrite IH1, IH2. simpl. simpl_pos_sub. simpl.
replace (N.pos p1) with (N.pos p + N.pos (p1 - p))%N.
rewrite PEpow_add_r; npe_ring.
{ simpl. f_equal. rewrite Pos.add_comm, Pos.sub_add. trivial.
now apply Pos.gt_lt. }
* destruct IHe2_1 as (IH1,GT1).
destruct IHe2_2 as (IH2,GT2).
assert (Z.pos p1 > Z.pos p')%Z by (now apply Zgt_trans with (Zpos p)).
split; [|simpl; trivial].
npe_simpl. rewrite IH1, IH2. simpl. simpl_pos_sub. simpl.
replace (N.pos (p1 - p')) with (N.pos (p1 - p) + N.pos (p - p'))%N.
rewrite PEpow_add_r; npe_ring.
{ simpl. f_equal. rewrite Pos.add_sub_assoc, Pos.sub_add; trivial.
now apply Pos.gt_lt.
now apply Pos.gt_lt. }
* destruct IHe2_1 as (IH,GT). split; trivial.
npe_simpl. rewrite IH. npe_ring.
+ specialize (IHe2_2 p1 p2).
destruct isIn as [(n,e)|]; trivial.
destruct IHe2_2 as (IH,GT). split; trivial.
set (d := ZtoN (Z.pos p1 - NtoZ n)) in *; clearbody d.
npe_simpl. rewrite IH. npe_ring.
- destruct n; trivial.
specialize (IHe2 p1 (p * p2)%positive).
destruct isIn as [(n,e)|]; trivial.
destruct IHe2 as (IH,GT). split; trivial.
set (d := ZtoN (Z.pos p1 - NtoZ n)) in *; clearbody d.
now rewrite <- PEpow_mul_r.
Qed.
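(* Informally: [isIn e1 p1 e2 p2] searches [e2 ^ p2] for a power of
   [e1]; on success it returns the leftover exponent [n] of [e1]
   together with a residual factor [e3] such that
   [e2 ^ p2 === e1 ^ (p1 - n) * e3] with [p1 > n], exactly as stated by
   [isIn_ok] above. *)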
Record rsplit : Type := mk_rsplit {
rsplit_left : PExpr C;
rsplit_common : PExpr C;
rsplit_right : PExpr C}.
(* Stupid name clash *)
Notation left := rsplit_left.
Notation right := rsplit_right.
Notation common := rsplit_common.
Fixpoint split_aux e1 p e2 {struct e1}: rsplit :=
match e1 with
| e3 * e4 =>
let r1 := split_aux e3 p e2 in
let r2 := split_aux e4 p (right r1) in
mk_rsplit (left r1 ** left r2)
(common r1 ** common r2)
(right r2)
| e3 ^ N0 => mk_rsplit 1 1 e2
| e3 ^ Npos p3 => split_aux e3 (Pos.mul p3 p) e2
| _ =>
match isIn e1 p e2 1 with
| Some (N0,e3) => mk_rsplit 1 (e1 ^^ Npos p) e3
| Some (Npos q, e3) => mk_rsplit (e1 ^^ Npos q) (e1 ^^ Npos (p - q)) e3
| None => mk_rsplit (e1 ^^ Npos p) 1 e2
end
end%poly.
Lemma split_aux_ok1 e1 p e2 :
(let res := match isIn e1 p e2 1 with
| Some (N0,e3) => mk_rsplit 1 (e1 ^^ Npos p) e3
| Some (Npos q, e3) => mk_rsplit (e1 ^^ Npos q) (e1 ^^ Npos (p - q)) e3
| None => mk_rsplit (e1 ^^ Npos p) 1 e2
end
in
e1 ^ Npos p === left res * common res
/\ e2 === right res * common res)%poly.
Proof.
Opaque NPEpow NPEmul.
intros. unfold res;clear res; generalize (isIn_ok e1 p e2 xH).
destruct (isIn e1 p e2 1) as [([|p'],e')|]; simpl.
- intros (H1,H2); split; npe_simpl.
+ now rewrite PE_1_l.
+ rewrite PEpow_1_r in H1. rewrite H1. npe_ring.
- intros (H1,H2); split; npe_simpl.
+ rewrite <- PEpow_add_r. f_equiv. simpl. f_equal.
rewrite Pos.add_comm, Pos.sub_add; trivial.
now apply Z.gt_lt in H2.
+ rewrite PEpow_1_r in H1. rewrite H1. simpl_pos_sub. simpl. npe_ring.
- intros _; split; npe_simpl; now rewrite PE_1_r.
Qed.
Theorem split_aux_ok: forall e1 p e2,
(e1 ^ Npos p === left (split_aux e1 p e2) * common (split_aux e1 p e2)
/\ e2 === right (split_aux e1 p e2) * common (split_aux e1 p e2))%poly.
Proof.
induction e1;intros k e2; try refine (split_aux_ok1 _ k e2);simpl.
destruct (IHe1_1 k e2) as (H1,H2).
destruct (IHe1_2 k (right (split_aux e1_1 k e2))) as (H3,H4).
clear IHe1_1 IHe1_2.
- npe_simpl; split.
* rewrite H1, H3. npe_ring.
* rewrite H2 at 1. rewrite H4 at 1. npe_ring.
- destruct n; simpl.
+ rewrite PEpow_0_r, PEpow_1_l, !PE_1_r. now split.
+ rewrite <- PEpow_mul_r. simpl. apply IHe1.
Qed.
Definition split e1 e2 := split_aux e1 xH e2.
Theorem split_ok_l e1 e2 :
(e1 === left (split e1 e2) * common (split e1 e2))%poly.
Proof.
destruct (split_aux_ok e1 xH e2) as (H,_). now rewrite <- H, PEpow_1_r.
Qed.
Theorem split_ok_r e1 e2 :
(e2 === right (split e1 e2) * common (split e1 e2))%poly.
Proof.
destruct (split_aux_ok e1 xH e2) as (_,H). trivial.
Qed.
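(* An informal illustration: [split] factors a common part out of two
   expressions, so that [e1 === left * common] and
   [e2 === right * common]. For instance, splitting [x * y] against
   [y * z] yields [left = x], [common = y] and [right = z], up to the
   smart-constructor simplifications above. *)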
Lemma split_nz_l l e1 e2 :
~ e1 @ l == 0 -> ~ left (split e1 e2) @ l == 0.
Proof.
intros H. contradict H. rewrite (split_ok_l e1 e2); simpl.
now rewrite H, rmul_0_l.
Qed.
Lemma split_nz_r l e1 e2 :
~ e2 @ l == 0 -> ~ right (split e1 e2) @ l == 0.
Proof.
intros H. contradict H. rewrite (split_ok_r e1 e2); simpl.
now rewrite H, rmul_0_l.
Qed.
Fixpoint Fnorm (e : FExpr) : linear :=
match e with
| FEO => mk_linear 0 1 nil
| FEI => mk_linear 1 1 nil
| FEc c => mk_linear (PEc c) 1 nil
| FEX x => mk_linear (PEX C x) 1 nil
| FEadd e1 e2 =>
let x := Fnorm e1 in
let y := Fnorm e2 in
let s := split (denum x) (denum y) in
mk_linear
((num x ** right s) ++ (num y ** left s))
(left s ** (right s ** common s))
(condition x ++ condition y)%list
| FEsub e1 e2 =>
let x := Fnorm e1 in
let y := Fnorm e2 in
let s := split (denum x) (denum y) in
mk_linear
((num x ** right s) -- (num y ** left s))
(left s ** (right s ** common s))
(condition x ++ condition y)%list
| FEmul e1 e2 =>
let x := Fnorm e1 in
let y := Fnorm e2 in
let s1 := split (num x) (denum y) in
let s2 := split (num y) (denum x) in
mk_linear (left s1 ** left s2)
(right s2 ** right s1)
(condition x ++ condition y)%list
| FEopp e1 =>
let x := Fnorm e1 in
mk_linear (NPEopp (num x)) (denum x) (condition x)
| FEinv e1 =>
let x := Fnorm e1 in
mk_linear (denum x) (num x) (num x :: condition x)
| FEdiv e1 e2 =>
let x := Fnorm e1 in
let y := Fnorm e2 in
let s1 := split (num x) (num y) in
let s2 := split (denum x) (denum y) in
mk_linear (left s1 ** right s2)
(left s2 ** right s1)
(num y :: condition x ++ condition y)%list
| FEpow e1 n =>
let x := Fnorm e1 in
mk_linear ((num x)^^n) ((denum x)^^n) (condition x)
end.
(* Example *)
(*
Eval compute
in (Fnorm
(FEdiv
(FEc cI)
(FEadd (FEinv (FEX xH%positive)) (FEinv (FEX (xO xH)%positive))))).
*)
Theorem Pcond_Fnorm l e :
PCond l (condition (Fnorm e)) -> ~ (denum (Fnorm e))@l == 0.
Proof.
induction e; simpl condition; rewrite ?PCond_cons, ?PCond_app;
simpl denum; intros (Hc1,Hc2) || intros Hc; rewrite ?NPEmul_ok.
- simpl. rewrite phi_1; exact rI_neq_rO.
- simpl. rewrite phi_1; exact rI_neq_rO.
- simpl; intros. rewrite phi_1; exact rI_neq_rO.
- simpl; intros. rewrite phi_1; exact rI_neq_rO.
- rewrite <- split_ok_r. simpl. apply field_is_integral_domain.
+ apply split_nz_l, IHe1, Hc1.
+ apply IHe2, Hc2.
- rewrite <- split_ok_r. simpl. apply field_is_integral_domain.
+ apply split_nz_l, IHe1, Hc1.
+ apply IHe2, Hc2.
- simpl. apply field_is_integral_domain.
+ apply split_nz_r, IHe1, Hc1.
+ apply split_nz_r, IHe2, Hc2.
- now apply IHe.
- trivial.
- destruct Hc2 as (Hc2,_). simpl. apply field_is_integral_domain.
+ apply split_nz_l, IHe1, Hc2.
+ apply split_nz_r, Hc1.
- rewrite NPEpow_ok. apply PEpow_nz, IHe, Hc.
Qed.
(***************************************************************************
Main theorem
***************************************************************************)
Ltac uneval :=
repeat match goal with
| |- context [ ?x @ ?l * ?y @ ?l ] => change (x@l * y@l) with ((x*y)@l)
| |- context [ ?x @ ?l + ?y @ ?l ] => change (x@l + y@l) with ((x+y)@l)
end.
Theorem Fnorm_FEeval_PEeval l fe:
PCond l (condition (Fnorm fe)) ->
FEeval l fe == (num (Fnorm fe)) @ l / (denum (Fnorm fe)) @ l.
Proof.
induction fe; simpl condition; rewrite ?PCond_cons, ?PCond_app; simpl;
intros (Hc1,Hc2) || intros Hc;
try (specialize (IHfe1 Hc1);apply Pcond_Fnorm in Hc1);
try (specialize (IHfe2 Hc2);apply Pcond_Fnorm in Hc2);
try set (F1 := Fnorm fe1) in *; try set (F2 := Fnorm fe2) in *.
- now rewrite phi_1, phi_0, rdiv_def.
- now rewrite phi_1; apply rdiv1.
- rewrite phi_1; apply rdiv1.
- rewrite phi_1; apply rdiv1.
- rewrite NPEadd_ok, !NPEmul_ok. simpl.
rewrite <- rdiv2b; uneval; rewrite <- ?split_ok_l, <- ?split_ok_r; trivial.
now f_equiv.
- rewrite NPEsub_ok, !NPEmul_ok. simpl.
rewrite <- rdiv3b; uneval; rewrite <- ?split_ok_l, <- ?split_ok_r; trivial.
now f_equiv.
- rewrite !NPEmul_ok. simpl.
rewrite IHfe1, IHfe2.
rewrite (split_ok_l (num F1) (denum F2) l),
(split_ok_r (num F1) (denum F2) l),
(split_ok_l (num F2) (denum F1) l),
(split_ok_r (num F2) (denum F1) l) in *.
apply rdiv4b; trivial.
- rewrite NPEopp_ok; simpl; rewrite (IHfe Hc); apply rdiv5.
- rewrite (IHfe Hc2); apply rdiv6; trivial;
apply Pcond_Fnorm; trivial.
- destruct Hc2 as (Hc2,Hc3).
rewrite !NPEmul_ok. simpl.
assert (U1 := split_ok_l (num F1) (num F2) l).
assert (U2 := split_ok_r (num F1) (num F2) l).
assert (U3 := split_ok_l (denum F1) (denum F2) l).
assert (U4 := split_ok_r (denum F1) (denum F2) l).
rewrite (IHfe1 Hc2), (IHfe2 Hc3), U1, U2, U3, U4.
simpl in U2, U3, U4. apply rdiv7b;
rewrite <- ?U2, <- ?U3, <- ?U4; try apply Pcond_Fnorm; trivial.
- rewrite !NPEpow_ok. simpl. rewrite !rpow_pow, (IHfe Hc).
destruct n; simpl.
+ apply rdiv1.
+ apply pow_pos_div. apply Pcond_Fnorm; trivial.
Qed.
Theorem Fnorm_crossproduct l fe1 fe2 :
let nfe1 := Fnorm fe1 in
let nfe2 := Fnorm fe2 in
(num nfe1 * denum nfe2) @ l == (num nfe2 * denum nfe1) @ l ->
PCond l (condition nfe1 ++ condition nfe2) ->
FEeval l fe1 == FEeval l fe2.
Proof.
simpl. rewrite PCond_app. intros Hcrossprod (Hc1,Hc2).
rewrite !Fnorm_FEeval_PEeval; trivial.
apply cross_product_eq; trivial;
apply Pcond_Fnorm; trivial.
Qed.
(* Correctness lemmas of reflexive tactics *)
Notation Ninterp_PElist :=
(interp_PElist rO rI radd rmul rsub ropp req phi Cp_phi rpow).
Notation Nmk_monpol_list :=
(mk_monpol_list cO cI cadd cmul csub copp ceqb cdiv).
Theorem Fnorm_ok:
forall n l lpe fe,
Ninterp_PElist l lpe ->
Peq ceqb (Nnorm n (Nmk_monpol_list lpe) (num (Fnorm fe))) (Pc cO) = true ->
PCond l (condition (Fnorm fe)) -> FEeval l fe == 0.
Proof.
intros n l lpe fe Hlpe H H1.
rewrite (Fnorm_FEeval_PEeval l fe H1).
apply rdiv8. apply Pcond_Fnorm; trivial.
transitivity (0@l); trivial.
rewrite (norm_subst_ok Rsth Reqe ARth CRmorph pow_th cdiv_th n l lpe); trivial.
change (0 @ l) with (Pphi 0 radd rmul phi l (Pc cO)).
apply (Peq_ok Rsth Reqe CRmorph); trivial.
Qed.
Notation ring_rw_correct :=
(ring_rw_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec).
Notation ring_rw_pow_correct :=
(ring_rw_pow_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec).
Notation ring_correct :=
(ring_correct Rsth Reqe ARth CRmorph pow_th cdiv_th).
(* simplify a field expression into a fraction *)
(* TODO: simplify when den is constant... *)
Definition display_linear l num den :=
NPphi_dev l num / NPphi_dev l den.
Definition display_pow_linear l num den :=
NPphi_pow l num / NPphi_pow l den.
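(* Informal reading: [display_linear l num den] renders the normalised
   fraction as [num / den] through the polynomial printer [NPphi_dev];
   the [_pow_] variant uses [NPphi_pow], which keeps explicit powers
   instead of expanding them into repeated products. *)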
Theorem Field_rw_correct n lpe l :
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall fe nfe, Fnorm fe = nfe ->
PCond l (condition nfe) ->
FEeval l fe ==
display_linear l (Nnorm n lmp (num nfe)) (Nnorm n lmp (denum nfe)).
Proof.
intros Hlpe lmp lmp_eq fe nfe eq_nfe H; subst nfe lmp.
rewrite (Fnorm_FEeval_PEeval _ _ H).
unfold display_linear; apply rdiv_ext;
eapply ring_rw_correct; eauto.
Qed.
Theorem Field_rw_pow_correct n lpe l :
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall fe nfe, Fnorm fe = nfe ->
PCond l (condition nfe) ->
FEeval l fe ==
display_pow_linear l (Nnorm n lmp (num nfe)) (Nnorm n lmp (denum nfe)).
Proof.
intros Hlpe lmp lmp_eq fe nfe eq_nfe H; subst nfe lmp.
rewrite (Fnorm_FEeval_PEeval _ _ H).
unfold display_pow_linear; apply rdiv_ext;
eapply ring_rw_pow_correct;eauto.
Qed.
Theorem Field_correct n l lpe fe1 fe2 :
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall nfe1, Fnorm fe1 = nfe1 ->
forall nfe2, Fnorm fe2 = nfe2 ->
Peq ceqb (Nnorm n lmp (num nfe1 * denum nfe2))
(Nnorm n lmp (num nfe2 * denum nfe1)) = true ->
PCond l (condition nfe1 ++ condition nfe2) ->
FEeval l fe1 == FEeval l fe2.
Proof.
intros Hlpe lmp eq_lmp nfe1 eq1 nfe2 eq2 Hnorm Hcond; subst nfe1 nfe2 lmp.
apply Fnorm_crossproduct; trivial.
eapply ring_correct; eauto.
Qed.
(* simplify a field equation: generate the crossproduct and simplify
polynomials *)
(** This allows rewriting modulo the simplification of PEeval on PMul *)
Declare Equivalent Keys PEeval rmul.
Theorem Field_simplify_eq_correct :
forall n l lpe fe1 fe2,
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall nfe1, Fnorm fe1 = nfe1 ->
forall nfe2, Fnorm fe2 = nfe2 ->
forall den, split (denum nfe1) (denum nfe2) = den ->
NPphi_dev l (Nnorm n lmp (num nfe1 * right den)) ==
NPphi_dev l (Nnorm n lmp (num nfe2 * left den)) ->
PCond l (condition nfe1 ++ condition nfe2) ->
FEeval l fe1 == FEeval l fe2.
Proof.
intros n l lpe fe1 fe2 Hlpe lmp Hlmp nfe1 eq1 nfe2 eq2 den eq3 Hcrossprod Hcond.
apply Fnorm_crossproduct; rewrite ?eq1, ?eq2; trivial.
simpl.
rewrite (split_ok_l (denum nfe1) (denum nfe2) l), eq3.
rewrite (split_ok_r (denum nfe1) (denum nfe2) l), eq3.
simpl.
rewrite !rmul_assoc.
apply rmul_ext; trivial.
rewrite (ring_rw_correct n lpe l Hlpe Logic.eq_refl (num nfe1 * right den) Logic.eq_refl),
(ring_rw_correct n lpe l Hlpe Logic.eq_refl (num nfe2 * left den) Logic.eq_refl).
rewrite Hlmp.
apply Hcrossprod.
Qed.
Theorem Field_simplify_eq_pow_correct :
forall n l lpe fe1 fe2,
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall nfe1, Fnorm fe1 = nfe1 ->
forall nfe2, Fnorm fe2 = nfe2 ->
forall den, split (denum nfe1) (denum nfe2) = den ->
NPphi_pow l (Nnorm n lmp (num nfe1 * right den)) ==
NPphi_pow l (Nnorm n lmp (num nfe2 * left den)) ->
PCond l (condition nfe1 ++ condition nfe2) ->
FEeval l fe1 == FEeval l fe2.
Proof.
intros n l lpe fe1 fe2 Hlpe lmp Hlmp nfe1 eq1 nfe2 eq2 den eq3 Hcrossprod Hcond.
apply Fnorm_crossproduct; rewrite ?eq1, ?eq2; trivial.
simpl.
rewrite (split_ok_l (denum nfe1) (denum nfe2) l), eq3.
rewrite (split_ok_r (denum nfe1) (denum nfe2) l), eq3.
simpl.
rewrite !rmul_assoc.
apply rmul_ext; trivial.
rewrite
(ring_rw_pow_correct n lpe l Hlpe Logic.eq_refl (num nfe1 * right den) Logic.eq_refl),
(ring_rw_pow_correct n lpe l Hlpe Logic.eq_refl (num nfe2 * left den) Logic.eq_refl).
rewrite Hlmp.
apply Hcrossprod.
Qed.
Theorem Field_simplify_aux_ok l fe1 fe2 den :
FEeval l fe1 == FEeval l fe2 ->
split (denum (Fnorm fe1)) (denum (Fnorm fe2)) = den ->
PCond l (condition (Fnorm fe1) ++ condition (Fnorm fe2)) ->
(num (Fnorm fe1) * right den) @ l == (num (Fnorm fe2) * left den) @ l.
Proof.
rewrite PCond_app; intros Hfe Hden (Hc1,Hc2); simpl.
assert (Hc1' := Pcond_Fnorm _ _ Hc1).
assert (Hc2' := Pcond_Fnorm _ _ Hc2).
set (N1 := num (Fnorm fe1)) in *. set (N2 := num (Fnorm fe2)) in *.
set (D1 := denum (Fnorm fe1)) in *. set (D2 := denum (Fnorm fe2)) in *.
assert (~ (common den) @ l == 0).
{ intro H. apply Hc1'.
rewrite (split_ok_l D1 D2 l).
rewrite Hden. simpl. ring [H]. }
apply (@rmul_reg_l ((common den) @ l)); trivial.
rewrite !(rmul_comm ((common den) @ l)), <- !rmul_assoc.
change
(N1@l * (right den * common den) @ l ==
N2@l * (left den * common den) @ l).
rewrite <- Hden, <- split_ok_l, <- split_ok_r.
apply (@rmul_reg_l (/ D2@l)). { apply rinv_nz; trivial. }
rewrite (rmul_comm (/ D2 @ l)), <- !rmul_assoc.
rewrite <- rdiv_def, rdiv_r_r, rmul_1_r by trivial.
apply (@rmul_reg_l (/ (D1@l))). { apply rinv_nz; trivial. }
rewrite !(rmul_comm (/ D1@l)), <- !rmul_assoc.
rewrite <- !rdiv_def, rdiv_r_r, rmul_1_r by trivial.
rewrite (rmul_comm (/ D2@l)), <- rdiv_def.
unfold N1,N2,D1,D2; rewrite <- !Fnorm_FEeval_PEeval; trivial.
Qed.
Theorem Field_simplify_eq_pow_in_correct :
forall n l lpe fe1 fe2,
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall nfe1, Fnorm fe1 = nfe1 ->
forall nfe2, Fnorm fe2 = nfe2 ->
forall den, split (denum nfe1) (denum nfe2) = den ->
forall np1, Nnorm n lmp (num nfe1 * right den) = np1 ->
forall np2, Nnorm n lmp (num nfe2 * left den) = np2 ->
FEeval l fe1 == FEeval l fe2 ->
PCond l (condition nfe1 ++ condition nfe2) ->
NPphi_pow l np1 ==
NPphi_pow l np2.
Proof.
intros. subst nfe1 nfe2 lmp np1 np2.
rewrite !(Pphi_pow_ok Rsth Reqe ARth CRmorph pow_th get_sign_spec).
repeat (rewrite <- (norm_subst_ok Rsth Reqe ARth CRmorph pow_th);trivial).
simpl. apply Field_simplify_aux_ok; trivial.
Qed.
Theorem Field_simplify_eq_in_correct :
forall n l lpe fe1 fe2,
Ninterp_PElist l lpe ->
forall lmp, Nmk_monpol_list lpe = lmp ->
forall nfe1, Fnorm fe1 = nfe1 ->
forall nfe2, Fnorm fe2 = nfe2 ->
forall den, split (denum nfe1) (denum nfe2) = den ->
forall np1, Nnorm n lmp (num nfe1 * right den) = np1 ->
forall np2, Nnorm n lmp (num nfe2 * left den) = np2 ->
FEeval l fe1 == FEeval l fe2 ->
PCond l (condition nfe1 ++ condition nfe2) ->
NPphi_dev l np1 == NPphi_dev l np2.
Proof.
intros. subst nfe1 nfe2 lmp np1 np2.
rewrite !(Pphi_dev_ok Rsth Reqe ARth CRmorph get_sign_spec).
repeat (rewrite <- (norm_subst_ok Rsth Reqe ARth CRmorph pow_th);trivial).
apply Field_simplify_aux_ok; trivial.
Qed.
Section Fcons_impl.
Variable Fcons : PExpr C -> list (PExpr C) -> list (PExpr C).
Hypothesis PCond_fcons_inv : forall l a l1,
PCond l (Fcons a l1) -> ~ a @ l == 0 /\ PCond l l1.
Fixpoint Fapp (l m:list (PExpr C)) {struct l} : list (PExpr C) :=
match l with
| nil => m
| cons a l1 => Fcons a (Fapp l1 m)
end.
Lemma fcons_ok : forall l l1,
(forall lock, lock = PCond l -> lock (Fapp l1 nil)) -> PCond l l1.
Proof.
intros l l1 h1; assert (H := h1 (PCond l) (refl_equal _));clear h1.
induction l1; simpl; intros.
trivial.
elim PCond_fcons_inv with (1 := H); intros.
destruct l1; trivial. split; trivial. apply IHl1; trivial.
Qed.
End Fcons_impl.
Section Fcons_simpl.
(* Some general simplifications of the condition: eliminate duplicates,
   split multiplications *)
Fixpoint Fcons (e:PExpr C) (l:list (PExpr C)) {struct l} : list (PExpr C) :=
match l with
nil => cons e nil
| cons a l1 => if PExpr_eq e a then l else cons a (Fcons e l1)
end.
Theorem PFcons_fcons_inv:
forall l a l1, PCond l (Fcons a l1) -> ~ a @ l == 0 /\ PCond l l1.
Proof.
induction l1 as [|e l1]; simpl Fcons.
- simpl; now split.
- case PExpr_eq_spec; intros H; rewrite !PCond_cons; intros (H1,H2);
repeat split; trivial.
+ now rewrite H.
+ now apply IHl1.
+ now apply IHl1.
Qed.
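(* An informal illustration: [Fcons] appends a condition only when it is
   not already present syntactically, e.g. [Fcons x (x :: y :: nil)]
   stays [x :: y :: nil] unchanged, while [Fcons z (x :: y :: nil)]
   becomes [x :: y :: z :: nil]. *)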
(* equality of normal forms rather than syntactic equality *)
Fixpoint Fcons0 (e:PExpr C) (l:list (PExpr C)) {struct l} : list (PExpr C) :=
match l with
nil => cons e nil
| cons a l1 =>
if Peq ceqb (Nnorm O nil e) (Nnorm O nil a) then l
else cons a (Fcons0 e l1)
end.
Theorem PFcons0_fcons_inv:
forall l a l1, PCond l (Fcons0 a l1) -> ~ a @ l == 0 /\ PCond l l1.
Proof.
induction l1 as [|e l1]; simpl Fcons0.
- simpl; now split.
- generalize (ring_correct O l nil a e). lazy zeta; simpl Peq.
case Peq; intros H; rewrite !PCond_cons; intros (H1,H2);
repeat split; trivial.
+ now rewrite H.
+ now apply IHl1.
+ now apply IHl1.
Qed.
(* split factorized denominators *)
Fixpoint Fcons00 (e:PExpr C) (l:list (PExpr C)) {struct e} : list (PExpr C) :=
match e with
PEmul e1 e2 => Fcons00 e1 (Fcons00 e2 l)
| PEpow e1 _ => Fcons00 e1 l
| _ => Fcons0 e l
end.
Theorem PFcons00_fcons_inv:
forall l a l1, PCond l (Fcons00 a l1) -> ~ a @ l == 0 /\ PCond l l1.
Proof.
intros l a; elim a; try (intros; apply PFcons0_fcons_inv; trivial; fail).
- intros p H p0 H0 l1 H1.
simpl in H1.
destruct (H _ H1) as (H2,H3).
destruct (H0 _ H3) as (H4,H5). split; trivial.
simpl.
apply field_is_integral_domain; trivial.
- intros. destruct (H _ H0). split; trivial.
apply PEpow_nz; trivial.
Qed.
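(* An informal illustration: [Fcons00] splits factored conditions before
   recording them, e.g. the single condition [x * y ^ n] is replaced by
   the two conditions [x] and [y], which is sound since in a field a
   product is non-zero iff both factors are. *)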
Definition Pcond_simpl_gen :=
fcons_ok _ PFcons00_fcons_inv.
(* Specific case when the equality test on coefficients is complete
   w.r.t. the field equality: non-zero coefficients can be eliminated,
   and opposites can be simplified (if -1 <> 0) *)
Hypothesis ceqb_complete : forall c1 c2, [c1] == [c2] -> ceqb c1 c2 = true.
Lemma ceqb_spec' c1 c2 : Bool.reflect ([c1] == [c2]) (ceqb c1 c2).
Proof.
assert (H := morph_eq CRmorph c1 c2).
assert (H' := @ceqb_complete c1 c2).
destruct (ceqb c1 c2); constructor.
- now apply H.
- intro E. specialize (H' E). discriminate.
Qed.
Fixpoint Fcons1 (e:PExpr C) (l:list (PExpr C)) {struct e} : list (PExpr C) :=
match e with
| PEmul e1 e2 => Fcons1 e1 (Fcons1 e2 l)
| PEpow e _ => Fcons1 e l
| PEopp e => if (-(1) =? 0)%coef then absurd_PCond else Fcons1 e l
| PEc c => if (c =? 0)%coef then absurd_PCond else l
| _ => Fcons0 e l
end.
Theorem PFcons1_fcons_inv:
forall l a l1, PCond l (Fcons1 a l1) -> ~ a @ l == 0 /\ PCond l l1.
Proof.
intros l a; elim a; try (intros; apply PFcons0_fcons_inv; trivial; fail).
- simpl; intros c l1.
case ceqb_spec'; intros H H0.
+ elim (@absurd_PCond_bottom l H0).
+ split; trivial. rewrite <- phi_0; trivial.
- intros p H p0 H0 l1 H1. simpl in H1.
destruct (H _ H1) as (H2,H3).
destruct (H0 _ H3) as (H4,H5).
split; trivial. simpl. apply field_is_integral_domain; trivial.
- simpl; intros p H l1.
case ceqb_spec'; intros H0 H1.
+ elim (@absurd_PCond_bottom l H1).
+ destruct (H _ H1).
split; trivial.
apply ropp_neq_0; trivial.
rewrite (morph_opp CRmorph), phi_0, phi_1 in H0. trivial.
- intros. destruct (H _ H0);split;trivial. apply PEpow_nz; trivial.
Qed.
Definition Fcons2 e l := Fcons1 (PEsimp e) l.
Theorem PFcons2_fcons_inv:
forall l a l1, PCond l (Fcons2 a l1) -> ~ a @ l == 0 /\ PCond l l1.
Proof.
unfold Fcons2; intros l a l1 H; split;
case (PFcons1_fcons_inv l (PEsimp a) l1); trivial.
intros H1 H2 H3; case H1.
transitivity (a@l); trivial.
apply PEsimp_ok.
Qed.
Definition Pcond_simpl_complete :=
fcons_ok _ PFcons2_fcons_inv.
End Fcons_simpl.
End AlmostField.
Section FieldAndSemiField.
Record field_theory : Prop := mk_field {
F_R : ring_theory rO rI radd rmul rsub ropp req;
F_1_neq_0 : ~ 1 == 0;
Fdiv_def : forall p q, p / q == p * / q;
Finv_l : forall p, ~ p == 0 -> / p * p == 1
}.
Definition F2AF f :=
mk_afield
(Rth_ARth Rsth Reqe f.(F_R)) f.(F_1_neq_0) f.(Fdiv_def) f.(Finv_l).
Record semi_field_theory : Prop := mk_sfield {
SF_SR : semi_ring_theory rO rI radd rmul req;
SF_1_neq_0 : ~ 1 == 0;
SFdiv_def : forall p q, p / q == p * / q;
SFinv_l : forall p, ~ p == 0 -> / p * p == 1
}.
End FieldAndSemiField.
End MakeFieldPol.
Definition SF2AF R (rO rI:R) radd rmul rdiv rinv req Rsth
(sf:semi_field_theory rO rI radd rmul rdiv rinv req) :=
mk_afield _ _
(SRth_ARth Rsth sf.(SF_SR))
sf.(SF_1_neq_0)
sf.(SFdiv_def)
sf.(SFinv_l).
Section Complete.
Variable R : Type.
Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R).
Variable (rdiv : R -> R -> R) (rinv : R -> R).
Variable req : R -> R -> Prop.
Notation "0" := rO. Notation "1" := rI.
Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y).
Notation "x - y " := (rsub x y). Notation "- x" := (ropp x).
Notation "x / y " := (rdiv x y). Notation "/ x" := (rinv x).
Notation "x == y" := (req x y) (at level 70, no associativity).
Variable Rsth : Setoid_Theory R req.
Add Parametric Relation : R req
reflexivity proved by Rsth.(@Equivalence_Reflexive _ _)
symmetry proved by Rsth.(@Equivalence_Symmetric _ _)
transitivity proved by Rsth.(@Equivalence_Transitive _ _)
as R_setoid3.
Variable Reqe : ring_eq_ext radd rmul ropp req.
Add Morphism radd with signature (req ==> req ==> req) as radd_ext3.
Proof. exact (Radd_ext Reqe). Qed.
Add Morphism rmul with signature (req ==> req ==> req) as rmul_ext3.
Proof. exact (Rmul_ext Reqe). Qed.
Add Morphism ropp with signature (req ==> req) as ropp_ext3.
Proof. exact (Ropp_ext Reqe). Qed.
Section AlmostField.
Variable AFth : almost_field_theory rO rI radd rmul rsub ropp rdiv rinv req.
Let ARth := AFth.(AF_AR).
Let rI_neq_rO := AFth.(AF_1_neq_0).
Let rdiv_def := AFth.(AFdiv_def).
Let rinv_l := AFth.(AFinv_l).
Hypothesis S_inj : forall x y, 1+x==1+y -> x==y.
Hypothesis gen_phiPOS_not_0 : forall p, ~ gen_phiPOS1 rI radd rmul p == 0.
Lemma add_inj_r p x y :
gen_phiPOS1 rI radd rmul p + x == gen_phiPOS1 rI radd rmul p + y -> x==y.
Proof.
elim p using Pos.peano_ind; simpl; intros.
apply S_inj; trivial.
apply H.
apply S_inj.
rewrite !(ARadd_assoc ARth).
rewrite <- (ARgen_phiPOS_Psucc Rsth Reqe ARth); trivial.
Qed.
Lemma gen_phiPOS_inj x y :
gen_phiPOS rI radd rmul x == gen_phiPOS rI radd rmul y ->
x = y.
Proof.
rewrite <- !(same_gen Rsth Reqe ARth).
case (Pos.compare_spec x y).
intros.
trivial.
intros.
elim gen_phiPOS_not_0 with (y - x)%positive.
apply add_inj_r with x.
symmetry.
rewrite (ARadd_0_r Rsth ARth).
rewrite <- (ARgen_phiPOS_add Rsth Reqe ARth).
now rewrite Pos.add_comm, Pos.sub_add.
intros.
elim gen_phiPOS_not_0 with (x - y)%positive.
apply add_inj_r with y.
rewrite (ARadd_0_r Rsth ARth).
rewrite <- (ARgen_phiPOS_add Rsth Reqe ARth).
now rewrite Pos.add_comm, Pos.sub_add.
Qed.
Lemma gen_phiN_inj x y :
gen_phiN rO rI radd rmul x == gen_phiN rO rI radd rmul y ->
x = y.
Proof.
destruct x; destruct y; simpl; intros; trivial.
elim gen_phiPOS_not_0 with p.
symmetry .
rewrite (same_gen Rsth Reqe ARth); trivial.
elim gen_phiPOS_not_0 with p.
rewrite (same_gen Rsth Reqe ARth); trivial.
rewrite gen_phiPOS_inj with (1 := H); trivial.
Qed.
Lemma gen_phiN_complete x y :
gen_phiN rO rI radd rmul x == gen_phiN rO rI radd rmul y ->
N.eqb x y = true.
Proof.
intros. now apply N.eqb_eq, gen_phiN_inj.
Qed.
End AlmostField.
Section Field.
Variable Fth : field_theory rO rI radd rmul rsub ropp rdiv rinv req.
Let Rth := Fth.(F_R).
Let rI_neq_rO := Fth.(F_1_neq_0).
Let rdiv_def := Fth.(Fdiv_def).
Let rinv_l := Fth.(Finv_l).
Let AFth := F2AF Rsth Reqe Fth.
Let ARth := Rth_ARth Rsth Reqe Rth.
Lemma ring_S_inj x y : 1+x==1+y -> x==y.
Proof.
intros.
rewrite <- (ARadd_0_l ARth x), <- (ARadd_0_l ARth y).
rewrite <- (Ropp_def Rth 1), (ARadd_comm ARth 1).
rewrite <- !(ARadd_assoc ARth). now apply (Radd_ext Reqe).
Qed.
Hypothesis gen_phiPOS_not_0 : forall p, ~ gen_phiPOS1 rI radd rmul p == 0.
Let gen_phiPOS_inject :=
gen_phiPOS_inj AFth ring_S_inj gen_phiPOS_not_0.
Lemma gen_phiPOS_discr_sgn x y :
~ gen_phiPOS rI radd rmul x == - gen_phiPOS rI radd rmul y.
Proof.
red; intros.
apply gen_phiPOS_not_0 with (y + x)%positive.
rewrite (ARgen_phiPOS_add Rsth Reqe ARth).
transitivity (gen_phiPOS1 1 radd rmul y + - gen_phiPOS1 1 radd rmul y).
apply (Radd_ext Reqe); trivial.
reflexivity.
rewrite (same_gen Rsth Reqe ARth).
rewrite (same_gen Rsth Reqe ARth).
trivial.
apply (Ropp_def Rth).
Qed.
Lemma gen_phiZ_inj x y :
gen_phiZ rO rI radd rmul ropp x == gen_phiZ rO rI radd rmul ropp y ->
x = y.
Proof.
destruct x; destruct y; simpl; intros.
trivial.
elim gen_phiPOS_not_0 with p.
rewrite (same_gen Rsth Reqe ARth).
symmetry ; trivial.
elim gen_phiPOS_not_0 with p.
rewrite (same_gen Rsth Reqe ARth).
rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)).
rewrite <- H.
apply (ARopp_zero Rsth Reqe ARth).
elim gen_phiPOS_not_0 with p.
rewrite (same_gen Rsth Reqe ARth).
trivial.
rewrite gen_phiPOS_inject with (1 := H); trivial.
elim gen_phiPOS_discr_sgn with (1 := H).
elim gen_phiPOS_not_0 with p.
rewrite (same_gen Rsth Reqe ARth).
rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)).
rewrite H.
apply (ARopp_zero Rsth Reqe ARth).
elim gen_phiPOS_discr_sgn with p0 p.
symmetry ; trivial.
replace p0 with p; trivial.
apply gen_phiPOS_inject.
rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)).
rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p0)).
rewrite H; trivial.
reflexivity.
Qed.
Lemma gen_phiZ_complete x y :
gen_phiZ rO rI radd rmul ropp x == gen_phiZ rO rI radd rmul ropp y ->
Zeq_bool x y = true.
Proof.
intros.
replace y with x.
unfold Zeq_bool.
rewrite Z.compare_refl; trivial.
apply gen_phiZ_inj; trivial.
Qed.
End Field.
End Complete.
Arguments FEO [C].
Arguments FEI [C].
|
(* Title: HOL/Auth/n_germanSimp_lemma_inv__34_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_inv__34_on_rules imports n_germanSimp_lemma_on_inv__34
begin
section{*All lemmas on the causal relation between the rules and inv__34*}
lemma lemma_inv__34_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__34 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__0Vsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__1Vsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__34) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__34) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
Formal statement is: lemma infdist_le: "a \<in> A \<Longrightarrow> infdist x A \<le> dist x a" Informal statement is: If $a \in A$, then the infimum of the distances from $x$ to $A$ is less than or equal to the distance from $x$ to $a$. |
! t2.for - tests zext function
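! zext is the zero-extend intrinsic: the 8-bit source pattern is
! widened with high-order zero bits, so '80'x held in a logical*1
! becomes 00000080 rather than the sign-extended ffffff80.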
integer*4 i,j,k
byte b
logical*1 lx
b = '30'x
lx = '80'x
i = zext(b)
j = zext(lx)
type 1, 'b,i,...lx,j=', b, i, lx, j
i = i .and. 'f0'x
j = j .and. 'f0'x
type 1, 'b,i,...lx,j=', b, i, lx, j
i = 'f0f0f0ff'x
j = i .and. 'ffff'x
type 2, 'i,j=',i,j
type *, ' '
k = '01ff'x
type *, 'k = ', k
stop
1 format(1x,a, 1x,z8.8, 1x,z8.8, 1x,'... ', z8.8, 1x, z8.8)
2 format(1x,a, 1x,z8.8, 1x,z8.8)
end
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import data.fintype.perm
import group_theory.perm.sign
import logic.equiv.option
/-!
# Permutations of `option α`
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
open equiv
@[simp] lemma equiv.option_congr_one {α : Type*} : (1 : perm α).option_congr = 1 :=
equiv.option_congr_refl
@[simp] lemma equiv.option_congr_swap {α : Type*} [decidable_eq α] (x y : α) :
option_congr (swap x y) = swap (some x) (some y) :=
begin
ext (_ | i),
{ simp [swap_apply_of_ne_of_ne] },
{ by_cases hx : i = x,
simp [hx, swap_apply_of_ne_of_ne],
by_cases hy : i = y;
simp [hx, hy, swap_apply_of_ne_of_ne], }
end
@[simp] lemma equiv.option_congr_sign {α : Type*} [decidable_eq α] [fintype α] (e : perm α) :
perm.sign e.option_congr = perm.sign e :=
begin
apply perm.swap_induction_on e,
{ simp [perm.one_def] },
{ intros f x y hne h,
simp [h, hne, perm.mul_def, ←equiv.option_congr_trans] }
end
@[simp] lemma map_equiv_remove_none {α : Type*} [decidable_eq α] (σ : perm (option α)) :
(remove_none σ).option_congr = swap none (σ none) * σ :=
begin
ext1 x,
have : option.map ⇑(remove_none σ) x = (swap none (σ none)) (σ x),
{ cases x,
{ simp },
{ cases h : σ (some x),
{ simp [remove_none_none _ h], },
{ have hn : σ (some x) ≠ none := by simp [h],
have hσn : σ (some x) ≠ σ none := σ.injective.ne (by simp),
simp [remove_none_some _ ⟨_, h⟩, ←h, swap_apply_of_ne_of_ne hn hσn] } } },
simpa using this,
end
/-- Permutations of `option α` are equivalent to fixing an
`option α` and permuting the remaining with a `perm α`.
The fixed `option α` is swapped with `none`. -/
@[simps] def equiv.perm.decompose_option {α : Type*} [decidable_eq α] :
perm (option α) ≃ option α × perm α :=
{ to_fun := λ σ, (σ none, remove_none σ),
inv_fun := λ i, swap none i.1 * i.2.option_congr,
left_inv := λ σ, by simp,
right_inv := λ ⟨x, σ⟩, begin
have : remove_none (swap none x * σ.option_congr) = σ :=
equiv.option_congr_injective (by simp [←mul_assoc]),
simp [←perm.eq_inv_iff_eq, this],
end }
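-- An informal illustration: `equiv.perm.decompose_option (swap none (some x))`
-- gives `(some x, 1)`, since `remove_none` of that swap is the identity;
-- conversely `decompose_option.symm (some x, 1)` rebuilds `swap none (some x)`.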
lemma equiv.perm.decompose_option_symm_of_none_apply {α : Type*} [decidable_eq α]
(e : perm α) (i : option α) :
equiv.perm.decompose_option.symm (none, e) i = i.map e :=
by simp
lemma equiv.perm.decompose_option_symm_sign {α : Type*} [decidable_eq α] [fintype α] (e : perm α) :
perm.sign (equiv.perm.decompose_option.symm (none, e)) = perm.sign e :=
by simp
/-- The set of all permutations of `option α` can be constructed by augmenting the set of
permutations of `α` by each element of `option α` in turn. -/
lemma finset.univ_perm_option {α : Type*} [decidable_eq α] [fintype α] :
@finset.univ (perm $ option α) _ =
(finset.univ : finset $ option α × perm α).map equiv.perm.decompose_option.symm.to_embedding :=
(finset.univ_map_equiv_to_embedding _).symm
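-- A consequence sketch: taking cardinalities in `finset.univ_perm_option`
-- recovers `fintype.card (perm (option α)) = fintype.card (option α) * fintype.card (perm α)`,
-- i.e. the factorial recurrence (n+1)! = (n+1) * n!.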
|
module Data.Num.Pred where
open import Data.Num.Core
open import Data.Num
open import Data.Num.Properties
open import Data.Num.Continuous
-- open import Data.Num.Bijection
open import Data.Nat
open import Data.Nat.Properties.Simple
open import Data.Nat.Properties.Extra
open import Data.Fin using (Fin; suc; zero; #_)
open import Data.Vec
open import Data.Product hiding (map)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary
open import Relation.Nullary.Negation
open import Relation.Nullary.Decidable using (True; fromWitness; toWitness)
open ≡-Reasoning
open ≤-Reasoning renaming (begin_ to start_; _∎ to _□; _≡⟨_⟩_ to _≈⟨_⟩_)
open DecTotalOrder decTotalOrder using (reflexive) renaming (refl to ≤-refl)
-- infixl 6 _∔_
infixl 6 _∔_
data Term : ℕ → Set where
var : ∀ {n} → Fin n → Term n
_∔_ : ∀ {n} → Term n → Term n → Term n
data Predicate : ℕ → Set where
-- equality
_≋P_ : ∀ {n} → (t₁ : Term n) → (t₂ : Term n) → Predicate n
-- implication
_→P_ : ∀ {n} → (p₁ : Predicate n) → (p₂ : Predicate n) → Predicate n
-- ∀, introduces new variable
∀P : ∀ {n} → (p : Predicate (suc n)) → Predicate n
record Signature : Set₁ where
constructor sig
field
carrier : Set
_⊕_ : carrier → carrier → carrier
_≈_ : carrier → carrier → Set
open Signature
ℕ-sig : Signature
ℕ-sig = sig ℕ _+_ _≡_
Numeral-sig : (b d : ℕ) → True (Continuous? b d 0) → Signature
Numeral-sig b d cont = sig (Numeral b d 0) (_⊹_ {cont = cont}) _≋_
-- BijN-sig : ℕ → Signature
-- BijN-sig b = sig (BijN b) (_⊹_ {surj = fromWitness (BijN⇒Surjective b)}) _≡_
--
Env : Set → ℕ → Set
Env = Vec
-- the decoder
⟦_⟧T : ∀ {n}
→ Term n
→ (sig : Signature)
→ Vec (carrier sig) n
→ carrier sig
⟦ var i ⟧T _ env = lookup i env
⟦ term₁ ∔ term₂ ⟧T (sig A _⊕_ _≈_) env = ⟦ term₁ ⟧T (sig A _⊕_ _≈_) env ⊕ ⟦ term₂ ⟧T (sig A _⊕_ _≈_) env
⟦_⟧P : ∀ {n}
→ Predicate n
→ (sig : Signature)
→ Env (carrier sig) n
→ Set
⟦ t₁ ≋P t₂ ⟧P (sig carrier _⊕_ _≈_) env
= ⟦ t₁ ⟧T (sig carrier _⊕_ _≈_) env ≈ ⟦ t₂ ⟧T (sig carrier _⊕_ _≈_) env
⟦ p →P q ⟧P signature env = ⟦ p ⟧P signature env → ⟦ q ⟧P signature env
⟦ ∀P pred ⟧P signature env = ∀ x → ⟦ pred ⟧P signature (x ∷ env)
module Example-1 where
≋-trans : Predicate zero
≋-trans = let x = var (# 0)
y = var (# 1)
z = var (# 2)
in ∀P (∀P (∀P (((x ≋P y) →P (y ≋P z)) →P (x ≋P z))))
≋-trans-ℕ : Set
≋-trans-ℕ = ⟦ ≋-trans ⟧P ℕ-sig []
≋-trans-Numeral : (b d : ℕ) → True (Continuous? b d 0) → Set
≋-trans-Numeral b d prop = ⟦ ≋-trans ⟧P (Numeral-sig b d prop) []
-- lemma for env
lookup-map : ∀ {i j} → {A : Set i} {B : Set j}
→ (f : A → B) →
∀ {n} → (xs : Vec A n) (i : Fin n)
→ lookup i (map f xs) ≡ f (lookup i xs)
lookup-map f [] ()
lookup-map f (x ∷ xs) zero = refl
lookup-map f (x ∷ xs) (suc i) = lookup-map f xs i
-- ------------------------------------------------------------------------
-- -- toℕ : preserving structures of terms and predicates
-- ------------------------------------------------------------------------
toℕ-term-homo : ∀ {b d n}
→ (cont : True (Continuous? b d 0))
→ (t : Term n)
→ (env : Vec (Numeral b d 0) n)
→ ⟦ t ⟧T ℕ-sig (map ⟦_⟧ env) ≡ ⟦ ⟦ t ⟧T (Numeral-sig b d cont) env ⟧
toℕ-term-homo cont (var i) env = lookup-map ⟦_⟧ env i
toℕ-term-homo {b} {d} cont (t₁ ∔ t₂) env
rewrite toℕ-term-homo cont t₁ env | toℕ-term-homo cont t₂ env
= sym (toℕ-⊹-homo cont (⟦ t₁ ⟧T (Numeral-sig b d cont) env) (⟦ t₂ ⟧T (Numeral-sig b d cont) env))
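-- An informal illustration: decoding a term over ℕ after mapping the
-- environment with ⟦_⟧ agrees with decoding over numerals first and then
-- applying ⟦_⟧; e.g. for t = var zero ∔ var zero and env = x ∷ [], both
-- sides denote ⟦ x ⟧ + ⟦ x ⟧.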
mutual
toℕ-pred-ℕ⇒Numeral : ∀ {b d n}
→ (cont : True (Continuous? b (suc d) 0))
→ (pred : Predicate n)
→ (env : Vec (Numeral b (suc d) 0) n)
→ ⟦ pred ⟧P ℕ-sig (map ⟦_⟧ env)
→ ⟦ pred ⟧P (Numeral-sig b (suc d) cont) env
toℕ-pred-ℕ⇒Numeral {b} {d} cont (t₁ ≋P t₂) env sem-ℕ =
begin
⟦ ⟦ t₁ ⟧T (Numeral-sig b (suc d) cont) env ⟧
≡⟨ sym (toℕ-term-homo cont t₁ env) ⟩
⟦ t₁ ⟧T ℕ-sig (map ⟦_⟧ env)
≡⟨ sem-ℕ ⟩
⟦ t₂ ⟧T ℕ-sig (map ⟦_⟧ env)
≡⟨ toℕ-term-homo cont t₂ env ⟩
⟦ ⟦ t₂ ⟧T (Numeral-sig b (suc d) cont) env ⟧
∎
toℕ-pred-ℕ⇒Numeral cont (p →P q) env sem-ℕ ⟦p⟧P-Numeral =
toℕ-pred-ℕ⇒Numeral cont q env
(sem-ℕ (toℕ-pred-Numeral⇒ℕ cont p env ⟦p⟧P-Numeral))
toℕ-pred-ℕ⇒Numeral cont (∀P pred) env sem-ℕ x =
toℕ-pred-ℕ⇒Numeral cont pred (x ∷ env) (sem-ℕ ⟦ x ⟧)
toℕ-pred-Numeral⇒ℕ : ∀ {b d n}
→ (cont : True (Continuous? b (suc d) 0))
→ (pred : Predicate n)
→ (env : Vec (Numeral b (suc d) 0) n)
→ ⟦ pred ⟧P (Numeral-sig b (suc d) cont) env
→ ⟦ pred ⟧P ℕ-sig (map ⟦_⟧ env)
toℕ-pred-Numeral⇒ℕ {b} {d} cont (t₁ ≋P t₂) env sem-Num =
begin
⟦ t₁ ⟧T ℕ-sig (map ⟦_⟧ env)
≡⟨ toℕ-term-homo cont t₁ env ⟩
⟦ ⟦ t₁ ⟧T (Numeral-sig b (suc d) cont) env ⟧
≡⟨ sem-Num ⟩
⟦ ⟦ t₂ ⟧T (Numeral-sig b (suc d) cont) env ⟧
≡⟨ sym (toℕ-term-homo cont t₂ env) ⟩
⟦ t₂ ⟧T ℕ-sig (map ⟦_⟧ env)
∎
toℕ-pred-Numeral⇒ℕ cont (p →P q) env sem-Num ⟦p⟧P
= toℕ-pred-Numeral⇒ℕ cont q env
(sem-Num
(toℕ-pred-ℕ⇒Numeral cont p env ⟦p⟧P))
toℕ-pred-Numeral⇒ℕ {b} {d} cont (∀P pred) env sem-Num n with n ≟ ⟦ fromℕ {cont = cont} n z≤n ⟧
toℕ-pred-Numeral⇒ℕ {b} {d} cont (∀P pred) env sem-Num n | yes eq
rewrite eq
= toℕ-pred-Numeral⇒ℕ cont pred (fromℕ {cont = cont} n _ ∷ env) (sem-Num (fromℕ {cont = cont} n _))
toℕ-pred-Numeral⇒ℕ {b} {d} cont (∀P pred) env sem-Num n | no ¬eq
= contradiction (sym (fromℕ-toℕ cont n _)) ¬eq
module Example-2 where
+-comm-Predicate : Predicate 0
+-comm-Predicate = ∀P (∀P ((var (# 1) ∔ var (# 0)) ≋P (var (# 0) ∔ var (# 1))))
+-comm-ℕ : ∀ a b → a + b ≡ b + a
+-comm-ℕ = +-comm
+-comm-Num : ∀ {b d}
→ {cont : True (Continuous? b (suc d) 0)}
→ (xs ys : Numeral b (suc d) 0)
→ (_⊹_ {cont = cont} xs ys) ≋ (_⊹_ {cont = cont} ys xs)
+-comm-Num {cont = cont} = toℕ-pred-ℕ⇒Numeral cont +-comm-Predicate [] +-comm-ℕ
--
-- mutual
-- toℕ-pred-ℕ⇒Bij : ∀ {b n}
-- → (pred : Predicate n)
-- → (env : Vec (Bij (suc b)) n)
-- → ⟦ pred ⟧ ℕ-sig (map toℕ env)
-- → ⟦ pred ⟧ (BijN-sig (suc b)) env
-- toℕ-pred-ℕ⇒Bij {b} (t₁ ≋ t₂) env ⟦t₁≈t₂⟧ℕ
-- rewrite toℕ-term-homo t₁ env | toℕ-term-homo t₂ env -- ⟦ t₁ ⟧T ℕ-sig (map toℕ env) ≡ toℕ (⟦ t₁ ⟧T (Bij-sig (suc b)) env)
-- = toℕ-injective (⟦ t₁ ⟧T (BijN-sig (suc b)) env) (⟦ t₂ ⟧T (BijN-sig (suc b)) env) ⟦t₁≈t₂⟧ℕ
-- toℕ-pred-ℕ⇒Bij (p ⇒ q) env ⟦p→q⟧ℕ ⟦p⟧B = toℕ-pred-ℕ⇒Bij q env (⟦p→q⟧ℕ (toℕ-pred-Bij⇒ℕ p env ⟦p⟧B))
-- toℕ-pred-ℕ⇒Bij (All p) env ⟦λx→p⟧ℕ x = toℕ-pred-ℕ⇒Bij p (x ∷ env) (⟦λx→p⟧ℕ (toℕ x))
--
-- toℕ-pred-Bij⇒ℕ : ∀ {b n}
-- → (pred : Predicate n)
-- → (env : Vec (Bij (suc b)) n)
-- → ⟦ pred ⟧ (BijN-sig (suc b)) env
-- → ⟦ pred ⟧ ℕ-sig (map toℕ env)
-- toℕ-pred-Bij⇒ℕ (t₁ ≋ t₂) env ⟦t₁≈t₂⟧B
-- rewrite toℕ-term-homo t₁ env | toℕ-term-homo t₂ env
-- = cong toℕ ⟦t₁≈t₂⟧B
-- toℕ-pred-Bij⇒ℕ (p ⇒ q) env ⟦p→q⟧B ⟦p⟧ℕ = toℕ-pred-Bij⇒ℕ q env (⟦p→q⟧B (toℕ-pred-ℕ⇒Bij p env ⟦p⟧ℕ))
-- toℕ-pred-Bij⇒ℕ {b} (All p) env ⟦λx→p⟧B x
-- rewrite (sym (toℕ-fromℕ b x)) -- rewritting "x" to "toℕ (fromℕ x)"
-- = toℕ-pred-Bij⇒ℕ p (fromℕ x ∷ env) (⟦λx→p⟧B (fromℕ x))
-- fromℕ-term-homo : ∀ {b n}
-- → (term : Term n)
-- → (env : Vec ℕ n)
-- → ⟦ term ⟧T (Bij-sig (suc b)) (map fromℕ env) ≡ fromℕ (⟦ term ⟧T ℕ-sig env)
-- fromℕ-term-homo (var i) env = lookup-map fromℕ env i
-- fromℕ-term-homo {b} {n} (t₁ ∔ t₂) env
-- rewrite fromℕ-term-homo {b} {n} t₁ env | fromℕ-term-homo {b} {n} t₂ env
-- = sym (fromℕ-⊹-homo (⟦ t₁ ⟧T (sig ℕ _+_ _≡_) env) (⟦ t₂ ⟧T (sig ℕ _+_ _≡_) env))
--
--
-- ------------------------------------------------------------------------
--
-- open import Data.Nat.Properties.Simple
--
--
-- testExtract : {pred : Predicate 0}
-- → ⟦ pred ⟧ ℕ-sig []
-- → Predicate 0
-- testExtract {pred} ⟦pred⟧ℕ = pred
--
-- ∔-comm : Predicate 0
-- ∔-comm = testExtract {All (All (var (suc zero) ∔ var zero ≋ var zero ∔ var (suc zero)))} +-comm
--
-- ∔-assoc : Predicate 0
-- ∔-assoc = testExtract {All (All (All (var (suc (suc zero)) ∔ var (suc zero) ∔ var zero ≋ var (suc (suc zero)) ∔ (var (suc zero) ∔ var zero))))} +-assoc
|
RIDOT also has long-range plans to construct direct freeway connections linking Route 4 north with I-95 south and I-95 north with Route 4 south. As of November 2010, environmental studies are being prepared for a reconfiguration of the interchange.
|
include defs
# dsfree --- return a block of storage to the available space list
subroutine dsfree (block)
pointer block
DS_COMMON
pointer p0, p, q
integer n, junk
character con (10)
p0 = block - DS_OHEAD
n = Mem (p0 + DS_SIZE)
q = DS_AVAIL
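   # walk the address-ordered free list to the spot where p0 belongs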
repeat {
p = Mem (q + DS_LINK)
if (p == LAMBDA | p > p0)
break
q = p
}
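   # p0 falling inside the previous free block means it was never allocated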
if (q + Mem (q + DS_SIZE) > p0) {
call remark ("in dsfree: attempt to free unallocated block.")
call remark ("type 'c' to continue.")
junk = getlin (con, STDIN)
if (con (1) != LETC & con (1) != BIGC)
call endst
return # do not attempt to free the block
}
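   # coalesce with the following free block when they are adjacent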
if (p0 + n == p & p != LAMBDA) {
n = n + Mem (p + DS_SIZE)
Mem (p0 + DS_LINK) = Mem (p + DS_LINK)
}
else
Mem (p0 + DS_LINK) = p
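   # coalesce with the preceding free block when they are adjacent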
if (q + Mem (q + DS_SIZE) == p0) {
Mem (q + DS_SIZE) = Mem (q + DS_SIZE) + n
Mem (q + DS_LINK) = Mem (p0 + DS_LINK)
}
else {
Mem (q + DS_LINK) = p0
Mem (p0 + DS_SIZE) = n
}
return
end
|
[STATEMENT]
lemma del_list_idem: "x \<notin> set(map fst xs) \<Longrightarrow> del_list x xs = xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<notin> set (map fst xs) \<Longrightarrow> del_list x xs = xs
[PROOF STEP]
by (induct xs) auto |
(*-------------------------------------------*
| CSP-Prover on Isabelle2004 |
| November 2004 |
| June 2005 (modified) |
| July 2005 (modified) |
| |
| CSP-Prover on Isabelle2005 |
| October 2005 (modified) |
| |
| CSP-Prover on Isabelle2007 |
| January 2008 (modified) |
| |
| CSP-Prover on Isabelle2016 |
| April 2016 (modified) |
| |
| Yoshinao Isobe (AIST JAPAN) |
*-------------------------------------------*)
theory Infra_fun
imports Infra_order
begin
(*****************************************************
Small lemmas for functions
*****************************************************)
(*
consts
inv_on :: "'a set => ('a => 'b) => 'b => 'a"
defs
inv_on_def : "inv_on A f y == (SOME x. x:A & f x = y)"
*)
definition
inv_on :: "'a set => ('a => 'b) => 'b => 'a"
where
inv_on_def : "inv_on A f y == (SOME x. x:A & f x = y)"
lemma inv_f_f_on: "[| inj_on f A ; x : A |] ==> inv_on A f (f x) = x"
apply (simp add: inj_on_def)
apply (simp add: inv_on_def)
apply (rule someI2)
apply (rule conjI)
apply (simp)
apply (simp)
apply (drule_tac x="x" in bspec, simp)
apply (drule_tac x="xa" in bspec, simp)
apply (simp)
done
(*****************************************************
fun (Product)
*****************************************************)
(*******************************
<= in fun
*******************************)
(* instance fun :: (type, order) ord <--- Isabelle 2005 *)
instance "fun" :: (type, order) ord (* <--- Isabelle 2007 *)
by (intro_classes)
(* Isabelle 2005
defs (overloaded)
order_prod_def:
"xp <= yp == ALL x. (xp x <= yp x)"
order_less_prod_def:
"xp < yp == (ALL x. (xp x <= yp x)) &
(EX x. (xp x ~= yp x))"
order_prod_def --> le_fun_def
order_less_prod_def --> less_fun_def (in Isabelle 2007)
*)
lemmas order_prod_def = le_fun_def
lemmas order_less_prod_def = less_fun_def
lemma fold_order_prod_def: "(ALL x. (xp x <= yp x)) = (xp <= yp)"
by (simp add: le_fun_def)
(*** order in prod ***)
(* In Isabelle 2007 (Fun.tht)
instance "fun" :: (type, order) order
by default
(auto simp add: le_fun_def less_fun_def expand_fun_eq
intro: order_trans order_antisym)
*)
(*
instance fun :: (type,order) order
apply (intro_classes)
apply (unfold order_prod_def order_less_prod_def)
apply (simp)
apply (blast intro: order_trans)
apply (simp add: expand_fun_eq)
apply (blast intro: order_antisym)
apply (auto simp add: expand_fun_eq)
done
*)
lemma order_prod_inv: "[| ALL x. f x <= g x |] ==> f <= g"
by (simp add: le_fun_def)
(*****************************************************
fun (Projection)
*****************************************************)
(* isabelle 2013
consts
proj_fun :: "'i => ('i => 'x) => 'x"
defs
proj_fun_def : "proj_fun i == (%x. x i)"
*)
definition
proj_fun :: "'i => ('i => 'x) => 'x"
where
proj_fun_def : "proj_fun i == (%x. x i)"
(*** lub for projection ***)
(*** only if ***)
lemma prod_LUB_decompo_only_if:
"x isLUB X ==> ALL i. (proj_fun i) x isLUB (proj_fun i) ` X"
apply (simp add: proj_fun_def)
apply (simp add: isLUB_def)
apply (auto)
(*** upper ***)
apply (simp add: isUB_def)
apply (auto)
apply (drule_tac x="xa" in spec)
apply (simp add: le_fun_def)
(*** least ***)
apply (drule_tac x="(%j. if (i=j) then y else (x j))" in spec)
apply (drule mp)
apply (simp add: isUB_def)
apply (intro allI impI)
apply (simp add: le_fun_def)
apply (simp add: le_fun_def)
apply (drule_tac x="i" in spec)
apply (simp)
done
(*** if ***)
lemma prod_LUB_decompo_if:
"ALL i. (proj_fun i) x isLUB (proj_fun i) ` X ==> x isLUB X"
apply (simp add: isLUB_def)
apply (rule conjI)
(*** upper ***)
apply (simp add: isUB_def)
apply (auto)
apply (simp add: le_fun_def)
apply (intro allI)
apply (rename_tac y i)
apply (drule_tac x="i" in spec)
apply (erule conjE)
apply (drule_tac x="y i" in spec)
apply (simp add: proj_fun_def)
(*** least ***)
apply (simp add: le_fun_def)
apply (intro allI)
apply (rename_tac y i)
apply (drule_tac x="i" in spec)
apply (erule conjE)
apply (drule_tac x="y i" in spec)
apply (simp add: proj_fun_def)
apply (drule mp)
apply (simp add: isUB_def)
apply (intro allI impI)
apply (simp add: image_def)
apply (erule bexE)
apply (drule_tac x="xa" in spec)
apply (simp add: le_fun_def)
apply (simp)
done
(*** iff ***)
lemma prod_LUB_decompo:
"x isLUB X = (ALL i. (proj_fun i) x isLUB (proj_fun i) ` X)"
apply (rule iffI)
apply (simp add: prod_LUB_decompo_only_if)
apply (simp add: prod_LUB_decompo_if)
done
(*****************************************************
mono
*****************************************************)
lemma prod_mono_only_if:
"mono f ==> mono ((proj_fun i) o f)"
apply (simp add: proj_fun_def)
apply (simp add: mono_def)
apply (simp add: le_fun_def)
done
lemma prod_mono_if:
"(ALL i. mono ((proj_fun i) o f)) ==> mono f"
apply (simp add: mono_def)
apply (simp add: proj_fun_def)
apply (simp add: le_fun_def)
done
lemma prod_mono:
"mono f = (ALL i. mono ((proj_fun i) o f))"
apply (rule iffI)
apply (simp add: prod_mono_only_if)
apply (simp add: prod_mono_if)
done
(**********************************************************
some preparation for fixed point induction
**********************************************************)
(* Isabelle 2016
consts
inductivefun :: "('a => bool) => ('a => 'a) => bool"
Ref_fun :: "'a::order => 'a => bool"
Rev_fun :: "'a::order => 'a => bool"
defs
inductivefun_def :
"inductivefun R f == (ALL x. R x --> R (f x))"
Ref_fun_def : "Ref_fun X == (%Y. (X <= Y))"
Rev_fun_def : "Rev_fun X == (%Y. (Y <= X))"
*)
definition
inductivefun :: "('a => bool) => ('a => 'a) => bool"
where
"inductivefun R f == (ALL x. R x --> R (f x))"
definition
Ref_fun :: "'a::order => 'a => bool"
where
"Ref_fun X == (%Y. (X <= Y))"
definition
Rev_fun :: "'a::order => 'a => bool"
where
"Rev_fun X == (%Y. (Y <= X))"
lemma inductivefun_all_n:
(* "[| inductivefun R f ; R x |] ==> (ALL n. R ((f ^ n) x))" Isabelle2009 *)
"[| inductivefun R f ; R x |] ==> (ALL n. R ((f ^^ n) x))"
apply (intro allI)
apply (induct_tac n)
apply (simp)
apply (simp add: inductivefun_def)
done
(*----------------------------------------------------------*
simplify inverse functions
*----------------------------------------------------------*)
lemma Pr1_inv_inj[simp]:
"inj f ==> (ALL x. Pr (inv f x)) = (ALL y. Pr y)"
apply (auto)
apply (drule_tac x="f y" in spec)
by (simp)
lemma Pr2_inv_inj[simp]:
"inj f ==> (ALL x. Pr1 (Pr2 (inv f x))) = (ALL y. Pr1 (Pr2 y))"
apply (auto)
apply (drule_tac x="f y" in spec)
by (simp)
lemma Pr3_inv_inj[simp]:
"inj f ==> (ALL x. Pr1 (Pr2 (Pr3 (inv f x)))) = (ALL y. Pr1 (Pr2 (Pr3 y)))"
apply (auto)
apply (drule_tac x="f y" in spec)
by (simp)
end
|
The Importance of Being Earnest, A Trivial Comedy for Serious People is a play by Oscar Wilde. First performed on 14 February 1895 at the St James's Theatre in London, it is a farcical comedy in which the protagonists maintain fictitious personæ to escape burdensome social obligations. Working within the social conventions of late Victorian London, the play's major themes are the triviality with which it treats institutions as serious as marriage, and the resulting satire of Victorian ways. Contemporary reviews all praised the play's humour, though some were cautious about its explicit lack of social messages, while others foresaw the modern consensus that it was the culmination of Wilde's artistic career so far. Its high farce and witty dialogue have helped make The Importance of Being Earnest Wilde's most enduringly popular play.
|
module Cursor
import Data.Fin
%default total
public export
data Cursor : Nat -> Type where
MkCursor : Nat -> Fin n -> Cursor n
export
right : Cursor n -> Cursor n
right (MkCursor x y) = MkCursor (S x) y
export
left : Cursor n -> Cursor n
left (MkCursor (S x) y) = MkCursor x y
left (MkCursor Z y) = MkCursor Z y
dec : Fin n -> Fin n
dec FZ = FZ
dec (FS k) = weaken k
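-- dec saturates at zero: dec (FS FZ) = FZ and dec FZ = FZ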
export
up : Cursor n -> Cursor n
up (MkCursor x y) = MkCursor x (dec y)
export
down : Cursor n -> Cursor (S n)
down (MkCursor x y) = MkCursor x (shift 1 y)
export
downInBounds : Cursor n -> Cursor n
downInBounds originalCursor@(MkCursor x y) =
case strengthen $ shift 1 y of
Left _ => originalCursor
Right newY => MkCursor x newY
--todo: is it possible to use something like strengthenN and shift?
export
downWithinBounds : Nat -> Cursor n -> Cursor n
downWithinBounds d originalCursor@(MkCursor x y) {n} =
case natToFin ((finToNat y) + d) n of
Nothing => originalCursor
Just newY => MkCursor x newY
export
lineStart : Cursor n -> Cursor n
lineStart (MkCursor _ y) = MkCursor Z y
|
[STATEMENT]
lemma invariantStateSpace_thm_5:
fixes ss vs domain
assumes "(stateSpace ss domain)"
shows "(stateSpace (ss_proj ss vs) (domain \<inter> vs))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. stateSpace (ss_proj ss vs) (domain \<inter> vs)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
stateSpace ss domain
goal (1 subgoal):
1. stateSpace (ss_proj ss vs) (domain \<inter> vs)
[PROOF STEP]
unfolding stateSpace_def ss_proj_def
[PROOF STATE]
proof (prove)
using this:
\<forall>s. s \<in> ss \<longrightarrow> fmdom' s = domain
goal (1 subgoal):
1. \<forall>s. s \<in> fmrestrict_set vs ` ss \<longrightarrow> fmdom' s = domain \<inter> vs
[PROOF STEP]
by (metis (no_types, lifting) fmdom'_fmrestrict_set imageE inf_commute) |
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad
Operations on set-valued functions, aka partial multifunctions, aka relations.
-/
import tactic.ext tactic.interactive data.set.lattice order.complete_lattice
variables {α : Type*} {β : Type*} {γ : Type*}
def rel (α : Type*) (β : Type*):= α → β → Prop
namespace rel
variables {δ : Type*} (r : rel α β)
instance : lattice.complete_lattice (rel α β) :=
by unfold rel; apply_instance
def inv : rel β α := flip r
lemma inv_def (x : α) (y : β) : r.inv y x ↔ r x y := iff.rfl
lemma inv_inv : inv (inv r) = r := by { ext x y, reflexivity }
def dom := {x | ∃ y, r x y}
def codom := {y | ∃ x, r x y}
lemma codom_inv : r.inv.codom = r.dom := by { ext x y, reflexivity }
lemma dom_inv : r.inv.dom = r.codom := by { ext x y, reflexivity}
def comp (r : rel α β) (s : rel β γ) : rel α γ :=
λ x z, ∃ y, r x y ∧ s y z
local infixr ` ∘ ` :=rel.comp
lemma comp_assoc (r : rel α β) (s : rel β γ) (t : rel γ δ) :
(r ∘ s) ∘ t = r ∘ s ∘ t :=
begin
unfold comp, ext x w, split,
{ rintros ⟨z, ⟨y, rxy, syz⟩, tzw⟩, exact ⟨y, rxy, z, syz, tzw⟩ },
rintros ⟨y, rxy, z, syz, tzw⟩, exact ⟨z, ⟨y, rxy, syz⟩, tzw⟩
end
@[simp]
lemma comp_right_id (r : rel α β) : r ∘ @eq β = r :=
by { unfold comp, ext y, simp }
@[simp]
lemma comp_left_id (r : rel α β) : @eq α ∘ r = r :=
by { unfold comp, ext x, simp }
lemma inv_id : inv (@eq α) = @eq α :=
by { ext x y, split; apply eq.symm }
lemma inv_comp (r : rel α β) (s : rel β γ) : inv (r ∘ s) = inv s ∘ inv r :=
by { ext x z, simp [comp, inv, flip, and.comm] }
def image (s : set α) : set β := {y | ∃ x ∈ s, r x y}
lemma mem_image (y : β) (s : set α) : y ∈ image r s ↔ ∃ x ∈ s, r x y :=
iff.refl _
lemma image_mono {s t : set α} (h : s ⊆ t) : r.image s ⊆ r.image t :=
assume y ⟨x, xs, rxy⟩, ⟨x, h xs, rxy⟩
lemma image_inter (s t : set α) : r.image (s ∩ t) ⊆ r.image s ∩ r.image t :=
assume y ⟨x, ⟨xs, xt⟩, rxy⟩, ⟨⟨x, xs, rxy⟩, ⟨x, xt, rxy⟩⟩
lemma image_union (s t : set α) : r.image (s ∪ t) = r.image s ∪ r.image t :=
set.subset.antisymm
(λ y ⟨x, xst, rxy⟩,
begin
cases xst with xs xt,
{ left, exact ⟨x, xs, rxy⟩ },
right, exact ⟨x, xt, rxy⟩
end)
(λ y ymem,
begin
rcases ymem with ⟨x, xs, rxy⟩ | ⟨x, xt, rxy⟩; existsi x,
{ split, { left, exact xs }, exact rxy},
split, { right, exact xt }, exact rxy
end)
@[simp]
lemma image_id (s : set α) : image (@eq α) s = s :=
by { ext x, simp [mem_image] }
lemma image_comp (s : rel β γ) (t : set α) : image (r ∘ s) t = image s (image r t) :=
begin
ext z, simp only [mem_image, comp], split,
{ rintros ⟨x, xt, y, rxy, syz⟩, exact ⟨y, ⟨x, xt, rxy⟩, syz⟩ },
rintros ⟨y, ⟨x, xt, rxy⟩, syz⟩, exact ⟨x, xt, y, rxy, syz⟩
end
lemma image_univ : r.image set.univ = r.codom := by { ext y, simp [mem_image, codom] }
def preimage (s : set β) : set α := image (inv r) s
lemma mem_preimage (x : α) (s : set β) : x ∈ preimage r s ↔ ∃ y ∈ s, r x y :=
iff.refl _
lemma preimage_def (s : set β) : preimage r s = {x | ∃ y ∈ s, r x y} :=
set.ext $ λ x, mem_preimage _ _ _
lemma preimage_mono {s t : set β} (h : s ⊆ t) : r.preimage s ⊆ r.preimage t :=
image_mono _ h
lemma preimage_inter (s t : set β) : r.preimage (s ∩ t) ⊆ r.preimage s ∩ r.preimage t :=
image_inter _ s t
lemma preimage_union (s t : set β) : r.preimage (s ∪ t) = r.preimage s ∪ r.preimage t :=
image_union _ s t
lemma preimage_id (s : set α) : preimage (@eq α) s = s :=
by simp only [preimage, inv_id, image_id]
lemma preimage_comp (s : rel β γ) (t : set γ) :
preimage (r ∘ s) t = preimage r (preimage s t) :=
by simp only [preimage, inv_comp, image_comp]
lemma preimage_univ : r.preimage set.univ = r.dom :=
by { rw [preimage, image_univ, codom_inv] }
def core (s : set β) := {x | ∀ y, r x y → y ∈ s}
lemma mem_core (x : α) (s : set β) : x ∈ core r s ↔ ∀ y, r x y → y ∈ s :=
iff.refl _
lemma core_mono {s t : set β} (h : s ⊆ t) : r.core s ⊆ r.core t :=
assume x h' y rxy, h (h' y rxy)
lemma core_inter (s t : set β) : r.core (s ∩ t) = r.core s ∩ r.core t :=
set.ext (by simp [mem_core, imp_and_distrib, forall_and_distrib])
lemma core_union (s t : set β) : r.core (s ∪ t) ⊇ r.core s ∪ r.core t :=
λ x,
begin
simp [mem_core], intro h, cases h with hs ht; intros y rxy,
{ left, exact hs y rxy },
right, exact ht y rxy
end
lemma core_univ : r.core set.univ = set.univ := set.ext (by simp [mem_core])
lemma core_id (s : set α): core (@eq α) s = s :=
by simp [core]
lemma core_comp (s : rel β γ) (t : set γ) :
core (r ∘ s) t = core r (core s t) :=
begin
ext x, simp [core, comp], split,
{ intros h y rxy z syz, exact h z y rxy syz },
intros h z y rzy syz, exact h y rzy z syz
end
def restrict_domain (s : set α) : rel {x // x ∈ s} β :=
λ x y, r x.val y
theorem image_subset_iff (s : set α) (t : set β) : image r s ⊆ t ↔ s ⊆ core r t :=
iff.intro
(λ h x xs y rxy, h ⟨x, xs, rxy⟩)
(λ h y ⟨x, xs, rxy⟩, h xs y rxy)
theorem core_preimage_gc : galois_connection (image r) (core r) :=
image_subset_iff _
end rel
namespace function
def graph (f : α → β) : rel α β := λ x y, f x = y
end function
namespace set
-- TODO: if image were defined with bounded quantification in corelib, the next two would
-- be definitional
lemma image_eq (f : α → β) (s : set α) : f '' s = (function.graph f).image s :=
by simp [set.image, function.graph, rel.image]
lemma preimage_eq (f : α → β) (s : set β) :
f ⁻¹' s = (function.graph f).preimage s :=
by simp [set.preimage, function.graph, rel.preimage, rel.inv, flip, rel.image]
lemma preimage_eq_core (f : α → β) (s : set β) :
f ⁻¹' s = (function.graph f).core s :=
by simp [set.preimage, function.graph, rel.core]
end set
|
From Hammer Require Import Hammer.
Require Export Coq.Classes.SetoidTactics.
Export Morphisms.ProperNotations.
Definition Setoid_Theory := @Equivalence.
Definition Build_Setoid_Theory := @Build_Equivalence.
Definition Seq_refl A Aeq (s : Setoid_Theory A Aeq) : forall x:A, Aeq x x.
Proof. hammer_hook "Setoid" "Setoid.Seq_refl".
unfold Setoid_Theory in s. intros ; reflexivity.
Defined.
Definition Seq_sym A Aeq (s : Setoid_Theory A Aeq) : forall x y:A, Aeq x y -> Aeq y x.
Proof. hammer_hook "Setoid" "Setoid.Seq_sym".
unfold Setoid_Theory in s. intros ; symmetry ; assumption.
Defined.
Definition Seq_trans A Aeq (s : Setoid_Theory A Aeq) : forall x y z:A, Aeq x y -> Aeq y z -> Aeq x z.
Proof. hammer_hook "Setoid" "Setoid.Seq_trans".
unfold Setoid_Theory in s. intros ; transitivity y ; assumption.
Defined.
Ltac trans_st x :=
idtac "trans_st on Setoid_Theory is OBSOLETE";
idtac "use transitivity on Equivalence instead";
match goal with
| H : Setoid_Theory _ ?eqA |- ?eqA _ _ =>
apply (Seq_trans _ _ H) with x; auto
end.
Ltac sym_st :=
idtac "sym_st on Setoid_Theory is OBSOLETE";
idtac "use symmetry on Equivalence instead";
match goal with
| H : Setoid_Theory _ ?eqA |- ?eqA _ _ =>
apply (Seq_sym _ _ H); auto
end.
Ltac refl_st :=
idtac "refl_st on Setoid_Theory is OBSOLETE";
idtac "use reflexivity on Equivalence instead";
match goal with
| H : Setoid_Theory _ ?eqA |- ?eqA _ _ =>
apply (Seq_refl _ _ H); auto
end.
Definition gen_st : forall A : Set, Setoid_Theory _ (@eq A).
Proof. hammer_hook "Setoid" "Setoid.gen_st".
constructor; congruence.
Qed.
|
For several seasons I was the play-by-play announcer and host of “Maccabi Haifa Basketball.” Each week I broadcast Haifa’s games in Israel’s top basketball league. The games were watched live around the world on Triangle Internet TV and seen on the Yes network in the United States. A selection of individual broadcasts can be found below.
-- Andreas, 2015-03-17
open import Common.Size
data ⊥ : Set where
data D (i : Size) : Set where
c : Size< i → D i
-- This definition of size predecessor should be forbidden...
module _ (i : Size) where
postulate
pred : Size< i
-- ...otherwise the injectivity test loops here.
iter : ∀ i → D i → ⊥
iter i (c j) = iter j (c (pred j))
loop : Size → ⊥
loop i = iter i (c (pred i))
absurd : ⊥
absurd = FIXME loop ∞
-- Testcase temporarily mutilated, original error
-- -Issue1428c.agda:13,5-19
-- -We don't like postulated sizes in parametrized modules.
-- +Issue1428c.agda:23,10-15
-- +Not in scope:
-- + FIXME at Issue1428c.agda:23,10-15
-- +when scope checking FIXME
|
/-
Copyright (c) 2022 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import combinatorics.simple_graph.basic
import data.finset.pairwise
/-!
# Graph cliques
This file defines cliques in simple graphs. A clique is a set of vertices that are pairwise
adjacent.
## Main declarations
* `simple_graph.is_clique`: Predicate for a set of vertices to be a clique.
* `simple_graph.is_n_clique`: Predicate for a set of vertices to be a `n`-clique.
* `simple_graph.clique_finset`: Finset of `n`-cliques of a graph.
* `simple_graph.clique_free`: Predicate for a graph to have no `n`-cliques.
## TODO
* Clique numbers
* Going back and forth between cliques and complete subgraphs or embeddings of complete graphs.
* Do we need `clique_set`, a version of `clique_finset` for infinite graphs?
-/
open finset fintype
namespace simple_graph
variables {α : Type*} (G H : simple_graph α)
/-! ### Cliques -/
section clique
variables {s t : set α}
/-- A clique in a graph is a set of vertices that are pairwise adjacent. -/
abbreviation is_clique (s : set α) : Prop := s.pairwise G.adj
lemma is_clique_iff : G.is_clique s ↔ s.pairwise G.adj := iff.rfl
instance [decidable_eq α] [decidable_rel G.adj] {s : finset α} : decidable (G.is_clique s) :=
decidable_of_iff' _ G.is_clique_iff
variables {G H}
lemma is_clique.mono (h : G ≤ H) : G.is_clique s → H.is_clique s :=
by { simp_rw is_clique_iff, exact set.pairwise.mono' h }
lemma is_clique.subset (h : t ⊆ s) : G.is_clique s → G.is_clique t :=
by { simp_rw is_clique_iff, exact set.pairwise.mono h }
@[simp] lemma is_clique_bot_iff : (⊥ : simple_graph α).is_clique s ↔ (s : set α).subsingleton :=
set.pairwise_bot_iff
alias is_clique_bot_iff ↔ is_clique.subsingleton _
end clique
/-! ### `n`-cliques -/
section n_clique
variables {n : ℕ} {s : finset α}
/-- A `n`-clique in a graph is a set of `n` vertices which are pairwise connected. -/
structure is_n_clique (n : ℕ) (s : finset α) : Prop :=
(clique : G.is_clique s)
(card_eq : s.card = n)
lemma is_n_clique_iff : G.is_n_clique n s ↔ G.is_clique s ∧ s.card = n :=
⟨λ h, ⟨h.1, h.2⟩, λ h, ⟨h.1, h.2⟩⟩
instance [decidable_eq α] [decidable_rel G.adj] {n : ℕ} {s : finset α} :
decidable (G.is_n_clique n s) :=
decidable_of_iff' _ G.is_n_clique_iff
variables {G H}
lemma is_n_clique.mono (h : G ≤ H) : G.is_n_clique n s → H.is_n_clique n s :=
by { simp_rw is_n_clique_iff, exact and.imp_left (is_clique.mono h) }
@[simp] lemma is_n_clique_bot_iff : (⊥ : simple_graph α).is_n_clique n s ↔ n ≤ 1 ∧ s.card = n :=
begin
rw [is_n_clique_iff, is_clique_bot_iff],
refine and_congr_left _,
rintro rfl,
exact card_le_one.symm,
end
variables [decidable_eq α] {a b c : α}
lemma is_3_clique_triple_iff : G.is_n_clique 3 {a, b, c} ↔ G.adj a b ∧ G.adj a c ∧ G.adj b c :=
begin
simp only [is_n_clique_iff, is_clique_iff, set.pairwise_insert_of_symmetric G.symm, coe_insert],
have : ¬ 1 + 1 = 3 := by norm_num,
by_cases hab : a = b; by_cases hbc : b = c; by_cases hac : a = c;
subst_vars; simp [G.ne_of_adj, and_rotate, *],
end
lemma is_3_clique_iff :
G.is_n_clique 3 s ↔ ∃ a b c, G.adj a b ∧ G.adj a c ∧ G.adj b c ∧ s = {a, b, c} :=
begin
refine ⟨λ h, _, _⟩,
{ obtain ⟨a, b, c, -, -, -, rfl⟩ := card_eq_three.1 h.card_eq,
refine ⟨a, b, c, _⟩,
rw is_3_clique_triple_iff at h,
tauto },
{ rintro ⟨a, b, c, hab, hbc, hca, rfl⟩,
exact is_3_clique_triple_iff.2 ⟨hab, hbc, hca⟩ }
end
end n_clique
/-! ### Graphs without cliques -/
section clique_free
variables {m n : ℕ}
/-- `G.clique_free n` means that `G` has no `n`-cliques. -/
def clique_free (n : ℕ) : Prop := ∀ t, ¬ G.is_n_clique n t
variables {G H}
lemma clique_free_bot (h : 2 ≤ n) : (⊥ : simple_graph α).clique_free n :=
begin
rintro t ht,
rw is_n_clique_bot_iff at ht,
linarith,
end
lemma clique_free.mono (h : m ≤ n) : G.clique_free m → G.clique_free n :=
begin
rintro hG s hs,
obtain ⟨t, hts, ht⟩ := s.exists_smaller_set _ (h.trans hs.card_eq.ge),
exact hG _ ⟨hs.clique.subset hts, ht⟩,
end
lemma clique_free.anti (h : G ≤ H) : H.clique_free n → G.clique_free n :=
forall_imp $ λ s, mt $ is_n_clique.mono h
end clique_free
/-! ### Set of cliques -/
section clique_set
variables (G) {n : ℕ} {a b c : α} {s : finset α}
/-- The `n`-cliques in a graph as a set. -/
def clique_set (n : ℕ) : set (finset α) := {s | G.is_n_clique n s}
lemma mem_clique_set_iff : s ∈ G.clique_set n ↔ G.is_n_clique n s := iff.rfl
@[simp] lemma clique_set_eq_empty_iff : G.clique_set n = ∅ ↔ G.clique_free n :=
by simp_rw [clique_free, set.eq_empty_iff_forall_not_mem, mem_clique_set_iff]
alias clique_set_eq_empty_iff ↔ _ clique_free.clique_set
attribute [protected] clique_free.clique_set
variables {G H}
@[mono] lemma clique_set_mono (h : G ≤ H) : G.clique_set n ⊆ H.clique_set n :=
λ _, is_n_clique.mono h
lemma clique_set_mono' (h : G ≤ H) : G.clique_set ≤ H.clique_set := λ _, clique_set_mono h
end clique_set
/-! ### Finset of cliques -/
section clique_finset
variables (G) [fintype α] [decidable_eq α] [decidable_rel G.adj] {n : ℕ} {a b c : α} {s : finset α}
/-- The `n`-cliques in a graph as a finset. -/
def clique_finset (n : ℕ) : finset (finset α) := univ.filter $ G.is_n_clique n
lemma mem_clique_finset_iff : s ∈ G.clique_finset n ↔ G.is_n_clique n s :=
mem_filter.trans $ and_iff_right $ mem_univ _
@[simp] lemma coe_clique_finset (n : ℕ) : (G.clique_finset n : set (finset α)) = G.clique_set n :=
set.ext $ λ _, mem_clique_finset_iff _
@[simp] lemma clique_finset_eq_empty_iff : G.clique_finset n = ∅ ↔ G.clique_free n :=
by simp_rw [clique_free, eq_empty_iff_forall_not_mem, mem_clique_finset_iff]
alias clique_finset_eq_empty_iff ↔ _ _root_.simple_graph.clique_free.clique_finset
attribute [protected] clique_free.clique_finset
variables {G} [decidable_rel H.adj]
@[mono] lemma clique_finset_mono (h : G ≤ H) : G.clique_finset n ⊆ H.clique_finset n :=
monotone_filter_right _ $ λ _, is_n_clique.mono h
end clique_finset
end simple_graph
|
In some studies, age > 60 years and an elevated lactate dehydrogenase level were also associated with poorer outcomes. As with most forms of cancer, performance status (i.e. the general physical condition and activity level of the person) plays a major role in prognosis as well.
|
module plfa.Induction where
import Relation.Binary.PropositionalEquality as Eq
open Eq using (_≡_; refl; cong; sym)
open Eq.≡-Reasoning using (begin_; _≡⟨⟩_; _≡⟨_⟩_ ;_∎)
open import Data.Nat using (ℕ; zero; suc; _+_; _*_; _∸_; _^_)
-- @practice: exercise `operators` start
--
-- 1. _+_ _*_: Unit (Zero), Associativity, Commutativity
-- operator _*_ distributes over operator _+_ from the left and right.
--
-- 2. x operator of matrices: Unit (Unit matrix),
-- Associativity (Associativity of the Linear transformation)
-- No Commutativity
-- @practice: exercise `operators` end
_ : (3 + 4) + 5 ≡ 3 + (4 + 5)
_ =
begin
(3 + 4) + 5
≡⟨⟩
7 + 5
≡⟨⟩
12
≡⟨⟩
3 + 9
≡⟨⟩
3 + (4 + 5)
∎
+-assoc : ∀ (m n p : ℕ) → (m + n) + p ≡ m + (n + p)
+-assoc zero n p =
begin
(zero + n) + p
≡⟨⟩
n + p
≡⟨⟩
zero + (n + p)
∎
+-assoc (suc m) n p =
begin
(suc m + n) + p
≡⟨⟩
suc (m + n) + p
≡⟨⟩
suc ((m + n) + p)
≡⟨ cong suc (+-assoc m n p) ⟩
suc (m + (n + p))
≡⟨⟩
suc m + (n + p)
∎
-- examples start
+-assoc-2 : ∀ (n p : ℕ) → (2 + n) + p ≡ 2 + (n + p)
+-assoc-2 n p =
begin
(2 + n) + p
≡⟨⟩
suc (1 + n) + p
≡⟨⟩
suc ((1 + n) + p)
≡⟨ cong suc (+-assoc-1 n p) ⟩
suc (1 + (n + p))
≡⟨⟩
2 + (n + p)
∎
where
+-assoc-1 : ∀ (n p : ℕ) → (1 + n) + p ≡ 1 + (n + p)
+-assoc-1 n p =
begin
(1 + n) + p
≡⟨⟩
suc (0 + n) + p
≡⟨⟩
suc ((0 + n) + p)
≡⟨ cong suc (+-assoc-0 n p) ⟩
suc (0 + (n + p))
≡⟨⟩
1 + (n + p)
∎
where
+-assoc-0 : ∀ (n p : ℕ) → (0 + n) + p ≡ 0 + (n + p)
+-assoc-0 n p =
begin
(0 + n) + p
≡⟨⟩
n + p
≡⟨⟩
0 + (n + p)
∎
-- examples end
+-identityʳ : ∀ ( m : ℕ ) → m + zero ≡ m
+-identityʳ zero =
begin
zero + zero
≡⟨⟩
zero
∎
+-identityʳ (suc m) =
begin
suc m + zero
≡⟨⟩
suc (m + zero)
≡⟨ cong suc (+-identityʳ m) ⟩
suc m
∎
+-suc : ∀ ( m n : ℕ ) → m + suc n ≡ suc ( m + n )
+-suc zero n =
begin
zero + suc n
≡⟨⟩
suc n
≡⟨⟩
suc (zero + n)
∎
+-suc (suc m) n =
begin
suc m + suc n
≡⟨⟩
suc ( m + suc n )
≡⟨ cong suc (+-suc m n)⟩
suc ( suc ( m + n ))
≡⟨⟩
suc ( suc m + n)
∎
+-comm : ∀ ( m n : ℕ ) → m + n ≡ n + m
+-comm m zero =
begin
m + zero
≡⟨ +-identityʳ m ⟩
m
≡⟨⟩
zero + m
∎
+-comm m (suc n) =
begin
m + suc n
≡⟨ +-suc m n ⟩
suc (m + n)
≡⟨ cong suc ( +-comm m n )⟩
suc (n + m)
≡⟨⟩
suc n + m
∎
+-rearrange : ∀ (m n p q : ℕ) → (m + n) + (p + q) ≡ m + (n + p) + q
+-rearrange m n p q =
begin
(m + n) + (p + q)
≡⟨ +-assoc m n (p + q) ⟩
m + (n + (p + q))
≡⟨ cong ( m +_ ) (sym (+-assoc n p q)) ⟩
m + ((n + p) + q)
≡⟨ sym (+-assoc m (n + p) q)⟩
(m + (n + p)) + q
∎
-- @stretch: exercise `finite-+-assoc` start
-- Day 0
-- Day 1
-- 0: ℕ
--Day 2
-- 1: ℕ
-- (0 + 0) + 0 ≡ 0 + (0 + 0)
-- Day 3
-- 2: ℕ
-- (0 + 0) + 1 ≡ 0 + (0 + 1)
-- (0 + 1) + 0 ≡ 0 + (1 + 0)
-- (0 + 1) + 1 ≡ 0 + (1 + 1)
-- (1 + 0) + 0 ≡ 1 + (0 + 0)
-- (1 + 0) + 1 ≡ 1 + (0 + 1)
-- (1 + 1) + 0 ≡ 1 + (1 + 0)
-- (1 + 1) + 1 ≡ 1 + (1 + 1)
-- Day 4
-- 3 : ℕ
-- (0 + 0) + 2 ≡ 0 + (0 + 2)
-- (0 + 1) + 2 ≡ 0 + (1 + 2)
-- (0 + 2) + 0 ≡ 0 + (2 + 0)
-- (0 + 2) + 1 ≡ 0 + (2 + 1)
-- (0 + 2) + 2 ≡ 0 + (2 + 2)
-- (1 + 0) + 2 ≡ 1 + (0 + 2)
-- (1 + 1) + 2 ≡ 1 + (1 + 2)
-- (1 + 2) + 0 ≡ 1 + (2 + 0)
-- (1 + 2) + 1 ≡ 1 + (2 + 1)
-- (1 + 2) + 2 ≡ 1 + (2 + 2)
-- (2 + 0) + 0 ≡ 2 + (0 + 0)
-- (2 + 0) + 1 ≡ 2 + (0 + 1)
-- (2 + 0) + 2 ≡ 2 + (0 + 2)
-- (2 + 1) + 0 ≡ 2 + (1 + 0)
-- (2 + 1) + 1 ≡ 2 + (1 + 1)
-- (2 + 1) + 2 ≡ 2 + (1 + 2)
-- (2 + 2) + 0 ≡ 2 + (2 + 0)
-- (2 + 2) + 1 ≡ 2 + (2 + 1)
-- (2 + 2) + 2 ≡ 2 + (2 + 2)
-- @stretch: exercise `finite-+-assoc` end
+-assoc' : ∀ ( m n p : ℕ ) → (m + n) + p ≡ m + (n + p)
+-assoc' zero n p = refl
+-assoc' (suc m) n p rewrite +-assoc' m n p = refl
+-identity' : ∀ ( n : ℕ ) → n + zero ≡ n
+-identity' zero = refl
+-identity' (suc m) rewrite +-identity' m = refl
+-suc' : ∀ ( m n : ℕ ) → m + suc n ≡ suc (m + n)
+-suc' zero n = refl
+-suc' (suc m) n rewrite +-suc' m n = refl
+-comm' : ∀ ( m n : ℕ ) → m + n ≡ n + m
+-comm' m zero rewrite +-identity' m = refl
+-comm' m (suc n) rewrite +-suc' m n | +-comm' m n = refl
-- with hole
+-assoc'' : ∀ (m n p : ℕ ) → ( m + n ) + p ≡ m + (n + p)
+-assoc'' zero n p = refl
+-assoc'' (suc m) n p rewrite +-assoc'' m n p = refl
-- @recommended: exercise `+-swap` start
+-swap : ∀ ( m n p : ℕ ) → m + (n + p) ≡ n + (m + p)
+-swap zero n p = refl
+-swap (suc m) n p rewrite +-swap m n p | +-suc n (m + p) = refl
-- @recommended: exercise `+-swap` end
-- @recommended: exercise `*-distrib-+` start
*-distrib-+ : ∀ ( m n p : ℕ ) → ( m + n ) * p ≡ m * p + n * p
*-distrib-+ zero n p = refl
*-distrib-+ (suc m) n p rewrite *-distrib-+ m n p | +-assoc p (m * p) (n * p) = refl
-- @recommended: exercise `*-distrib-+` end
-- @recommended: exercise `*-assoc` start
*-assoc : ∀ ( m n p : ℕ ) → (m * n) * p ≡ m * ( n * p)
*-assoc zero n p = refl
*-assoc (suc m) n p rewrite *-distrib-+ n (m * n) p | *-assoc m n p = refl
-- @recommended: exercise `*-assoc` end
-- @practice: exercise `*-comm` start
*-zeroʳ : ∀ ( m : ℕ ) → m * zero ≡ zero
*-zeroʳ zero = refl
*-zeroʳ ( suc m ) rewrite *-distrib-+ 1 m zero | *-zeroʳ m = refl
*-suc : ∀ ( n m : ℕ ) → n + n * m ≡ n * suc m
*-suc zero m = refl
*-suc (suc n) m
rewrite
sym (*-suc n m) |
sym (+-assoc m n ( n * m )) | sym ( +-assoc n m ( n * m )) |
+-comm m n = refl
*-comm : ∀ ( m n : ℕ ) → m * n ≡ n * m
*-comm zero n rewrite *-zeroʳ n = refl
*-comm (suc m) n rewrite *-distrib-+ 1 m n | +-identityʳ n | *-comm m n | *-suc n m = refl
-- @practice: exercise `*-comm` end
-- @practice: exercise `0∸n≡0` start
0∸n≡0 : ∀ ( n : ℕ ) → 0 ∸ n ≡ 0
0∸n≡0 zero = refl
0∸n≡0 (suc n) = refl
-- @practice: exercise `0∸n≡0` end
-- @practice: exercise `∸-|-assoc` start
∸-|-assoc : ∀ ( m n p : ℕ ) → m ∸ n ∸ p ≡ m ∸ ( n + p )
∸-|-assoc zero n p rewrite 0∸n≡0 n | 0∸n≡0 p | 0∸n≡0 ( n + p ) = refl
∸-|-assoc (suc m) zero p = refl
∸-|-assoc (suc m) (suc n) p rewrite ∸-|-assoc m n p = refl
-- @practice: exercise `∸-|-assoc` end
-- @stretch: exercise `+*^` start
^-distribʳ-+-* : ∀ ( m n p : ℕ ) → m ^ ( n + p ) ≡ (m ^ n) * (m ^ p)
^-distribʳ-+-* m zero zero = refl
^-distribʳ-+-* m zero (suc p) rewrite +-identityʳ (m * (m ^ p)) = refl
^-distribʳ-+-* m (suc n) p =
begin
m ^ (suc n + p)
≡⟨⟩
m ^ suc ( n + p )
≡⟨⟩
m * ( m ^ ( n + p ) )
≡⟨ cong (_*_ m) ( ^-distribʳ-+-* m n p ) ⟩
m * ( ( m ^ n ) * ( m ^ p ) )
≡⟨ sym (*-assoc m (m ^ n) (m ^ p)) ⟩
m * ( m ^ n ) * (m ^ p)
≡⟨⟩
(m ^ suc n ) * ( m ^ p )
∎
^-sucʳ : ∀ ( n p : ℕ ) → n ^ suc p ≡ n * n ^ p
^-sucʳ n p = refl
^-distribˡ-* : ∀ ( m n p : ℕ ) → (m * n) ^ p ≡ ( m ^ p ) * ( n ^ p)
^-distribˡ-* m n zero = refl
^-distribˡ-* m n (suc p) =
begin
(m * n) ^ ( suc p )
≡⟨ ^-sucʳ (m * n) p ⟩
(m * n) * ((m * n) ^ p)
≡⟨ cong (_*_ (m * n)) (^-distribˡ-* m n p) ⟩
m * n * ((m ^ p) * (n ^ p))
≡⟨ sym (*-assoc (m * n) (m ^ p) (n ^ p)) ⟩
m * n * (m ^ p) * (n ^ p)
≡⟨ *-1234-[13][24] m n (m ^ p) (n ^ p) ⟩
(m * m ^ p ) * (n * n ^ p)
≡⟨ cong (_*_ (m * m ^ p)) (sym (^-sucʳ n p))⟩
(m * m ^ p ) * ( n ^ suc p)
≡⟨ cong (λ {x → x * (n ^ suc p)}) (^-sucʳ m p) ⟩
(m ^ suc p) * (n ^ suc p)
∎
where
*-1234-[13][24] : ∀ ( a b c d : ℕ ) → a * b * c * d ≡ (a * c) * ( b * d )
*-1234-[13][24] a b c d
rewrite
*-assoc a b c |
*-comm b c |
sym (*-assoc a c b) |
*-assoc (a * c) b d = refl
^-oneˡ : ∀ ( p : ℕ ) → 1 ^ p ≡ 1
^-oneˡ zero = refl
^-oneˡ (suc p) rewrite ^-oneˡ p = refl
^-distribʳ-* : ∀ ( m n p : ℕ ) → m ^ (n * p) ≡ (m ^ n) ^ p
^-distribʳ-* m zero p rewrite ^-oneˡ p = refl
^-distribʳ-* m n zero rewrite *-zeroʳ n = refl
^-distribʳ-* m (suc n) (suc p)
rewrite
^-distribʳ-+-* m p ( n * suc p) |
^-distribˡ-* m (m ^ n) p |
^-distribʳ-* m n (suc p) =
begin
m * ((m ^ p) * ((m ^ n) * ((m ^ n) ^ p)))
≡⟨ *-1[2[34]]-13[24] m (m ^ p) (m ^ n) ((m ^ n) ^ p) ⟩
m * (m ^ n) * ((m ^ p) * ((m ^ n) ^ p))
∎
where
*-1[2[34]]-13[24] : ∀ ( a b c d : ℕ ) → a * ( b * ( c * d ) ) ≡ a * c * ( b * d )
*-1[2[34]]-13[24] a b c d
rewrite
sym (*-assoc b c d) |
*-comm b c |
*-assoc c b d |
sym (*-assoc a c (b * d)) = refl
-- @stretch: exercise `+*^` end
-- @stretch: exercise `Bin-laws` start
data Bin : Set where
⟨⟩ : Bin
_O : Bin → Bin
_I : Bin → Bin
inc : Bin → Bin
inc (m O) = m I
inc (m I) = (inc m) O
inc ⟨⟩ = ⟨⟩ I
to : ℕ → Bin
to zero = ⟨⟩ O
to (suc n) = inc (to n)
from : Bin -> ℕ
from (m O) = 2 * (from m)
from (m I) = 2 * (from m) + 1
from ⟨⟩ = 0
comm-+-from : ∀ ( b : Bin ) → from (inc b) ≡ suc (from b)
comm-+-from ⟨⟩ = refl
comm-+-from (b O)
rewrite
sym ( +-suc' (from b) (from b + zero)) |
+-assoc' (from b) (from b + 0) 1 |
+-identityʳ (from b) |
+-comm' (from b) 1 = refl
comm-+-from (b I)
rewrite
+-identityʳ (from (inc b)) | +-identityʳ ( from b) |
comm-+-from b |
sym (+-comm' (from b) 1) |
+-assoc' (from b) (from b) 1 = refl
identity-to-from : ∀ ( n : ℕ ) → from (to n) ≡ n
identity-to-from zero = refl
identity-to-from (suc n) rewrite comm-+-from (to n) | identity-to-from n = refl
-- ⟨⟩ !== ⟨⟩ O
-- @todo: rewrite with ∃-syntax
-- @stretch: exercise `Bin-laws` end
-- import Data.Nat.Properties using (+-assoc; +-identityʳ; +-suc; +-comm)
|
Require Import Le Gt Minus Bool Setoid.
Require Import Program.
Set Implicit Arguments.
Inductive snoclist (A : Type) : Type :=
| snil : snoclist A
| snoc : snoclist A -> A -> snoclist A.
Arguments snil {A}.
Arguments snoc {A} _ _. (* use underscore for argument position that has no name *)
Fixpoint app {A : Type} (l1 l2 : snoclist A) : (snoclist A) :=
match l2 with
| snil => l1
| snoc l2' x => snoc (app l1 l2') x
end.
Fixpoint length {A : Type} (l : snoclist A) : nat :=
match l with
| snil => 0
| snoc l' x => 1 + length l'
end.
Notation "l ::: x" := (snoc l x)
(at level 61, left associativity).
Notation "[ ]" := snil.
Notation "[ x ]" := (snoc snil x).
Notation "[ x ; .. ; y ]" := (snoc .. (snoc snil x) .. y).
Notation "x +++ y" := (app x y)
(at level 61, left associativity).
Fixpoint rev {A : Type} (l : snoclist A) : snoclist A :=
match l with
| snil => snil
| snoc l' a => [a] +++ (rev l')
end.
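(* A quick sanity check with toy values: both equations hold by computation. *)
Example app_rev_ex : ([1; 2] +++ [3]) = [1; 2; 3] /\ rev [1; 2; 3] = [3; 2; 1].
Proof. split; reflexivity. Qed.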
Section Map.
Variables A B : Type.
Variable f : A -> B.
Fixpoint smap (l : snoclist A) : snoclist B :=
match l with
| snil => snil
| l':::a => snoc (smap l') (f a)
end.
End Map.
Section Bool.
Variable A : Type.
Variable f : A -> bool.
Fixpoint sfilter (l : snoclist A) : snoclist A :=
match l with
| snil => snil
| l':::x => if f x then (sfilter l'):::x else sfilter l'
end.
End Bool.
Section Elmts.
Variable A : Type.
Fixpoint nth_from_end (default : A) (n : nat) (l : snoclist A) : A :=
match n, l with
| O, l':::x => x
| S n', l':::x => nth_from_end default n' l'
| _, [] => default
end.
Definition nth default n := (compose (nth_from_end default n) rev).
Fixpoint last_n (n : nat) (l : snoclist A) : snoclist A :=
match n, l with
| 0, _ => snil
| _, snil => snil
| S n', snoc l' a => snoc (last_n n' l') a
end.
Definition first_n n := (compose rev (compose (last_n n) rev)).
End Elmts.
|
section\<open>Increment and Reset\<close>
text\<open>The ``increment and reset'' heuristic proposed in \<^cite>\<open>"foster2019"\<close> is a naive way of
introducing an incrementing register into a model. This theory implements that heuristic.\<close>
theory Increment_Reset
imports "../Inference"
begin
definition initialiseReg :: "transition \<Rightarrow> nat \<Rightarrow> transition" where
"initialiseReg t newReg = \<lparr>Label = Label t, Arity = Arity t, Guards = Guards t, Outputs = Outputs t, Updates = ((newReg, L (Num 0))#Updates t)\<rparr>"
definition "guardMatch t1 t2 = (\<exists>n n'. Guards t1 = [gexp.Eq (V (vname.I 0)) (L (Num n))] \<and> Guards t2 = [gexp.Eq (V (vname.I 0)) (L (Num n'))])"
definition "outputMatch t1 t2 = (\<exists>m m'. Outputs t1 = [L (Num m)] \<and> Outputs t2 = [L (Num m')])"
lemma guard_match_commute: "guardMatch t1 t2 = guardMatch t2 t1"
apply (simp add: guardMatch_def)
by auto
lemma guard_match_length:
"length (Guards t1) \<noteq> 1 \<or> length (Guards t2) \<noteq> 1 \<Longrightarrow> \<not> guardMatch t1 t2"
apply (simp add: guardMatch_def)
by auto
fun insert_increment :: update_modifier where
"insert_increment t1ID t2ID s new _ old check = (let
t1 = get_by_ids new t1ID;
t2 = get_by_ids new t2ID in
if guardMatch t1 t2 \<and> outputMatch t1 t2 then let
r = case max_reg new of None \<Rightarrow> 1 | Some r \<Rightarrow> r + 1;
newReg = R r;
newT1 = \<lparr>Label = Label t1, Arity = Arity t1, Guards = [], Outputs = [Plus (V newReg) (V (vname.I 0))], Updates=((r, Plus (V newReg) (V (vname.I 0)))#Updates t1)\<rparr>;
newT2 = \<lparr>Label = Label t2, Arity = Arity t2, Guards = [], Outputs = [Plus (V newReg) (V (vname.I 0))], Updates=((r, Plus (V newReg) (V (vname.I 0)))#Updates t2)\<rparr>;
to_initialise = ffilter (\<lambda>(uid, (from, to), t). (to = dest t1ID new \<or> to = dest t2ID new) \<and> t \<noteq> t1 \<and> t \<noteq> t2) new;
initialisedTrans = fimage (\<lambda>(uid, (from, to), t). (uid, initialiseReg t r)) to_initialise;
initialised = replace_transitions new (sorted_list_of_fset initialisedTrans);
rep = replace_transitions new [(t1ID, newT1), (t2ID, newT2)]
in
if check (tm rep) then Some rep else None
else
None
)"
definition struct_replace_all :: "iEFSM \<Rightarrow> transition \<Rightarrow> transition \<Rightarrow> iEFSM" where
"struct_replace_all e old new = (let
to_replace = ffilter (\<lambda>(uid, (from, dest), t). same_structure t old) e;
replacements = fimage (\<lambda>(uid, (from, to), t). (uid, new)) to_replace
in
replace_transitions e (sorted_list_of_fset replacements))"
lemma output_match_symmetry: "(outputMatch t1 t2) = (outputMatch t2 t1)"
apply (simp add: outputMatch_def)
by auto
lemma guard_match_symmetry: "(guardMatch t1 t2) = (guardMatch t2 t1)"
apply (simp add: guardMatch_def)
by auto
fun insert_increment_2 :: update_modifier where
"insert_increment_2 t1ID t2ID s new _ old check = (let
t1 = get_by_ids new t1ID;
t2 = get_by_ids new t2ID in
if guardMatch t1 t2 \<and> outputMatch t1 t2 then let
r = case max_reg new of None \<Rightarrow> 1 | Some r \<Rightarrow> r + 1;
newReg = R r;
newT1 = \<lparr>Label = Label t1, Arity = Arity t1, Guards = [], Outputs = [Plus (V newReg) (V (vname.I 0))], Updates=((r, Plus (V newReg) (V (vname.I 0)))#Updates t1)\<rparr>;
newT2 = \<lparr>Label = Label t2, Arity = Arity t2, Guards = [], Outputs = [Plus (V newReg) (V (vname.I 0))], Updates=((r, Plus (V newReg) (V (vname.I 0)))#Updates t2)\<rparr>;
to_initialise = ffilter (\<lambda>(uid, (from, to), t). (to = dest t1ID new \<or> to = dest t2ID new) \<and> t \<noteq> t1 \<and> t \<noteq> t2) new;
initialisedTrans = fimage (\<lambda>(uid, (from, to), t). (uid, initialiseReg t r)) to_initialise;
initialised = replace_transitions new (sorted_list_of_fset initialisedTrans);
rep = struct_replace_all (struct_replace_all initialised t2 newT2) t1 newT1
in
if check (tm rep) then Some rep else None
else
None
)"
fun guardMatch_alt_2 :: "vname gexp list \<Rightarrow> bool" where
"guardMatch_alt_2 [(gexp.Eq (V (vname.I i)) (L (Num n)))] = (i = 1)" |
"guardMatch_alt_2 _ = False"
fun outputMatch_alt_2 :: "vname aexp list \<Rightarrow> bool" where
"outputMatch_alt_2 [(L (Num n))] = True" |
"outputMatch_alt_2 _ = False"
end
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
mbeef_n := 5:
mbeef_coefs := [
[ 1.17114923e+00, 1.15594371e-01, -5.32167416e-02, -2.01131648e-02, 1.41417107e-03],
[-6.76157938e-02, 4.53837246e-02, -2.22650139e-02, 1.92374554e-02, 9.19317034e-07],
[ 1.48659502e-02, 3.18024096e-02, -5.21818079e-03, 1.33707403e-07, -5.00749348e-07],
[ 1.40794142e-03, -6.08338264e-03, -6.57949254e-07, -5.49909413e-08, 5.74317889e-08],
[ 1.41530486e-04, -1.00478906e-07, 2.01895739e-07, 3.97324768e-09, -3.40722258e-09]
]:
$include "mbeef.mpl"
mbeefvdw_f := (x, u, t) -> mbeef_expansion(x, t):
f := (rs, z, xt, xs0, xs1, u0, u1, t0, t1) ->
mgga_exchange(mbeefvdw_f, rs, z, xs0, xs1, u0, u1, t0, t1):
|
module Main
import Palindrome
showPalindrome : String -> String
showPalindrome str = show (palindrome str) ++ "\n"
main : IO ()
main = repl "Enter a string: " showPalindrome |
State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support p ∆ support q ⊆ support (p + q) State After: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support p \ support q ∪ support q \ support p ⊆ support (p + q) Tactic: rw [symmDiff_def, Finset.sup_eq_union] State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support p \ support q ∪ support q \ support p ⊆ support (p + q) State After: case hs
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support p \ support q ⊆ support (p + q)
case a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support q \ support p ⊆ support (p + q) Tactic: apply Finset.union_subset State Before: case hs
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support p \ support q ⊆ support (p + q) State After: no goals Tactic: exact support_sdiff_support_subset_support_add p q State Before: case a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support q \ support p ⊆ support (p + q) State After: case a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support q \ support p ⊆ support (q + p) Tactic: rw [add_comm] State Before: case a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝² : CommSemiring R
inst✝¹ : CommSemiring S₁
p✝ q✝ : MvPolynomial σ R
inst✝ : DecidableEq σ
p q : MvPolynomial σ R
⊢ support q \ support p ⊆ support (q + p) State After: no goals Tactic: exact support_sdiff_support_subset_support_add q p |
/**
* Copyright (c) 2018 Melown Technologies SE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <boost/utility/in_place_factory.hpp>
#include <boost/optional.hpp>
#include "utility/buildsys.hpp"
#include "service/cmdline.hpp"
#include "mapproxy/support/mmapped/tileindex.hpp"
namespace po = boost::program_options;
namespace fs = boost::filesystem;
class QueryMappedTileIndex : public service::Cmdline {
public:
QueryMappedTileIndex()
: service::Cmdline("mapproxy-querymmti", BUILD_TARGET_VERSION)
{
}
private:
void configuration(po::options_description &cmdline
, po::options_description &config
, po::positional_options_description &pd);
void configure(const po::variables_map &vars);
bool help(std::ostream &out, const std::string &what) const;
int run();
fs::path ti_;
vts::TileId tileId_;
};
void QueryMappedTileIndex
::configuration(po::options_description &cmdline
, po::options_description &config
, po::positional_options_description &pd)
{
cmdline.add_options()
("tileIndex", po::value(&ti_)->required()
, "Path to tile index.")
("tileId", po::value(&tileId_)->required()
, "Tile ID to query.")
;
pd.add("tileIndex", 1)
.add("tileId", 1);
(void) config;
}
void QueryMappedTileIndex::configure(const po::variables_map &vars)
{
(void) vars;
}
bool QueryMappedTileIndex::help(std::ostream &out
, const std::string &what) const
{
if (what.empty()) {
// program help
out << ("mapproxy mmapped tileindex query tool\n"
"\n"
);
return true;
}
return false;
}
int QueryMappedTileIndex::run()
{
// try to open
boost::optional<mmapped::TileIndex> mti;
boost::optional<vts::TileIndex> ti;
try {
mti = boost::in_place(ti_);
} catch (const std::exception&) {
ti = boost::in_place();
ti->load(ti_);
}
if (mti) {
std::cout << std::bitset<8>(mti->get(tileId_)) << std::endl;
} else {
std::cout << std::bitset<32>(ti->get(tileId_)) << std::endl;
}
return EXIT_SUCCESS;
}
int main(int argc, char *argv[])
{
return QueryMappedTileIndex()(argc, argv);
}
|
using SpinSymmetry
using Test
@testset "SpinSymmetry.jl" begin
include("basis.jl")
include("symmetries.jl")
end
|
/-
Copyright (c) 2021 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import logic.function.basic
import tactic.protected
/-!
# Types that are empty
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define a typeclass `is_empty`, which expresses that a type has no elements.
## Main declaration
* `is_empty`: a typeclass that expresses that a type is empty.
-/
variables {α β γ : Sort*}
/-- `is_empty α` expresses that `α` is empty. -/
@[protect_proj]
class is_empty (α : Sort*) : Prop :=
(false : α → false)
instance : is_empty empty := ⟨empty.elim⟩
instance : is_empty pempty := ⟨pempty.elim⟩
instance : is_empty false := ⟨id⟩
instance : is_empty (fin 0) := ⟨λ n, nat.not_lt_zero n.1 n.2⟩
protected lemma function.is_empty [is_empty β] (f : α → β) : is_empty α :=
⟨λ x, is_empty.false (f x)⟩
instance {p : α → Sort*} [h : nonempty α] [∀ x, is_empty (p x)] : is_empty (Π x, p x) :=
h.elim $ λ x, function.is_empty $ function.eval x
instance pprod.is_empty_left [is_empty α] : is_empty (pprod α β) :=
function.is_empty pprod.fst
instance pprod.is_empty_right [is_empty β] : is_empty (pprod α β) :=
function.is_empty pprod.snd
instance prod.is_empty_left {α β} [is_empty α] : is_empty (α × β) :=
function.is_empty prod.fst
instance prod.is_empty_right {α β} [is_empty β] : is_empty (α × β) :=
function.is_empty prod.snd
instance [is_empty α] [is_empty β] : is_empty (psum α β) :=
⟨λ x, psum.rec is_empty.false is_empty.false x⟩
instance {α β} [is_empty α] [is_empty β] : is_empty (α ⊕ β) :=
⟨λ x, sum.rec is_empty.false is_empty.false x⟩
/-- subtypes of an empty type are empty -/
instance [is_empty α] (p : α → Prop) : is_empty (subtype p) :=
⟨λ x, is_empty.false x.1⟩
/-- subtypes by an all-false predicate are false. -/
lemma subtype.is_empty_of_false {p : α → Prop} (hp : ∀ a, ¬(p a)) : is_empty (subtype p) :=
⟨λ x, hp _ x.2⟩
/-- subtypes by false are false. -/
instance subtype.is_empty_false : is_empty {a : α // false} :=
subtype.is_empty_of_false (λ a, id)
instance sigma.is_empty_left {α} [is_empty α] {E : α → Type*} : is_empty (sigma E) :=
function.is_empty sigma.fst
/- Test that `pi.is_empty` finds this instance. -/
example [h : nonempty α] [is_empty β] : is_empty (α → β) := by apply_instance
/-- Eliminate out of a type that `is_empty` (without using projection notation). -/
@[elab_as_eliminator]
def is_empty_elim [is_empty α] {p : α → Sort*} (a : α) : p a :=
(is_empty.false a).elim
lemma is_empty_iff : is_empty α ↔ α → false :=
⟨@is_empty.false α, is_empty.mk⟩
namespace is_empty
open function
/-- Eliminate out of a type that `is_empty` (using projection notation). -/
protected def elim (h : is_empty α) {p : α → Sort*} (a : α) : p a :=
is_empty_elim a
/-- Non-dependent version of `is_empty.elim`. Helpful if the elaborator cannot elaborate `h.elim a`
correctly. -/
protected def elim' {β : Sort*} (h : is_empty α) (a : α) : β :=
h.elim a
protected lemma prop_iff {p : Prop} : is_empty p ↔ ¬ p :=
is_empty_iff
variables [is_empty α]
@[simp] lemma forall_iff {p : α → Prop} : (∀ a, p a) ↔ true :=
iff_true_intro is_empty_elim
@[simp] lemma exists_iff {p : α → Prop} : (∃ a, p a) ↔ false :=
iff_false_intro $ λ ⟨x, hx⟩, is_empty.false x
@[priority 100] -- see Note [lower instance priority]
instance : subsingleton α := ⟨is_empty_elim⟩
end is_empty
@[simp] lemma not_nonempty_iff : ¬ nonempty α ↔ is_empty α :=
⟨λ h, ⟨λ x, h ⟨x⟩⟩, λ h1 h2, h2.elim h1.elim⟩
@[simp] lemma not_is_empty_iff : ¬ is_empty α ↔ nonempty α :=
not_iff_comm.mp not_nonempty_iff
@[simp] lemma is_empty_Prop {p : Prop} : is_empty p ↔ ¬p :=
by simp only [← not_nonempty_iff, nonempty_Prop]
@[simp] lemma is_empty_pi {π : α → Sort*} : is_empty (Π a, π a) ↔ ∃ a, is_empty (π a) :=
by simp only [← not_nonempty_iff, classical.nonempty_pi, not_forall]
@[simp] lemma is_empty_sigma {α} {E : α → Type*} :
is_empty (sigma E) ↔ ∀ a, is_empty (E a) :=
by simp only [← not_nonempty_iff, nonempty_sigma, not_exists]
@[simp] lemma is_empty_psigma {α} {E : α → Sort*} :
is_empty (psigma E) ↔ ∀ a, is_empty (E a) :=
by simp only [← not_nonempty_iff, nonempty_psigma, not_exists]
@[simp] lemma is_empty_subtype (p : α → Prop) : is_empty (subtype p) ↔ ∀ x, ¬p x :=
by simp only [← not_nonempty_iff, nonempty_subtype, not_exists]
@[simp] lemma is_empty_prod {α β : Type*} : is_empty (α × β) ↔ is_empty α ∨ is_empty β :=
by simp only [← not_nonempty_iff, nonempty_prod, not_and_distrib]
@[simp] lemma is_empty_pprod : is_empty (pprod α β) ↔ is_empty α ∨ is_empty β :=
by simp only [← not_nonempty_iff, nonempty_pprod, not_and_distrib]
@[simp] lemma is_empty_sum {α β} : is_empty (α ⊕ β) ↔ is_empty α ∧ is_empty β :=
by simp only [← not_nonempty_iff, nonempty_sum, not_or_distrib]
@[simp] lemma is_empty_psum {α β} : is_empty (psum α β) ↔ is_empty α ∧ is_empty β :=
by simp only [← not_nonempty_iff, nonempty_psum, not_or_distrib]
@[simp] lemma is_empty_ulift {α} : is_empty (ulift α) ↔ is_empty α :=
by simp only [← not_nonempty_iff, nonempty_ulift]
@[simp] lemma is_empty_plift {α} : is_empty (plift α) ↔ is_empty α :=
by simp only [← not_nonempty_iff, nonempty_plift]
lemma well_founded_of_empty {α} [is_empty α] (r : α → α → Prop) : well_founded r :=
⟨is_empty_elim⟩
variables (α)
lemma is_empty_or_nonempty : is_empty α ∨ nonempty α :=
(em $ is_empty α).elim or.inl $ or.inr ∘ not_is_empty_iff.mp
@[simp] lemma not_is_empty_of_nonempty [h : nonempty α] : ¬ is_empty α :=
not_is_empty_iff.mpr h
variable {α}
lemma function.extend_of_empty [is_empty α] (f : α → β) (g : α → γ) (h : β → γ) :
function.extend f g h = h :=
funext $ λ x, function.extend_apply' _ _ _ $ λ ⟨a, h⟩, is_empty_elim a
|
# Based on the paper by dos Reis et al., 2004
naive_s = c(0, 0, 0, 0, 0.5, 0.5, 0.75, 0.5)
find_optimal_s = function (codon_usage, expression, lengths, trna) {
f = function (s)
cor(tai(codon_usage, w(trna, c(0, 0, 0, 0, exp(s))), lengths),
expression,
method = 'spearman')
# Fix first four values, optimize rest.
# Optimize `log(s)` to avoid getting negative values into `s`.
par = log(naive_s[-(1 : 4)])
within(optim(par, f, control = list(fnscale = -1)),
{par = c(0, 0, 0, 0, exp(par))})
}
# Reverse complement of the anticodons, in the order of anticodons as given in
# Figure 1 of dos Reis et al.
rc_anticodons = c('TTT', 'TTC', 'TTA', 'TTG',
'TCT', 'TCC', 'TCA', 'TCG',
'TAT', 'TAC', 'TAA', 'TAG',
'TGT', 'TGC', 'TGA', 'TGG',
'CTT', 'CTC', 'CTA', 'CTG',
'CCT', 'CCC', 'CCA', 'CCG',
'CAT', 'CAC', 'CAA', 'CAG',
'CGT', 'CGC', 'CGA', 'CGG',
'ATT', 'ATC', 'ATA', 'ATG',
'ACT', 'ACC', 'ACA', 'ACG',
'AAT', 'AAC', 'AAA', 'AAG',
'AGT', 'AGC', 'AGA', 'AGG',
'GTT', 'GTC', 'GTA', 'GTG',
'GCT', 'GCC', 'GCA', 'GCG',
'GAT', 'GAC', 'GAA', 'GAG',
'GGT', 'GGC', 'GGA', 'GGG')
met_codon = match('ATG', rc_anticodons)
stop_codons = match(c('TAA', 'TAG', 'TGA'), rc_anticodons)
w = function (counts, s = naive_s) {
counts = counts[rc_anticodons]
counts[is.na(counts)] = 0
p = 1 - s
w = vector('numeric', length(counts))
for (i in seq(1, length(counts), by = 4)) {
w[i] = p[1] * counts[i] + p[5] * counts[i + 1]
w[i + 1] = p[2] * counts[i + 1] + p[6] * counts[i]
w[i + 2] = p[3] * counts[i + 2] + p[7] * counts[i]
w[i + 3] = p[4] * counts[i + 3] + p[8] * counts[i + 2]
}
w[met_codon] = p[4] * counts[met_codon]
w = w[-stop_codons]
w_rel = w / max(w)
nonzero_w = w_rel[w != 0]
if (length(nonzero_w) != length(w_rel))
w_rel[w_rel == 0] = exp(sum(log(nonzero_w)) / length(nonzero_w))
w_rel
}
tai = function (codon_counts, w, lengths) {
codons = rc_anticodons[-stop_codons]
exp(colSums(apply(codon_counts[, codons], 1, `*`, log(w))) / lengths)
}
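# A toy sanity-check sketch (uniform hypothetical inputs, not data from the
# paper): uniform tRNA copy numbers and codon counts should run end to end
# through w() and tai().
toy_trna = setNames(rep(1, length(rc_anticodons)), rc_anticodons)
toy_counts = matrix(1, nrow = 2, ncol = 61,
                    dimnames = list(c('g1', 'g2'), rc_anticodons[-stop_codons]))
tai(toy_counts, w(toy_trna), lengths = c(61, 61))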
|