Dataset: AI4M (column "text", string lengths 0 to 3.34M)
[STATEMENT] lemma approx_form_aux: assumes "approx_form prec f vs ss" and "bounded_by xs vs" shows "interpret_form f xs" [PROOF STATE] proof (prove) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: approx_form prec f vs ss bounded_by xs vs goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] proof (induct f arbitrary: vs) [PROOF STATE] proof (state) goal (7 subgoals): 1. \<And>x1 x2 x3 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Bound x1 x2 x3 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Bound x1 x2 x3 f) xs 2. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 4. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 5. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 7. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] case (Bound x a b f) [PROOF STATE] proof (state) this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Bound x a b f) vs ss bounded_by xs vs goal (7 subgoals): 1. \<And>x1 x2 x3 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Bound x1 x2 x3 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Bound x1 x2 x3 f) xs 2. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 4. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 5. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. 
\<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 7. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Bound x a b f) vs ss bounded_by xs vs [PROOF STEP] obtain n where x_eq: "x = Var n" [PROOF STATE] proof (prove) using this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Bound x a b f) vs ss bounded_by xs vs goal (1 subgoal): 1. (\<And>n. x = Var n \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases x) auto [PROOF STATE] proof (state) this: x = Var n goal (7 subgoals): 1. \<And>x1 x2 x3 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Bound x1 x2 x3 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Bound x1 x2 x3 f) xs 2. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 4. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 5. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 7. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] with Bound.prems [PROOF STATE] proof (chain) picking this: approx_form prec (Bound x a b f) vs ss bounded_by xs vs x = Var n [PROOF STEP] obtain ivl1 ivl2 where l_eq: "approx prec a vs = Some ivl1" and u_eq: "approx prec b vs = Some ivl2" and approx_form': "approx_form' prec f (ss ! n) n (sup ivl1 ivl2) vs ss" [PROOF STATE] proof (prove) using this: approx_form prec (Bound x a b f) vs ss bounded_by xs vs x = Var n goal (1 subgoal): 1. (\<And>ivl1 ivl2. \<lbrakk>approx prec a vs = Some ivl1; approx prec b vs = Some ivl2; approx_form' prec f (ss ! 
n) n (sup ivl1 ivl2) vs ss\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases "approx prec a vs", simp) (cases "approx prec b vs", auto) [PROOF STATE] proof (state) this: approx prec a vs = Some ivl1 approx prec b vs = Some ivl2 approx_form' prec f (ss ! n) n (sup ivl1 ivl2) vs ss goal (7 subgoals): 1. \<And>x1 x2 x3 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Bound x1 x2 x3 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Bound x1 x2 x3 f) xs 2. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 4. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 5. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 7. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] have "interpret_form f xs" if "xs ! n \<in> { interpret_floatarith a xs .. interpret_floatarith b xs }" [PROOF STATE] proof (prove) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from approx[OF Bound.prems(2) l_eq] and approx[OF Bound.prems(2) u_eq] that [PROOF STATE] proof (chain) picking this: interpret_floatarith a xs \<in>\<^sub>r ivl1 interpret_floatarith b xs \<in>\<^sub>r ivl2 xs ! n \<in> {interpret_floatarith a xs..interpret_floatarith b xs} [PROOF STEP] have "xs ! n \<in>\<^sub>r (sup ivl1 ivl2)" [PROOF STATE] proof (prove) using this: interpret_floatarith a xs \<in>\<^sub>r ivl1 interpret_floatarith b xs \<in>\<^sub>r ivl2 xs ! n \<in> {interpret_floatarith a xs..interpret_floatarith b xs} goal (1 subgoal): 1. xs ! n \<in>\<^sub>r sup ivl1 ivl2 [PROOF STEP] by (auto simp: set_of_eq sup_float_def max_def inf_float_def min_def) [PROOF STATE] proof (state) this: xs ! n \<in>\<^sub>r sup ivl1 ivl2 goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from approx_form_approx_form'[OF approx_form' this] [PROOF STATE] proof (chain) picking this: (\<And>ivl'. \<lbrakk>xs ! n \<in>\<^sub>r ivl'; approx_form prec f (vs[n := Some ivl']) ss\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis [PROOF STEP] obtain ivlx where bnds: "xs ! n \<in>\<^sub>r ivlx" and approx_form: "approx_form prec f (vs[n := Some ivlx]) ss" [PROOF STATE] proof (prove) using this: (\<And>ivl'. 
\<lbrakk>xs ! n \<in>\<^sub>r ivl'; approx_form prec f (vs[n := Some ivl']) ss\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis goal (1 subgoal): 1. (\<And>ivlx. \<lbrakk>xs ! n \<in>\<^sub>r ivlx; approx_form prec f (vs[n := Some ivlx]) ss\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] . [PROOF STATE] proof (state) this: xs ! n \<in>\<^sub>r ivlx approx_form prec f (vs[n := Some ivlx]) ss goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from \<open>bounded_by xs vs\<close> bnds [PROOF STATE] proof (chain) picking this: bounded_by xs vs xs ! n \<in>\<^sub>r ivlx [PROOF STEP] have "bounded_by xs (vs[n := Some ivlx])" [PROOF STATE] proof (prove) using this: bounded_by xs vs xs ! n \<in>\<^sub>r ivlx goal (1 subgoal): 1. bounded_by xs (vs[n := Some ivlx]) [PROOF STEP] by (rule bounded_by_update) [PROOF STATE] proof (state) this: bounded_by xs (vs[n := Some ivlx]) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] with Bound.hyps[OF approx_form] [PROOF STATE] proof (chain) picking this: bounded_by xs (vs[n := Some ivlx]) \<Longrightarrow> interpret_form f xs bounded_by xs (vs[n := Some ivlx]) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: bounded_by xs (vs[n := Some ivlx]) \<Longrightarrow> interpret_form f xs bounded_by xs (vs[n := Some ivlx]) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] by blast [PROOF STATE] proof (state) this: interpret_form f xs goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: xs ! n \<in> {interpret_floatarith a xs..interpret_floatarith b xs} \<Longrightarrow> interpret_form f xs goal (7 subgoals): 1. \<And>x1 x2 x3 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Bound x1 x2 x3 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Bound x1 x2 x3 f) xs 2. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 4. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 5. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 7. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] thus ?case [PROOF STATE] proof (prove) using this: xs ! n \<in> {interpret_floatarith a xs..interpret_floatarith b xs} \<Longrightarrow> interpret_form f xs goal (1 subgoal): 1. 
interpret_form (Bound x a b f) xs [PROOF STEP] using interpret_form.simps x_eq and interpret_floatarith.simps [PROOF STATE] proof (prove) using this: xs ! n \<in> {interpret_floatarith a xs..interpret_floatarith b xs} \<Longrightarrow> interpret_form f xs interpret_form (Bound ?x ?a ?b ?f) ?vs = (interpret_floatarith ?x ?vs \<in> {interpret_floatarith ?a ?vs..interpret_floatarith ?b ?vs} \<longrightarrow> interpret_form ?f ?vs) interpret_form (Assign ?x ?a ?f) ?vs = (interpret_floatarith ?x ?vs = interpret_floatarith ?a ?vs \<longrightarrow> interpret_form ?f ?vs) interpret_form (Less ?a ?b) ?vs = (interpret_floatarith ?a ?vs < interpret_floatarith ?b ?vs) interpret_form (LessEqual ?a ?b) ?vs = (interpret_floatarith ?a ?vs \<le> interpret_floatarith ?b ?vs) interpret_form (AtLeastAtMost ?x ?a ?b) ?vs = (interpret_floatarith ?x ?vs \<in> {interpret_floatarith ?a ?vs..interpret_floatarith ?b ?vs}) interpret_form (Conj ?f ?g) ?vs = (interpret_form ?f ?vs \<and> interpret_form ?g ?vs) interpret_form (Disj ?f ?g) ?vs = (interpret_form ?f ?vs \<or> interpret_form ?g ?vs) x = Var n interpret_floatarith (Add ?a ?b) ?vs = interpret_floatarith ?a ?vs + interpret_floatarith ?b ?vs interpret_floatarith (Minus ?a) ?vs = - interpret_floatarith ?a ?vs interpret_floatarith (Mult ?a ?b) ?vs = interpret_floatarith ?a ?vs * interpret_floatarith ?b ?vs interpret_floatarith (Inverse ?a) ?vs = inverse (interpret_floatarith ?a ?vs) interpret_floatarith (Cos ?a) ?vs = cos (interpret_floatarith ?a ?vs) interpret_floatarith (Arctan ?a) ?vs = arctan (interpret_floatarith ?a ?vs) interpret_floatarith (floatarith.Min ?a ?b) ?vs = min (interpret_floatarith ?a ?vs) (interpret_floatarith ?b ?vs) interpret_floatarith (floatarith.Max ?a ?b) ?vs = max (interpret_floatarith ?a ?vs) (interpret_floatarith ?b ?vs) interpret_floatarith (Abs ?a) ?vs = \<bar>interpret_floatarith ?a ?vs\<bar> interpret_floatarith Pi ?vs = pi interpret_floatarith (Sqrt ?a) ?vs = sqrt (interpret_floatarith ?a ?vs) interpret_floatarith (Exp ?a) ?vs = exp (interpret_floatarith ?a ?vs) interpret_floatarith (Powr ?a ?b) ?vs = interpret_floatarith ?a ?vs powr interpret_floatarith ?b ?vs interpret_floatarith (Ln ?a) ?vs = ln (interpret_floatarith ?a ?vs) interpret_floatarith (Power ?a ?n) ?vs = interpret_floatarith ?a ?vs ^ ?n interpret_floatarith (Floor ?a) ?vs = real_of_int \<lfloor>interpret_floatarith ?a ?vs\<rfloor> interpret_floatarith (Num ?f) ?vs = real_of_float ?f interpret_floatarith (Var ?n) ?vs = ?vs ! ?n goal (1 subgoal): 1. interpret_form (Bound x a b f) xs [PROOF STEP] by simp [PROOF STATE] proof (state) this: interpret_form (Bound x a b f) xs goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. 
\<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] next [PROOF STATE] proof (state) goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] case (Assign x a f) [PROOF STATE] proof (state) this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Assign x a f) vs ss bounded_by xs vs goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. 
\<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Assign x a f) vs ss bounded_by xs vs [PROOF STEP] obtain n where x_eq: "x = Var n" [PROOF STATE] proof (prove) using this: \<lbrakk>approx_form prec f ?vs ss; bounded_by xs ?vs\<rbrakk> \<Longrightarrow> interpret_form f xs approx_form prec (Assign x a f) vs ss bounded_by xs vs goal (1 subgoal): 1. (\<And>n. x = Var n \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases x) auto [PROOF STATE] proof (state) this: x = Var n goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] with Assign.prems [PROOF STATE] proof (chain) picking this: approx_form prec (Assign x a f) vs ss bounded_by xs vs x = Var n [PROOF STEP] obtain ivl where bnd_eq: "approx prec a vs = Some ivl" and x_eq: "x = Var n" and approx_form': "approx_form' prec f (ss ! n) n ivl vs ss" [PROOF STATE] proof (prove) using this: approx_form prec (Assign x a f) vs ss bounded_by xs vs x = Var n goal (1 subgoal): 1. (\<And>ivl. \<lbrakk>approx prec a vs = Some ivl; x = Var n; approx_form' prec f (ss ! n) n ivl vs ss\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases "approx prec a vs") auto [PROOF STATE] proof (state) this: approx prec a vs = Some ivl x = Var n approx_form' prec f (ss ! n) n ivl vs ss goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. 
\<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] have "interpret_form f xs" if bnds: "xs ! n = interpret_floatarith a xs" [PROOF STATE] proof (prove) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from approx[OF Assign.prems(2) bnd_eq] bnds [PROOF STATE] proof (chain) picking this: interpret_floatarith a xs \<in>\<^sub>r ivl xs ! n = interpret_floatarith a xs [PROOF STEP] have "xs ! n \<in>\<^sub>r ivl" [PROOF STATE] proof (prove) using this: interpret_floatarith a xs \<in>\<^sub>r ivl xs ! n = interpret_floatarith a xs goal (1 subgoal): 1. xs ! n \<in>\<^sub>r ivl [PROOF STEP] by auto [PROOF STATE] proof (state) this: xs ! n \<in>\<^sub>r ivl goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from approx_form_approx_form'[OF approx_form' this] [PROOF STATE] proof (chain) picking this: (\<And>ivl'. \<lbrakk>xs ! n \<in>\<^sub>r ivl'; approx_form prec f (vs[n := Some ivl']) ss\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis [PROOF STEP] obtain ivlx where bnds: "xs ! n \<in>\<^sub>r ivlx" and approx_form: "approx_form prec f (vs[n := Some ivlx]) ss" [PROOF STATE] proof (prove) using this: (\<And>ivl'. \<lbrakk>xs ! n \<in>\<^sub>r ivl'; approx_form prec f (vs[n := Some ivl']) ss\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis goal (1 subgoal): 1. (\<And>ivlx. \<lbrakk>xs ! n \<in>\<^sub>r ivlx; approx_form prec f (vs[n := Some ivlx]) ss\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] . [PROOF STATE] proof (state) this: xs ! n \<in>\<^sub>r ivlx approx_form prec f (vs[n := Some ivlx]) ss goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] from \<open>bounded_by xs vs\<close> bnds [PROOF STATE] proof (chain) picking this: bounded_by xs vs xs ! n \<in>\<^sub>r ivlx [PROOF STEP] have "bounded_by xs (vs[n := Some ivlx])" [PROOF STATE] proof (prove) using this: bounded_by xs vs xs ! n \<in>\<^sub>r ivlx goal (1 subgoal): 1. bounded_by xs (vs[n := Some ivlx]) [PROOF STEP] by (rule bounded_by_update) [PROOF STATE] proof (state) this: bounded_by xs (vs[n := Some ivlx]) goal (1 subgoal): 1. 
interpret_form f xs [PROOF STEP] with Assign.hyps[OF approx_form] [PROOF STATE] proof (chain) picking this: bounded_by xs (vs[n := Some ivlx]) \<Longrightarrow> interpret_form f xs bounded_by xs (vs[n := Some ivlx]) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: bounded_by xs (vs[n := Some ivlx]) \<Longrightarrow> interpret_form f xs bounded_by xs (vs[n := Some ivlx]) goal (1 subgoal): 1. interpret_form f xs [PROOF STEP] by blast [PROOF STATE] proof (state) this: interpret_form f xs goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: xs ! n = interpret_floatarith a xs \<Longrightarrow> interpret_form f xs goal (6 subgoals): 1. \<And>x1 x2 f vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f xs; approx_form prec (Assign x1 x2 f) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Assign x1 x2 f) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 3. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 4. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 6. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] thus ?case [PROOF STATE] proof (prove) using this: xs ! n = interpret_floatarith a xs \<Longrightarrow> interpret_form f xs goal (1 subgoal): 1. interpret_form (Assign x a f) xs [PROOF STEP] using interpret_form.simps x_eq and interpret_floatarith.simps [PROOF STATE] proof (prove) using this: xs ! 
n = interpret_floatarith a xs \<Longrightarrow> interpret_form f xs interpret_form (Bound ?x ?a ?b ?f) ?vs = (interpret_floatarith ?x ?vs \<in> {interpret_floatarith ?a ?vs..interpret_floatarith ?b ?vs} \<longrightarrow> interpret_form ?f ?vs) interpret_form (Assign ?x ?a ?f) ?vs = (interpret_floatarith ?x ?vs = interpret_floatarith ?a ?vs \<longrightarrow> interpret_form ?f ?vs) interpret_form (Less ?a ?b) ?vs = (interpret_floatarith ?a ?vs < interpret_floatarith ?b ?vs) interpret_form (LessEqual ?a ?b) ?vs = (interpret_floatarith ?a ?vs \<le> interpret_floatarith ?b ?vs) interpret_form (AtLeastAtMost ?x ?a ?b) ?vs = (interpret_floatarith ?x ?vs \<in> {interpret_floatarith ?a ?vs..interpret_floatarith ?b ?vs}) interpret_form (Conj ?f ?g) ?vs = (interpret_form ?f ?vs \<and> interpret_form ?g ?vs) interpret_form (Disj ?f ?g) ?vs = (interpret_form ?f ?vs \<or> interpret_form ?g ?vs) x = Var n interpret_floatarith (Add ?a ?b) ?vs = interpret_floatarith ?a ?vs + interpret_floatarith ?b ?vs interpret_floatarith (Minus ?a) ?vs = - interpret_floatarith ?a ?vs interpret_floatarith (Mult ?a ?b) ?vs = interpret_floatarith ?a ?vs * interpret_floatarith ?b ?vs interpret_floatarith (Inverse ?a) ?vs = inverse (interpret_floatarith ?a ?vs) interpret_floatarith (Cos ?a) ?vs = cos (interpret_floatarith ?a ?vs) interpret_floatarith (Arctan ?a) ?vs = arctan (interpret_floatarith ?a ?vs) interpret_floatarith (floatarith.Min ?a ?b) ?vs = min (interpret_floatarith ?a ?vs) (interpret_floatarith ?b ?vs) interpret_floatarith (floatarith.Max ?a ?b) ?vs = max (interpret_floatarith ?a ?vs) (interpret_floatarith ?b ?vs) interpret_floatarith (Abs ?a) ?vs = \<bar>interpret_floatarith ?a ?vs\<bar> interpret_floatarith Pi ?vs = pi interpret_floatarith (Sqrt ?a) ?vs = sqrt (interpret_floatarith ?a ?vs) interpret_floatarith (Exp ?a) ?vs = exp (interpret_floatarith ?a ?vs) interpret_floatarith (Powr ?a ?b) ?vs = interpret_floatarith ?a ?vs powr interpret_floatarith ?b ?vs interpret_floatarith (Ln ?a) ?vs = ln (interpret_floatarith ?a ?vs) interpret_floatarith (Power ?a ?n) ?vs = interpret_floatarith ?a ?vs ^ ?n interpret_floatarith (Floor ?a) ?vs = real_of_int \<lfloor>interpret_floatarith ?a ?vs\<rfloor> interpret_floatarith (Num ?f) ?vs = real_of_float ?f interpret_floatarith (Var ?n) ?vs = ?vs ! ?n goal (1 subgoal): 1. interpret_form (Assign x a f) xs [PROOF STEP] by simp [PROOF STATE] proof (state) this: interpret_form (Assign x a f) xs goal (5 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 3. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. 
\<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] next [PROOF STATE] proof (state) goal (5 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 3. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] case (Less a b) [PROOF STATE] proof (state) this: approx_form prec (Less a b) vs ss bounded_by xs vs goal (5 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 3. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] then [PROOF STATE] proof (chain) picking this: approx_form prec (Less a b) vs ss bounded_by xs vs [PROOF STEP] obtain ivl ivl' where l_eq: "approx prec a vs = Some ivl" and u_eq: "approx prec b vs = Some ivl'" and inequality: "real_of_float (float_plus_up prec (upper ivl) (-lower ivl')) < 0" [PROOF STATE] proof (prove) using this: approx_form prec (Less a b) vs ss bounded_by xs vs goal (1 subgoal): 1. (\<And>ivl ivl'. 
\<lbrakk>approx prec a vs = Some ivl; approx prec b vs = Some ivl'; real_of_float (float_plus_up prec (upper ivl) (- lower ivl')) < 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases "approx prec a vs", auto, cases "approx prec b vs", auto) [PROOF STATE] proof (state) this: approx prec a vs = Some ivl approx prec b vs = Some ivl' real_of_float (float_plus_up prec (upper ivl) (- lower ivl')) < 0 goal (5 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (Less x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Less x1 x2) xs 2. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 3. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 5. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] from le_less_trans[OF float_plus_up inequality] approx[OF Less.prems(2) l_eq] approx[OF Less.prems(2) u_eq] [PROOF STATE] proof (chain) picking this: real_of_float (upper ivl) + real_of_float (- lower ivl') < 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: real_of_float (upper ivl) + real_of_float (- lower ivl') < 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' goal (1 subgoal): 1. interpret_form (Less a b) xs [PROOF STEP] by (auto simp: set_of_eq) [PROOF STATE] proof (state) this: interpret_form (Less a b) xs goal (4 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 2. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] next [PROOF STATE] proof (state) goal (4 subgoals): 1. \<And>x1 x2 vs. 
\<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 2. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] case (LessEqual a b) [PROOF STATE] proof (state) this: approx_form prec (LessEqual a b) vs ss bounded_by xs vs goal (4 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 2. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] then [PROOF STATE] proof (chain) picking this: approx_form prec (LessEqual a b) vs ss bounded_by xs vs [PROOF STEP] obtain ivl ivl' where l_eq: "approx prec a vs = Some ivl" and u_eq: "approx prec b vs = Some ivl'" and inequality: "real_of_float (float_plus_up prec (upper ivl) (-lower ivl')) \<le> 0" [PROOF STATE] proof (prove) using this: approx_form prec (LessEqual a b) vs ss bounded_by xs vs goal (1 subgoal): 1. (\<And>ivl ivl'. \<lbrakk>approx prec a vs = Some ivl; approx prec b vs = Some ivl'; real_of_float (float_plus_up prec (upper ivl) (- lower ivl')) \<le> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases "approx prec a vs", auto, cases "approx prec b vs", auto) [PROOF STATE] proof (state) this: approx prec a vs = Some ivl approx prec b vs = Some ivl' real_of_float (float_plus_up prec (upper ivl) (- lower ivl')) \<le> 0 goal (4 subgoals): 1. \<And>x1 x2 vs. \<lbrakk>approx_form prec (LessEqual x1 x2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (LessEqual x1 x2) xs 2. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. 
\<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 4. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] from order_trans[OF float_plus_up inequality] approx[OF LessEqual.prems(2) l_eq] approx[OF LessEqual.prems(2) u_eq] [PROOF STATE] proof (chain) picking this: real_of_float (upper ivl) + real_of_float (- lower ivl') \<le> 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: real_of_float (upper ivl) + real_of_float (- lower ivl') \<le> 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' goal (1 subgoal): 1. interpret_form (LessEqual a b) xs [PROOF STEP] by (auto simp: set_of_eq) [PROOF STATE] proof (state) this: interpret_form (LessEqual a b) xs goal (3 subgoals): 1. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 2. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] next [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 2. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] case (AtLeastAtMost x a b) [PROOF STATE] proof (state) this: approx_form prec (AtLeastAtMost x a b) vs ss bounded_by xs vs goal (3 subgoals): 1. \<And>x1 x2 x3 vs. 
\<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 2. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] then [PROOF STATE] proof (chain) picking this: approx_form prec (AtLeastAtMost x a b) vs ss bounded_by xs vs [PROOF STEP] obtain ivlx ivl ivl' where x_eq: "approx prec x vs = Some ivlx" and l_eq: "approx prec a vs = Some ivl" and u_eq: "approx prec b vs = Some ivl'" and inequality: "real_of_float (float_plus_up prec (upper ivl) (-lower ivlx)) \<le> 0" "real_of_float (float_plus_up prec (upper ivlx) (-lower ivl')) \<le> 0" [PROOF STATE] proof (prove) using this: approx_form prec (AtLeastAtMost x a b) vs ss bounded_by xs vs goal (1 subgoal): 1. (\<And>ivlx ivl ivl'. \<lbrakk>approx prec x vs = Some ivlx; approx prec a vs = Some ivl; approx prec b vs = Some ivl'; real_of_float (float_plus_up prec (upper ivl) (- lower ivlx)) \<le> 0; real_of_float (float_plus_up prec (upper ivlx) (- lower ivl')) \<le> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases "approx prec x vs", auto, cases "approx prec a vs", auto, cases "approx prec b vs", auto) [PROOF STATE] proof (state) this: approx prec x vs = Some ivlx approx prec a vs = Some ivl approx prec b vs = Some ivl' real_of_float (float_plus_up prec (upper ivl) (- lower ivlx)) \<le> 0 real_of_float (float_plus_up prec (upper ivlx) (- lower ivl')) \<le> 0 goal (3 subgoals): 1. \<And>x1 x2 x3 vs. \<lbrakk>approx_form prec (AtLeastAtMost x1 x2 x3) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (AtLeastAtMost x1 x2 x3) xs 2. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 3. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. 
\<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] from order_trans[OF float_plus_up inequality(1)] order_trans[OF float_plus_up inequality(2)] approx[OF AtLeastAtMost.prems(2) l_eq] approx[OF AtLeastAtMost.prems(2) u_eq] approx[OF AtLeastAtMost.prems(2) x_eq] [PROOF STATE] proof (chain) picking this: real_of_float (upper ivl) + real_of_float (- lower ivlx) \<le> 0 real_of_float (upper ivlx) + real_of_float (- lower ivl') \<le> 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' interpret_floatarith x xs \<in>\<^sub>r ivlx [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: real_of_float (upper ivl) + real_of_float (- lower ivlx) \<le> 0 real_of_float (upper ivlx) + real_of_float (- lower ivl') \<le> 0 interpret_floatarith a xs \<in>\<^sub>r ivl interpret_floatarith b xs \<in>\<^sub>r ivl' interpret_floatarith x xs \<in>\<^sub>r ivlx goal (1 subgoal): 1. interpret_form (AtLeastAtMost x a b) xs [PROOF STEP] by (auto simp: set_of_eq) [PROOF STATE] proof (state) this: interpret_form (AtLeastAtMost x a b) xs goal (2 subgoals): 1. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Conj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Conj f1 f2) xs 2. \<And>f1 f2 vs. \<lbrakk>\<And>vs. \<lbrakk>approx_form prec f1 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f1 xs; \<And>vs. \<lbrakk>approx_form prec f2 vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form f2 xs; approx_form prec (Disj f1 f2) vs ss; bounded_by xs vs\<rbrakk> \<Longrightarrow> interpret_form (Disj f1 f2) xs [PROOF STEP] qed auto
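The Less, LessEqual, and AtLeastAtMost cases above all reduce a real-valued comparison to a test on interval endpoints: if the upward-rounded sum float_plus_up prec (upper ivl) (- lower ivl') is negative (respectively non-positive), every value enclosed by the first interval lies strictly below (respectively at or below) every value enclosed by the second. A minimal C++ sketch of that endpoint test, ignoring the precision-directed rounding and using hypothetical names, not the Isabelle formalization itself:

#include <cassert>

// Hypothetical stand-in for the intervals produced by approx: [lo, hi].
struct Interval {
    double lo;
    double hi;
};

// Mirrors the Less case: if upper(a) - lower(b) < 0 then every x enclosed by a
// is strictly below every y enclosed by b.  (The formal development uses
// float_plus_up, an upward-rounded addition, so the test stays sound even
// when the sum cannot be represented exactly.)
bool certainly_less(const Interval& a, const Interval& b) {
    return a.hi - b.lo < 0.0;
}

int main() {
    Interval a{0.0, 1.0};          // encloses interpret_floatarith a xs
    Interval b{1.5, 2.0};          // encloses interpret_floatarith b xs
    assert(certainly_less(a, b));  // hence a < b holds for all enclosed values
    return 0;
}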
{-# OPTIONS --without-K --safe #-} module Categories.Category.Equivalence where -- Strong equivalence of categories. Same as ordinary equivalence in Cat. -- May not include everything we'd like to think of as equivalences, namely -- the full, faithful functors that are essentially surjective on objects. open import Level open import Relation.Binary using (IsEquivalence; Setoid) open import Categories.Adjoint.Equivalence open import Categories.Category import Categories.Morphism.Reasoning as MR import Categories.Morphism.Properties as MP open import Categories.Functor renaming (id to idF) open import Categories.Functor.Properties open import Categories.NaturalTransformation using (ntHelper; _∘ᵥ_; _∘ˡ_; _∘ʳ_) open import Categories.NaturalTransformation.NaturalIsomorphism as NI using (NaturalIsomorphism ; unitorˡ; unitorʳ; associator; _ⓘᵥ_; _ⓘˡ_; _ⓘʳ_) renaming (sym to ≃-sym) open import Categories.NaturalTransformation.NaturalIsomorphism.Properties private variable o ℓ e : Level C D E : Category o ℓ e record WeakInverse (F : Functor C D) (G : Functor D C) : Set (levelOfTerm F ⊔ levelOfTerm G) where field F∘G≈id : NaturalIsomorphism (F ∘F G) idF G∘F≈id : NaturalIsomorphism (G ∘F F) idF module F∘G≈id = NaturalIsomorphism F∘G≈id module G∘F≈id = NaturalIsomorphism G∘F≈id private module C = Category C module D = Category D module F = Functor F module G = Functor G -- adjoint equivalence F⊣G : ⊣Equivalence F G F⊣G = record { unit = ≃-sym G∘F≈id ; counit = let open D open HomReasoning open MR D open MP D in record { F⇒G = ntHelper record { η = λ X → F∘G≈id.⇒.η X ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ X)) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ; commute = λ {X Y} f → begin (F∘G≈id.⇒.η Y ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ Y)) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ Y))) ∘ F.F₁ (G.F₁ f) ≈⟨ pull-last (F∘G≈id.⇐.commute (F.F₁ (G.F₁ f))) ⟩ F∘G≈id.⇒.η Y ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ Y)) ∘ (F.F₁ (G.F₁ (F.F₁ (G.F₁ f))) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X))) ≈˘⟨ refl⟩∘⟨ pushˡ F.homomorphism ⟩ F∘G≈id.⇒.η Y ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ Y) C.∘ G.F₁ (F.F₁ (G.F₁ f))) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ≈⟨ refl ⟩∘⟨ F.F-resp-≈ (G∘F≈id.⇒.commute (G.F₁ f)) ⟩∘⟨ refl ⟩ F∘G≈id.⇒.η Y ∘ F.F₁ (G.F₁ f C.∘ G∘F≈id.⇒.η (G.F₀ X)) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ≈⟨ refl ⟩∘⟨ F.homomorphism ⟩∘⟨ refl ⟩ F∘G≈id.⇒.η Y ∘ (F.F₁ (G.F₁ f) ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ X))) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ≈⟨ center⁻¹ (F∘G≈id.⇒.commute f) refl ⟩ (f ∘ F∘G≈id.⇒.η X) ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ X)) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ≈⟨ assoc ⟩ f ∘ F∘G≈id.⇒.η X ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ X)) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ X)) ∎ } ; F⇐G = ntHelper record { η = λ X → (F∘G≈id.⇒.η (F.F₀ (G.F₀ X)) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ X))) ∘ F∘G≈id.⇐.η X ; commute = λ {X Y} f → begin ((F∘G≈id.⇒.η (F.F₀ (G.F₀ Y)) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ Y))) ∘ F∘G≈id.⇐.η Y) ∘ f ≈⟨ pullʳ (F∘G≈id.⇐.commute f) ⟩ (F∘G≈id.⇒.η (F.F₀ (G.F₀ Y)) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ Y))) ∘ F.F₁ (G.F₁ f) ∘ F∘G≈id.⇐.η X ≈⟨ center (⟺ F.homomorphism) ⟩ F∘G≈id.⇒.η (F.F₀ (G.F₀ Y)) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ Y) C.∘ G.F₁ f) ∘ F∘G≈id.⇐.η X ≈⟨ refl ⟩∘⟨ F.F-resp-≈ (G∘F≈id.⇐.commute (G.F₁ f)) ⟩∘⟨ refl ⟩ F∘G≈id.⇒.η (F.F₀ (G.F₀ Y)) ∘ F.F₁ (G.F₁ (F.F₁ (G.F₁ f)) C.∘ G∘F≈id.⇐.η (G.F₀ X)) ∘ F∘G≈id.⇐.η X ≈⟨ refl ⟩∘⟨ F.homomorphism ⟩∘⟨ refl ⟩ F∘G≈id.⇒.η (F.F₀ (G.F₀ Y)) ∘ (F.F₁ (G.F₁ (F.F₁ (G.F₁ f))) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ X))) ∘ F∘G≈id.⇐.η X ≈⟨ center⁻¹ (F∘G≈id.⇒.commute _) refl ⟩ (F.F₁ (G.F₁ f) ∘ F∘G≈id.⇒.η (F.F₀ (G.F₀ X))) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ X)) ∘ F∘G≈id.⇐.η X ≈⟨ center refl ⟩ F.F₁ (G.F₁ f) ∘ (F∘G≈id.⇒.η (F.F₀ (G.F₀ X)) ∘ F.F₁ (G∘F≈id.⇐.η (G.F₀ X))) ∘ F∘G≈id.⇐.η X ∎ } ; iso = λ X → Iso-∘ (Iso-∘ (Iso-swap 
(F∘G≈id.iso _)) ([ F ]-resp-Iso (G∘F≈id.iso _))) (F∘G≈id.iso X) } ; zig = λ {A} → let open D open HomReasoning open MR D in begin (F∘G≈id.⇒.η (F.F₀ A) ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ (F.F₀ A))) ∘ F∘G≈id.⇐.η (F.F₀ (G.F₀ (F.F₀ A)))) ∘ F.F₁ (G∘F≈id.⇐.η A) ≈⟨ pull-last (F∘G≈id.⇐.commute (F.F₁ (G∘F≈id.⇐.η A))) ⟩ F∘G≈id.⇒.η (F.F₀ A) ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ (F.F₀ A))) ∘ F.F₁ (G.F₁ (F.F₁ (G∘F≈id.⇐.η A))) ∘ F∘G≈id.⇐.η (F.F₀ A) ≈˘⟨ refl⟩∘⟨ pushˡ F.homomorphism ⟩ F∘G≈id.⇒.η (F.F₀ A) ∘ F.F₁ (G∘F≈id.⇒.η (G.F₀ (F.F₀ A)) C.∘ G.F₁ (F.F₁ (G∘F≈id.⇐.η A))) ∘ F∘G≈id.⇐.η (F.F₀ A) ≈⟨ refl ⟩∘⟨ F.F-resp-≈ (G∘F≈id.⇒.commute (G∘F≈id.⇐.η A)) ⟩∘⟨ refl ⟩ F∘G≈id.⇒.η (F.F₀ A) ∘ F.F₁ (G∘F≈id.⇐.η A C.∘ G∘F≈id.⇒.η A) ∘ F∘G≈id.⇐.η (F.F₀ A) ≈⟨ refl ⟩∘⟨ elimˡ ((F.F-resp-≈ (G∘F≈id.iso.isoˡ _)) ○ F.identity) ⟩ F∘G≈id.⇒.η (F.F₀ A) ∘ F∘G≈id.⇐.η (F.F₀ A) ≈⟨ F∘G≈id.iso.isoʳ _ ⟩ id ∎ } module F⊣G = ⊣Equivalence F⊣G record StrongEquivalence {o ℓ e o′ ℓ′ e′} (C : Category o ℓ e) (D : Category o′ ℓ′ e′) : Set (o ⊔ ℓ ⊔ e ⊔ o′ ⊔ ℓ′ ⊔ e′) where field F : Functor C D G : Functor D C weak-inverse : WeakInverse F G open WeakInverse weak-inverse public refl : StrongEquivalence C C refl = record { F = idF ; G = idF ; weak-inverse = record { F∘G≈id = unitorˡ ; G∘F≈id = unitorˡ } } sym : StrongEquivalence C D → StrongEquivalence D C sym e = record { F = G ; G = F ; weak-inverse = record { F∘G≈id = G∘F≈id ; G∘F≈id = F∘G≈id } } where open StrongEquivalence e trans : StrongEquivalence C D → StrongEquivalence D E → StrongEquivalence C E trans {C = C} {D = D} {E = E} e e′ = record { F = e′.F ∘F e.F ; G = e.G ∘F e′.G ; weak-inverse = record { F∘G≈id = let module S = Setoid (NI.Functor-NI-setoid E E) in S.trans (S.trans (associator (e.G ∘F e′.G) e.F e′.F) (e′.F ⓘˡ (unitorˡ ⓘᵥ (e.F∘G≈id ⓘʳ e′.G) ⓘᵥ NI.sym (associator e′.G e.G e.F)))) e′.F∘G≈id ; G∘F≈id = let module S = Setoid (NI.Functor-NI-setoid C C) in S.trans (S.trans (associator (e′.F ∘F e.F) e′.G e.G) (e.G ⓘˡ (unitorˡ ⓘᵥ (e′.G∘F≈id ⓘʳ e.F) ⓘᵥ NI.sym (associator e.F e′.F e′.G)))) e.G∘F≈id } } where module e = StrongEquivalence e module e′ = StrongEquivalence e′ isEquivalence : ∀ {o ℓ e} → IsEquivalence (StrongEquivalence {o} {ℓ} {e}) isEquivalence = record { refl = refl ; sym = sym ; trans = trans } setoid : ∀ o ℓ e → Setoid _ _ setoid o ℓ e = record { Carrier = Category o ℓ e ; _≈_ = StrongEquivalence ; isEquivalence = isEquivalence }
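Stripped of the Agda encoding, WeakInverse and StrongEquivalence package the textbook notion: a strong equivalence between categories C and D is a pair of functors together with natural isomorphisms witnessing that both composites are isomorphic to the identity, and WeakInverse further upgrades this data to an adjoint equivalence F ⊣ G. As a restatement in conventional notation (not part of the library):

\[
  F \colon \mathcal{C} \longrightarrow \mathcal{D}, \qquad
  G \colon \mathcal{D} \longrightarrow \mathcal{C}, \qquad
  F \circ G \,\cong\, \mathrm{Id}_{\mathcal{D}}, \qquad
  G \circ F \,\cong\, \mathrm{Id}_{\mathcal{C}}.
\]

The refl, sym, and trans constructions at the end then show that StrongEquivalence is reflexive, symmetric, and transitive, which isEquivalence and setoid record.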
[STATEMENT] lemma kc_8x6_si: "step_in kc8x6 (2,5) (4,6)" (is "step_in ?ps _ _") [PROOF STATE] proof (prove) goal (1 subgoal): 1. step_in kc8x6 (2, 5) (4, 6) [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. step_in kc8x6 (2, 5) (4, 6) [PROOF STEP] have "0 < (34::nat)" "34 < length ?ps" "last (take 34 ?ps) = (2,5)" "hd (drop 34 ?ps) = (4,6)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (0 < 34 &&& 34 < length kc8x6) &&& last (take 34 kc8x6) = (2, 5) &&& hd (drop 34 kc8x6) = (4, 6) [PROOF STEP] by eval+ [PROOF STATE] proof (state) this: 0 < 34 34 < length kc8x6 last (take 34 kc8x6) = (2, 5) hd (drop 34 kc8x6) = (4, 6) goal (1 subgoal): 1. step_in kc8x6 (2, 5) (4, 6) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: 0 < 34 34 < length kc8x6 last (take 34 kc8x6) = (2, 5) hd (drop 34 kc8x6) = (4, 6) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: 0 < 34 34 < length kc8x6 last (take 34 kc8x6) = (2, 5) hd (drop 34 kc8x6) = (4, 6) goal (1 subgoal): 1. step_in kc8x6 (2, 5) (4, 6) [PROOF STEP] unfolding step_in_def [PROOF STATE] proof (prove) using this: 0 < 34 34 < length kc8x6 last (take 34 kc8x6) = (2, 5) hd (drop 34 kc8x6) = (4, 6) goal (1 subgoal): 1. \<exists>k>0. k < length kc8x6 \<and> last (take k kc8x6) = (2, 5) \<and> hd (drop k kc8x6) = (4, 6) [PROOF STEP] by blast [PROOF STATE] proof (state) this: step_in kc8x6 (2, 5) (4, 6) goal: No subgoals! [PROOF STEP] qed
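Stated informally (a sketch; the unfolded goal is visible in the proof state above), the lemma simply exhibits a witness for the existential that defines step_in: $\mathrm{step\_in}\ ps\ p\ q \longleftrightarrow \exists k.\ 0 < k \wedge k < |ps| \wedge \mathrm{last}(\mathrm{take}\ k\ ps) = p \wedge \mathrm{hd}(\mathrm{drop}\ k\ ps) = q$. Here $k = 34$ is the witness: the four side conditions are checked by eval+, and blast then closes the existential.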
/* * Portions Copyright (c) 2010-Present Couchbase * Portions Copyright (c) 2008 Sun Microsystems * * Use of this software is governed by the Apache License, Version 2.0 and * BSD 3 Clause included in the files licenses/APL2.txt and * licenses/BSD-3-Clause-Sun-Microsystems.txt */ #pragma once #include "dockey.h" #include <gsl/gsl-lite.hpp> #include <memcached/vbucket.h> #include <platform/socket.h> #ifndef WIN32 #include <arpa/inet.h> #endif #include <cstdint> #include <stdexcept> #include <string> namespace cb::durability { enum class Level : uint8_t; } // namespace cb::durability /** * \addtogroup Protocol * @{ */ /** * This file contains definitions of the constants and packet formats * defined in the binary specification. Please note that you _MUST_ remember * to convert each multibyte field to / from network byte order to / from * host order. */ #include <mcbp/protocol/datatype.h> #include <mcbp/protocol/dcp_stream_end_status.h> #include <mcbp/protocol/feature.h> #include <mcbp/protocol/magic.h> #include <mcbp/protocol/opcode.h> #include <mcbp/protocol/request.h> #include <mcbp/protocol/response.h> #include <mcbp/protocol/status.h> // For backward compatibility with old sources /** * Definition of the header structure for a request packet. * See section 2 */ union protocol_binary_request_header { cb::mcbp::Request request; uint8_t bytes[24]; }; /** * Definition of the header structure for a response packet. * See section 2 */ union protocol_binary_response_header { cb::mcbp::Response response; uint8_t bytes[24]; }; /** * Definition of a request-packet containing no extras */ typedef union { struct { protocol_binary_request_header header; } message; uint8_t bytes[sizeof(protocol_binary_request_header)]; } protocol_binary_request_no_extras; /** * Definition of a response-packet containing no extras */ typedef union { struct { protocol_binary_response_header header; } message; uint8_t bytes[sizeof(protocol_binary_response_header)]; } protocol_binary_response_no_extras; /** * Definition of the packet used by set, add and replace * See section 4 */ namespace cb::mcbp::request { #pragma pack(1) class MutationPayload { public: /// The memcached core keep the flags stored in network byte order /// internally as it does not use them for anything else than sending /// them back to the client uint32_t getFlagsInNetworkByteOrder() const { return flags; } uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { MutationPayload::flags = htonl(flags); } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { MutationPayload::expiration = htonl(expiration); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t flags = 0; uint32_t expiration = 0; }; static_assert(sizeof(MutationPayload) == 8, "Unexpected struct size"); class ArithmeticPayload { public: uint64_t getDelta() const { return ntohll(delta); } void setDelta(uint64_t delta) { ArithmeticPayload::delta = htonll(delta); } uint64_t getInitial() const { return ntohll(initial); } void setInitial(uint64_t initial) { ArithmeticPayload::initial = htonll(initial); } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { ArithmeticPayload::expiration = htonl(expiration); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } private: uint64_t delta = 0; uint64_t initial = 0; uint32_t expiration = 0; }; 
static_assert(sizeof(ArithmeticPayload) == 20, "Unexpected struct size"); class DeprecatedSetClusterConfigPayload { public: int32_t getRevision() const { return ntohl(revision); } void setRevision(int32_t rev) { revision = htonl(rev); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: int32_t revision{}; }; static_assert(sizeof(DeprecatedSetClusterConfigPayload) == 4, "Unexpected struct size"); class SetClusterConfigPayload { public: int64_t getEpoch() const { return ntohll(epoch); } void setEpoch(int64_t ep) { epoch = htonll(ep); } int64_t getRevision() const { return ntohll(revision); } void setRevision(int64_t rev) { revision = htonll(rev); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: int64_t epoch{0}; int64_t revision{0}; }; static_assert(sizeof(SetClusterConfigPayload) == 16, "Unexpected struct size"); class VerbosityPayload { public: uint32_t getLevel() const { return ntohl(level); } void setLevel(uint32_t level) { VerbosityPayload::level = htonl(level); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t level = 0; }; static_assert(sizeof(VerbosityPayload) == 4, "Unexpected size"); class TouchPayload { public: uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { TouchPayload::expiration = htonl(expiration); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t expiration = 0; }; static_assert(sizeof(TouchPayload) == 4, "Unexpected size"); using GatPayload = TouchPayload; using GetLockedPayload = TouchPayload; class SetCtrlTokenPayload { public: uint64_t getCas() const { return ntohll(cas); } void setCas(uint64_t cas) { SetCtrlTokenPayload::cas = htonll(cas); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t cas = 0; }; static_assert(sizeof(SetCtrlTokenPayload) == 8, "Unexpected size"); #pragma pack() } // namespace cb::mcbp::request /** * Definitions for extended (flexible) metadata * * @1: Flex Code to identify the number of extended metadata fields * @2: Size of the Flex Code, set to 1 byte * @3: Current size of extended metadata */ typedef enum { FLEX_META_CODE = 0x01, FLEX_DATA_OFFSET = 1, EXT_META_LEN = 1 } protocol_binary_flexmeta; /** * Definitions of sub-document path flags (this is a bitmap) */ typedef enum : uint8_t { /** No flags set */ SUBDOC_FLAG_NONE = 0x0, /** (Mutation) Should non-existent intermediate paths be created? */ SUBDOC_FLAG_MKDIR_P = 0x01, /** * 0x02 is unused */ /** * If set, the path refers to an Extended Attribute (XATTR). * If clear, the path refers to a path inside the document body. */ SUBDOC_FLAG_XATTR_PATH = 0x04, /** * 0x08 is unused */ /** * Expand macro values inside extended attributes. The request is * invalid if this flag is set without SUBDOC_FLAG_XATTR_PATH being * set. */ SUBDOC_FLAG_EXPAND_MACROS = 0x10, } protocol_binary_subdoc_flag; namespace mcbp::subdoc { /** * Definitions of sub-document doc flags (this is a bitmap). */ enum class doc_flag : uint8_t { None = 0x0, /** * (Mutation) Create the document if it does not exist. Implies * SUBDOC_FLAG_MKDIR_P and Set (upsert) mutation semantics. Not valid * with Add. */ Mkdoc = 0x1, /** * (Mutation) Add the document only if it does not exist. 
Implies * SUBDOC_FLAG_MKDIR_P. Not valid with Mkdoc. */ Add = 0x02, /** * Allow access to XATTRs for deleted documents (instead of * returning KEY_ENOENT). The result of mutations on a deleted * document is still a deleted document unless ReviveDocument is * being used. */ AccessDeleted = 0x04, /** * (Mutation) Used with Mkdoc / Add; if the document does not exist then * create it in the Deleted state, instead of the normal Alive state. * Not valid unless Mkdoc or Add specified. */ CreateAsDeleted = 0x08, /** * (Mutation) If the document exists and isn't deleted the operation * will fail with SubdocCanOnlyReviveDeletedDocuments. If the input * document _is_ deleted the result of the operation will store the * document as a "live" document instead of a deleted document. */ ReviveDocument = 0x10, }; /** * Used for validation at parsing the doc-flags. * The value depends on how many bits the doc_flag enum is actually using and * must change accordingly. */ static constexpr uint8_t extrasDocFlagMask = 0xe0; } // namespace mcbp::subdoc /** * Definition of the packet used by SUBDOCUMENT single-path commands. * * The path, which is always required, is in the Body, after the Key. * * Header: 24 @0: <protocol_binary_request_header> * Extras: * Sub-document pathlen 2 @24: <variable> * Sub-document flags 1 @26: <protocol_binary_subdoc_flag> * Expiry 4 @27: (Optional) Mutations only. The * ttl * Sub-document doc flags 1 @27: (Optional) @31 if expiry is * set. Note these are the * subdocument doc flags not the * flag section in the document. * Body: * Key keylen @27: <variable> * Path pathlen @27+keylen: <variable> * Value to insert/replace * vallen-keylen-pathlen @27+keylen+pathlen: [variable] */ typedef union { struct { protocol_binary_request_header header; struct { uint16_t pathlen; // Length in bytes of the sub-doc path. uint8_t subdoc_flags; // See protocol_binary_subdoc_flag /* uint32_t expiry (optional for mutations only - present if extlen == 7 or extlen == 8) */ /* uint8_t doc_flags (optional - present if extlen == 4 or extlen == 8) Note these are the subdocument doc flags not the flag \section in the document. */ } extras; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 3]; } protocol_binary_request_subdocument; /** Definition of the packet used by SUBDOCUMENT responses. */ typedef union { struct { protocol_binary_response_header header; } message; uint8_t bytes[sizeof(protocol_binary_response_header)]; } protocol_binary_response_subdocument; /** * Definition of the request packets used by SUBDOCUMENT multi-path commands. * * Multi-path sub-document commands differ from single-path in that they * encode a series of multiple paths to operate on (from a single key). * There are two multi-path commands - MULTI_LOOKUP and MULTI_MUTATION. * - MULTI_LOOKUP consists of variable number of subdoc lookup commands * (SUBDOC_GET or SUBDOC_EXISTS). * - MULTI_MUTATION consists of a variable number of subdoc mutation * commands (i.e. all subdoc commands apart from * SUBDOC_{GET,EXISTS}). * * Each path to be operated on is specified by an Operation Spec, which are * contained in the body. This defines the opcode, path, and value * (for mutations). * * A maximum of MULTI_MAX_PATHS paths (operations) can be encoded in a * single multi-path command. * * SUBDOC_MULTI_LOOKUP: * Header: 24 @0: <protocol_binary_request_header> * Extras: 0 or 1 @24: (optional) doc_flags. Note these are * the subdocument doc flags not the flag * section in the document. 
* Body: <variable> @24: * Key keylen @24: <variable> * 1..MULTI_MAX_PATHS [Lookup Operation Spec] * * Lookup Operation Spec: * 1 @0 : Opcode * 1 @1 : Flags * 2 @2 : Path Length * pathlen @4 : Path */ static const int PROTOCOL_BINARY_SUBDOC_MULTI_MAX_PATHS = 16; typedef struct { cb::mcbp::ClientOpcode opcode; uint8_t flags; uint16_t pathlen; /* uint8_t path[pathlen] */ } protocol_binary_subdoc_multi_lookup_spec; typedef protocol_binary_request_no_extras protocol_binary_request_subdocument_multi_lookup; /* * * SUBDOC_MULTI_MUTATION * Header: 24 @0: <protocol_binary_request_header> * Extras: 0 OR 4 @24: (optional) expiration * 0 OR 1 @24: (optional) doc_flags. Note these are * the subdocument doc flags not the * flag section in the document. * Body: variable @24 + extlen: * Key keylen @24: <variable> * 1..MULTI_MAX_PATHS [Mutation Operation Spec] * * Mutation Operation Spec: * 1 @0 : Opcode * 1 @1 : Flags * 2 @2 : Path Length * 4 @4 : Value Length * pathlen @8 : Path * vallen @8+pathlen : Value */ typedef struct { cb::mcbp::ClientOpcode opcode; uint8_t flags; uint16_t pathlen; uint32_t valuelen; /* uint8_t path[pathlen] */ /* uint8_t value[valuelen] */ } protocol_binary_subdoc_multi_mutation_spec; typedef protocol_binary_request_no_extras protocol_binary_request_subdocument_multi_mutation; /** * Definition of the response packets used by SUBDOCUMENT multi-path * commands. * * SUBDOC_MULTI_LOOKUP - Body consists of a series of lookup_result structs, * one per lookup_spec in the request. * * Lookup Result: * 2 @0 : status * 4 @2 : resultlen * resultlen @6 : result */ typedef struct { protocol_binary_request_header header; /* Variable-length 1..PROTOCOL_BINARY_SUBDOC_MULTI_MAX_PATHS */ protocol_binary_subdoc_multi_lookup_spec body[1]; } protocol_binary_response_subdoc_multi_lookup; /** * SUBDOC_MULTI_MUTATION response * * Extras is either 0 or 16 if MUTATION_SEQNO is enabled. * * Body consists of a variable number of subdoc_multi_mutation_result_spec * structs: * * On success (header.status == SUCCESS), zero or more result specs, one for * each multi_mutation_spec which wishes to return a value. * * Mutation Result (success): * [0..N] of: * 1 @0 : index - Index of multi_mutation spec this result * corresponds to. * 2 @1 : status - Status of the mutation (should always * be SUCCESS for successful multi-mutation * requests). * 4 @3 : resultlen - Result value length * resultlen @7 : Value payload * * On one of more of the mutation specs failing, there is exactly one * result spec, specifying the index and status code of the first failing * mutation spec. * * Mutation Result (failure): * 1 of: * 1 @0 : index - Index of multi_mutation spec this result * corresponds to. * 2 @1 : status - Status of the mutation (should always be * !SUCCESS for failures). * * (Note: On failure the multi_mutation_result_spec only includes the * first two fields). 
*/ typedef union { struct { protocol_binary_response_header header; } message; uint8_t bytes[sizeof(protocol_binary_response_header)]; } protocol_binary_response_subdoc_multi_mutation; /* DCP related stuff */ namespace cb::mcbp { namespace request { #pragma pack(1) class DcpOpenPayload { public: uint32_t getSeqno() const { return ntohl(seqno); } void setSeqno(uint32_t seqno) { DcpOpenPayload::seqno = htonl(seqno); } uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { DcpOpenPayload::flags = htonl(flags); } // Flags is a bitmask where the following values are used: /** * If set a Producer connection should be opened, if clear a Consumer * connection should be opened. */ static const uint32_t Producer = 1; /// Invalid - should not be set (Previously the Notifier flag) static const uint32_t Invalid = 2; /** * Indicate that the server include the documents' XATTRs * within mutation and deletion bodies. */ static const uint32_t IncludeXattrs = 4; /** * Indicate that the server should strip off the values (note, * if you add INCLUDE_XATTR those will be present) */ static const uint32_t NoValue = 8; static const uint32_t Unused = 16; /** * Request that DCP delete message include the time the a delete was * persisted. This only applies to deletes being backfilled from storage, * in-memory deletes will have a delete time of 0 */ static const uint32_t IncludeDeleteTimes = 32; /** * Indicates that the server should strip off the values, but return the * datatype of the underlying document (note, if you add * INCLUDE_XATTR those will be present). * Note this differs from DCP_OPEN_NO_VALUE in that the datatype field will * contain the underlying datatype of the document; not the datatype of the * transmitted payload. * This flag can be used to obtain the full, original datatype for a * document without the user's value. Not valid to specify with * DCP_OPEN_NO_VALUE. */ static const uint32_t NoValueWithUnderlyingDatatype = 64; /// Requst PiTR for the connection (only legal for Producers) static const uint32_t PiTR = 128; /** * Indicates that the server includes the document UserXattrs within * deletion values. */ static const uint32_t IncludeDeletedUserXattrs = 256; cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t seqno = 0; uint32_t flags = 0; }; static_assert(sizeof(DcpOpenPayload) == 8, "Unexpected struct size"); } // namespace request namespace response { class DcpAddStreamPayload { public: uint32_t getOpaque() const { return ntohl(opaque); } void setOpaque(uint32_t opaque) { DcpAddStreamPayload::opaque = htonl(opaque); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t opaque = 0; }; static_assert(sizeof(DcpAddStreamPayload) == 4, "Unexpected struct size"); } // namespace response namespace request { class DcpAddStreamPayload { public: uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { DcpAddStreamPayload::flags = htonl(flags); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: /* * The following flags are defined */ #define DCP_ADD_STREAM_FLAG_TAKEOVER 1 #define DCP_ADD_STREAM_FLAG_DISKONLY 2 #define DCP_ADD_STREAM_FLAG_LATEST 4 /** * This flag is not used anymore, and should NOT be * set. It is replaced by DCP_OPEN_NO_VALUE. 
*/ #define DCP_ADD_STREAM_FLAG_NO_VALUE 8 /** * Indicate the server to add stream only if the vbucket * is active. * If the vbucket is not active, the stream request fails with * error cb::engine_errc::not_my_vbucket */ #define DCP_ADD_STREAM_ACTIVE_VB_ONLY 16 /** * Indicate the server to check for vb_uuid match even at start_seqno 0 before * adding the stream successfully. * If the flag is set and there is a vb_uuid mismatch at start_seqno 0, then * the server returns cb::engine_errc::rollback error. */ #define DCP_ADD_STREAM_STRICT_VBUUID 32 uint32_t flags = 0; }; static_assert(sizeof(DcpAddStreamPayload) == 4, "Unexpected struct size"); class DcpStreamReqPayload { public: uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { DcpStreamReqPayload::flags = htonl(flags); } uint32_t getReserved() const { return ntohl(reserved); } void setReserved(uint32_t reserved) { DcpStreamReqPayload::reserved = htonl(reserved); } uint64_t getStartSeqno() const { return ntohll(start_seqno); } void setStartSeqno(uint64_t start_seqno) { DcpStreamReqPayload::start_seqno = htonll(start_seqno); } uint64_t getEndSeqno() const { return ntohll(end_seqno); } void setEndSeqno(uint64_t end_seqno) { DcpStreamReqPayload::end_seqno = htonll(end_seqno); } uint64_t getVbucketUuid() const { return ntohll(vbucket_uuid); } void setVbucketUuid(uint64_t vbucket_uuid) { DcpStreamReqPayload::vbucket_uuid = htonll(vbucket_uuid); } uint64_t getSnapStartSeqno() const { return ntohll(snap_start_seqno); } void setSnapStartSeqno(uint64_t snap_start_seqno) { DcpStreamReqPayload::snap_start_seqno = htonll(snap_start_seqno); } uint64_t getSnapEndSeqno() const { return ntohll(snap_end_seqno); } void setSnapEndSeqno(uint64_t snap_end_seqno) { DcpStreamReqPayload::snap_end_seqno = htonll(snap_end_seqno); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t flags = 0; uint32_t reserved = 0; uint64_t start_seqno = 0; uint64_t end_seqno = 0; uint64_t vbucket_uuid = 0; uint64_t snap_start_seqno = 0; uint64_t snap_end_seqno = 0; }; static_assert(sizeof(DcpStreamReqPayload) == 48, "Unexpected struct size"); class DcpStreamEndPayload { public: DcpStreamEndStatus getStatus() const { return DcpStreamEndStatus(ntohl(status)); } void setStatus(DcpStreamEndStatus status) { DcpStreamEndPayload::status = htonl(uint32_t(status)); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: /** * Note the following is maintained in network/big endian * see protocol/dcp_stream_end_status.h for values */ uint32_t status = 0; }; static_assert(sizeof(DcpStreamEndPayload) == 4, "Unexpected struct size"); class DcpSnapshotMarkerV1Payload { public: uint64_t getStartSeqno() const { return ntohll(start_seqno); } void setStartSeqno(uint64_t start_seqno) { DcpSnapshotMarkerV1Payload::start_seqno = htonll(start_seqno); } uint64_t getEndSeqno() const { return ntohll(end_seqno); } void setEndSeqno(uint64_t end_seqno) { DcpSnapshotMarkerV1Payload::end_seqno = htonll(end_seqno); } uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { DcpSnapshotMarkerV1Payload::flags = htonl(flags); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t start_seqno = 0; uint64_t end_seqno = 0; uint32_t flags = 0; }; static_assert(sizeof(DcpSnapshotMarkerV1Payload) == 20, "Unexpected struct size"); enum class 
DcpSnapshotMarkerFlag : uint32_t { Memory = 0x01, Disk = 0x02, Checkpoint = 0x04, Acknowledge = 0x08 }; enum class DcpSnapshotMarkerV2xVersion : uint8_t { Zero = 0, One = 1 }; // Version 2.x class DcpSnapshotMarkerV2xPayload { public: explicit DcpSnapshotMarkerV2xPayload(DcpSnapshotMarkerV2xVersion v) : version(v) { } DcpSnapshotMarkerV2xVersion getVersion() const { return version; } void setVersion(DcpSnapshotMarkerV2xVersion v) { version = v; } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: DcpSnapshotMarkerV2xVersion version{DcpSnapshotMarkerV2xVersion::Zero}; }; static_assert(sizeof(DcpSnapshotMarkerV2xPayload) == 1, "Unexpected struct size"); class DcpSnapshotMarkerV2_0Value : public DcpSnapshotMarkerV1Payload { public: uint64_t getMaxVisibleSeqno() const { return ntohll(maxVisibleSeqno); } void setMaxVisibleSeqno(uint64_t maxVisibleSeqno) { DcpSnapshotMarkerV2_0Value::maxVisibleSeqno = htonll(maxVisibleSeqno); } uint64_t getHighCompletedSeqno() const { return ntohll(highCompletedSeqno); } void setHighCompletedSeqno(uint64_t highCompletedSeqno) { DcpSnapshotMarkerV2_0Value::highCompletedSeqno = htonll(highCompletedSeqno); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t maxVisibleSeqno{0}; uint64_t highCompletedSeqno{0}; }; static_assert(sizeof(DcpSnapshotMarkerV2_0Value) == 36, "Unexpected struct size"); class DcpSnapshotMarkerV2_1Value : public DcpSnapshotMarkerV2_0Value { public: uint64_t getTimestamp() const { return ntohll(timestamp); } void setTimestamp(uint64_t value) { timestamp = htonll(value); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t timestamp{0}; }; static_assert(sizeof(DcpSnapshotMarkerV2_1Value) == 44, "Unexpected struct size"); class DcpMutationPayload { public: DcpMutationPayload() = default; DcpMutationPayload(uint64_t by_seqno, uint64_t rev_seqno, uint32_t flags, uint32_t expiration, uint32_t lock_time, uint8_t nru) : by_seqno(htonll(by_seqno)), rev_seqno(htonll(rev_seqno)), flags(flags), expiration(htonl(expiration)), lock_time(htonl(lock_time)), nru(nru) { } uint64_t getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpMutationPayload::by_seqno = htonll(by_seqno); } uint64_t getRevSeqno() const { return ntohll(rev_seqno); } void setRevSeqno(uint64_t rev_seqno) { DcpMutationPayload::rev_seqno = htonll(rev_seqno); } uint32_t getFlags() const { return flags; } void setFlags(uint32_t flags) { DcpMutationPayload::flags = flags; } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { DcpMutationPayload::expiration = htonl(expiration); } uint32_t getLockTime() const { return ntohl(lock_time); } void setLockTime(uint32_t lock_time) { DcpMutationPayload::lock_time = htonl(lock_time); } uint16_t getNmeta() const { return ntohs(nmeta); } uint8_t getNru() const { return nru; } void setNru(uint8_t nru) { DcpMutationPayload::nru = nru; } std::string_view getBuffer() const { return {reinterpret_cast<const char*>(this), sizeof(*this)}; } protected: uint64_t by_seqno = 0; uint64_t rev_seqno = 0; uint32_t flags = 0; uint32_t expiration = 0; uint32_t lock_time = 0; /// We don't set this anymore, but old servers may send it to us /// but we'll ignore it const uint16_t nmeta = 0; uint8_t nru = 0; }; static_assert(sizeof(DcpMutationPayload) == 31, "Unexpected struct 
size"); class DcpDeletionV1Payload { public: DcpDeletionV1Payload(uint64_t _by_seqno, uint64_t _rev_seqno) : by_seqno(htonll(_by_seqno)), rev_seqno(htonll(_rev_seqno)) { } uint64_t getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpDeletionV1Payload::by_seqno = htonll(by_seqno); } uint64_t getRevSeqno() const { return ntohll(rev_seqno); } void setRevSeqno(uint64_t rev_seqno) { DcpDeletionV1Payload::rev_seqno = htonll(rev_seqno); } uint16_t getNmeta() const { return ntohs(nmeta); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t by_seqno = 0; uint64_t rev_seqno = 0; const uint16_t nmeta = 0; }; static_assert(sizeof(DcpDeletionV1Payload) == 18, "Unexpected struct size"); class DcpDeleteRequestV1 { public: DcpDeleteRequestV1(uint32_t opaque, Vbid vbucket, uint64_t cas, uint16_t keyLen, uint32_t valueLen, protocol_binary_datatype_t datatype, uint64_t bySeqno, uint64_t revSeqno) : req{}, body(bySeqno, revSeqno) { req.setMagic(cb::mcbp::Magic::ClientRequest); req.setOpcode(cb::mcbp::ClientOpcode::DcpDeletion); req.setExtlen(gsl::narrow<uint8_t>(sizeof(body))); req.setKeylen(keyLen); req.setBodylen(gsl::narrow<uint32_t>(sizeof(body) + keyLen + valueLen)); req.setOpaque(opaque); req.setVBucket(vbucket); req.setCas(cas); req.setDatatype(cb::mcbp::Datatype(datatype)); } protected: cb::mcbp::Request req; DcpDeletionV1Payload body; }; static_assert(sizeof(DcpDeleteRequestV1) == 42, "Unexpected struct size"); class DcpDeletionV2Payload { public: DcpDeletionV2Payload(uint64_t by_seqno, uint64_t rev_seqno, uint32_t delete_time) : by_seqno(htonll(by_seqno)), rev_seqno(htonll(rev_seqno)), delete_time(htonl(delete_time)) { } uint64_t getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpDeletionV2Payload::by_seqno = htonll(by_seqno); } uint64_t getRevSeqno() const { return ntohll(rev_seqno); } void setRevSeqno(uint64_t rev_seqno) { DcpDeletionV2Payload::rev_seqno = htonll(rev_seqno); } uint32_t getDeleteTime() const { return ntohl(delete_time); } void setDeleteTime(uint32_t delete_time) { DcpDeletionV2Payload::delete_time = htonl(delete_time); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t by_seqno = 0; uint64_t rev_seqno = 0; uint32_t delete_time = 0; const uint8_t unused = 0; }; static_assert(sizeof(DcpDeletionV2Payload) == 21, "Unexpected struct size"); class DcpDeleteRequestV2 { public: DcpDeleteRequestV2(uint32_t opaque, Vbid vbucket, uint64_t cas, uint16_t keyLen, uint32_t valueLen, protocol_binary_datatype_t datatype, uint64_t bySeqno, uint64_t revSeqno, uint32_t deleteTime) : req{}, body(bySeqno, revSeqno, deleteTime) { req.setMagic(cb::mcbp::Magic::ClientRequest); req.setOpcode(cb::mcbp::ClientOpcode::DcpDeletion); req.setExtlen(gsl::narrow<uint8_t>(sizeof(body))); req.setKeylen(keyLen); req.setBodylen(gsl::narrow<uint32_t>(sizeof(body) + keyLen + valueLen)); req.setOpaque(opaque); req.setVBucket(vbucket); req.setCas(cas); req.setDatatype(cb::mcbp::Datatype(datatype)); } protected: cb::mcbp::Request req; DcpDeletionV2Payload body; }; static_assert(sizeof(DcpDeleteRequestV2) == 45, "Unexpected struct size"); class DcpExpirationPayload { public: DcpExpirationPayload() = default; DcpExpirationPayload(uint64_t by_seqno, uint64_t rev_seqno, uint32_t delete_time) : by_seqno(htonll(by_seqno)), rev_seqno(htonll(rev_seqno)), delete_time(htonl(delete_time)) { } uint64_t 
getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpExpirationPayload::by_seqno = htonll(by_seqno); } uint64_t getRevSeqno() const { return ntohll(rev_seqno); } void setRevSeqno(uint64_t rev_seqno) { DcpExpirationPayload::rev_seqno = htonll(rev_seqno); } uint32_t getDeleteTime() const { return ntohl(delete_time); } void setDeleteTime(uint32_t delete_time) { DcpExpirationPayload::delete_time = htonl(delete_time); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t by_seqno = 0; uint64_t rev_seqno = 0; uint32_t delete_time = 0; }; static_assert(sizeof(DcpExpirationPayload) == 20, "Unexpected struct size"); class DcpSetVBucketState { public: uint8_t getState() const { return state; } void setState(uint8_t state) { DcpSetVBucketState::state = state; } bool isValid() const { return is_valid_vbucket_state_t(state); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint8_t state; }; static_assert(sizeof(DcpSetVBucketState) == 1, "Unexpected struct size"); class DcpBufferAckPayload { public: uint32_t getBufferBytes() const { return ntohl(buffer_bytes); } void setBufferBytes(uint32_t buffer_bytes) { DcpBufferAckPayload::buffer_bytes = htonl(buffer_bytes); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t buffer_bytes = 0; }; static_assert(sizeof(DcpBufferAckPayload) == 4, "Unexpected struct size"); enum class DcpOsoSnapshotFlags : uint32_t { Start = 0x01, End = 0x02, }; class DcpOsoSnapshotPayload { public: explicit DcpOsoSnapshotPayload(uint32_t flags) : flags(htonl(flags)) { } uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { DcpOsoSnapshotPayload::flags = htonl(flags); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t flags = 0; }; static_assert(sizeof(DcpOsoSnapshotPayload) == 4, "Unexpected struct size"); class DcpSeqnoAdvancedPayload { public: explicit DcpSeqnoAdvancedPayload(uint64_t seqno) : by_seqno(htonll(seqno)) { } [[nodiscard]] uint64_t getSeqno() const { return ntohll(by_seqno); } void setSeqno(uint64_t seqno) { DcpSeqnoAdvancedPayload::by_seqno = htonll(seqno); } [[nodiscard]] cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t by_seqno = 0; }; static_assert(sizeof(DcpSeqnoAdvancedPayload) == 8, "Unexpected struct size"); #pragma pack() } // namespace request } // namespace cb::mcbp /** * Events that the system may send */ namespace mcbp::systemevent { enum class id : uint32_t { CreateCollection = 0, DeleteCollection = 1, FlushCollection = 2, CreateScope = 3, DropScope = 4 }; enum class version : uint8_t { version0 = 0, version1 = 1 }; } // namespace mcbp::systemevent namespace cb::mcbp::request { #pragma pack(1) class DcpSystemEventPayload { public: DcpSystemEventPayload() = default; DcpSystemEventPayload(uint64_t by_seqno, ::mcbp::systemevent::id event, ::mcbp::systemevent::version version) : by_seqno(htonll(by_seqno)), event(htonl(static_cast<uint32_t>(event))), version(static_cast<uint8_t>(version)) { } uint64_t getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpSystemEventPayload::by_seqno = htonll(by_seqno); } uint32_t getEvent() const { return ntohl(event); } void setEvent(uint32_t event) { 
DcpSystemEventPayload::event = htonl(event); } uint8_t getVersion() const { return version; } void setVersion(uint8_t version) { DcpSystemEventPayload::version = version; } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } /** * Validate that the uint32_t event field represents a valid systemevent::id */ bool isValidEvent() const { using ::mcbp::systemevent::id; switch (id(getEvent())) { case id::CreateCollection: case id::DeleteCollection: case id::FlushCollection: case id::CreateScope: case id::DropScope: return true; } return false; } /** * Validate that the uint8_t version represents a valid systemevent::version */ bool isValidVersion() const { using ::mcbp::systemevent::version; switch (version(getVersion())) { case version::version0: case version::version1: return true; } return false; } protected: uint64_t by_seqno = 0; uint32_t event = 0; uint8_t version = 0; }; static_assert(sizeof(DcpSystemEventPayload) == 13, "Unexpected struct size"); class DcpPreparePayload { public: DcpPreparePayload() = default; DcpPreparePayload(uint64_t by_seqno, uint64_t rev_seqno, uint32_t flags, uint32_t expiration, uint32_t lock_time, uint8_t nru) : by_seqno(htonll(by_seqno)), rev_seqno(htonll(rev_seqno)), flags(flags), expiration(htonl(expiration)), lock_time(htonl(lock_time)), nru(nru) { } uint64_t getBySeqno() const { return ntohll(by_seqno); } void setBySeqno(uint64_t by_seqno) { DcpPreparePayload::by_seqno = htonll(by_seqno); } uint64_t getRevSeqno() const { return ntohll(rev_seqno); } void setRevSeqno(uint64_t rev_seqno) { DcpPreparePayload::rev_seqno = htonll(rev_seqno); } uint32_t getFlags() const { return flags; } void setFlags(uint32_t flags) { DcpPreparePayload::flags = flags; } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { DcpPreparePayload::expiration = htonl(expiration); } uint32_t getLockTime() const { return ntohl(lock_time); } void setLockTime(uint32_t lock_time) { DcpPreparePayload::lock_time = htonl(lock_time); } uint8_t getNru() const { return nru; } void setNru(uint8_t nru) { DcpPreparePayload::nru = nru; } uint8_t getDeleted() const { return deleted; } void setDeleted(uint8_t deleted) { DcpPreparePayload::deleted = deleted; } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } cb::durability::Level getDurabilityLevel() const; void setDurabilityLevel(cb::durability::Level level); protected: uint64_t by_seqno = 0; uint64_t rev_seqno = 0; uint32_t flags = 0; uint32_t expiration = 0; uint32_t lock_time = 0; uint8_t nru = 0; // set to true if this is a document deletion uint8_t deleted = 0; uint8_t durability_level = 0; }; static_assert(sizeof(DcpPreparePayload) == 31, "Unexpected struct size"); class DcpSeqnoAcknowledgedPayload { public: explicit DcpSeqnoAcknowledgedPayload(uint64_t prepared) : prepared_seqno(htonll(prepared)) { } uint64_t getPreparedSeqno() const { return ntohll(prepared_seqno); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: // Stored in network order. 
uint64_t prepared_seqno = 0; }; static_assert(sizeof(DcpSeqnoAcknowledgedPayload) == 8, "Unexpected struct size"); class DcpCommitPayload { public: DcpCommitPayload(uint64_t prepared, uint64_t committed) : prepared_seqno(htonll(prepared)), commit_seqno(htonll(committed)) { } uint64_t getPreparedSeqno() const { return ntohll(prepared_seqno); } void setPreparedSeqno(uint64_t prepared_seqno) { DcpCommitPayload::prepared_seqno = htonll(prepared_seqno); } uint64_t getCommitSeqno() const { return ntohll(commit_seqno); } void setCommitSeqno(uint64_t commit_seqno) { DcpCommitPayload::commit_seqno = htonll(commit_seqno); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t prepared_seqno = 0; uint64_t commit_seqno = 0; }; static_assert(sizeof(DcpCommitPayload) == 16, "Unexpected struct size"); class DcpAbortPayload { public: DcpAbortPayload(uint64_t prepared, uint64_t aborted) : prepared_seqno(htonll(prepared)), abort_seqno(htonll(aborted)) { } uint64_t getPreparedSeqno() const { return ntohll(prepared_seqno); } void setPreparedSeqno(uint64_t seqno) { prepared_seqno = htonll(seqno); } uint64_t getAbortSeqno() const { return ntohll(abort_seqno); } void setAbortSeqno(uint64_t seqno) { abort_seqno = htonll(seqno); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t prepared_seqno = 0; uint64_t abort_seqno = 0; }; static_assert(sizeof(DcpAbortPayload) == 16, "Unexpected struct size"); class SetParamPayload { public: enum class Type : uint32_t { Flush = 1, Replication, Checkpoint, Dcp, Vbucket }; Type getParamType() const { return static_cast<Type>(ntohl(param_type)); } void setParamType(Type param_type) { SetParamPayload::param_type = htonl(static_cast<uint32_t>(param_type)); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } bool validate() const { switch (getParamType()) { case Type::Flush: case Type::Replication: case Type::Checkpoint: case Type::Dcp: case Type::Vbucket: return true; } return false; } protected: uint32_t param_type = 0; }; static_assert(sizeof(SetParamPayload) == 4, "Unexpected size"); #pragma pack() } // namespace cb::mcbp::request /** * This flag is used by the setWithMeta/addWithMeta/deleteWithMeta packets * to specify that the operation should be forced. The update will not * be subject to conflict resolution and the target vb can be active/pending or * replica. */ #define FORCE_WITH_META_OP 0x01 /** * This flag is used to indicate that the *_with_meta should be accepted * regardless of the bucket config. LWW buckets require this flag. */ #define FORCE_ACCEPT_WITH_META_OPS 0x02 /** * This flag asks that the server regenerates the CAS. The server requires * that SKIP_CONFLICT_RESOLUTION_FLAG is set along with this option. */ #define REGENERATE_CAS 0x04 /** * This flag is used by the setWithMeta/addWithMeta/deleteWithMeta packets * to specify that the conflict resolution mechanism should be skipped for * this operation. */ #define SKIP_CONFLICT_RESOLUTION_FLAG 0x08 /** * This flag is used by deleteWithMeta packets to specify if the delete sent * instead represents an expiration. */ #define IS_EXPIRATION 0x10 /** * This flag is used with the get meta response packet. If set it * specifies that the item recieved has been deleted, but that the * items meta data is still contained in ep-engine. Eg. the item * has been soft deleted. 
*/ #define GET_META_ITEM_DELETED_FLAG 0x01 namespace cb::mcbp::request { #pragma pack(1) class SetWithMetaPayload { public: uint32_t getFlags() const { return ntohl(flags); } uint32_t getFlagsInNetworkByteOrder() const { return flags; } void setFlags(uint32_t flags) { SetWithMetaPayload::flags = htonl(flags); } void setFlagsInNetworkByteOrder(uint32_t flags) { SetWithMetaPayload::flags = flags; } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { SetWithMetaPayload::expiration = htonl(expiration); } uint64_t getSeqno() const { return ntohll(seqno); } void setSeqno(uint64_t seqno) { SetWithMetaPayload::seqno = htonll(seqno); } uint64_t getCas() const { return ntohll(cas); } void setCas(uint64_t cas) { SetWithMetaPayload::cas = htonll(cas); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t flags = 0; uint32_t expiration = 0; uint64_t seqno = 0; uint64_t cas = 0; }; static_assert(sizeof(SetWithMetaPayload) == 24, "Unexpected struct size"); class DelWithMetaPayload { public: DelWithMetaPayload(uint32_t flags, uint32_t delete_time, uint64_t seqno, uint64_t cas) : flags(htonl(flags)), delete_time(htonl(delete_time)), seqno(htonll(seqno)), cas(htonll(cas)) { } uint32_t getFlags() const { return ntohl(flags); } uint32_t getFlagsInNetworkByteOrder() const { return flags; } void setFlags(uint32_t flags) { DelWithMetaPayload::flags = htonl(flags); } uint32_t getDeleteTime() const { return ntohl(delete_time); } void setDeleteTime(uint32_t delete_time) { DelWithMetaPayload::delete_time = htonl(delete_time); } uint64_t getSeqno() const { return ntohll(seqno); } void setSeqno(uint64_t seqno) { DelWithMetaPayload::seqno = htonll(seqno); } uint64_t getCas() const { return ntohll(cas); } void setCas(uint64_t cas) { DelWithMetaPayload::cas = htonll(cas); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t flags = 0; uint32_t delete_time = 0; uint64_t seqno = 0; uint64_t cas = 0; }; static_assert(sizeof(DelWithMetaPayload) == 24, "Unexpected struct size"); #pragma pack() } // namespace cb::mcbp::request /** * The physical layout for a CMD_GET_META command returns the meta-data * section for an item: */ typedef protocol_binary_request_no_extras protocol_binary_request_get_meta; /** * Structure holding getMeta command response fields */ #pragma pack(1) struct GetMetaResponse { uint32_t deleted; uint32_t flags; uint32_t expiry; uint64_t seqno; uint8_t datatype; GetMetaResponse() : deleted(0), flags(0), expiry(0), seqno(0), datatype(0) { } GetMetaResponse(uint32_t deleted, uint32_t flags, uint32_t expiry, uint64_t seqno, uint8_t datatype) : deleted(deleted), flags(flags), expiry(expiry), seqno(seqno), datatype(datatype) { } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } }; #pragma pack() static_assert(sizeof(GetMetaResponse) == 21, "Incorrect compiler padding"); /* Meta data versions for GET_META */ enum class GetMetaVersion : uint8_t { V1 = 1, // returns deleted, flags, expiry and seqno V2 = 2, // The 'spock' version returns V1 + the datatype }; /** * The physical layout for the CMD_RETURN_META */ namespace cb::mcbp::request { #pragma pack(1) enum class ReturnMetaType : uint32_t { Set = 1, Add = 2, Del = 3 }; class ReturnMetaPayload { public: ReturnMetaType getMutationType() const { return static_cast<ReturnMetaType>(ntohl(mutation_type)); } void 
setMutationType(ReturnMetaType mutation_type) { ReturnMetaPayload::mutation_type = htonl(static_cast<uint32_t>(mutation_type)); } uint32_t getFlags() const { return ntohl(flags); } void setFlags(uint32_t flags) { ReturnMetaPayload::flags = htonl(flags); } uint32_t getExpiration() const { return ntohl(expiration); } void setExpiration(uint32_t expiration) { ReturnMetaPayload::expiration = htonl(expiration); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t mutation_type = 0; uint32_t flags = 0; uint32_t expiration = 0; }; static_assert(sizeof(ReturnMetaPayload) == 12, "Unexpected struct size"); /** * Message format for CMD_COMPACT_DB * * The PROTOCOL_BINARY_CMD_COMPACT_DB is used by ns_server to * issue a compaction request to ep-engine to compact the * underlying store's database files. * * Request: * * Header: Contains the vbucket id. The vbucket id will be used * to identify the database file if the backend is * couchstore. If the vbucket id is set to 0xFFFF, then * the vbid field will be used for compaction. * Body: * - purge_before_ts: Deleted items whose expiry timestamp is less * than purge_before_ts will be purged. * - purge_before_seq: Deleted items whose sequence number is less * than purge_before_seq will be purged. * - drop_deletes: whether to purge deleted items or not. * - vbid : Database file id for the underlying store. * * Response: * * The response will return a SUCCESS after compaction is done * successfully and a NOT_MY_VBUCKET (along with cluster config) * if the vbucket isn't found. */ class CompactDbPayload { public: uint64_t getPurgeBeforeTs() const { return ntohll(purge_before_ts); } void setPurgeBeforeTs(uint64_t purge_before_ts) { CompactDbPayload::purge_before_ts = htonll(purge_before_ts); } uint64_t getPurgeBeforeSeq() const { return ntohll(purge_before_seq); } void setPurgeBeforeSeq(uint64_t purge_before_seq) { CompactDbPayload::purge_before_seq = htonll(purge_before_seq); } uint8_t getDropDeletes() const { return drop_deletes; } void setDropDeletes(uint8_t drop_deletes) { CompactDbPayload::drop_deletes = drop_deletes; } const Vbid getDbFileId() const { return db_file_id.ntoh(); } void setDbFileId(const Vbid& db_file_id) { CompactDbPayload::db_file_id = db_file_id.hton(); } // validate() references align_pad1 and align_pad3 (and checks that they // are zero) so the compiler does not warn about otherwise unused members. bool validate() const { return align_pad1 == 0 && align_pad3 == 0; } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t purge_before_ts = 0; uint64_t purge_before_seq = 0; uint8_t drop_deletes = 0; uint8_t align_pad1 = 0; Vbid db_file_id = Vbid{0}; uint32_t align_pad3 = 0; }; #pragma pack() static_assert(sizeof(CompactDbPayload) == 24, "Unexpected struct size"); } // namespace cb::mcbp::request #define OBS_STATE_NOT_PERSISTED 0x00 #define OBS_STATE_PERSISTED 0x01 #define OBS_STATE_NOT_FOUND 0x80 #define OBS_STATE_LOGICAL_DEL 0x81 /** * The physical layout for the PROTOCOL_BINARY_CMD_AUDIT_PUT */ typedef union { struct { protocol_binary_request_header header; struct { uint32_t id; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 4]; } protocol_binary_request_audit_put; typedef protocol_binary_response_no_extras protocol_binary_response_audit_put; /** * The PROTOCOL_BINARY_CMD_OBSERVE_SEQNO command is used by the * client to retrieve information about the vbucket in order to * find out if a
particular mutation has been persisted or * replicated at the server side. In order to do so, the client * would pass the vbucket uuid of the vbucket that it wishes to * observe to the server. The response would contain the last * persisted sequence number and the latest sequence number in the * vbucket. For example, if a client sends a request to observe * the vbucket 0 with uuid 12345 and the response contains the * values <58, 65>, then the client can infer that sequence * number 56 has been persisted, 60 has only been replicated and * not been persisted yet, and 68 has not been replicated yet. */ /** * Definition of the request packet for the observe_seqno command. * * Header: Contains the vbucket id of the vbucket that the client * wants to observe. * * Body: Contains the vbucket uuid of the vbucket that the client * wants to observe. The vbucket uuid is of type uint64_t. * */ typedef union { struct { protocol_binary_request_header header; struct { uint64_t uuid; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 8]; } protocol_binary_request_observe_seqno; /** * Definition of the response packet for the observe_seqno command. * Body: Contains a tuple of the form * <format_type, vbucket id, vbucket uuid, last_persisted_seqno, * current_seqno> * * - format_type is of type uint8_t and it describes whether * the vbucket has failed over or not. 1 indicates a hard * failover, 0 indicates otherwise. * - vbucket id is of type Vbid and it is the identifier for * the vbucket. * - vbucket uuid is of type uint64_t and it represents a UUID for * the vbucket. * - last_persisted_seqno is of type uint64_t and it is the * last sequence number that was persisted for this * vbucket. * - current_seqno is of type uint64_t and it is the * sequence number of the latest mutation in the vbucket. * * In the case of a hard failover, the tuple is of the form * <format_type, vbucket id, vbucket uuid, last_persisted_seqno, * current_seqno, old vbucket uuid, last_received_seqno> * * - old vbucket uuid is of type uint64_t and it is the * vbucket UUID of the vbucket prior to the hard failover. * * - last_received_seqno is of type uint64_t and it is the * last received sequence number in the old vbucket uuid. * * The other fields are the same as those mentioned in the normal case. */ typedef protocol_binary_response_no_extras protocol_binary_response_observe_seqno; /** * Definition of the request packet for the command * PROTOCOL_BINARY_CMD_GET_ALL_VB_SEQNOS * * Header: Only opcode field is used. * * Body: Contains the vBucket state and/or collection id for which the vb * sequence numbers are requested. * Please note that these fields are optional; header.request.extlen is * checked to see if they are present. If a vBucket state is not * present or 0 it implies the request is for all vbucket states. If * collection id is not present it implies the request is for the * vBucket high seqno number. * */ typedef union { struct { protocol_binary_request_header header; struct { RequestedVBState state; CollectionIDType cid; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + sizeof(RequestedVBState) + sizeof(CollectionIDType)]; } protocol_binary_request_get_all_vb_seqnos; /** * Definition of the payload in the PROTOCOL_BINARY_CMD_GET_ALL_VB_SEQNOS * response. * * The body contains a "list" of "vbucket id - seqno pairs" for all * active and replica buckets on the node in network byte order.
* * * Byte/ 0 | 1 | 2 | 3 | * / | | | | * |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7| * +---------------+---------------+---------------+---------------+ * 0| VBID | VBID | SEQNO | SEQNO | * +---------------+---------------+---------------+---------------+ * 4| SEQNO | SEQNO | VBID | VBID | * +---------------+---------------+---------------+---------------+ * 4| SEQNO | SEQNO | * +---------------+---------------+ */ typedef protocol_binary_response_no_extras protocol_binary_response_get_all_vb_seqnos; /** * Message format for PROTOCOL_BINARY_CMD_GET_KEYS * * The extras field may contain a 32 bit integer specifying the number * of keys to fetch. If no value is specified, 1000 keys are transmitted. * * Key is mandatory and specifies the starting key. * * Get keys is used to fetch a sequence of keys from the server starting * at the specified key. */ typedef protocol_binary_request_no_extras protocol_binary_request_get_keys; namespace cb::mcbp::request { #pragma pack(1) class AdjustTimePayload { public: enum class TimeType : uint8_t { TimeOfDay = 0, Uptime = 1 }; uint64_t getOffset() const { return ntohll(offset); } void setOffset(uint64_t offset) { AdjustTimePayload::offset = htonll(offset); } TimeType getTimeType() const { return time_type; } void setTimeType(TimeType time_type) { AdjustTimePayload::time_type = time_type; } bool isValid() const { switch (getTimeType()) { case TimeType::TimeOfDay: case TimeType::Uptime: return true; } return false; } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint64_t offset = 0; TimeType time_type = TimeType::TimeOfDay; }; static_assert(sizeof(AdjustTimePayload) == 9, "Unexpected struct size"); /** * Message format for PROTOCOL_BINARY_CMD_EWOULDBLOCK_CTL * * See engines/ewouldblock_engine for more information. */ class EWB_Payload { public: uint32_t getMode() const { return ntohl(mode); } void setMode(uint32_t m) { mode = htonl(m); } uint32_t getValue() const { return ntohl(value); } void setValue(uint32_t v) { value = htonl(v); } uint32_t getInjectError() const { return ntohl(inject_error); } void setInjectError(uint32_t ie) { inject_error = htonl(ie); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint32_t mode = 0; // See EWB_Engine_Mode uint32_t value = 0; uint32_t inject_error = 0; // cb::engine_errc to inject. }; static_assert(sizeof(EWB_Payload) == 12, "Unexpected struct size"); /** * Message format for PROTOCOL_BINARY_CMD_GET_ERRORMAP * * The payload (*not* specified as extras) is 2 bytes long, * containing a 16 bit encoded version number. This version number should * indicate the highest version number of the error map the client is able * to understand. The server will return a JSON-formatted error map * which is formatted to either the version requested by the client, or * a lower version (thus, clients must be ready to parse lower version * formats).
*/ class GetErrmapPayload { public: uint16_t getVersion() const { return ntohs(version); } void setVersion(uint16_t version) { GetErrmapPayload::version = htons(version); } cb::const_byte_buffer getBuffer() const { return {reinterpret_cast<const uint8_t*>(this), sizeof(*this)}; } protected: uint16_t version = 0; }; static_assert(sizeof(GetErrmapPayload) == 2, "Unexpected struct size"); #pragma pack() } // namespace cb::mcbp::request /** * Message format for PROTOCOL_BINARY_CMD_COLLECTIONS_SET_MANIFEST * * The body contains a JSON collections manifest. * No key and no extras */ typedef union { struct { protocol_binary_request_header header; } message; uint8_t bytes[sizeof(protocol_binary_request_header)]; } protocol_binary_collections_set_manifest; typedef protocol_binary_response_no_extras protocol_binary_response_collections_set_manifest; /** * @} */ inline protocol_binary_subdoc_flag operator|(protocol_binary_subdoc_flag a, protocol_binary_subdoc_flag b) { return protocol_binary_subdoc_flag(static_cast<uint8_t>(a) | static_cast<uint8_t>(b)); } namespace mcbp::subdoc { inline constexpr mcbp::subdoc::doc_flag operator|(mcbp::subdoc::doc_flag a, mcbp::subdoc::doc_flag b) { return mcbp::subdoc::doc_flag(static_cast<uint8_t>(a) | static_cast<uint8_t>(b)); } inline constexpr mcbp::subdoc::doc_flag operator&(mcbp::subdoc::doc_flag a, mcbp::subdoc::doc_flag b) { return mcbp::subdoc::doc_flag(static_cast<uint8_t>(a) & static_cast<uint8_t>(b)); } inline constexpr mcbp::subdoc::doc_flag operator~(mcbp::subdoc::doc_flag a) { return mcbp::subdoc::doc_flag(~static_cast<uint8_t>(a)); } inline std::string to_string(mcbp::subdoc::doc_flag a) { using mcbp::subdoc::doc_flag; switch (a) { case doc_flag::None: return "None"; case doc_flag::Mkdoc: return "Mkdoc"; case doc_flag::AccessDeleted: return "AccessDeleted"; case doc_flag::Add: return "Add"; case doc_flag::CreateAsDeleted: return "CreateAsDeleted"; case doc_flag::ReviveDocument: return "ReviveDocument"; } return std::to_string(static_cast<uint8_t>(a)); } inline bool hasAccessDeleted(mcbp::subdoc::doc_flag a) { return (a & mcbp::subdoc::doc_flag::AccessDeleted) != mcbp::subdoc::doc_flag::None; } inline bool hasMkdoc(mcbp::subdoc::doc_flag a) { return (a & mcbp::subdoc::doc_flag::Mkdoc) != mcbp::subdoc::doc_flag::None; } inline bool hasAdd(mcbp::subdoc::doc_flag a) { return (a & mcbp::subdoc::doc_flag::Add) != mcbp::subdoc::doc_flag::None; } inline bool hasReviveDocument(mcbp::subdoc::doc_flag a) { return (a & mcbp::subdoc::doc_flag::ReviveDocument) == mcbp::subdoc::doc_flag::ReviveDocument; } inline bool hasCreateAsDeleted(mcbp::subdoc::doc_flag a) { return (a & mcbp::subdoc::doc_flag::CreateAsDeleted) != mcbp::subdoc::doc_flag::None; } inline bool isNone(mcbp::subdoc::doc_flag a) { return a == mcbp::subdoc::doc_flag::None; } inline bool impliesMkdir_p(mcbp::subdoc::doc_flag a) { return hasAdd(a) || hasMkdoc(a); } } // namespace mcbp::subdoc namespace mcbp::cas { /** * The special value used as a wildcard and match all CAS values */ const uint64_t Wildcard = 0x0; } // namespace mcbp::cas namespace cb::mcbp::request { #pragma pack(1) // Payload for get_collection_id opcode 0xbb, data stored in network byte order class GetCollectionIDPayload { public: GetCollectionIDPayload() = default; GetCollectionIDPayload(uint64_t manifestId, CollectionID collectionId) : manifestId(htonll(manifestId)), collectionId(htonl(uint32_t(collectionId))) { } CollectionID getCollectionId() const { return ntohl(collectionId); } uint64_t getManifestId() const { return 
ntohll(manifestId); } std::string_view getBuffer() const { return {reinterpret_cast<const char*>(this), sizeof(*this)}; } protected: uint64_t manifestId{0}; uint32_t collectionId{0}; }; // Payload for get_scope_id opcode 0xbc, data stored in network byte order class GetScopeIDPayload { public: GetScopeIDPayload() = default; GetScopeIDPayload(uint64_t manifestId, ScopeID scopeId) : manifestId(htonll(manifestId)), scopeId(htonl(uint32_t(scopeId))) { } ScopeID getScopeId() const { return ntohl(scopeId); } uint64_t getManifestId() const { return ntohll(manifestId); } std::string_view getBuffer() const { return {reinterpret_cast<const char*>(this), sizeof(*this)}; } protected: uint64_t manifestId{0}; uint32_t scopeId{0}; }; // Payload for get_rando_key opcode 0xb6, data stored in network byte order class GetRandomKeyPayload { public: GetRandomKeyPayload() = default; explicit GetRandomKeyPayload(uint32_t collectionId) : collectionId(htonl(collectionId)) { } CollectionID getCollectionId() const { return ntohl(collectionId); } std::string_view getBuffer() const { return {reinterpret_cast<const char*>(this), sizeof(*this)}; } protected: CollectionIDType collectionId{0}; }; #pragma pack() } // namespace cb::mcbp::request
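The payload classes above all follow the same serialization pattern: a #pragma pack(1) class whose integer members are stored in network byte order, host-order getters/setters that convert on access, a getBuffer() view over the raw bytes, and a static_assert pinning the wire size. The standalone sketch below reproduces that pattern; ExamplePayload, host_to_net32 and net_to_host32 are hypothetical names used only for illustration (the real headers rely on the platform ntohl/htonl/ntohll/htonll and cb::const_byte_buffer, which are omitted here to keep the sketch self-contained).

// Minimal sketch of the "packed payload stored in network byte order" pattern.
#include <cstdint>
#include <cstring>
#include <iostream>

namespace example {

// Host -> network (big-endian) conversion by explicit byte composition, so the
// sketch behaves correctly on both little- and big-endian hosts.
inline uint32_t host_to_net32(uint32_t v) {
    const uint8_t bytes[4] = {uint8_t(v >> 24), uint8_t(v >> 16),
                              uint8_t(v >> 8), uint8_t(v)};
    uint32_t out;
    std::memcpy(&out, bytes, sizeof(out));
    return out;
}

// Network (big-endian) -> host conversion: read the stored bytes back in
// big-endian order.
inline uint32_t net_to_host32(uint32_t v) {
    uint8_t bytes[4];
    std::memcpy(bytes, &v, sizeof(v));
    return (uint32_t(bytes[0]) << 24) | (uint32_t(bytes[1]) << 16) |
           (uint32_t(bytes[2]) << 8) | uint32_t(bytes[3]);
}

#pragma pack(1)
class ExamplePayload {
public:
    uint32_t getFlags() const { return net_to_host32(flags); }
    void setFlags(uint32_t v) { flags = host_to_net32(v); }
    uint32_t getExpiration() const { return net_to_host32(expiration); }
    void setExpiration(uint32_t v) { expiration = host_to_net32(v); }
protected:
    // Stored in network byte order, exactly as the fields appear on the wire.
    uint32_t flags = 0;
    uint32_t expiration = 0;
};
#pragma pack()
static_assert(sizeof(ExamplePayload) == 8, "Unexpected struct size");

} // namespace example

int main() {
    example::ExamplePayload p;
    p.setFlags(0xdeadbeef);  // stored big-endian, read back in host order
    p.setExpiration(3600);
    std::cout << std::hex << p.getFlags() << ' '
              << std::dec << p.getExpiration() << '\n';  // prints: deadbeef 3600
    return 0;
}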
[STATEMENT] lemma lm093: "set (concat [ [ f \<union> {(x,y)} . y \<leftarrow> (filter (%y. y \<notin> Range f) Y) ]. f \<leftarrow> F ]) = (\<Union> f \<in> set F. {f \<union> {(x,y)} | y . y \<in> (set Y) - (Range f)})" [PROOF STATE] proof (prove) goal (1 subgoal): 1. set (concat (map (\<lambda>f. map (\<lambda>y. f \<union> {(x, y)}) (filter (\<lambda>y. y \<notin> Range f) Y)) F)) = (\<Union>f\<in>set F. {f \<union> {(x, y)} |y. y \<in> set Y - Range f}) [PROOF STEP] by auto
[STATEMENT] lemma nuc_hom: "Abs_cl_op_im \<circ> cl_op \<in> quantale_homset" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Abs_cl_op_im \<circ> cl_op \<in> quantale_homset [PROOF STEP] unfolding quantale_homset_iff comp_def fun_eq_iff [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<forall>x y. Abs_cl_op_im (cl_op (x \<cdot> y)) = Abs_cl_op_im (cl_op x) \<cdot> Abs_cl_op_im (cl_op y)) \<and> (\<forall>x. Abs_cl_op_im (cl_op (\<Squnion> x)) = (\<Squnion>x\<in>x. Abs_cl_op_im (cl_op x))) [PROOF STEP] apply safe [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>x y. Abs_cl_op_im (cl_op (x \<cdot> y)) = Abs_cl_op_im (cl_op x) \<cdot> Abs_cl_op_im (cl_op y) 2. \<And>x. Abs_cl_op_im (cl_op (\<Squnion> x)) = (\<Squnion>x\<in>x. Abs_cl_op_im (cl_op x)) [PROOF STEP] apply (metis (no_types, lifting) Abs_cl_op_im_inverse Rep_cl_op_im_inverse nuc_comp_prop nucleus_cl_op rangeI times_cl_op_im.rep_eq) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x. Abs_cl_op_im (cl_op (\<Squnion> x)) = (\<Squnion>x\<in>x. Abs_cl_op_im (cl_op x)) [PROOF STEP] unfolding Sup_cl_op_im_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x. Abs_cl_op_im (cl_op (\<Squnion> x)) = map_fun ((`) Rep_cl_op_im) Abs_cl_op_im (\<lambda>X. cl_op (\<Squnion> X)) ((\<lambda>x. Abs_cl_op_im (cl_op x)) ` x) [PROOF STEP] by (smt Abs_cl_op_im_inverse SUP_cong image_image map_fun_apply nuc_Sup_closed_var nucleus_cl_op rangeI)
import analysis.inner_product_space.pi_L2 import topology.paracompact --OUTPUT 1 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin --Let A be an open covering of R^n. assume A : set (set (euclidean_space ℝ (fin n))), assume hA : is_open_cover A, --We now construct a locally finite open refinement C of A that covers R^n. have h1 : ∃ C : set (set (euclidean_space ℝ (fin n))), is_open_cover C ∧ is_locally_finite_refinement A C, { --First, we define a collection of pen balls. --Let B_0 = phi, and for each n in N, let B_m denote the ball of radius m centered at 0. have h1a : ∀ (m : ℕ), ∃ Bm : set (euclidean_space ℝ (fin n)), is_open Bm ∧ ∀ x : euclidean_space ℝ (fin n), x ∈ Bm ↔ ∃ (a : ℝ) (b : fin n), (a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a), { assume m : ℕ, let Bm : set (euclidean_space ℝ (fin n)), let hBm : is_open Bm, let hBm1 : ∀ x : euclidean_space ℝ (fin n), x ∈ Bm ↔ ∃ (a : ℝ) (b : fin n), (a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a), { assume x : euclidean_space ℝ (fin n), split, { assume h1 : x ∈ Bm, let a : ℝ, let b : fin n, have h2 : ∃ (a : ℝ) (b : fin n), (a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a), { have h2a : ∃ (a : ℝ) (b : fin n), abs (x i - b i) < a, from by auto [h1], have h2b : ∃ (a : ℝ) (b : fin n), ((a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a)), from by auto [h2a], exact h2b, }, have h3 : ∃! (a : ℝ) (b : fin n), ((a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a)), from by auto [h2], have h4 : ∃ (a : ℝ) (b : fin n), ((a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a)), from by auto [exists_unique.exists, h3], exact h4, }, { assume h1 : ∃ (a : ℝ) (b : fin n), (a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a), let a : ℝ, let b : fin n, have h2 : a > 0 ∧ ∀ (i : fin n), abs (x i - b i) < a, from by auto [h1], have h3 : x ∈ Bm, from by auto [h2], exact h3, }, }, exact hBm1, }, have h1b : ∀ (m : ℕ), ∃ Bm : set (euclidean_space ℝ (fin n)), is_open Bm ∧ ∀ x : euclidean_space ℝ (fin n), x ∈ Bm ↔ ∃ (a : ℝ) (b : fin n), (a > 0) ∧ (∀ (i : fin n), abs (x i - b i) < a), from by auto [h1a], --Given m, set Bar{B_m} is compact in R^n by the Heine-Borel theorem, so choose finitely many elements of A that cover Bar{B_m} and intersect each one with the open set R^n setminus Bar{B_{m - 1}}, and let C_m denote this collection of open sets (each an open subset of an element of A). 
have h1c : ∀ (m : ℕ), ∃ Cm : set (set (euclidean_space ℝ (fin n))), is_open_cover Cm ∧ (∀ (U : set (euclidean_space ℝ (fin n))), U ∈ Cm ↔ (∃ (V : set (euclidean_space ℝ (fin n))), V ∈ A ∧ U = V ∩ (set.inter_compl (set.compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl (set.inter_compl))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))), { assume m : ℕ, let Cm : set (set (euclidean_space ℝ (fin n))), let hCm : is_open_cover Cm, let hCm1 : ∀ (U : set (euclidean_space end --Needs more than 2000 tokens! 
--OUTPUT 2 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin intros A HA, have B0 : set (euclidean_space ℝ (fin n)) := ∅, have Bm : ∀ m : ℕ, set (euclidean_space ℝ (fin n)) := λ m, ball (0 : euclidean_space ℝ (fin n)) m, have B : ∀ m : ℕ, set (euclidean_space ℝ (fin n)) := λ m, closure (Bm m), have Cm : ∀ (m : ℕ) (A : set (euclidean_space ℝ (fin n))), ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from begin assume (m : ℕ) (A : set (euclidean_space ℝ (fin n))), have h1 : (Bm m) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h2 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h3 : (Bm m) ⊆ A, from by auto [set.subset_inter_iff, set.inter_ne_empty_iff, set.mem_of_mem_closure], have h4 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h5 : ∃ (C : set (euclidean_space ℝ (fin n))), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_inter_of_subset], show ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_of_mem_of_subset, set.exists.elim] using [h5], end, have Cm' : ∀ (m : ℕ) (A : set (euclidean_space ℝ (fin n))), ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from begin assume (m : ℕ) (A : set (euclidean_space ℝ (fin n))), have h1 : (Bm m) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h2 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h3 : (Bm m) ⊆ A, from by auto [set.subset_inter_iff, set.inter_ne_empty_iff, set.mem_of_mem_closure], have h4 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h5 : ∃ (C : set (euclidean_space ℝ (fin n))), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_inter_of_subset], show ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_of_mem_of_subset, set.exists.elim] using [h5], end, have Cm'' : ∀ (m : ℕ) (A : set (euclidean_space ℝ (fin n))), ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from begin assume (m : ℕ) (A : set (euclidean_space ℝ (fin n))), have h1 : (Bm m) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h2 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h3 : (Bm m) ⊆ A, from by auto [set.subset_inter_iff, set.inter_ne_empty_iff, set.mem_of_mem_closure], have h4 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h5 : ∃ (C : set (euclidean_space ℝ (fin n))), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_inter_of_subset], show ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_of_mem_of_subset, set.exists.elim] using [h5], end, have Cm''' : ∀ (m : ℕ) (A : set (euclidean_space ℝ (fin n))), ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from begin assume (m : ℕ) (A : set (euclidean_space ℝ (fin n))), have h1 : (Bm m) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h2 : (Bm (m-1)) ∩ A ≠ ∅, from by auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h3 : (Bm m) ⊆ A, from by auto [set.subset_inter_iff, set.inter_ne_empty_iff, set.mem_of_mem_closure], have h4 : (Bm (m-1)) ∩ A ≠ ∅, from by 
auto [set.inter_ne_empty_iff, set.mem_of_mem_closure], have h5 : ∃ (C : set (euclidean_space ℝ (fin n))), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_inter_of_subset], show ∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ HA), (Bm m) ⊆ C ∧ (Bm (m-1)) ∩ C = ∅, from by auto [set.exists_of_mem_of_subset, set.ex end --Needs more than 2000 tokens! --OUTPUT 3 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin assume (A : set (euclidean_space ℝ (fin n))) (hA : is_open_cover A), have h1 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m), from exists_nat.of_le (le_of_lt ((x : ℝ) + 1)), have h2 : ∃ m : ℕ, (m ≤ x + 1), from exists_nat.of_le (le_add_right x 1), have h3 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1), from begin cases h1 with m hm, cases h2 with n hn, existsi max m n, apply and.intro, apply le_max_left, apply le_max_right, end, exact h3, end, have h2 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1), from h1 x n, cases h1 with m hm, have h2 : ∀ (n : ℕ), m ≤ n → x ≤ n, from le_trans (and.elim_right hm) (le_add_left x 1), existsi m, apply and.intro, apply and.elim_left hm, apply and.intro, apply and.elim_right hm, exact h2, end, have h3 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n), from h2 x n, cases h1 with m hm, have h2 : ∀ (n : ℕ), x ≤ n → m ≤ n, from le_trans (and.elim_right (and.elim_right hm)) (le_add_left x 1), existsi m, apply and.intro, apply and.elim_left hm, apply and.intro, apply and.elim_right (and.elim_left hm), apply and.intro, apply and.elim_right (and.elim_left hm), exact h2, end, have h4 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n ∧ ∀ (n : ℕ), m ≤ n → x + 1 ≤ n), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n), from h3 x n, cases h1 with m hm, have h2 : ∀ (n : ℕ), m ≤ n → x + 1 ≤ n, from le_trans (and.elim_right (and.elim_right (and.elim_right (and.elim_left hm)))) (le_add_right x 1), existsi m, apply and.intro, apply and.elim_left hm, apply and.intro, apply and.elim_right (and.elim_left hm), apply and.intro, apply and.elim_right (and.elim_left (and.elim_left hm)), apply and.intro, apply and.elim_right (and.elim_left (and.elim_left hm)), exact h2, end, have h5 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n ∧ ∀ (n : ℕ), m ≤ n → x + 1 ≤ n ∧ ∀ (n : ℕ), x ≤ n → x + 1 ≤ n), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n ∧ ∀ (n : ℕ), m ≤ n → x + 1 ≤ n), from h4 x n, cases h1 with m hm, have h2 : ∀ (n : ℕ), x ≤ n → x + 1 ≤ n, from le_trans (and.elim_right (and.elim_right (and.elim_right (and.elim_left (and.elim_left hm))))) (le_add_right x 1), existsi m, apply and.intro, apply and.elim_left hm, apply and.intro, apply and.elim_right (and.elim_left hm), apply and.intro, apply and.elim_right (and.elim_left (and.elim_left hm)), apply and.intro, apply and.elim_right (and.elim_left (and.elim_left (and.elim_left hm))), apply and.intro, apply and.elim_right (and.elim_left (and.elim_left (and.elim_left hm))), exact h2, end, have h6 : ∀ (x : ℝ) (n : ℕ), ∃ m : ℕ, 
(x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n ∧ ∀ (n : ℕ), m ≤ n → x + 1 ≤ n ∧ ∀ (n : ℕ), x ≤ n → x + 1 ≤ n ∧ ∀ (n : ℕ), x + 1 ≤ n → m ≤ n), from begin assume (x : ℝ) (n : ℕ), have h1 : ∃ m : ℕ, (x ≤ m ∧ m ≤ x + 1 ∧ ∀ (n : ℕ), m ≤ n → x ≤ n ∧ ∀ (n : ℕ), x ≤ n → m ≤ n ∧ ∀ (n : ℕ), m ≤ n → x + 1 ≤ n ∧ ∀ (n : ℕ), x ≤ n → x + 1 ≤ n), from h5 x n, cases h1 with m hm, have h2 : ∀ (n : ℕ), x + 1 ≤ n → m ≤ n end --Needs more than 2000 tokens! --OUTPUT 4 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin have h1 : ∀ A : set (euclidean_space ℝ (fin n)), is_open A → is_open (A ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.inter_univ], have h2 : ∀ m : ℕ, is_open (set.Iio (m : ℝ) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h3 : ∀ m : ℕ, is_open (set.Icc (m : ℝ) (m + 1) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h4 : (set.Iio (0 : ℝ) ∩ (univ : set (euclidean_space ℝ (fin n)))) ⊆ (set.Icc (0 : ℝ) 1 ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h5 : (set.Icc (0 : ℝ) 1 ∩ (univ : set (euclidean_space ℝ (fin n)))) ⊆ (set.Iio (0 : ℝ) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h6 : is_open (set.Iio (0 : ℝ) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [h2, h3, h4, h5], have h7 : ∀ m : ℕ, is_open (set.Ico (m : ℝ) (m + 1) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h8 : is_open (set.Ico (0 : ℝ) 1 ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [h7], have h9 : ∀ m : ℕ, is_open (set.Icc (m : ℝ) (m + 1) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h10 : ∀ m : ℕ, is_open (set.Icc (m : ℝ) (m + 1)), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h11 : ∀ m : ℕ, is_open (set.Iio (m : ℝ)), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h12 : ∀ m : ℕ, is_open (set.Ico (m : ℝ)), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h13 : ∀ m : ℕ, is_open (set.Icc (m : ℝ) (m + 1) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h14 
: ∀ m : ℕ, is_open (set.Icc (m : ℝ) (m + 1)), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h15 : ∀ m : ℕ, is_open (set.Ico (m : ℝ) (m + 1) ∩ (univ : set (euclidean_space ℝ (fin n)))), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_univ], have h16 : ∀ m : ℕ, is_open (set.Ico (m : ℝ) (m + 1)), from by auto [set.Iio_subset_Ioi, set.Ioi_subset_Icc, set.Icc_subset_Ioc, set.Ioc_subset_Ico, set.Ico_subset_Ioo, set.Ioo_subset_Icc, set.Icc_subset_Icc, set.Icc_subset_univ, set.inter_un end --Needs more than 2000 tokens! --OUTPUT 5 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin let A := λ (a : ℝ), {b : ℝ | a < b}, have h1 : ∀ (a : ℝ), is_open (A a), from by auto [is_open_lt], let B := λ (a : ℝ), {b : ℝ | b < a}, have h2 : ∀ (a : ℝ), is_open (B a), from by auto [is_open_gt], have h3 : ∀ (a : ℝ), is_open (A a) ∧ is_open (B a), from by auto [h1, h2], have h4 : ∀ (a : ℝ) (h : a > 0), ∃ b : ℝ, b > a, from by auto [h1], have h5 : ∀ (a : ℝ) (h : a < 0), ∃ b : ℝ, b < a, from by auto [h2], have h6 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b ≠ a, from by auto [h4, h5], have h7 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h8 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h9 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h10 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h11 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h12 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h13 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h14 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h15 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h16 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h17 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h18 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h19 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h20 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h21 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h22 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h23 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h24 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h25 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h26 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h27 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h28 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h29 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h30 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h31 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h32 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h33 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h34 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h35 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h36 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, 
from by auto [h4, h5], have h37 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h38 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h39 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h40 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h41 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h42 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h43 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h44 : ∀ (a : ℝ) (h : a ≠ 0), ∃ b : ℝ, b = a, from by auto [h4, h5], have h45 end --Needs more than 2000 tokens! --OUTPUT 6 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin have h1 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m} ∈ (𝓝 (0 : ℝ ^ n)), from by auto [nhds_zero, set.mem_nhds_sets_iff, set.mem_ball], have h2 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥} ∈ (𝓝 (0 : ℝ ^ n)), from by auto [nhds_zero, set.mem_nhds_sets_iff, set.mem_ball], have h3 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ < m} ∈ (𝓝 (0 : ℝ ^ n)), from by auto [nhds_zero, set.mem_nhds_sets_iff, set.mem_ball], have h4 : ∀ m : ℕ, {x : ℝ ^ n // m < ∥x∥} ∈ (𝓝 (0 : ℝ ^ n)), from by auto [nhds_zero, set.mem_nhds_sets_iff, set.mem_ball], have h5 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h6 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h7 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h8 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h9 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h10 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h11 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h12 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h13 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h14 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h15 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h16 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h17 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h18 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h19 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h20 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h21 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ ∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h22 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, have h23 : ∀ m : ℕ, {x : ℝ ^ n // ∥x∥ ≤ m ∧ 
∥x∥ < m + 1} = {x : ℝ ^ n // ∥x∥ ≤ m} ∩ {x : ℝ ^ n // ∥x∥ < m + 1}, from by auto, have h24 : ∀ m : ℕ, {x : ℝ ^ n // m ≤ ∥x∥ ∧ m + 1 < ∥x∥} = {x : ℝ ^ n // m ≤ ∥x∥} ∩ {x : ℝ ^ n // m + 1 < ∥x∥}, from by auto, end --Needs more than 2000 tokens! --OUTPUT 7 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin let A : set (euclidean_space ℝ (fin n)) → Prop := λ x, (is_open x), let S : set (euclidean_space ℝ (fin n)) := univ, let T : set (euclidean_space ℝ (fin n)) → Prop := λ x, (is_open x) ∧ cover S x ∧ locally_finite x, let U : set (euclidean_space ℝ (fin n)) → Prop := λ x, (is_open x) ∧ cover S x, let C : set (euclidean_space ℝ (fin n)) → Prop := λ x, (is_open x) ∧ locally_finite x, have h1 : S ∈ 𝒫 (euclidean_space ℝ (fin n)), from by auto [set.univ_mem_powerset], have h2 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C), from by auto [set.is_open_of_mem_powerset], have h3 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C, from by auto [set.is_open_of_mem_powerset, set.cover_univ, set.univ_mem_powerset], have h4 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C, from by auto [set.is_open_of_mem_powerset, set.cover_univ, set.univ_mem_powerset, set.locally_finite_of_mem_powerset], have h5 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ locally_finite C, from by auto [set.is_open_of_mem_powerset, set.locally_finite_of_mem_powerset], have h6 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C ↔ (is_open C) ∧ cover S C, from by auto [iff_iff_iff_iff], have h7 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C ↔ (is_open C) ∧ locally_finite C, from by auto [iff_iff_iff_iff], have h8 : T = U ∨ T = C, from by auto [set.ext], have h9 : (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C) ↔ (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C), from by auto [h6], have h10 : (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C) ↔ (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ locally_finite C), from by auto [h7], have h11 : (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∧ locally_finite C) ↔ (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C) ∨ (∃ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ locally_finite C), from by auto [h8, h9, h10, exists_or_distrib], have h12 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C, from by auto [set.is_open_of_mem_powerset, set.cover_univ, set.univ_mem_powerset], have h13 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ locally_finite C, from by auto [set.is_open_of_mem_powerset, set.locally_finite_of_mem_powerset], have h14 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∨ (is_open C) ∧ 
locally_finite C, from by auto [h11], have h15 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∨ (is_open C) ∧ locally_finite C, from by auto [h14, h12, h13, classical.or_iff_not_imp_left, set.ext, classical.not_not_iff_iff], have h16 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∨ (is_open C) ∧ locally_finite C, from by auto [h15, set.ext], have h17 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∨ (is_open C) ∧ locally_finite C, from by auto [h16, set.ext], have h18 : ∀ (C : set (euclidean_space ℝ (fin n))) (hC : C ∈ 𝒫 (euclidean_space ℝ (fin n))), (is_open C) ∧ cover S C ∨ (is_open C) ∧ locally_finite C, from by auto [h17, set. end --Needs more than 2000 tokens! --OUTPUT 8 theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := begin assume (A : set (euclidean_space ℝ (fin n))) (hA : is_open A), have h1 : is_open (⋃ (m : ℕ), (λ (x : euclidean_space ℝ (fin n)), (∃ (m : ℕ), ∀ (i : fin n), abs (x $ i) ≤ m)) ⁻¹' {m} ∩ A) := by auto [is_open_Inter, is_open_Ball, is_open_Union, is_open_Inter, is_open_Ball, is_open_Union], have h2 : (⋃ (m : ℕ), (λ (x : euclidean_space ℝ (fin n)), (∃ (m : ℕ), ∀ (i : fin n), abs (x $ i) ≤ m)) ⁻¹' {m} ∩ A) = A, from by auto [ext_iff], show ∃ (B : set (euclidean_space ℝ (fin n))), is_open B ∧ is_locally_finite B ∧ ⋃ B = A, from by auto [exists_prop, h1, h2], end /- FEW SHOT PROMPTS TO CODEX(START) /--`theorem` Power Set is Closed under Intersection Let $S$ be a set. Let $\powerset S$ be the power set of $S$. Then: :$\forall A, B \in \powerset S: A \cap B \in \powerset S$ `proof` Let $A, B \in \powerset S$. Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$. From Intersection is Subset we have that $A \cap B \subseteq A$. It follows from Subset Relation is Transitive that $A \cap B \subseteq S$. Thus $A \cap B \in \powerset S$ and closure is proved. {{qed}} -/ theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S := begin assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S), have h1 : (A ⊆ S) ∧ (B ⊆ S), from by auto [set.subset_of_mem_powerset, set.subset_of_mem_powerset], have h2 : (A ∩ B) ⊆ A, from by auto [set.inter_subset_left], have h3 : (A ∩ B) ⊆ S, from by auto [set.subset.trans], show (A ∩ B) ∈ 𝒫 S, from by auto [set.mem_powerset], end /--`theorem` Square of Sum :$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$ `proof` Follows from the distribution of multiplication over addition: {{begin-eqn}} {{eqn | l = \left({x + y}\right)^2 | r = \left({x + y}\right) \cdot \left({x + y}\right) }} {{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right) | c = Real Multiplication Distributes over Addition }} {{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y | c = Real Multiplication Distributes over Addition }} {{eqn | r = x^2 + 2xy + y^2 | c = }} {{end-eqn}} {{qed}} -/ theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) := begin calc (x + y)^2 = (x+y)*(x+y) : by auto [sq] ... = x*(x+y) + y*(x+y) : by auto [add_mul] ... = x*x + x*y + y*x + y*y : by auto [mul_comm, add_mul] using [ring] ... = x^2 + 2*x*y + y^2 : by auto [sq, mul_comm] using [ring] end /--`theorem` Identity of Group is Unique Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$. 
`proof` From Group has Latin Square Property, there exists a unique $x \in G$ such that: :$a x = b$ and there exists a unique $y \in G$ such that: :$y a = b$ Setting $b = a$, this becomes: There exists a unique $x \in G$ such that: :$a x = a$ and there exists a unique $y \in G$ such that: :$y a = a$ These $x$ and $y$ are both $e$, by definition of identity element. {{qed}} -/ theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a := begin have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by auto using [use (a⁻¹ * b)], have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by auto using [use b * a⁻¹], have h3 : ∀ a : G, ∃! x : G, a * x = a, from by auto [h1], have h4 : ∀ a : G, ∃! y : G, y * a = a, from by auto [h2], have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from by auto [exists_unique.unique, h3, classical.some_spec, exists_unique.exists, mul_one], have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from by auto [exists_unique.unique, h4, classical.some_spec, exists_unique.exists, one_mul], show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by auto [h3, h4, exists_unique.unique, classical.some_spec, exists_unique.exists] using [use (1 : G)], end /--`theorem` \mathbb{R}^n is paracompact $\mathbb{R}^n$ is paracompact for all $n$. `proof` Let $\mathcal{A}$ be an open covering of $\mathbb{R}^n$. We now construct a locally finite open refinement $\mathcal{C}$ of $\mathcal{A}$ that covers $\mathbb{R}^n$. First, we define a collection of pen balls. Let $B_0 = \phi$, and for each $n \in \mathbb{N}$, let $B_m$ denote the ball of radius $m$ centered at 0. Given $m$, set $\Bar{B_m}$ is compact in $\mathbb{R}^n$ by the Heine-Borel theorem, so choose finitely many elements of $\mathcal{A}$ that cover $\Bar{B_m}$ and intersect each one with the open set $\mathbb{R}^n \setminus \Bar{B_{m - 1}}$, and let $\mathcal{C}_{m}$ denote this collection of open sets (each an open subset of an element of $\mathcal{A}$). So $\mathcal{C} = \bigcup_{m = 0}^{\infty} \mathcal{C}_m$ is an open refinement of $\mathcal{A}$. Note that $\mathcal{C}$ covers $\mathbb{R}^n$ since for any $x \in \mathbb{R}^n$, there is a smallest $m \in \mathbb{N}$ such that $x \in \Bar{B_{m}}$ (namely, some $m$ where $\rVert x \lVert \leq m \leq \rVert x \lVert + 1$), and so $x$ is an element of $\mathcal{C}_m$. Now collection $\mathcal{C}$ is locally finite since for given $x \in \mathbb{R}^n$, neighborhood $B_m$ intersects only finitely many elements of $\mathcal{C}$, namely those elements in collection $\mathcal{C}_1 \cup \mathcal{C}_2 \cup \cdots \mathcal{C}_m$. So $\mathcal{C}$ is a locally finite open refinement of $\mathcal{A}$ that covers $\mathbb{R}^n$, hence $\mathbb{R}^n$ is paracompact. QED -/ theorem ℝn_paracompact (n : ℕ) : paracompact_space (euclidean_space ℝ (fin n)) := FEW SHOT PROMPTS TO CODEX(END)-/
import argparse import importlib import os import numpy as np import torch from torch.utils.data import DataLoader from tqdm import tqdm from argparse import RawTextHelpFormatter from TTS.tts.datasets.TTSDataset import MyDataset from TTS.tts.utils.generic_utils import setup_model from TTS.tts.utils.io import load_checkpoint from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_config if __name__ == '__main__': parser = argparse.ArgumentParser( description='''Extract attention masks from trained Tacotron/Tacotron2 models. These masks can be used for different purposes including training a TTS model with a Duration Predictor.\n\n''' '''Each attention mask is written to the same path as the input wav file with ".npy" file extension. (e.g. path/bla.wav (wav file) --> path/bla.npy (attention mask))\n''' ''' Example run: CUDA_VISIBLE_DEVICE="0" python TTS/bin/compute_attention_masks.py --model_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/checkpoint_200000.pth.tar --config_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/config.json --dataset_metafile /root/LJSpeech-1.1/metadata.csv --data_path /root/LJSpeech-1.1/ --batch_size 32 --dataset ljspeech --use_cuda True ''', formatter_class=RawTextHelpFormatter ) parser.add_argument('--model_path', type=str, required=True, help='Path to Tacotron/Tacotron2 model file ') parser.add_argument( '--config_path', type=str, required=True, help='Path to Tacotron/Tacotron2 config file.', ) parser.add_argument('--dataset', type=str, default='', required=True, help='Target dataset processor name from TTS.tts.dataset.preprocess.') parser.add_argument( '--dataset_metafile', type=str, default='', required=True, help='Dataset metafile inclusing file paths with transcripts.') parser.add_argument( '--data_path', type=str, default='', help='Defines the data path. It overwrites config.json.') parser.add_argument('--use_cuda', type=bool, default=False, help="enable/disable cuda.") parser.add_argument( '--batch_size', default=16, type=int, help='Batch size for the model. 
Use batch_size=1 if you have no CUDA.') args = parser.parse_args() C = load_config(args.config_path) ap = AudioProcessor(**C.audio) # if the vocabulary was passed, replace the default if 'characters' in C.keys(): symbols, phonemes = make_symbols(**C.characters) # load the model num_chars = len(phonemes) if C.use_phonemes else len(symbols) # TODO: handle multi-speaker model = setup_model(num_chars, num_speakers=0, c=C) model, _ = load_checkpoint(model, args.model_path, None, args.use_cuda) model.eval() # data loader preprocessor = importlib.import_module('TTS.tts.datasets.preprocess') preprocessor = getattr(preprocessor, args.dataset) meta_data = preprocessor(args.data_path, args.dataset_metafile) dataset = MyDataset(model.decoder.r, C.text_cleaner, compute_linear_spec=False, ap=ap, meta_data=meta_data, tp=C.characters if 'characters' in C.keys() else None, add_blank=C['add_blank'] if 'add_blank' in C.keys() else False, use_phonemes=C.use_phonemes, phoneme_cache_path=C.phoneme_cache_path, phoneme_language=C.phoneme_language, enable_eos_bos=C.enable_eos_bos_chars) dataset.sort_items() loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=4, collate_fn=dataset.collate_fn, shuffle=False, drop_last=False) # compute attentions file_paths = [] with torch.no_grad(): for data in tqdm(loader): # setup input data text_input = data[0] text_lengths = data[1] linear_input = data[3] mel_input = data[4] mel_lengths = data[5] stop_targets = data[6] item_idxs = data[7] # dispatch data to GPU if args.use_cuda: text_input = text_input.cuda() text_lengths = text_lengths.cuda() mel_input = mel_input.cuda() mel_lengths = mel_lengths.cuda() mel_outputs, postnet_outputs, alignments, stop_tokens = model.forward( text_input, text_lengths, mel_input) alignments = alignments.detach() for idx, alignment in enumerate(alignments): item_idx = item_idxs[idx] # interpolate if r > 1 alignment = torch.nn.functional.interpolate( alignment.transpose(0, 1).unsqueeze(0), size=None, scale_factor=model.decoder.r, mode='nearest', align_corners=None, recompute_scale_factor=None).squeeze(0).transpose(0, 1) # remove paddings alignment = alignment[:mel_lengths[idx], :text_lengths[idx]].cpu().numpy() # set file paths wav_file_name = os.path.basename(item_idx) align_file_name = os.path.splitext(wav_file_name)[0] + '.npy' file_path = item_idx.replace(wav_file_name, align_file_name) # save output file_paths.append([item_idx, file_path]) np.save(file_path, alignment) # ourput metafile metafile = os.path.join(args.data_path, "metadata_attn_mask.txt") with open(metafile, "w") as f: for p in file_paths: f.write(f"{p[0]}|{p[1]}\n") print(f" >> Metafile created: {metafile}")
section \<open>Test\<close> theory Test imports Main begin lemma "(\<forall>e f g. is_aux(e, g, f(g, the_det)) \<longrightarrow> e = f(g, the_det)) \<and> is_aux(change_noun, param1, actualization_noun(param1, the_det)) \<and> is_aux(change_noun, None, feature_noun(real_adj, a_det, of_adp(world_noun(None, the_det, None, None)))) \<and> change_noun(hello) \<longrightarrow> is_aux(actualization_noun(param1, the_det), None, feature_noun(real_adj, a_det, of_adp(world_noun(None, the_det, None, None))))" by auto end
Formal statement is: lemma poly_shift_id [simp]: "poly_shift 0 = (\<lambda>x. x)" Informal statement is: Shifting a polynomial by $0$ is the identity function.
\subsection{Standards Committees} An important activity for ECP ST staff is participation in standards efforts. In many instances, our software will not be sustainable if it is not tightly connected to a standard. At the same time, any standard has to take into account the emerging requirements that Exascale platforms need in order to achieve performance and portability. Figure~\ref{fig:standards} summarizes ECP ST staff involvement in the major standards efforts that impact ECP. ECP ST staff are heavily involved in MPI and OpenMP standards efforts. ECP ST staff hold several key leadership positions and have heavy involvement in all aspects. ECP ST staff also play a critical role in C++ standards efforts. While DOE staff have only recently engaged in C++ standards, our efforts are essential to getting HPC requirements considered, especially by contributing working code that demonstrates requirements and design. ECP ST sponsors the newest open source Fortran compiler Flang~\ref{subsubsect:flang}, a front end for LLVM. This compiler is a rapidly emerging and essential part of the HPC ecosystem. In particular, while ARM processors are not explicitly part of the pre-Exascale ecosystem, they are emerging as a strong contender in the future. Flang is \textit{the} Fortran compiler for ARM-based systems. ECP ST involvement in other committees, including \textit{de facto} standards committees, also provides valuable leverage and improved uniformity for HPC software. Lastly, we mention the Visualization Toolkit (VTK) Architecture Review Board (ARB). While this is only a single instance, we intend to explore the ARB model as part of our SDK efforts. \begin{figure}[htb] \begin{center} \includegraphics[width=0.5\textwidth]{StandardsInvolvement} \caption{\label{fig:standards} ECP ST staff are involved in a variety of official and \textit{de facto} standards committees. Involvement in standards efforts is essential to assuring the sustainability of our products and to assure that emerging Exascale requirements are addressed by these standards.} \end{center} \end{figure}
using Judycon, Test # # 1 2---3 4---5 # | | | | | # 6---7 8 9 10 # G1 = QuickFind(10); G2 = QuickUnion(10, false, false); G3 = QuickUnion(10, true, false); G4 = QuickUnion(10, false, true); G5 = QuickUnion(10, true, true); function test(G) connect!(G, 1, 6) connect!(G, 6, 7) connect!(G, 7, 2) connect!(G, 2, 3) connect!(G, 3, 8) connect!(G, 9, 4) connect!(G, 4, 5) connect!(G, 5, 10) # `pts1` should form one set of connected components and `pts2` another, respectively. pts1 = [1, 2, 3, 6, 7, 8] pts2 = [4, 5, 9, 10] for p in pts1 for q in pts1 @test isconnected(G, p, q) @test isconnected(G, q, p) end for q in pts2 @test !isconnected(G, p, q) @test !isconnected(G, q, p) end end end @testset "Test Judycon.jl" begin @testset "Test QuickFind" begin test(G1) end @testset "Test QuickUnion without weighting and path compression" begin test(G2) end @testset "Test QuickUnion with weighting and without path compression" begin test(G3) end @testset "Test QuickUnion without weighting and with path compression" begin test(G4) end @testset "Test QuickUnion with weighting and path compression" begin test(G5) end end
\chapter{Environment} \section{Atmosphere} US Standard Atmosphere 1976 is used to calculate air temperature, pressure, density, viscosity and speed of sound depending on altitude. Mean molecular weight is given as follows: \begin{equation} M_0 = \frac{ \sum_{j} M_j F_j }{ \sum_{j} F_j } = 28.9645 \end{equation} Temperature is given by the following formula: \cite{NASA-TM-X-74335} \begin{equation} T \left( h \right) = T_j + \left( \frac{dT}{dh} \right)_j \left( h - h_j \right) \end{equation} Pressure is given as follows: \cite{NASA-TM-X-74335} \begin{align} p \left( h \right) = p_j \left( \frac{T_j}{ T \left( h \right) } \right) ^ { \frac{gM_0}{ R \left( \frac{dT}{dh} \right)_j } } &\mathrm{~for~} \left( \frac{dT}{dh} \right)_j \neq 0 \\ p \left( h \right) = p_j e^{ \frac{ g M_0 \left( h - h_j \right) }{RT_j} } &\mathrm{~for~} \left( \frac{dT}{dh} \right)_j = 0 \end{align} Density is expressed by the following formula: \cite{NASA-TM-X-74335} \begin{equation} \rho \left( h \right) = \frac{ p \left( h \right) M_0 }{ RT \left( h \right) } \end{equation} Speed of sound is given as follows: \cite{NASA-TM-X-74335} \begin{equation} c_S \left( h \right) = \sqrt{ \frac{ \gamma RT \left( h \right) }{ M_0 } } \end{equation} Dynamic viscosity is given by the formula: \cite{NASA-TM-X-74335} \begin{equation} \mu \left( h \right) = \frac{ 1.458 \cdot 10^{-6} \sqrt{ \left[ T \left( h \right) \right]^3 } } { T \left( h \right) + S } \end{equation} Kinetic viscosity is given as follows: \cite{NASA-TM-X-74335} \begin{equation} \nu \left( h \right) = \frac{ \mu \left( h \right) }{ \rho \left( h \right) } \end{equation} \newpage \vfill \begin{table}[h!] \begin{center} \begin{tabular}{ S | S | S | S } \toprule \textbf{Altitude} & \textbf{Temperature gradient} & \textbf{Temperature} & \textbf{Pressure} \\ {$h_j$} & {$\left( \cfrac{dT}{dh} \right)_j$} & {$T_j$} & {$p_j$} \\ {[m]} & {[K/m]} & {[K]} & {[Pa]} \\ \midrule 0 & -6.5e-3 & 288.15 & 101325.0 \\ 11000 & 0.0 & 216.65 & 22632.0 \\ 20000 & 1.0e-3 & 216.65 & 5474.8 \\ 32000 & 2.8e-3 & 228.65 & 868.01 \\ 47000 & 0.0 & 270.65 & 110.9 \\ 51000 & -2.8e-3 & 270.65 & 66.938 \\ 71000 & -2.0e-3 & 214.65 & 3.9564 \\ \bottomrule \end{tabular} \caption{Reference levels \cite{NASA-TM-X-74335} } \end{center} \end{table} \vfill \begin{table}[h!] \begin{center} \begin{tabular}{ l | S | S } \toprule \textbf{Gas species} & \textbf{Molecular weight} & \textbf{Fractional volume} \\ {} & {[kg/kmol]} & {[-]} \\ \midrule Nitrogen & 28.0134 & 0.78084 \\ Oxygen & 31.9988 & 0.209476 \\ Argon & 39.948 & 0.00934 \\ Carbon Dioxide & 44.00995 & 0.000314 \\ Neon & 20.183 & 0.00001818 \\ Helium & 4.0026 & 0.00000524 \\ Krypton & 83.8 & 0.00000114 \\ Xenon & 131.3 & 0.000000087 \\ Methane & 16.04303 & 0.000002 \\ Hydrogen & 2.01594 & 0.0000005 \\ \bottomrule \end{tabular} \caption{Molecular weights and fractional volume composition of S/L dry air \cite{NASA-TM-X-74335} } \end{center} \end{table} \vfill
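For orientation, a worked example (added here; values rounded) evaluates these formulas within the first reference layer at $h = 5000 \mathrm{~m}$, using $h_j = 0$, $T_j = 288.15 \mathrm{~K}$, $p_j = 101325 \mathrm{~Pa}$, $\left( \frac{dT}{dh} \right)_j = -6.5 \cdot 10^{-3} \mathrm{~K/m}$ and $g = 9.80665 \mathrm{~m/s^2}$: \begin{align} T \left( 5000 \right) &= 288.15 - 6.5 \cdot 10^{-3} \cdot 5000 = 255.65 \mathrm{~K} \\ p \left( 5000 \right) &= 101325 \left( \frac{288.15}{255.65} \right) ^ { \frac{g M_0}{ R \left( -6.5 \cdot 10^{-3} \right) } } \approx 5.40 \cdot 10^{4} \mathrm{~Pa} \\ \rho \left( 5000 \right) &= \frac{ p \left( 5000 \right) M_0 }{ R \cdot 255.65 } \approx 0.736 \mathrm{~kg/m^3} \end{align}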
module Examples.AOC1 import Data.String import Data.Vect import Stream import System import System.File export part1 : IO () part1 = ignore $ withLines "resources/aoc1a" $ \ls => do res <- run . count_ (\[x,y] => x > y) . slidingWindow 2 $ mapVals (cast {to = Nat} . trim) ls putStrLn "Day1, Result A: \{show res}" export part2 : IO () part2 = ignore $ withLines "resources/aoc1a" $ \ls => do res <- run . count_ (\[x,y] => sum x > sum y) . slidingWindow 2 . slidingWindow 3 $ mapVals (cast {to = Nat} . trim) ls putStrLn "Day1, Result B: \{show res}"
Old Baltimore Pike is a road in the U.S. state of Delaware. The road, known as New Castle County Road 26, runs from Maryland Route 281 (MD 281) at the Maryland state line south of Newark, Delaware and continues east to Christiana, ending near Delaware Route 1 (DE 1). The road is paralleled by Interstate 95 (I-95, Delaware Turnpike) to the north and U.S. Route 40 (US 40, Pulaski Highway) to the south. The Old Baltimore Pike was built before 1720 and connected Elkton, Maryland to Christiana. It was a turnpike called the Elk and Christiana Turnpike between 1817 and 1838. In the past it served as a major connection between Philadelphia and Baltimore.
C C************************** CFMATP ************************************ C C assemble the global LHS system matrix from the stiffness and mass C matrices: Picard scheme C C*********************************************************************** C SUBROUTINE CFMATP(N,NTERM,TETAF,DELTAT,COEF1,COEF2,COEF4,COEF5, 1 TOPOL,ET2) C IMPLICIT NONE INTEGER K INTEGER NTERM,N,TOPOL(*) REAL*8 RDT REAL*8 TETAF,DELTAT REAL*8 COEF1(*),COEF2(*),COEF4(*),COEF5(*) REAL*8 ET2(*) C RDT=1.0D0/DELTAT DO K=1,N COEF5(TOPOL(K))=COEF4(TOPOL(K))*ET2(K) END DO DO K=1,NTERM COEF1(K)=TETAF*COEF1(K) + COEF2(K)*RDT+COEF5(K)*RDT END DO C RETURN END
import System.Environment import Data.Complex import Paraiso main = do args <- getArgs let arch = if "--cuda" `elem` args then CUDA 128 128 else X86 putStrLn $ compile arch code where code = do parallel 16384 $ do r <- allocate x <- allocate r =$ Rand 0.0 (4.0::Double) x =$ Rand 0.0 (1.0::Double) cuda $ do sequential 65536 $ do x =$ r * x * (1-x) output [r,x]
# Inferring parameters of SDEs using a Euler-Maruyama scheme _This notebook is derived from a presentation prepared for the Theoretical Neuroscience Group, Institute of Systems Neuroscience at Aix-Marseile University._ ```python %pylab inline import pymc3 as pm import theano.tensor as tt import scipy from pymc3.distributions.timeseries import EulerMaruyama ``` Populating the interactive namespace from numpy and matplotlib ## Toy model 1 Here's a scalar linear SDE in symbolic form $ dX_t = \lambda X_t + \sigma^2 dW_t $ discretized with the Euler-Maruyama scheme ```python # parameters λ = -0.78 σ2 = 5e-3 N = 200 dt = 1e-1 # time series x = 0.1 x_t = [] # simulate for i in range(N): x += dt * λ * x + sqrt(dt) * σ2 * randn() x_t.append(x) x_t = array(x_t) # z_t noisy observation z_t = x_t + randn(x_t.size) * 5e-3 ``` ```python figure(figsize=(10, 3)) subplot(121) plot(x_t[:30], 'k', label='$x(t)$', alpha=0.5), plot(z_t[:30], 'r', label='$z(t)$', alpha=0.5) title('Transient'), legend() subplot(122) plot(x_t[30:], 'k', label='$x(t)$', alpha=0.5), plot(z_t[30:], 'r', label='$z(t)$', alpha=0.5) title('All time'); tight_layout() ``` What is the inference we want to make? Since we've made a noisy observation of the generated time series, we need to estimate both $x(t)$ and $\lambda$. First, we rewrite our SDE as a function returning a tuple of the drift and diffusion coefficients ```python def lin_sde(x, lam): return lam * x, σ2 ``` Next, we describe the probability model as a set of three stochastic variables, `lam`, `xh`, and `zh`: ```python with pm.Model() as model: # uniform prior, but we know it must be negative lam = pm.Flat('lam') # "hidden states" following a linear SDE distribution # parametrized by time step (det. variable) and lam (random variable) xh = EulerMaruyama('xh', dt, lin_sde, (lam, ), shape=N, testval=x_t) # predicted observation zh = pm.Normal('zh', mu=xh, sigma=5e-3, observed=z_t) ``` Once the model is constructed, we perform inference, i.e. sample from the posterior distribution, in the following steps: ```python with model: trace = pm.sample(2000, tune=1000) ``` Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [xh, lam] Sampling 2 chains: 100%|██████████| 6000/6000 [00:28<00:00, 210.97draws/s] /Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. output = mkl_fft.rfftn_numpy(a, s, axes) Next, we plot some basic statistics on the samples from the posterior, ```python figure(figsize=(10, 3)) subplot(121) plot(percentile(trace[xh], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$') plot(x_t, 'r', label='$x(t)$') legend() subplot(122) hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5) axvline(λ, color='r', label='$\lambda$', alpha=0.5) legend(); ``` A model can fit the data precisely and still be wrong; we need to use _posterior predictive checks_ to assess if, under our fit model, the data our likely. 
In other words, we - assume the model is correct - simulate new observations - check that the new observations fit with the original data ```python # generate trace from posterior ppc_trace = pm.sample_posterior_predictive(trace, model=model) # plot with data figure(figsize=(10, 3)) plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$') plot(z_t, 'r', label='$z(t)$') legend() ``` Note that - inference also estimates the initial conditions - the observed data $z(t)$ lies fully within the 95% interval of the PPC. - there are many other ways of evaluating fit ### Toy model 2 As the next model, let's use a 2D deterministic oscillator, \begin{align} \dot{x} &= \tau (x - x^3/3 + y) \\ \dot{y} &= \frac{1}{\tau} (a - x) \end{align} with noisy observation $z(t) = m x + (1 - m) y + N(0, 0.05)$. ```python N, τ, a, m, σ2 = 200, 3.0, 1.05, 0.2, 1e-1 xs, ys = [0.0], [1.0] for i in range(N): x, y = xs[-1], ys[-1] dx = τ * (x - x**3.0/3.0 + y) dy = (1.0 / τ) * (a - x) xs.append(x + dt * dx + sqrt(dt) * σ2 * randn()) ys.append(y + dt * dy + sqrt(dt) * σ2 * randn()) xs, ys = array(xs), array(ys) zs = m * xs + (1 - m) * ys + randn(xs.size) * 0.1 figure(figsize=(10, 2)) plot(xs, label='$x(t)$') plot(ys, label='$y(t)$') plot(zs, label='$z(t)$') legend() ``` Now, estimate the hidden states $x(t)$ and $y(t)$, as well as parameters $\tau$, $a$ and $m$. As before, we rewrite our SDE as a function returned drift & diffusion coefficients: ```python def osc_sde(xy, τ, a): x, y = xy[:, 0], xy[:, 1] dx = τ * (x - x**3.0/3.0 + y) dy = (1.0 / τ) * (a - x) dxy = tt.stack([dx, dy], axis=0).T return dxy, σ2 ``` As before, the Euler-Maruyama discretization of the SDE is written as a prediction of the state at step $i+1$ based on the state at step $i$. We can now write our statistical model as before, with uninformative priors on $\tau$, $a$ and $m$: ```python xys = c_[xs, ys] with pm.Model() as model: τh = pm.Uniform('τh', lower=0.1, upper=5.0) ah = pm.Uniform('ah', lower=0.5, upper=1.5) mh = pm.Uniform('mh', lower=0.0, upper=1.0) xyh = EulerMaruyama('xyh', dt, osc_sde, (τh, ah), shape=xys.shape, testval=xys) zh = pm.Normal('zh', mu=mh * xyh[:, 0] + (1 - mh) * xyh[:, 1], sigma=0.1, observed=zs) ``` ```python with model: trace = pm.sample(2000, tune=1000) ``` Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [xyh, mh, ah, τh] Sampling 2 chains: 100%|██████████| 6000/6000 [02:08<00:00, 46.77draws/s] /Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. output = mkl_fft.rfftn_numpy(a, s, axes) The estimated number of effective samples is smaller than 200 for some parameters. 
Again, the result is a set of samples from the posterior, including our parameters of interest but also the hidden states ```python figure(figsize=(10, 6)) subplot(211) plot(percentile(trace[xyh][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$') plot(xs, 'r', label='$x(t)$') legend(loc=0) subplot(234), hist(trace['τh']), axvline(τ), xlim([1.0, 4.0]), title('τ') subplot(235), hist(trace['ah']), axvline(a), xlim([0, 2.0]), title('a') subplot(236), hist(trace['mh']), axvline(m), xlim([0, 1]), title('m') tight_layout() ``` Again, we can perform a posterior predictive check, that our data are likely given the fit model ```python # generate trace from posterior ppc_trace = pm.sample_posterior_predictive(trace, model=model) # plot with data figure(figsize=(10, 3)) plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$') plot(zs, 'r', label='$z(t)$') legend() ```
[GOAL] C : Type u inst✝² : Category.{v, u} C D : Type w inst✝¹ : Category.{max v u, w} D inst✝ : Abelian D J : GrothendieckTopology C j : ℕ F : Discrete (Fin j) ⥤ Sheaf J D ⊢ HasLimit F [PROOFSTEP] infer_instance
[STATEMENT] lemma span_finite: assumes fS: "finite S" shows "span S = range (\<lambda>u. \<Sum>v\<in>S. u v *s v)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. span S = range (\<lambda>u. \<Sum>v\<in>S. u v *s v) [PROOF STEP] unfolding span_explicit [PROOF STATE] proof (prove) goal (1 subgoal): 1. {\<Sum>a\<in>t. r a *s a |t r. finite t \<and> t \<subseteq> S} = range (\<lambda>u. \<Sum>v\<in>S. u v *s v) [PROOF STEP] proof safe [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>x t r. \<lbrakk>finite t; t \<subseteq> S\<rbrakk> \<Longrightarrow> (\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v) 2. \<And>x u. u \<in> UNIV \<Longrightarrow> \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] fix t r [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>x t r. \<lbrakk>finite t; t \<subseteq> S\<rbrakk> \<Longrightarrow> (\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v) 2. \<And>x u. u \<in> UNIV \<Longrightarrow> \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] assume "t \<subseteq> S" [PROOF STATE] proof (state) this: t \<subseteq> S goal (2 subgoals): 1. \<And>x t r. \<lbrakk>finite t; t \<subseteq> S\<rbrakk> \<Longrightarrow> (\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v) 2. \<And>x u. u \<in> UNIV \<Longrightarrow> \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] then [PROOF STATE] proof (chain) picking this: t \<subseteq> S [PROOF STEP] show "(\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v)" [PROOF STATE] proof (prove) using this: t \<subseteq> S goal (1 subgoal): 1. (\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v) [PROOF STEP] by (intro image_eqI[of _ _ "\<lambda>a. if a \<in> t then r a else 0"]) (auto simp: if_distrib[of "\<lambda>r. r *s a" for a] sum.If_cases fS Int_absorb1) [PROOF STATE] proof (state) this: (\<Sum>a\<in>t. r a *s a) \<in> range (\<lambda>u. \<Sum>v\<in>S. u v *s v) goal (1 subgoal): 1. \<And>x u. u \<in> UNIV \<Longrightarrow> \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x u. u \<in> UNIV \<Longrightarrow> \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] show "\<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S" for u [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>t r. (\<Sum>v\<in>S. u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S [PROOF STEP] by (intro exI[of _ u] exI[of _ S]) (auto intro: fS) [PROOF STATE] proof (state) this: \<exists>t r. (\<Sum>v\<in>S. ?u v *s v) = (\<Sum>a\<in>t. r a *s a) \<and> finite t \<and> t \<subseteq> S goal: No subgoals! [PROOF STEP] qed
If $s$ and $t$ are compact sets, then the set of all sums $x + y$ where $x \in s$ and $y \in t$ is compact.
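Concretely, the set in question is the Minkowski sum, and the standard argument is that it is the continuous image of a compact set:
$$ s + t \;=\; \{\, x + y \mid x \in s,\ y \in t \,\} \;=\; \operatorname{add}(s \times t), \qquad \operatorname{add}(x, y) = x + y, $$
so $s \times t$ is compact, $\operatorname{add}$ is continuous, and therefore $s + t$ is compact.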
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9. Copyright (c) 2020, 2021, Oracle and/or its affiliates. Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl -} module Dijkstra.EitherD where open import Agda.Builtin.Equality using (_≡_; refl) open import Data.Product using (_×_; _,_; proj₁; proj₂) open import Haskell.Prelude data EitherD (E : Set) : Set → Set₁ where -- Primitive combinators EitherD-return : ∀ {A} → A → EitherD E A EitherD-bind : ∀ {A B} → EitherD E A → (A → EitherD E B) → EitherD E B EitherD-bail : ∀ {A} → E → EitherD E A -- Branching conditionals (used for creating more convenient contracts) EitherD-if : ∀ {A} → Guards (EitherD E A) → EitherD E A EitherD-either : ∀ {A B C} → (B → EitherD E A) → (C → EitherD E A) → Either B C → EitherD E A EitherD-maybe : ∀ {A B} → EitherD E B → (A → EitherD E B) → Maybe A → EitherD E B pattern LeftD x = EitherD-bail x pattern RightD x = EitherD-return x private variable E : Set A B C : Set EitherD-run : EitherD E A → Either E A EitherD-run (EitherD-return x) = Right x EitherD-run (EitherD-bind m f) with EitherD-run m ... | Left x = Left x ... | Right y = EitherD-run (f y) EitherD-run (EitherD-bail x) = Left x EitherD-run (EitherD-if (clause (b ≔ c) gs)) = if toBool b then EitherD-run c else EitherD-run (EitherD-if gs) EitherD-run (EitherD-if (otherwise≔ c)) = EitherD-run c EitherD-run (EitherD-either f₁ f₂ (Left x)) = EitherD-run (f₁ x) EitherD-run (EitherD-either f₁ f₂ (Right y)) = EitherD-run (f₂ y) EitherD-run (EitherD-maybe n s nothing ) = EitherD-run n EitherD-run (EitherD-maybe n s (just x)) = EitherD-run (s x) EitherD-Pre : (E A : Set) → Set₁ EitherD-Pre E A = Set EitherD-Post : (E A : Set) → Set₁ EitherD-Post E A = Either E A → Set EitherD-Post-⇒ : ∀ {E} {A} → (P Q : EitherD-Post E A) → Set EitherD-Post-⇒ P Q = ∀ r → P r → Q r EitherD-PredTrans : (E A : Set) → Set₁ EitherD-PredTrans E A = EitherD-Post E A → EitherD-Pre E A EitherD-weakestPre-bindPost : (f : A → EitherD E B) → EitherD-Post E B → EitherD-Post E A EitherD-weakestPre : (m : EitherD E A) → EitherD-PredTrans E A EitherD-weakestPre (EitherD-return x) P = P (Right x) EitherD-weakestPre (EitherD-bind m f) P = EitherD-weakestPre m (EitherD-weakestPre-bindPost f P) EitherD-weakestPre (EitherD-bail x) P = P (Left x) EitherD-weakestPre (EitherD-if (clause (b ≔ c) gs)) P = (toBool b ≡ true → EitherD-weakestPre c P) × (toBool b ≡ false → EitherD-weakestPre (EitherD-if gs) P) EitherD-weakestPre (EitherD-if (otherwise≔ x)) P = EitherD-weakestPre x P EitherD-weakestPre (EitherD-either f₁ f₂ e) P = (∀ x → e ≡ Left x → EitherD-weakestPre (f₁ x) P) × (∀ y → e ≡ Right y → EitherD-weakestPre (f₂ y) P) EitherD-weakestPre (EitherD-maybe n s m) P = (m ≡ nothing → EitherD-weakestPre n P) × (∀ j → m ≡ just j → EitherD-weakestPre (s j) P) EitherD-weakestPre-bindPost f P (Left x) = P (Left x) EitherD-weakestPre-bindPost f P (Right y) = ∀ c → c ≡ y → EitherD-weakestPre (f c) P EitherD-Contract : (m : EitherD E A) → Set₁ EitherD-Contract{E}{A} m = (P : EitherD-Post E A) → EitherD-weakestPre m P → P (EitherD-run m) EitherD-contract : (m : EitherD E A) → EitherD-Contract m EitherD-contract (EitherD-return x) P wp = wp EitherD-contract (EitherD-bind m f) P wp with EitherD-contract m _ wp ...| wp' with EitherD-run m ... | Left x = wp' ... 
| Right y = EitherD-contract (f y) P (wp' y refl) EitherD-contract (EitherD-bail x) P wp = wp EitherD-contract{E}{A} (EitherD-if gs) P wp = EitherD-contract-if gs P wp where EitherD-contract-if : (gs : Guards (EitherD E A)) → EitherD-Contract (EitherD-if gs) EitherD-contract-if (clause (b ≔ c) gs) P wp with toBool b ... | false = EitherD-contract-if gs P (proj₂ wp refl) ... | true = EitherD-contract c P (proj₁ wp refl) EitherD-contract-if (otherwise≔ x) P wp = EitherD-contract x P wp EitherD-contract (EitherD-either f₁ f₂ (Left x)) P wp = EitherD-contract (f₁ x) P (proj₁ wp x refl) EitherD-contract (EitherD-either f₁ f₂ (Right y)) P wp = EitherD-contract (f₂ y) P (proj₂ wp y refl) EitherD-contract (EitherD-maybe f₁ f₂ nothing) P wp = EitherD-contract f₁ P (proj₁ wp refl) EitherD-contract (EitherD-maybe f₁ f₂ (just x)) P wp = EitherD-contract (f₂ x) P (proj₂ wp x refl) EitherD-⇒ : ∀ {E A} {P Q : EitherD-Post E A} → ∀ m → EitherD-weakestPre m P → (EitherD-Post-⇒ P Q) → EitherD-weakestPre m Q EitherD-⇒ {P = Post₁} {Post₂} (LeftD x ) pre pf = pf (Left x ) pre EitherD-⇒ {P = Post₁} {Post₂} (RightD x) pre pf = pf (Right x) pre EitherD-⇒ {P = Post₁} {Post₂} (EitherD-bind m x) pre pf = EitherD-⇒ m pre P⇒Q where P⇒Q : EitherD-Post-⇒ (EitherD-weakestPre-bindPost x Post₁) (EitherD-weakestPre-bindPost x Post₂) P⇒Q (Left rL) Pr = pf (Left rL) Pr P⇒Q (Right rR) Pr .rR refl = EitherD-⇒ (x rR) (Pr rR refl) pf EitherD-⇒ {Post₁} {Post₂} (EitherD-if (otherwise≔ x)) pre pf = EitherD-⇒ x pre pf EitherD-⇒ {Post₁} {Post₂} (EitherD-if (clause (x ≔ x₂) x₁)) (pre₁ , pre₂) pf = (λ x≡true → EitherD-⇒ x₂ (pre₁ x≡true) pf) , (λ x≡false → EitherD-⇒ (EitherD-if x₁) (pre₂ x≡false) pf) proj₁ (EitherD-⇒ {Post₁} {Post₂} (EitherD-either x₁ x₂ (Left x)) (pre₁ , pre₂) pf) .x refl = EitherD-⇒ (x₁ x) (pre₁ x refl) pf proj₂ (EitherD-⇒ {Post₁} {Post₂} (EitherD-either x₁ x₂ (Right x)) (pre₁ , pre₂) pf) .x refl = EitherD-⇒ (x₂ x) (pre₂ x refl) pf proj₁ (EitherD-⇒ {Post₁} {Post₂} (EitherD-maybe m x₁ .nothing) (pre₁ , pre₂) pf) refl = EitherD-⇒ m (pre₁ refl) pf proj₂ (EitherD-⇒ {Post₁} {Post₂} (EitherD-maybe m x₁ (just x)) (pre₁ , pre₂) pf) j refl = EitherD-⇒ (x₁ j) (pre₂ j refl) pf EitherD-⇒-bind : ∀ {E} {A} {P : EitherD-Post E A} → {Q : EitherD-Post E B} → {f : A → EitherD E B} → ∀ m → EitherD-weakestPre m P → EitherD-Post-⇒ P (EitherD-weakestPre-bindPost f Q) → EitherD-weakestPre (EitherD-bind m f) Q EitherD-⇒-bind = EitherD-⇒ EitherD-vacuous : ∀ (m : EitherD E A) → EitherD-weakestPre m (const Unit) EitherD-vacuous (LeftD x) = unit EitherD-vacuous (RightD x) = unit EitherD-vacuous (EitherD-if (otherwise≔ x)) = EitherD-vacuous x EitherD-vacuous (EitherD-if (clause (b ≔ x) x₁)) = (const (EitherD-vacuous x)) , (const (EitherD-vacuous (EitherD-if x₁))) EitherD-vacuous (EitherD-either x₁ x₂ x) = (λ x₃ _ → EitherD-vacuous (x₁ x₃)) , (λ y _ → EitherD-vacuous (x₂ y)) EitherD-vacuous (EitherD-maybe m x₁ x) = (const (EitherD-vacuous m)) , λ j _ → EitherD-vacuous (x₁ j) EitherD-vacuous (EitherD-bind m x) = EitherD-⇒-bind m (EitherD-vacuous m) λ { (Left _) _ → unit ; (Right _) _ → λ c _ → EitherD-vacuous (x c) }
theory Propositional imports Main begin text \<open> In this exercise, we will prove some lemmas of propositional logic with the aid of a calculus of natural deduction. For the proofs, you may only use \begin{itemize} \item the following lemmas: \\ @{text "notI:"}~@{thm notI[of A,no_vars]},\\ @{text "notE:"}~@{thm notE[of A B,no_vars]},\\ @{text "conjI:"}~@{thm conjI[of A B,no_vars]},\\ @{text "conjE:"}~@{thm conjE[of A B C,no_vars]},\\ @{text "disjI1:"}~@{thm disjI1[of A B,no_vars]},\\ @{text "disjI2:"}~@{thm disjI2[of A B,no_vars]},\\ @{text "disjE:"}~@{thm disjE[of A B C,no_vars]},\\ @{text "impI:"}~@{thm impI[of A B,no_vars]},\\ @{text "impE:"}~@{thm impE[of A B C,no_vars]},\\ @{text "mp:"}~@{thm mp[of A B,no_vars]}\\ @{text "iffI:"}~@{thm iffI[of A B,no_vars]}, \\ @{text "iffE:"}~@{thm iffE[of A B C,no_vars]}\\ @{text "classical:"}~@{thm classical[of A,no_vars]} \item the proof methods @{term rule}, @{term erule} and @{term assumption}. \end{itemize} Prove: \<close> lemma I: "A \<longrightarrow> A" apply (rule impI) apply (rule classical) by assumption lemma "A \<and> B \<longrightarrow> B \<and> A" apply (rule impI) apply (erule conjE) apply (rule conjI) by assumption lemma "(A \<and> B) \<longrightarrow> (A \<or> B)" apply (rule impI) apply (erule conjE) apply (rule disjI1) by assumption lemma "((A \<or> B) \<or> C) \<longrightarrow> A \<or> (B \<or> C)" apply (rule impI) apply (erule disjE) apply (erule disjE) apply (rule disjI1, assumption) apply (rule disjI2, rule disjI1, assumption) apply (rule disjI2, rule disjI2, assumption) done lemma "(A \<or> A) = (A \<and> A)" apply (rule iffI) apply (rule conjI) apply (erule disjE) apply assumption+ apply (erule disjE) apply assumption+ apply (erule conjE) apply (rule disjI1) by assumption lemma S: "(A \<longrightarrow> B \<longrightarrow> C) \<longrightarrow> (A \<longrightarrow> B) \<longrightarrow> A \<longrightarrow> C" apply (rule impI)+ apply (drule mp, assumption)+ by assumption lemma "(A \<longrightarrow> B) \<longrightarrow> (B \<longrightarrow> C) \<longrightarrow> A \<longrightarrow> C" apply (rule impI)+ apply (drule mp, assumption)+ by assumption lemma "\<not> \<not> A \<longrightarrow> A" apply (rule impI) apply (rule classical) apply (erule notE, assumption) done lemma "A \<longrightarrow> \<not> \<not> A" apply (rule impI) apply (rule notI) apply (erule notE, assumption) done lemma "(\<not> A \<longrightarrow> B) \<longrightarrow> (\<not> B \<longrightarrow> A)" apply (rule impI)+ apply (rule classical) apply (erule impE, assumption) by (rule notE) lemma "((A \<longrightarrow> B) \<longrightarrow> A) \<longrightarrow> A" apply (rule impI) apply (rule classical) apply (erule impE) apply (rule impI) by (erule notE, assumption)+ lemma "A \<or> \<not> A" apply (rule classical) apply (rule disjI2) apply (rule notI) apply (erule notE) apply (rule disjI1) by assumption lemma "(\<not> (A \<and> B)) = (\<not> A \<or> \<not> B)" apply (rule iffI) apply (rule classical) apply (rule disjI1) apply (rule notI) apply (erule notE) apply (rule classical) apply (rule conjI, assumption) apply (rule classical) apply (erule notE) apply (rule disjI2, assumption) apply (rule classical) apply (rule notI) apply (erule notE) apply (erule conjE) apply (erule disjE) by (erule notE, assumption)+ (*<*) end (*>*)
function uhlig05() # # test polynomial used by F. Uhlig # p = poly([-1*ones(6); 2; 2]); z = [-1.0 6; 2 2]; p, PolyZeros(z) end
import Data.List import Data.List1 import Data.String.Parser import System.File Coord : Type Coord = (Nat, Nat) Dots : Type Dots = List Coord data Fold = Up Nat | Left Nat Show Fold where show (Up n) = "fold up " ++ show n show (Left n) = "fold left " ++ show n Input : Type Input = (Dots, List Fold) pairParser : Parser Coord pairParser = do x <- natural token "," y <- natural pure (x, y) foldParser : Parser Fold foldParser = do token "fold along" (token "y=" *> Up <$> natural) <|> (token "x=" *> Left <$> natural) parser : Parser Input parser = do coords <- some (pairParser <* spaces) spaces folds <- some (foldParser <* spaces) pure (coords, folds) fold1 : Fold -> Coord -> Coord fold1 (Up f) (x, y) = if y > f then (x, minus f (minus y f)) else (x, y) fold1 (Left f) (x, y) = if x > f then (minus f (minus x f), y) else (x, y) fold : Fold -> Dots -> Dots fold f dots = .head <$> (group $ sort $ fold1 f <$> dots) part1 : Input -> IO String part1 (dots, []) = pure "No folds" part1 (dots, (f :: _)) = pure $ show $ length $ fold f dots printDots : Coord -> Dots -> IO () printDots (_, _) [] = pure () printDots (lx, ly) ((y, x) :: cs) = do if y > ly then putStr $ pack $ replicate (minus y ly) '\n' else putStr $ pack $ replicate (minus x (lx + 1)) ' ' putChar '#' printDots (x, y) cs part2 : Input -> IO String part2 (dots, fs) = do let r = sort $ (\(x, y) => (y, x)) <$> foldl (flip fold) dots fs printDots (0, 0) r pure "" main : IO () main = do Right input <- readFile "input.txt" | Left err => printLn err Right (a, _) <- pure $ parse parser input | Left err => printLn err part1 a >>= putStrLn part2 a >>= putStrLn
{-# OPTIONS --without-K #-} open import HoTT.Base open import HoTT.Identity.Sigma module HoTT.Pi.Transport where transport-→ : ∀ {i j k} {X : 𝒰 i} (A : X → 𝒰 j) (B : X → 𝒰 k) {x₁ x₂ : X} (p : x₁ == x₂) (f : A x₁ → B x₁) → transport (λ x → A x → B x) p f == transport B p ∘ f ∘ transport A (p ⁻¹) transport-→ A B refl f = refl module _ {i j k} {X : 𝒰 i} (A : X → 𝒰 j) (B : {x : X} → A x → 𝒰 k) {x₁ x₂ : X} (p : x₁ == x₂) (f : Π[ a ∶ A x₁ ] B a) (a : A x₂) where private B̂ : Σ[ x ∶ X ] A x → 𝒰 k B̂ w = B (pr₂ w) transport-Π : transport (λ x → Π[ a ∶ A x ] B a) p f a == transport {x = x₁ , transport _ (p ⁻¹) a} {y = x₂ , a} B̂ (pair⁼ (p ⁻¹ , refl) ⁻¹) (f (transport A (p ⁻¹) a)) transport-Π rewrite p = refl
/* * This file is part of Poedit (http://poedit.net) * * Copyright (C) 1999-2015 Vaclav Slavik * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include "edframe.h" #include <wx/wx.h> #include <wx/checkbox.h> #include <wx/config.h> #include <wx/html/htmlwin.h> #include <wx/statline.h> #include <wx/sizer.h> #include <wx/filedlg.h> #include <wx/datetime.h> #include <wx/tokenzr.h> #include <wx/xrc/xmlres.h> #include <wx/settings.h> #include <wx/button.h> #include <wx/statusbr.h> #include <wx/stdpaths.h> #include <wx/splitter.h> #include <wx/fontutil.h> #include <wx/textfile.h> #include <wx/wupdlock.h> #include <wx/iconbndl.h> #include <wx/dnd.h> #include <wx/windowptr.h> #ifdef __WXOSX__ #import <AppKit/NSDocumentController.h> #include "osx_helpers.h" #endif #include <algorithm> #include <map> #include <fstream> #include <boost/range/counting_range.hpp> #include "catalog.h" #include "concurrency.h" #include "crowdin_gui.h" #include "customcontrols.h" #include "edapp.h" #include "hidpi.h" #include "propertiesdlg.h" #include "prefsdlg.h" #include "fileviewer.h" #include "findframe.h" #include "tm/transmem.h" #include "language.h" #include "progressinfo.h" #include "commentdlg.h" #include "main_toolbar.h" #include "manager.h" #include "pluralforms/pl_evaluate.h" #include "attentionbar.h" #include "errorbar.h" #include "utility.h" #include "languagectrl.h" #include "welcomescreen.h" #include "errors.h" #include "sidebar.h" #include "spellchecking.h" #include "str_helpers.h" #include "syntaxhighlighter.h" #include "text_control.h" // this should be high enough to not conflict with any wxNewId-allocated value, PoeditFrame::PoeditFramesList PoeditFrame::ms_instances; // but there's a check for this in the PoeditFrame ctor, too const wxWindowID ID_POEDIT_FIRST = wxID_HIGHEST + 10000; const unsigned ID_POEDIT_STEP = 1000; const wxWindowID ID_POPUP_REFS = ID_POEDIT_FIRST + 1*ID_POEDIT_STEP; const wxWindowID ID_POPUP_DUMMY = ID_POEDIT_FIRST + 3*ID_POEDIT_STEP; const wxWindowID ID_BOOKMARK_GO = ID_POEDIT_FIRST + 4*ID_POEDIT_STEP; const wxWindowID ID_BOOKMARK_SET = ID_POEDIT_FIRST + 5*ID_POEDIT_STEP; const wxWindowID ID_POEDIT_LAST = ID_POEDIT_FIRST + 6*ID_POEDIT_STEP; const wxWindowID ID_LIST = wxNewId(); const wxWindowID ID_TEXTORIG = wxNewId(); const wxWindowID ID_TEXTORIGPLURAL = wxNewId(); const wxWindowID ID_TEXTTRANS = wxNewId(); #ifdef __VISUALC__ // Disabling the useless and annoying MSVC++'s // warning C4800: 'long' : forcing value to bool 'true' or 'false' // (performance warning): #pragma warning ( 
disable : 4800 ) #endif // I don't like this global flag, but all PoeditFrame instances must share it bool g_focusToText = false; /*static*/ PoeditFrame *PoeditFrame::Find(const wxString& filename) { wxFileName fn(filename); for (auto n: ms_instances) { if (wxFileName(n->GetFileName()) == fn) return n; } return NULL; } /*static*/ PoeditFrame *PoeditFrame::UnusedWindow(bool active) { for (auto win: ms_instances) { if ((!active || win->IsActive()) && win->m_catalog == nullptr) return win; } return nullptr; } /*static*/ bool PoeditFrame::AnyWindowIsModified() { for (PoeditFramesList::const_iterator n = ms_instances.begin(); n != ms_instances.end(); ++n) { if ((*n)->IsModified()) return true; } return false; } /*static*/ PoeditFrame *PoeditFrame::Create(const wxString& filename) { PoeditFrame *f = PoeditFrame::Find(filename); if (f) { f->Raise(); } else { // NB: duplicated in ReadCatalog() CatalogPtr cat = std::make_shared<Catalog>(filename); if (!cat->IsOk()) { wxMessageDialog dlg ( nullptr, _("The file cannot be opened."), _("Invalid file"), wxOK | wxICON_ERROR ); dlg.SetExtendedMessage( _("The file may be either corrupted or in a format not recognized by Poedit.") ); dlg.ShowModal(); return nullptr; } f = new PoeditFrame(); f->Show(true); f->ReadCatalog(cat); } f->Show(true); if (g_focusToText && f->m_textTrans) ((wxTextCtrl*)f->m_textTrans)->SetFocus(); else if (f->m_list) f->m_list->SetFocus(); return f; } /*static*/ PoeditFrame *PoeditFrame::CreateEmpty() { PoeditFrame *f = new PoeditFrame; f->Show(true); return f; } /*static*/ PoeditFrame *PoeditFrame::CreateWelcome() { PoeditFrame *f = new PoeditFrame; f->EnsureContentView(Content::Welcome); f->Show(true); return f; } class TransTextctrlHandler : public wxEvtHandler { public: TransTextctrlHandler(PoeditFrame* frame) : m_frame(frame) {} private: void OnText(wxCommandEvent& event) { m_frame->UpdateFromTextCtrl(); event.Skip(); } PoeditFrame *m_frame; DECLARE_EVENT_TABLE() }; BEGIN_EVENT_TABLE(TransTextctrlHandler, wxEvtHandler) EVT_TEXT(-1, TransTextctrlHandler::OnText) END_EVENT_TABLE() // special handling of events in listctrl class ListHandler : public wxEvtHandler { public: ListHandler(PoeditFrame *frame) : wxEvtHandler(), m_frame(frame) {} private: void OnSel(wxListEvent& event) { m_frame->OnListSel(event); } void OnRightClick(wxMouseEvent& event) { m_frame->OnListRightClick(event); } void OnFocus(wxFocusEvent& event) { m_frame->OnListFocus(event); } DECLARE_EVENT_TABLE() PoeditFrame *m_frame; }; BEGIN_EVENT_TABLE(ListHandler, wxEvtHandler) EVT_LIST_ITEM_SELECTED (ID_LIST, ListHandler::OnSel) EVT_RIGHT_DOWN ( ListHandler::OnRightClick) EVT_SET_FOCUS ( ListHandler::OnFocus) END_EVENT_TABLE() BEGIN_EVENT_TABLE(PoeditFrame, wxFrame) // OS X and GNOME apps should open new documents in a new window. On Windows, // however, the usual thing to do is to open the new document in the already // open window and replace the current document. 
#ifdef __WXMSW__ EVT_MENU (wxID_NEW, PoeditFrame::OnNew) EVT_MENU (XRCID("menu_new_from_pot"),PoeditFrame::OnNew) EVT_MENU (wxID_OPEN, PoeditFrame::OnOpen) #ifdef HAVE_HTTP_CLIENT EVT_MENU (XRCID("menu_open_crowdin"),PoeditFrame::OnOpenFromCrowdin) #endif #endif // __WXMSW__ #ifndef __WXOSX__ EVT_MENU_RANGE (wxID_FILE1, wxID_FILE9, PoeditFrame::OnOpenHist) EVT_MENU (wxID_CLOSE, PoeditFrame::OnCloseCmd) #endif EVT_MENU (wxID_SAVE, PoeditFrame::OnSave) EVT_MENU (wxID_SAVEAS, PoeditFrame::OnSaveAs) EVT_MENU (XRCID("menu_compile_mo"), PoeditFrame::OnCompileMO) EVT_MENU (XRCID("menu_export"), PoeditFrame::OnExport) EVT_MENU (XRCID("menu_catproperties"), PoeditFrame::OnProperties) EVT_MENU (XRCID("menu_update_from_src"), PoeditFrame::OnUpdateFromSources) EVT_MENU (XRCID("menu_update_from_pot"),PoeditFrame::OnUpdateFromPOT) #ifdef HAVE_HTTP_CLIENT EVT_MENU (XRCID("menu_update_from_crowdin"),PoeditFrame::OnUpdateFromCrowdin) #endif EVT_MENU (XRCID("toolbar_update"),PoeditFrame::OnUpdateSmart) EVT_MENU (XRCID("menu_validate"), PoeditFrame::OnValidate) EVT_MENU (XRCID("menu_purge_deleted"), PoeditFrame::OnPurgeDeleted) EVT_MENU (XRCID("menu_fuzzy"), PoeditFrame::OnFuzzyFlag) EVT_MENU (XRCID("menu_ids"), PoeditFrame::OnIDsFlag) EVT_MENU (XRCID("sort_by_order"), PoeditFrame::OnSortByFileOrder) EVT_MENU (XRCID("sort_by_source"), PoeditFrame::OnSortBySource) EVT_MENU (XRCID("sort_by_translation"), PoeditFrame::OnSortByTranslation) EVT_MENU (XRCID("sort_group_by_context"), PoeditFrame::OnSortGroupByContext) EVT_MENU (XRCID("sort_untrans_first"), PoeditFrame::OnSortUntranslatedFirst) EVT_MENU (XRCID("sort_errors_first"), PoeditFrame::OnSortErrorsFirst) EVT_MENU (XRCID("show_sidebar"), PoeditFrame::OnShowHideSidebar) EVT_UPDATE_UI (XRCID("show_sidebar"), PoeditFrame::OnUpdateShowHideSidebar) EVT_MENU (XRCID("show_statusbar"), PoeditFrame::OnShowHideStatusbar) EVT_UPDATE_UI (XRCID("show_statusbar"), PoeditFrame::OnUpdateShowHideStatusbar) EVT_MENU (XRCID("menu_copy_from_src"), PoeditFrame::OnCopyFromSource) EVT_MENU (XRCID("menu_clear"), PoeditFrame::OnClearTranslation) EVT_MENU (XRCID("menu_references"), PoeditFrame::OnReferencesMenu) EVT_MENU (wxID_FIND, PoeditFrame::OnFind) EVT_MENU (wxID_REPLACE, PoeditFrame::OnFindAndReplace) EVT_MENU (XRCID("menu_find_next"), PoeditFrame::OnFindNext) EVT_MENU (XRCID("menu_find_prev"), PoeditFrame::OnFindPrev) EVT_MENU (XRCID("menu_comment"), PoeditFrame::OnEditComment) EVT_BUTTON (XRCID("menu_comment"), PoeditFrame::OnEditComment) EVT_MENU (XRCID("go_done_and_next"), PoeditFrame::OnDoneAndNext) EVT_MENU (XRCID("go_prev"), PoeditFrame::OnPrev) EVT_MENU (XRCID("go_next"), PoeditFrame::OnNext) EVT_MENU (XRCID("go_prev_page"), PoeditFrame::OnPrevPage) EVT_MENU (XRCID("go_next_page"), PoeditFrame::OnNextPage) EVT_MENU (XRCID("go_prev_unfinished"), PoeditFrame::OnPrevUnfinished) EVT_MENU (XRCID("go_next_unfinished"), PoeditFrame::OnNextUnfinished) EVT_MENU_RANGE (ID_POPUP_REFS, ID_POPUP_REFS + 999, PoeditFrame::OnReference) EVT_COMMAND (wxID_ANY, EVT_SUGGESTION_SELECTED, PoeditFrame::OnSuggestion) EVT_MENU (XRCID("menu_auto_translate"), PoeditFrame::OnAutoTranslateAll) EVT_MENU_RANGE (ID_BOOKMARK_GO, ID_BOOKMARK_GO + 9, PoeditFrame::OnGoToBookmark) EVT_MENU_RANGE (ID_BOOKMARK_SET, ID_BOOKMARK_SET + 9, PoeditFrame::OnSetBookmark) EVT_CLOSE ( PoeditFrame::OnCloseWindow) EVT_SIZE (PoeditFrame::OnSize) // handling of selection: EVT_UPDATE_UI(XRCID("menu_references"), PoeditFrame::OnReferencesMenuUpdate) EVT_UPDATE_UI_RANGE(ID_BOOKMARK_SET, ID_BOOKMARK_SET + 9, 
PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_done_and_next"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_prev"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_next"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_prev_page"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_next_page"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_prev_unfinished"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("go_next_unfinished"), PoeditFrame::OnSingleSelectionUpdate) EVT_UPDATE_UI(XRCID("menu_fuzzy"), PoeditFrame::OnSelectionUpdateEditable) EVT_UPDATE_UI(XRCID("menu_copy_from_src"), PoeditFrame::OnSelectionUpdateEditable) EVT_UPDATE_UI(XRCID("menu_clear"), PoeditFrame::OnSelectionUpdateEditable) EVT_UPDATE_UI(XRCID("menu_comment"), PoeditFrame::OnEditCommentUpdate) // handling of open files: EVT_UPDATE_UI(wxID_SAVE, PoeditFrame::OnHasCatalogUpdate) EVT_UPDATE_UI(wxID_SAVEAS, PoeditFrame::OnHasCatalogUpdate) EVT_UPDATE_UI(XRCID("menu_statistics"), PoeditFrame::OnHasCatalogUpdate) EVT_UPDATE_UI(XRCID("menu_validate"), PoeditFrame::OnIsEditableUpdate) EVT_UPDATE_UI(XRCID("menu_update_from_src"), PoeditFrame::OnUpdateFromSourcesUpdate) #ifdef HAVE_HTTP_CLIENT EVT_UPDATE_UI(XRCID("menu_update_from_crowdin"), PoeditFrame::OnUpdateFromCrowdinUpdate) #endif EVT_UPDATE_UI(XRCID("menu_update_from_pot"), PoeditFrame::OnUpdateFromPOTUpdate) EVT_UPDATE_UI(XRCID("toolbar_update"), PoeditFrame::OnUpdateSmartUpdate) // handling of find/replace: EVT_UPDATE_UI(XRCID("menu_find_next"), PoeditFrame::OnUpdateFind) EVT_UPDATE_UI(XRCID("menu_find_prev"), PoeditFrame::OnUpdateFind) #if defined(__WXMSW__) || defined(__WXGTK__) EVT_MENU(wxID_UNDO, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_REDO, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_CUT, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_COPY, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_PASTE, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_DELETE, PoeditFrame::OnTextEditingCommand) EVT_MENU(wxID_SELECTALL, PoeditFrame::OnTextEditingCommand) EVT_UPDATE_UI(wxID_UNDO, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_REDO, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_CUT, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_COPY, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_PASTE, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_DELETE, PoeditFrame::OnTextEditingCommandUpdate) EVT_UPDATE_UI(wxID_SELECTALL, PoeditFrame::OnTextEditingCommandUpdate) #endif END_EVENT_TABLE() #if 0 // These translations are provided by wxWidgets. Force the strings here, // even though unused, because Poedit is translated into many more languages // than wx is. 
_("&Undo"), _("Undo") _("&Redo"), _("Redo") _("Cu&t"), _("Cut") _("&Copy"), _("Copy") _("&Paste"), _("Paste") _("&Delete"), _("Delete") _("Select &All"), _("Select All") /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Ctrl+"), /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Alt+"), /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Shift+"), /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Enter"), /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Up"), /// TRANSLATORS: Keyboard shortcut for display in Windows menus _("Down"), /// TRANSLATORS: Keyboard shortcut, must correspond to translation of "Ctrl+" _("ctrl"), /// TRANSLATORS: Keyboard shortcut, must correspond to translation of "Alt+" _("alt"), /// TRANSLATORS: Keyboard shortcut, must correspond to translation of "Shift+" _("shift"), #endif class PoeditDropTarget : public wxFileDropTarget { public: PoeditDropTarget(PoeditFrame *win) : m_win(win) {} virtual bool OnDropFiles(wxCoord /*x*/, wxCoord /*y*/, const wxArrayString& files) { if ( files.size() != 1 ) { wxLogError(_("You can't drop more than one file on Poedit window.")); return false; } wxFileName f(files[0]); auto ext = f.GetExt().Lower(); if ( ext != "po" && ext != "pot" ) { wxLogError(_("File '%s' is not a message catalog."), f.GetFullPath().c_str()); return false; } if ( !f.FileExists() ) { wxLogError(_("File '%s' doesn't exist."), f.GetFullPath().c_str()); return false; } m_win->OpenFile(f.GetFullPath()); return true; } private: PoeditFrame *m_win; }; // Frame class: PoeditFrame::PoeditFrame() : wxFrame(NULL, -1, _("Poedit"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_FRAME_STYLE | wxNO_FULL_REPAINT_ON_RESIZE, "mainwin"), m_contentType(Content::Invalid), m_contentView(nullptr), m_catalog(nullptr), m_fileExistsOnDisk(false), m_list(nullptr), m_modified(false), m_hasObsoleteItems(false), m_dontAutoclearFuzzyStatus(false), m_setSashPositionsWhenMaximized(false) { m_list = nullptr; m_textTrans = nullptr; m_textOrig = nullptr; m_textOrigPlural = nullptr; m_splitter = nullptr; m_sidebarSplitter = nullptr; m_sidebar = nullptr; m_errorBar = nullptr; m_labelContext = m_labelPlural = m_labelSingular = nullptr; m_pluralNotebook = nullptr; // make sure that the [ID_POEDIT_FIRST,ID_POEDIT_LAST] range of IDs is not // used for anything else: wxASSERT_MSG( wxGetCurrentId() < ID_POEDIT_FIRST || wxGetCurrentId() > ID_POEDIT_LAST, "detected ID values conflict!" 
); wxRegisterId(ID_POEDIT_LAST); wxConfigBase *cfg = wxConfig::Get(); m_displayIDs = (bool)cfg->Read("display_lines", (long)false); g_focusToText = (bool)cfg->Read("focus_to_text", (long)false); #if defined(__WXGTK__) wxIconBundle appicons; appicons.AddIcon(wxArtProvider::GetIcon("poedit", wxART_FRAME_ICON, wxSize(16,16))); appicons.AddIcon(wxArtProvider::GetIcon("poedit", wxART_FRAME_ICON, wxSize(32,32))); appicons.AddIcon(wxArtProvider::GetIcon("poedit", wxART_FRAME_ICON, wxSize(48,48))); SetIcons(appicons); #elif defined(__WXMSW__) SetIcons(wxIconBundle(wxStandardPaths::Get().GetResourcesDir() + "\\Resources\\Poedit.ico")); #endif // This is different from the default, because it's a bit smaller on OS X m_normalGuiFont = wxSystemSettings::GetFont(wxSYS_DEFAULT_GUI_FONT); m_boldGuiFont = m_normalGuiFont; m_boldGuiFont.SetWeight(wxFONTWEIGHT_BOLD); wxMenuBar *MenuBar = wxXmlResource::Get()->LoadMenuBar("mainmenu"); if (MenuBar) { #ifndef __WXOSX__ m_menuForHistory = MenuBar->GetMenu(MenuBar->FindMenu(_("&File"))); FileHistory().UseMenu(m_menuForHistory); FileHistory().AddFilesToMenu(m_menuForHistory); #endif SetMenuBar(MenuBar); AddBookmarksMenu(MenuBar->GetMenu(MenuBar->FindMenu(_("&Go")))); #ifdef __WXOSX__ wxGetApp().TweakOSXMenuBar(MenuBar); #endif #ifndef HAVE_HTTP_CLIENT wxMenu *menu; wxMenuItem *item; item = MenuBar->FindItem(XRCID("menu_update_from_crowdin"), &menu); menu->Destroy(item); item = MenuBar->FindItem(XRCID("menu_open_crowdin"), &menu); menu->Destroy(item); #endif } else { wxLogError("Cannot load main menu from resource, something must have went terribly wrong."); wxLog::FlushActive(); return; } m_toolbar = MainToolbar::Create(this); GetMenuBar()->Check(XRCID("menu_ids"), m_displayIDs); if (wxConfigBase::Get()->ReadBool("/statusbar_shown", true)) CreateStatusBar(1, wxST_SIZEGRIP); m_contentWrappingSizer = new wxBoxSizer(wxVERTICAL); SetSizer(m_contentWrappingSizer); m_attentionBar = new AttentionBar(this); m_contentWrappingSizer->Add(m_attentionBar, wxSizerFlags().Expand()); SetAccelerators(); wxSize defaultSize(PX(1100), PX(750)); if (!wxRect(wxGetDisplaySize()).Contains(wxSize(PX(1400),PX(850)))) defaultSize = wxSize(PX(980), PX(700)); RestoreWindowState(this, defaultSize, WinState_Size | WinState_Pos); UpdateMenu(); ms_instances.insert(this); SetDropTarget(new PoeditDropTarget(this)); #ifdef __WXOSX__ NSWindow *wnd = (NSWindow*)GetWXWindow(); [wnd setCollectionBehavior:NSWindowCollectionBehaviorFullScreenPrimary]; #endif } void PoeditFrame::EnsureContentView(Content type) { if (m_contentType == type) return; #ifdef __WXMSW__ wxWindowUpdateLocker no_updates(this); #endif if (m_contentView) DestroyContentView(); switch (type) { case Content::Invalid: m_contentType = Content::Invalid; return; // nothing to do case Content::Welcome: m_contentView = CreateContentViewWelcome(); break; case Content::Empty_PO: m_contentView = CreateContentViewEmptyPO(); break; case Content::PO: case Content::POT: m_contentView = CreateContentViewPO(type); break; } m_contentType = type; m_contentWrappingSizer->Add(m_contentView, wxSizerFlags(1).Expand()); Layout(); #ifdef __WXMSW__ m_contentView->Show(); Layout(); #endif } void PoeditFrame::EnsureAppropriateContentView() { wxCHECK_RET( m_catalog, "must have catalog here" ); if (m_catalog->empty()) { EnsureContentView(Content::Empty_PO); } else { switch (m_catalog->GetFileType()) { case Catalog::Type::PO: EnsureContentView(Content::PO); break; case Catalog::Type::POT: EnsureContentView(Content::POT); break; } } } wxWindow* 
PoeditFrame::CreateContentViewPO(Content type) { auto main = new wxPanel(this, wxID_ANY); auto mainSizer = new wxBoxSizer(wxHORIZONTAL); main->SetSizer(mainSizer); #ifdef __WXMSW__ // don't create the window as shown, avoid flicker main->Hide(); #endif m_sidebarSplitter = new wxSplitterWindow(main, -1, wxDefaultPosition, wxDefaultSize, wxSP_NOBORDER | wxSP_LIVE_UPDATE); m_sidebarSplitter->Bind(wxEVT_SPLITTER_SASH_POS_CHANGING, &PoeditFrame::OnSidebarSplitterSashMoving, this); mainSizer->Add(m_sidebarSplitter, wxSizerFlags(1).Expand()); m_splitter = new wxSplitterWindow(m_sidebarSplitter, -1, wxDefaultPosition, wxDefaultSize, wxSP_NOBORDER | wxSP_LIVE_UPDATE); m_splitter->Bind(wxEVT_SPLITTER_SASH_POS_CHANGING, &PoeditFrame::OnSplitterSashMoving, this); // make only the upper part grow when resizing m_splitter->SetSashGravity(1.0); m_list = new PoeditListCtrl(m_splitter, ID_LIST, wxDefaultPosition, wxDefaultSize, wxLC_REPORT, m_displayIDs); m_bottomPanel = new wxPanel(m_splitter); wxStaticText *labelSource = new wxStaticText(m_bottomPanel, -1, _("Source text:")); labelSource->SetFont(m_boldGuiFont); m_labelContext = new wxStaticText(m_bottomPanel, -1, wxEmptyString); m_labelContext->SetFont(m_normalGuiFont); m_labelContext->Hide(); m_labelSingular = new wxStaticText(m_bottomPanel, -1, _("Singular:")); m_labelSingular->SetFont(m_normalGuiFont); m_textOrig = new SourceTextCtrl(m_bottomPanel, ID_TEXTORIG); m_labelPlural = new wxStaticText(m_bottomPanel, -1, _("Plural:")); m_labelPlural->SetFont(m_normalGuiFont); m_textOrigPlural = new SourceTextCtrl(m_bottomPanel, ID_TEXTORIGPLURAL); auto *panelSizer = new wxBoxSizer(wxVERTICAL); wxFlexGridSizer *gridSizer = new wxFlexGridSizer(2); gridSizer->AddGrowableCol(1); gridSizer->AddGrowableRow(0); gridSizer->AddGrowableRow(1); gridSizer->Add(m_labelSingular, 0, wxALIGN_CENTER_VERTICAL | wxALL, 3); gridSizer->Add(m_textOrig, 1, wxEXPAND); gridSizer->Add(m_labelPlural, 0, wxALIGN_CENTER_VERTICAL | wxALL, 3); gridSizer->Add(m_textOrigPlural, 1, wxEXPAND); gridSizer->SetItemMinSize(m_textOrig, 1, 1); gridSizer->SetItemMinSize(m_textOrigPlural, 1, 1); panelSizer->Add(m_labelContext, 0, wxEXPAND | wxALL, 3); panelSizer->Add(labelSource, 0, wxEXPAND | wxALL, 3); panelSizer->Add(gridSizer, 1, wxEXPAND); if (type == Content::POT) CreateContentViewTemplateControls(m_bottomPanel, panelSizer); else CreateContentViewEditControls(m_bottomPanel, panelSizer); SetCustomFonts(); m_bottomPanel->SetAutoLayout(true); m_bottomPanel->SetSizer(panelSizer); m_splitter->SetMinimumPaneSize(PX(200)); m_sidebarSplitter->SetMinimumPaneSize(PX(200)); m_list->PushEventHandler(new ListHandler(this)); auto suggestionsMenu = GetMenuBar()->FindItem(XRCID("menu_suggestions"))->GetSubMenu(); m_sidebar = new Sidebar(m_sidebarSplitter, suggestionsMenu); m_sidebar->Bind(wxEVT_UPDATE_UI, &PoeditFrame::OnSingleSelectionUpdate, this); ShowPluralFormUI(false); UpdateMenu(); switch ( m_list->sortOrder.by ) { case SortOrder::By_FileOrder: GetMenuBar()->Check(XRCID("sort_by_order"), true); break; case SortOrder::By_Source: GetMenuBar()->Check(XRCID("sort_by_source"), true); break; case SortOrder::By_Translation: GetMenuBar()->Check(XRCID("sort_by_translation"), true); break; } GetMenuBar()->Check(XRCID("sort_group_by_context"), m_list->sortOrder.groupByContext); GetMenuBar()->Check(XRCID("sort_untrans_first"), m_list->sortOrder.untransFirst); GetMenuBar()->Check(XRCID("sort_errors_first"), m_list->sortOrder.errorsFirst); // Call splitter splitting later, when the window is laid out, otherwise // 
the sizes would get truncated immediately: CallAfter([=]{ // This is a hack -- windows are not maximized immediately and so we can't // set correct sash position in ctor (unmaximized window may be too small // for sash position when maximized -- see bug #2120600) if ( wxConfigBase::Get()->Read(WindowStatePath(this) + "maximized", long(0)) ) m_setSashPositionsWhenMaximized = true; if (wxConfigBase::Get()->ReadBool("/sidebar_shown", true)) { auto split = GetSize().x * wxConfigBase::Get()->ReadDouble("/sidebar_splitter", 0.75); m_sidebarSplitter->SplitVertically(m_splitter, m_sidebar, split); } else { m_sidebar->Hide(); m_sidebarSplitter->Initialize(m_splitter); Layout(); } m_splitter->SplitHorizontally(m_list, m_bottomPanel, (int)wxConfigBase::Get()->ReadLong("/splitter", -PX(250))); if (m_sidebar) m_sidebar->SetUpperHeight(m_splitter->GetSashPosition()); }); return main; } void PoeditFrame::CreateContentViewEditControls(wxWindow *p, wxBoxSizer *panelSizer) { p->Bind(wxEVT_UPDATE_UI, &PoeditFrame::OnSingleSelectionUpdate, this); wxStaticText *labelTrans = new wxStaticText(p, -1, _("Translation:")); labelTrans->SetFont(m_boldGuiFont); m_textTrans = new TranslationTextCtrl(p, ID_TEXTTRANS); m_textTrans->PushEventHandler(new TransTextctrlHandler(this)); // in case of plurals form, this is the control for n=1: m_textTransSingularForm = nullptr; m_pluralNotebook = new wxNotebook(p, -1); m_errorBar = new ErrorBar(p); panelSizer->Add(labelTrans, 0, wxEXPAND | wxALL, 3); panelSizer->Add(m_textTrans, 1, wxEXPAND); panelSizer->Add(m_pluralNotebook, 1, wxEXPAND); panelSizer->Add(m_errorBar, 0, wxEXPAND | wxALL, 2); } void PoeditFrame::CreateContentViewTemplateControls(wxWindow *p, wxBoxSizer *panelSizer) { auto win = new wxPanel(p, wxID_ANY); auto sizer = new wxBoxSizer(wxVERTICAL); auto explain = new wxStaticText(win, wxID_ANY, _(L"POT files are only templates and don’t contain any translations themselves.\nTo make a translation, create a new PO file based on the template."), wxDefaultPosition, wxDefaultSize, wxALIGN_CENTRE_HORIZONTAL); #ifdef __WXOSX__ explain->SetWindowVariant(wxWINDOW_VARIANT_SMALL); #endif explain->SetForegroundColour(ExplanationLabel::GetTextColor().ChangeLightness(160)); win->SetBackgroundColour(GetBackgroundColour().ChangeLightness(50)); auto button = new wxButton(win, wxID_ANY, MSW_OR_OTHER(_("Create new translation"), _("Create New Translation"))); button->Bind(wxEVT_BUTTON, [=](wxCommandEvent&) { wxWindowPtr<LanguageDialog> dlg(new LanguageDialog(this)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) NewFromPOT(m_catalog->GetFileName(), dlg->GetLang()); }); }); sizer->AddStretchSpacer(); sizer->Add(explain, wxSizerFlags().Center().Border(wxLEFT|wxRIGHT, PX(100))); sizer->Add(button, wxSizerFlags().Center().Border(wxTOP|wxBOTTOM, PX(10))); sizer->AddStretchSpacer(); win->SetSizerAndFit(sizer); panelSizer->Add(win, 1, wxEXPAND); } wxWindow* PoeditFrame::CreateContentViewWelcome() { return new WelcomeScreenPanel(this); } wxWindow* PoeditFrame::CreateContentViewEmptyPO() { return new EmptyPOScreenPanel(this); } void PoeditFrame::DestroyContentView() { if (!m_contentView) return; if (m_list) m_list->PopEventHandler(true/*delete*/); if (m_textTrans) m_textTrans->PopEventHandler(true/*delete*/); for (auto tp : m_textTransPlural) { tp->PopEventHandler(true/*delete*/); } m_textTransPlural.clear(); NotifyCatalogChanged(nullptr); if (m_splitter) wxConfigBase::Get()->Write("/splitter", (long)m_splitter->GetSashPosition()); 
m_contentWrappingSizer->Detach(m_contentView); m_contentView->Destroy(); m_contentView = nullptr; m_list = nullptr; m_labelContext = m_labelSingular = m_labelPlural = nullptr; m_textTrans = m_textTransSingularForm = nullptr; m_textOrig = nullptr; m_textOrigPlural = nullptr; m_errorBar = nullptr; m_splitter = nullptr; m_sidebarSplitter = nullptr; m_sidebar = nullptr; m_pluralNotebook = nullptr; if (m_findWindow) { m_findWindow->Destroy(); m_findWindow.Release(); } } PoeditFrame::~PoeditFrame() { ms_instances.erase(this); DestroyContentView(); wxConfigBase *cfg = wxConfig::Get(); cfg->SetPath("/"); cfg->Write("display_lines", m_displayIDs); SaveWindowState(this); #ifndef __WXOSX__ FileHistory().RemoveMenu(m_menuForHistory); FileHistory().Save(*cfg); #endif // write all changes: cfg->Flush(); m_catalog.reset(); m_pendingHumanEditedItem.reset(); // shutdown the spellchecker: InitSpellchecker(); } void PoeditFrame::SetAccelerators() { wxAcceleratorEntry entries[] = { #ifdef __WXMSW__ { wxACCEL_CTRL, WXK_F3, XRCID("menu_find_next") }, { wxACCEL_CTRL | wxACCEL_SHIFT, WXK_F3, XRCID("menu_find_prev") }, #endif { wxACCEL_CTRL, WXK_PAGEUP, XRCID("go_prev_page") }, { wxACCEL_CTRL, WXK_NUMPAD_PAGEUP, XRCID("go_prev_page") }, { wxACCEL_CTRL, WXK_PAGEDOWN, XRCID("go_next_page") }, { wxACCEL_CTRL, WXK_NUMPAD_PAGEDOWN, XRCID("go_next_page") }, { wxACCEL_CTRL | wxACCEL_SHIFT, WXK_UP, XRCID("go_prev_unfinished") }, { wxACCEL_CTRL | wxACCEL_SHIFT, WXK_NUMPAD_UP, XRCID("go_prev_unfinished") }, { wxACCEL_CTRL | wxACCEL_SHIFT, WXK_DOWN, XRCID("go_next_unfinished") }, { wxACCEL_CTRL | wxACCEL_SHIFT, WXK_NUMPAD_DOWN, XRCID("go_next_unfinished") }, { wxACCEL_CTRL, WXK_UP, XRCID("go_prev") }, { wxACCEL_CTRL, WXK_NUMPAD_UP, XRCID("go_prev") }, { wxACCEL_CTRL, WXK_DOWN, XRCID("go_next") }, { wxACCEL_CTRL, WXK_NUMPAD_DOWN, XRCID("go_next") }, { wxACCEL_CTRL, WXK_RETURN, XRCID("go_done_and_next") }, { wxACCEL_CTRL, WXK_NUMPAD_ENTER, XRCID("go_done_and_next") } }; wxAcceleratorTable accel(WXSIZEOF(entries), entries); SetAcceleratorTable(accel); } void PoeditFrame::InitSpellchecker() { if (!IsSpellcheckingAvailable()) { #ifdef __WXMSW__ int osmajor = 0, osminor = 0; if (wxGetOsVersion(&osmajor, &osminor) == wxOS_WINDOWS_NT && osmajor == 6 && osminor == 1 && wxDateTime::Now() < wxDateTime(29, wxDateTime::Jul, 2016)) { AttentionMessage msg ( "windows10-spellchecking", AttentionMessage::Info, _("Upgrade to Windows 10 (for free) to enable spellchecking in Poedit.") ); msg.SetExplanation(_("Poedit needs Windows 8 or newer for spellchecking, but you only have Windows 7. 
Microsoft is offering free upgrades to Windows 10 until July 29, 2016.")); msg.AddAction(_("Learn more"), []{ wxLaunchDefaultBrowser("https://www.microsoft.com/en-us/windows/windows-10-upgrade"); }); msg.AddDontShowAgain(); m_attentionBar->ShowMessage(msg); } #endif // __WXMSW__ return; } if (!m_catalog || !m_textTrans) return; Language lang = m_catalog->GetLanguage(); bool report_problem = false; bool enabled = m_catalog && #ifndef __WXMSW__ // language choice is automatic, per-keyboard on Windows lang.IsValid() && #endif wxConfig::Get()->Read("enable_spellchecking", (long)true); const bool enabledInitially = enabled; #ifdef __WXOSX__ if (enabled) { if ( !SetSpellcheckerLang(lang.LangAndCountry()) ) { enabled = false; report_problem = true; } } #endif if ( !InitTextCtrlSpellchecker(m_textTrans, enabled, lang) ) report_problem = true; for (size_t i = 0; i < m_textTransPlural.size(); i++) { if ( !InitTextCtrlSpellchecker(m_textTransPlural[i], enabled, lang) ) report_problem = true; } #ifndef __WXMSW__ // language choice is automatic, per-keyboard on Windows, can't fail if ( enabledInitially && report_problem ) { AttentionMessage msg ( "missing-spell-dict", AttentionMessage::Warning, wxString::Format ( // TRANSLATORS: %s is language name in its basic form (as you // would see e.g. in a list of supported languages). You may need // to rephrase it, e.g. to an equivalent of "for language %s". _(L"Spellchecking is disabled, because the dictionary for %s isn’t installed."), lang.LanguageDisplayName() ) ); msg.AddAction(_("Install"), []{ ShowSpellcheckerHelp(); }); msg.AddDontShowAgain(); m_attentionBar->ShowMessage(msg); } #endif // !__WXMSW__ } void PoeditFrame::UpdateTextLanguage() { if (!m_catalog || !m_textTrans) return; InitSpellchecker(); auto isRTL = m_catalog->GetLanguage().IsRTL(); m_textTrans->SetLanguageRTL(isRTL); for (auto tp : m_textTransPlural) tp->SetLanguageRTL(isRTL); if (m_sidebar) m_sidebar->RefreshContent(); } #ifndef __WXOSX__ void PoeditFrame::OnCloseCmd(wxCommandEvent&) { Close(); } #endif void PoeditFrame::OpenFile(const wxString& filename) { DoIfCanDiscardCurrentDoc([=]{ DoOpenFile(filename); }); } void PoeditFrame::DoOpenFile(const wxString& filename) { ReadCatalog(filename); if (m_textTrans && m_list) { if (g_focusToText) m_textTrans->SetFocus(); else m_list->SetFocus(); } } bool PoeditFrame::NeedsToAskIfCanDiscardCurrentDoc() const { return m_catalog && m_modified; } template<typename TFunctor> void PoeditFrame::DoIfCanDiscardCurrentDoc(TFunctor completionHandler) { if ( !NeedsToAskIfCanDiscardCurrentDoc() ) { completionHandler(); return; } wxWindowPtr<wxMessageDialog> dlg = CreateAskAboutSavingDialog(); dlg->ShowWindowModalThenDo([this,dlg,completionHandler](int retval) { // hide the dialog asap, WriteCatalog() may show another modal sheet dlg->Hide(); #ifdef __WXOSX__ // Hide() alone is not sufficient on OS X, we need to destroy dlg // shared_ptr and only then continue. Because this code is called // from event loop (and not this functions' caller) at an unspecified // time anyway, we can just as well defer it into the next idle time // iteration. 
CallAfter([this,retval,completionHandler]() { #endif if (retval == wxID_YES) { auto doSaveFile = [=](const wxString& fn){ WriteCatalog(fn, [=](bool saved){ if (saved) completionHandler(); }); }; if (!m_fileExistsOnDisk || GetFileName().empty()) GetSaveAsFilenameThenDo(m_catalog, doSaveFile); else doSaveFile(GetFileName()); } else if (retval == wxID_NO) { // call completion without saving the document completionHandler(); } else if (retval == wxID_CANCEL) { // do not call -- not OK } #ifdef __WXOSX__ }); #endif }); } wxWindowPtr<wxMessageDialog> PoeditFrame::CreateAskAboutSavingDialog() { wxWindowPtr<wxMessageDialog> dlg(new wxMessageDialog ( this, _("Catalog modified. Do you want to save changes?"), _("Save changes"), wxYES_NO | wxCANCEL | wxICON_QUESTION )); dlg->SetExtendedMessage(_("Your changes will be lost if you don't save them.")); dlg->SetYesNoLabels ( _("Save"), #ifdef __WXMSW__ _("Don't save") #else _("Don't Save") #endif ); return dlg; } void PoeditFrame::OnCloseWindow(wxCloseEvent& event) { if (event.CanVeto() && NeedsToAskIfCanDiscardCurrentDoc()) { #ifdef __WXOSX__ // Veto the event by default, the window-modally ask for permission. // If it turns out that the window can be closed, the completion handler // will do it: event.Veto(); #endif DoIfCanDiscardCurrentDoc([=]{ Destroy(); }); } else // can't veto { Destroy(); } } void PoeditFrame::OnOpen(wxCommandEvent&) { DoIfCanDiscardCurrentDoc([=]{ wxString path = wxPathOnly(GetFileName()); if (path.empty()) path = wxConfig::Get()->Read("last_file_path", wxEmptyString); wxString name = wxFileSelector(OSX_OR_OTHER("", _("Open catalog")), path, wxEmptyString, wxEmptyString, Catalog::GetAllTypesFileMask(), wxFD_OPEN | wxFD_FILE_MUST_EXIST, this); if (!name.empty()) { wxConfig::Get()->Write("last_file_path", wxPathOnly(name)); DoOpenFile(name); } }); } #ifdef HAVE_HTTP_CLIENT void PoeditFrame::OnOpenFromCrowdin(wxCommandEvent&) { DoIfCanDiscardCurrentDoc([=]{ CrowdinOpenFile(this, [=](wxString name){ DoOpenFile(name); }); }); } #endif #ifndef __WXOSX__ void PoeditFrame::OnOpenHist(wxCommandEvent& event) { wxString f(FileHistory().GetHistoryFile(event.GetId() - wxID_FILE1)); if ( !wxFileExists(f) ) { wxLogError(_("File '%s' doesn't exist."), f.c_str()); return; } OpenFile(f); } #endif // !__WXOSX__ void PoeditFrame::OnSave(wxCommandEvent& event) { try { if (!m_fileExistsOnDisk || GetFileName().empty()) OnSaveAs(event); else WriteCatalog(GetFileName()); } catch (Exception& e) { wxLogError("%s", e.What()); } } static wxString SuggestFileName(const CatalogPtr& catalog) { wxString name; if (catalog) name = catalog->GetLanguage().Code(); if (name.empty()) return "default"; else return name; } template<typename F> void PoeditFrame::GetSaveAsFilenameThenDo(const CatalogPtr& cat, F then) { auto current = cat->GetFileName(); wxString name(wxFileNameFromPath(current)); wxString path(wxPathOnly(current)); if (name.empty()) { path = wxConfig::Get()->Read("last_file_path", wxEmptyString); name = SuggestFileName(cat) + ".po"; } wxWindowPtr<wxFileDialog> dlg( new wxFileDialog(this, OSX_OR_OTHER("", _("Save as...")), path, name, m_catalog->GetFileMask(), wxFD_SAVE | wxFD_OVERWRITE_PROMPT)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode != wxID_OK) return; auto fn = dlg->GetPath(); wxConfig::Get()->Write("last_file_path", wxPathOnly(name)); then(fn); }); } void PoeditFrame::DoSaveAs(const wxString& filename) { if (filename.empty()) return; WriteCatalog(filename); } void PoeditFrame::OnSaveAs(wxCommandEvent&) { 
GetSaveAsFilenameThenDo(m_catalog, [=](const wxString& fn){ DoSaveAs(fn); }); } void PoeditFrame::OnCompileMO(wxCommandEvent&) { auto fileName = GetFileName(); wxString name; wxFileName::SplitPath(fileName, nullptr, &name, nullptr); if (name.empty()) { name = SuggestFileName(m_catalog) + ".mo"; } else name += ".mo"; wxWindowPtr<wxFileDialog> dlg( new wxFileDialog(this, OSX_OR_OTHER("", _("Compile to...")), wxPathOnly(fileName), name, wxString::Format("%s (*.mo)|*.mo", _("Compiled Translation Files")), wxFD_SAVE | wxFD_OVERWRITE_PROMPT)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode != wxID_OK) return; wxBusyCursor bcur; auto fn = dlg->GetPath(); wxConfig::Get()->Write("last_file_path", wxPathOnly(fn)); int validation_errors = 0; Catalog::CompilationStatus compilation_status = Catalog::CompilationStatus::NotDone; if (!m_catalog->CompileToMO(fn, validation_errors, compilation_status)) return; if (validation_errors) { // Note: this may show window-modal window and because we may // be called from such window too, run this in the next // event loop iteration. CallAfter([=]{ ReportValidationErrors(validation_errors, compilation_status, /*from_save=*/true, /*other_file_saved=*/false, []{}); }); } }); } void PoeditFrame::OnExport(wxCommandEvent&) { auto fileName = GetFileName(); wxString name; wxFileName::SplitPath(fileName, nullptr, &name, nullptr); if (name.empty()) { name = SuggestFileName(m_catalog) + ".html"; } else name += ".html"; wxWindowPtr<wxFileDialog> dlg( new wxFileDialog(this, OSX_OR_OTHER("", _("Export as...")), wxPathOnly(fileName), name, wxString::Format("%s (*.html)|*.html", _("HTML Files")), wxFD_SAVE | wxFD_OVERWRITE_PROMPT)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode != wxID_OK) return; auto fn = dlg->GetPath(); wxConfig::Get()->Write("last_file_path", wxPathOnly(fn)); ExportCatalog(fn); }); } bool PoeditFrame::ExportCatalog(const wxString& filename) { wxBusyCursor bcur; TempOutputFileFor tempfile(filename); std::ofstream f; f.open(tempfile.FileName().fn_str()); m_catalog->ExportToHTML(f); f.close(); if (!tempfile.Commit()) { wxLogError(_("Couldn't save file %s."), filename); return false; } return true; } void PoeditFrame::OnNew(wxCommandEvent& event) { DoIfCanDiscardCurrentDoc([=]{ bool isFromPOT = event.GetId() == XRCID("menu_new_from_pot"); if (isFromPOT) NewFromPOT(); else NewFromScratch(); }); } void PoeditFrame::NewFromPOT() { wxString path = wxPathOnly(GetFileName()); if (path.empty()) path = wxConfig::Get()->Read("last_file_path", wxEmptyString); wxString pot_file = wxFileSelector(_("Open catalog template"), path, wxEmptyString, wxEmptyString, Catalog::GetTypesFileMask({Catalog::Type::POT, Catalog::Type::PO}), wxFD_OPEN | wxFD_FILE_MUST_EXIST, this); if (!pot_file.empty()) { wxConfig::Get()->Write("last_file_path", wxPathOnly(pot_file)); NewFromPOT(pot_file); } } void PoeditFrame::NewFromPOT(const wxString& pot_file, Language language) { UpdateResultReason reason; CatalogPtr catalog = std::make_shared<Catalog>(); if (!catalog->UpdateFromPOT(pot_file, /*summary=*/false, reason, /*replace_header=*/true)) { return; } m_catalog = catalog; m_pendingHumanEditedItem.reset(); m_fileExistsOnDisk = false; m_modified = true; EnsureAppropriateContentView(); NotifyCatalogChanged(m_catalog); UpdateTitle(); UpdateMenu(); UpdateStatusBar(); UpdateTextLanguage(); auto setLanguageFunc = [=](Language lang) { if (lang.IsValid()) { catalog->SetLanguage(lang); // Derive save location for the file from the location of the POT // file (same directory, 
language-based name). This doesn't always // work, e.g. WordPress plugins use different naming, so don't actually // save the file just yet and let the user confirm the location when saving. wxFileName pot_fn(pot_file); pot_fn.SetFullName(lang.Code() + ".po"); m_catalog->SetFileName(pot_fn.GetFullPath()); } else { // default to English style plural if (catalog->HasPluralItems()) catalog->Header().SetHeaderNotEmpty("Plural-Forms", "nplurals=2; plural=(n != 1);"); } RecreatePluralTextCtrls(); UpdateTitle(); UpdateMenu(); UpdateStatusBar(); UpdateTextLanguage(); NotifyCatalogChanged(m_catalog); // refresh language column }; if (language.IsValid()) { setLanguageFunc(language); } else { // Choose the language: wxWindowPtr<LanguageDialog> dlg(new LanguageDialog(this)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) setLanguageFunc(dlg->GetLang()); else setLanguageFunc(Language()); }); } } void PoeditFrame::NewFromScratch() { CatalogPtr catalog = std::make_shared<Catalog>(); catalog->CreateNewHeader(); m_catalog = catalog; m_pendingHumanEditedItem.reset(); m_fileExistsOnDisk = false; m_modified = true; EnsureContentView(Content::Empty_PO); UpdateTitle(); UpdateMenu(); UpdateStatusBar(); // Choose the language: wxWindowPtr<LanguageDialog> dlg(new LanguageDialog(this)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) { catalog->SetLanguage(dlg->GetLang()); } }); } void PoeditFrame::OnProperties(wxCommandEvent&) { EditCatalogProperties(); } void PoeditFrame::EditCatalogProperties() { wxWindowPtr<PropertiesDialog> dlg(new PropertiesDialog(this, m_catalog, m_fileExistsOnDisk)); const Language prevLang = m_catalog->GetLanguage(); dlg->TransferTo(m_catalog); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) { dlg->TransferFrom(m_catalog); m_modified = true; RecreatePluralTextCtrls(); UpdateTitle(); UpdateMenu(); if (prevLang != m_catalog->GetLanguage()) { UpdateTextLanguage(); // trigger resorting and language header update: NotifyCatalogChanged(m_catalog); } } }); } void PoeditFrame::EditCatalogPropertiesAndUpdateFromSources() { // TODO: share code with EditCatalogProperties() wxWindowPtr<PropertiesDialog> dlg(new PropertiesDialog(this, m_catalog, m_fileExistsOnDisk, 1)); const Language prevLang = m_catalog->GetLanguage(); dlg->TransferTo(m_catalog); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) { dlg->TransferFrom(m_catalog); m_modified = true; if (m_list) RecreatePluralTextCtrls(); UpdateTitle(); UpdateMenu(); if (prevLang != m_catalog->GetLanguage()) { UpdateTextLanguage(); // trigger resorting and language header update: NotifyCatalogChanged(m_catalog); } if (!m_catalog->Header().SearchPaths.empty()) { EnsureAppropriateContentView(); UpdateCatalog(); } } }); } void PoeditFrame::UpdateAfterPreferencesChange() { g_focusToText = (bool)wxConfig::Get()->Read("focus_to_text", (long)false); if (m_list) { SetCustomFonts(); m_list->Refresh(); // if font changed UpdateTextLanguage(); } } /*static*/ void PoeditFrame::UpdateAllAfterPreferencesChange() { for (PoeditFramesList::const_iterator n = ms_instances.begin(); n != ms_instances.end(); ++n) { (*n)->UpdateAfterPreferencesChange(); } } bool PoeditFrame::UpdateCatalog(const wxString& pot_file) { // This ensures that the list control won't be redrawn during Update() // call when a dialog box is hidden; another alternative would be to call // m_list->CatalogChanged(NULL) here std::unique_ptr<wxWindowUpdateLocker> locker; if (m_list) locker.reset(new 
wxWindowUpdateLocker(m_list)); UpdateResultReason reason = UpdateResultReason::Unspecified; bool succ; if (pot_file.empty()) { if (m_catalog->HasSourcesAvailable()) { ProgressInfo progress(this, _("Updating catalog")); succ = m_catalog->Update(&progress, true, reason); locker.reset(); EnsureAppropriateContentView(); NotifyCatalogChanged(m_catalog); } else { reason = UpdateResultReason::NoSourcesFound; succ = false; } } else { succ = m_catalog->UpdateFromPOT(pot_file, true, reason); locker.reset(); EnsureAppropriateContentView(); NotifyCatalogChanged(m_catalog); } m_modified = succ || m_modified; UpdateStatusBar(); if (!succ) { switch (reason) { case UpdateResultReason::NoSourcesFound: { wxWindowPtr<wxMessageDialog> dlg(new wxMessageDialog ( this, _("Source code not available."), _("Updating failed"), wxOK | wxICON_ERROR )); dlg->SetExtendedMessage(_(L"Translations couldn’t be updated from the source code, because no code was found in the location specified in the catalog’s Properties.")); dlg->ShowWindowModalThenDo([dlg](int){}); break; } case UpdateResultReason::Unspecified: { wxLogWarning(_("Entries in the catalog are probably incorrect.")); wxLogError( _("Updating the catalog failed. Click on 'Details >>' for details.")); break; } case UpdateResultReason::CancelledByUser: break; } } return succ; } void PoeditFrame::OnUpdateFromSources(wxCommandEvent&) { DoIfCanDiscardCurrentDoc([=]{ try { if (UpdateCatalog()) { if (wxConfig::Get()->ReadBool("use_tm", true) && wxConfig::Get()->ReadBool("use_tm_when_updating", false)) { AutoTranslateCatalog(nullptr, AutoTranslate_OnlyGoodQuality); } } } catch (...) { wxLogError("%s", DescribeCurrentException()); } RefreshControls(); }); } void PoeditFrame::OnUpdateFromSourcesUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && !m_catalog->IsFromCrowdin() && m_catalog->HasSourcesConfigured()); } void PoeditFrame::OnUpdateFromPOT(wxCommandEvent&) { DoIfCanDiscardCurrentDoc([=]{ wxString path = wxPathOnly(GetFileName()); if (path.empty()) path = wxConfig::Get()->Read("last_file_path", wxEmptyString); wxWindowPtr<wxFileDialog> dlg( new wxFileDialog(this, _("Open catalog template"), path, wxEmptyString, Catalog::GetTypesFileMask({Catalog::Type::POT, Catalog::Type::PO}), wxFD_OPEN | wxFD_FILE_MUST_EXIST)); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode != wxID_OK) return; auto pot_file = dlg->GetPath(); wxConfig::Get()->Write("last_file_path", wxPathOnly(pot_file)); try { if (UpdateCatalog(pot_file)) { if (wxConfig::Get()->ReadBool("use_tm", true) && wxConfig::Get()->ReadBool("use_tm_when_updating", false)) { AutoTranslateCatalog(nullptr, AutoTranslate_OnlyGoodQuality); } } } catch (...) 
{ wxLogError("%s", DescribeCurrentException()); } }); RefreshControls(); }); } void PoeditFrame::OnUpdateFromPOTUpdate(wxUpdateUIEvent& event) { if (!m_catalog || m_catalog->GetFileType() != Catalog::Type::PO) event.Enable(false); else OnHasCatalogUpdate(event); } #ifdef HAVE_HTTP_CLIENT void PoeditFrame::OnUpdateFromCrowdin(wxCommandEvent&) { DoIfCanDiscardCurrentDoc([=]{ CrowdinSyncFile(this, m_catalog, [=](std::shared_ptr<Catalog> cat){ m_catalog = cat; EnsureAppropriateContentView(); NotifyCatalogChanged(m_catalog); RefreshControls(); }); }); } void PoeditFrame::OnUpdateFromCrowdinUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && m_catalog->IsFromCrowdin() && m_catalog->HasCapability(Catalog::Cap::Translations)); } #endif void PoeditFrame::OnUpdateSmart(wxCommandEvent& event) { if (!m_catalog) return; #ifdef HAVE_HTTP_CLIENT if (m_catalog->IsFromCrowdin()) OnUpdateFromCrowdin(event); else #endif OnUpdateFromSources(event); } void PoeditFrame::OnUpdateSmartUpdate(wxUpdateUIEvent& event) { event.Enable(false); if (m_catalog) { #ifdef HAVE_HTTP_CLIENT if (m_catalog->IsFromCrowdin()) OnUpdateFromCrowdinUpdate(event); else #endif OnUpdateFromSourcesUpdate(event); } } void PoeditFrame::OnValidate(wxCommandEvent&) { try { wxBusyCursor bcur; ReportValidationErrors(m_catalog->Validate(), /*mo_compilation_failed=*/Catalog::CompilationStatus::NotDone, /*from_save=*/false, /*other_file_saved=*/false, []{}); } catch (Exception& e) { wxLogError("%s", e.What()); } } template<typename TFunctor> void PoeditFrame::ReportValidationErrors(int errors, Catalog::CompilationStatus mo_compilation_status, bool from_save, bool other_file_saved, TFunctor completionHandler) { wxWindowPtr<wxMessageDialog> dlg; if ( errors ) { if (m_list && m_catalog->GetCount()) m_list->RefreshItems(0, m_catalog->GetCount()-1); RefreshControls(); dlg.reset(new wxMessageDialog ( this, wxString::Format ( wxPLURAL("%d issue with the translation found.", "%d issues with the translation found.", errors), errors ), _("Validation results"), wxOK | wxICON_ERROR )); wxString details = _("Entries with errors were marked in red in the list. 
Details of the error will be shown when you select such an entry."); if ( from_save ) { details += "\n\n"; if (other_file_saved) { switch ( mo_compilation_status ) { case Catalog::CompilationStatus::NotDone: details += _("The file was saved safely."); break; case Catalog::CompilationStatus::Success: details += _("The file was saved safely and compiled into the MO format, but it will probably not work correctly."); break; case Catalog::CompilationStatus::Error: details += _("The file was saved safely, but it cannot be compiled into the MO format and used."); break; } } else // saving only the MO file { switch ( mo_compilation_status ) { case Catalog::CompilationStatus::Success: details += _("The file was compiled into the MO format, but it will probably not work correctly."); break; case Catalog::CompilationStatus::NotDone: case Catalog::CompilationStatus::Error: details += _("The file cannot be compiled into the MO format and used."); break; } } } dlg->SetExtendedMessage(details); } else { wxASSERT( !from_save ); dlg.reset(new wxMessageDialog ( this, _("No problems with the translation found."), _("Validation results"), wxOK | wxICON_INFORMATION )); wxString details; int unfinished = 0; m_catalog->GetStatistics(nullptr, nullptr, nullptr, nullptr, &unfinished); if (unfinished) { details = wxString::Format(wxPLURAL("The translation is ready for use, but %d entry is not translated yet.", "The translation is ready for use, but %d entries are not translated yet.", unfinished), unfinished); } else { details = _("The translation is ready for use."); } dlg->SetExtendedMessage(details); } dlg->ShowWindowModalThenDo([dlg,completionHandler](int){ completionHandler(); }); } void PoeditFrame::OnListSel(wxListEvent& event) { wxWindow *focus = wxWindow::FindFocus(); bool hasFocus = (focus == m_textTrans) || (focus && focus->GetParent() == m_pluralNotebook); event.Skip(); if (m_pendingHumanEditedItem) { OnNewTranslationEntered(m_pendingHumanEditedItem); m_pendingHumanEditedItem.reset(); } UpdateToTextCtrl(ItemChanged); if (m_sidebar && m_list) { if (m_list->HasMultipleSelection()) m_sidebar->SetMultipleSelection(); else m_sidebar->SetSelectedItem(m_catalog, GetCurrentItem()); // may be nullptr } if (hasFocus && m_textTrans) { if (m_textTrans->IsShown()) m_textTrans->SetFocus(); else if (!m_textTransPlural.empty()) m_textTransPlural[0]->SetFocus(); } auto references = FileViewer::GetIfExists(); if (references) references->ShowReferences(m_catalog, GetCurrentItem(), 0); } void PoeditFrame::OnReferencesMenu(wxCommandEvent&) { auto entry = GetCurrentItem(); if ( !entry ) return; ShowReference(0); } void PoeditFrame::OnReferencesMenuUpdate(wxUpdateUIEvent& event) { OnSingleSelectionUpdate(event); if (event.GetEnabled()) { auto item = GetCurrentItem(); event.Enable(item && !item->GetReferences().empty()); } } void PoeditFrame::OnReference(wxCommandEvent& event) { ShowReference(event.GetId() - ID_POPUP_REFS); } void PoeditFrame::ShowReference(int num) { auto entry = GetCurrentItem(); if (!entry) return; FileViewer::GetAndActivate()->ShowReferences(m_catalog, entry, num); } void PoeditFrame::OnFuzzyFlag(wxCommandEvent& event) { bool setFuzzy = false; auto source = event.GetEventObject(); if (source && dynamic_cast<wxMenu*>(source)) { setFuzzy = GetMenuBar()->IsChecked(XRCID("menu_fuzzy")); m_toolbar->SetFuzzy(setFuzzy); } else { setFuzzy = m_toolbar->IsFuzzy(); GetMenuBar()->Check(XRCID("menu_fuzzy"), setFuzzy); } bool modified = false; m_list->ForSelectedCatalogItemsDo([=,&modified](CatalogItem& item){ if 
(item.IsFuzzy() != setFuzzy) { item.SetFuzzy(setFuzzy); item.SetModified(true); modified = true; } }); if (modified && !IsModified()) { m_modified = true; UpdateTitle(); } UpdateStatusBar(); UpdateToTextCtrl(UndoableEdit); if (m_list->HasSingleSelection()) { // The user explicitly changed fuzzy status (e.g. to on). Normally, if the // user edits an entry, it's fuzzy flag is cleared, but if the user sets // fuzzy on to indicate the translation is problematic and then continues // editing the entry, we do not want to annoy him by changing fuzzy back on // every keystroke. m_dontAutoclearFuzzyStatus = true; } } void PoeditFrame::OnIDsFlag(wxCommandEvent&) { m_displayIDs = GetMenuBar()->IsChecked(XRCID("menu_ids")); m_list->SetDisplayLines(m_displayIDs); } void PoeditFrame::OnCopyFromSource(wxCommandEvent&) { bool modified = false; m_list->ForSelectedCatalogItemsDo([&modified](CatalogItem& item){ item.SetTranslationFromSource(); if (item.IsModified()) modified = true; }); if (modified && !IsModified()) { m_modified = true; UpdateTitle(); } UpdateStatusBar(); UpdateToTextCtrl(UndoableEdit); } void PoeditFrame::OnClearTranslation(wxCommandEvent&) { bool modified = false; m_list->ForSelectedCatalogItemsDo([&modified](CatalogItem& item){ item.ClearTranslation(); if (item.IsModified()) modified = true; }); if (modified && !IsModified()) { m_modified = true; UpdateTitle(); } UpdateStatusBar(); UpdateToTextCtrl(UndoableEdit); } void PoeditFrame::OnFind(wxCommandEvent&) { if (!m_findWindow) m_findWindow = new FindFrame(this, m_list, m_catalog, m_textOrig, m_textTrans, m_pluralNotebook); m_findWindow->ShowForFind(); } void PoeditFrame::OnFindAndReplace(wxCommandEvent&) { if (!m_findWindow) m_findWindow = new FindFrame(this, m_list, m_catalog, m_textOrig, m_textTrans, m_pluralNotebook); m_findWindow->ShowForReplace(); } void PoeditFrame::OnFindNext(wxCommandEvent&) { if (m_findWindow) m_findWindow->FindNext(); } void PoeditFrame::OnFindPrev(wxCommandEvent&) { if (m_findWindow) m_findWindow->FindPrev(); } void PoeditFrame::OnUpdateFind(wxUpdateUIEvent& e) { e.Enable(m_catalog && !m_catalog->empty() && m_findWindow && m_findWindow->HasText()); } CatalogItemPtr PoeditFrame::GetCurrentItem() const { if ( !m_catalog || !m_list ) return nullptr; int item = m_list->GetFirstSelectedCatalogItem(); if ( item == -1 ) return nullptr; wxASSERT( item >= 0 && item < (int)m_catalog->GetCount() ); return (*m_catalog)[item]; } namespace { // does some basic processing of user input, e.g. 
to remove trailing \n wxString PreprocessEnteredTextForItem(CatalogItemPtr item, wxString t) { auto& orig = item->GetString(); if (!t.empty() && !orig.empty()) { if (orig.Last() == '\n' && t.Last() != '\n') t.append(1, '\n'); else if (orig.Last() != '\n' && t.Last() == '\n') t.RemoveLast(); } return t; } } // anonymous namespace void PoeditFrame::UpdateFromTextCtrl() { if (!m_list || !m_list->HasSingleSelection()) return; auto entry = GetCurrentItem(); if ( !entry ) return; wxString key = entry->GetString(); bool newfuzzy = m_toolbar->IsFuzzy(); const bool oldIsTranslated = entry->IsTranslated(); bool allTranslated = true; // will be updated later bool anyTransChanged = false; // ditto if (entry->HasPlural()) { wxArrayString str; for (unsigned i = 0; i < m_textTransPlural.size(); i++) { auto val = PreprocessEnteredTextForItem(entry, m_textTransPlural[i]->GetPlainText()); str.Add(val); if ( val.empty() ) allTranslated = false; } if ( str != entry->GetTranslations() ) { anyTransChanged = true; entry->SetTranslations(str); } } else { auto newval = PreprocessEnteredTextForItem(entry, m_textTrans->GetPlainText()); if ( newval.empty() ) allTranslated = false; if ( newval != entry->GetTranslation() ) { anyTransChanged = true; entry->SetTranslation(newval); } } if (entry->IsFuzzy() == newfuzzy && !anyTransChanged) { return; // not even fuzzy status changed, so return } // did something affecting statistics change? bool statisticsChanged = false; if (newfuzzy == entry->IsFuzzy() && !m_dontAutoclearFuzzyStatus) newfuzzy = false; m_toolbar->SetFuzzy(newfuzzy); GetMenuBar()->Check(XRCID("menu_fuzzy"), newfuzzy); if ( entry->IsFuzzy() != newfuzzy ) { entry->SetFuzzy(newfuzzy); statisticsChanged = true; } if ( oldIsTranslated != allTranslated ) { entry->SetTranslated(allTranslated); statisticsChanged = true; } entry->SetModified(true); entry->SetAutomatic(false); m_pendingHumanEditedItem = entry; m_list->RefreshSelectedItems(); if ( statisticsChanged ) { UpdateStatusBar(); } // else: no point in recomputing stats if ( !IsModified() ) { m_modified = true; UpdateTitle(); } } void PoeditFrame::OnNewTranslationEntered(const CatalogItemPtr& item) { if (wxConfig::Get()->ReadBool("use_tm", true)) { auto srclang = m_catalog->GetSourceLanguage(); auto lang = m_catalog->GetLanguage(); concurrency_queue::add([=](){ try { auto tm = TranslationMemory::Get().GetWriter(); tm->Insert(srclang, lang, item); // Note: do *not* call tm->Commit() here, because Lucene commit is // expensive. Instead, wait until the file is saved with committing // the changes. This way TM updates are available immediately for use // in further translations within the file, but per-item updates // remain inexpensive. 
} catch (const Exception&) { // ignore failures here, they'll become apparent when saving the file } }); } } namespace { struct EventHandlerDisabler { EventHandlerDisabler(wxEvtHandler *h) : m_hnd(h) { m_hnd->SetEvtHandlerEnabled(false); } ~EventHandlerDisabler() { m_hnd->SetEvtHandlerEnabled(true); } wxEvtHandler *m_hnd; }; void SetTranslationValue(TranslationTextCtrl *txt, const wxString& value, int flags) { // disable EVT_TEXT forwarding -- the event is generated by // programmatic changes to text controls' content and we *don't* // want UpdateFromTextCtrl() to be called from here EventHandlerDisabler disabler(txt->GetEventHandler()); if (flags & PoeditFrame::UndoableEdit) txt->SetPlainTextUserWritten(value); else txt->SetPlainText(value); } } // anonymous namespace void PoeditFrame::UpdateToTextCtrl(int flags) { m_pendingHumanEditedItem.reset(); auto entry = GetCurrentItem(); if ( !entry ) return; m_textOrig->SetPlainText(entry->GetString()); if (entry->HasPlural()) { m_textOrigPlural->SetPlainText(entry->GetPluralString()); unsigned formsCnt = (unsigned)m_textTransPlural.size(); for (unsigned j = 0; j < formsCnt; j++) SetTranslationValue(m_textTransPlural[j], wxEmptyString, flags); unsigned i = 0; for (i = 0; i < std::min(formsCnt, entry->GetNumberOfTranslations()); i++) { SetTranslationValue(m_textTransPlural[i], entry->GetTranslation(i), flags); } } else { if (m_textTrans) SetTranslationValue(m_textTrans, entry->GetTranslation(), flags); } if ( entry->HasContext() ) { const wxString prefix = _("Context:"); const wxString ctxt = entry->GetContext(); m_labelContext->SetLabelMarkup( wxString::Format("<b>%s</b> %s", prefix, EscapeMarkup(ctxt))); } m_labelContext->GetContainingSizer()->Show(m_labelContext, entry->HasContext()); if (m_errorBar) { if( entry->GetValidity() == CatalogItem::Val_Invalid ) m_errorBar->ShowError(entry->GetErrorString()); else m_errorBar->HideError(); } // by default, editing fuzzy item unfuzzies it m_dontAutoclearFuzzyStatus = false; m_toolbar->SetFuzzy(entry->IsFuzzy()); GetMenuBar()->Check(XRCID("menu_fuzzy"), entry->IsFuzzy()); ShowPluralFormUI(entry->HasPlural()); } void PoeditFrame::ReadCatalog(const wxString& catalog) { wxBusyCursor bcur; // NB: duplicated in PoeditFrame::Create() CatalogPtr cat = std::make_shared<Catalog>(catalog); if (cat->IsOk()) { ReadCatalog(cat); } else { wxMessageDialog dlg ( this, _("The file cannot be opened."), _("Invalid file"), wxOK | wxICON_ERROR ); dlg.SetExtendedMessage( _("The file may be either corrupted or in a format not recognized by Poedit.") ); dlg.ShowModal(); } } void PoeditFrame::ReadCatalog(const CatalogPtr& cat) { wxASSERT( cat && cat->IsOk() ); { #ifdef __WXMSW__ wxWindowUpdateLocker no_updates(this); #endif m_catalog = cat; m_pendingHumanEditedItem.reset(); if (m_catalog->empty()) { EnsureContentView(Content::Empty_PO); } else { EnsureAppropriateContentView(); // This must be done as soon as possible, otherwise the list would be // confused. GetCurrentItem() could return nullptr or something invalid, // causing crash in UpdateToTextCtrl() called from // RecreatePluralTextCtrls() just few lines below. 
NotifyCatalogChanged(m_catalog); } m_fileExistsOnDisk = true; m_modified = false; RecreatePluralTextCtrls(); RefreshControls(Refresh_NoCatalogChanged /*done right above*/); UpdateTitle(); UpdateTextLanguage(); #ifdef HAVE_HTTP_CLIENT m_toolbar->EnableSyncWithCrowdin(m_catalog->IsFromCrowdin()); #endif NoteAsRecentFile(); if (cat->HasCapability(Catalog::Cap::Translations)) WarnAboutLanguageIssues(); } FixDuplicatesIfPresent(); } void PoeditFrame::FixDuplicatesIfPresent() { // Poedit always produces good files, so don't bother with it. Older // versions would preserve bad files, though. wxString generator = m_catalog->Header().GetHeader("X-Generator"); wxString gversion; if (generator.StartsWith("Poedit ", &gversion) && !gversion.StartsWith("1.7") && !gversion.StartsWith("1.6") && !gversion.StartsWith("1.5")) return; if (!m_catalog->HasDuplicateItems()) return; // good // Fix duplicates and explain the changes to the user: m_catalog->FixDuplicateItems(); NotifyCatalogChanged(m_catalog); wxWindowPtr<wxMessageDialog> dlg( new wxMessageDialog ( this, wxString::Format(_(L"Poedit automatically fixed invalid content in the file “%s”."), wxFileName(GetFileName()).GetFullName()), _("Invalid file"), wxOK | wxICON_INFORMATION ) ); dlg->SetExtendedMessage(_("The file contained duplicate items, which is not allowed in PO files and would prevent the file from being used. Poedit fixed the issue, but you should review translations of any items marked as fuzzy and correct them if necessary.")); dlg->ShowWindowModalThenDo([dlg](int){}); } void PoeditFrame::WarnAboutLanguageIssues() { Language srclang = m_catalog->GetSourceLanguage(); Language lang = m_catalog->GetLanguage(); if (!lang.IsValid()) { AttentionMessage msg ( "missing-language", AttentionMessage::Error, _("Language of the translation isn't set.") ); msg.AddAction(MSW_OR_OTHER(_("Set language"), _("Set Language")), [=]{ EditCatalogProperties(); }); // TRANSLATORS: This is shown underneath "Language of the translation isn't set (or ...is the same as source language)." msg.SetExplanation(_("Suggestions are not available if the translation language is not set correctly. Other features, such as plural forms, may be affected as well.")); m_attentionBar->ShowMessage(msg); } // Check if the language is set wrongly. This is typically done in such a way that // both languages are English, so check explicitly for the common case of "translating" // from en to en_US too: if (lang.IsValid() && srclang.IsValid() && (lang == srclang || (srclang == Language::English() && lang.Code() == "en_US"))) { AttentionMessage msg ( "same-language-as-source", AttentionMessage::Warning, _("Language of the translation is the same as source language.") ); msg.SetExplanation(_("Suggestions are not available if the translation language is not set correctly. 
Other features, such as plural forms, may be affected as well.")); msg.AddAction(MSW_OR_OTHER(_("Fix language"), _("Fix Language")), [=]{ EditCatalogProperties(); }); if (lang != srclang) msg.AddDontShowAgain(); // possible that Poedit misjudged the intent m_attentionBar->ShowMessage(msg); } // check if plural forms header is correct (only if the language is set, // otherwise setting the language will fix this issue too): if ( lang.IsValid() && m_catalog->HasPluralItems() ) { wxString err; if ( m_catalog->Header().GetHeader("Plural-Forms").empty() ) { err = _("This catalog has entries with plural forms, but doesn't have Plural-Forms header configured."); } else if ( m_catalog->HasWrongPluralFormsCount() ) { err = _("Entries in this catalog have different plural forms count from what catalog's Plural-Forms header says"); } // FIXME: make this part of global error checking wxString plForms = m_catalog->Header().GetHeader("Plural-Forms"); PluralFormsCalculator *plCalc = PluralFormsCalculator::make(plForms.ToAscii()); if ( !plCalc ) { if ( plForms.empty() ) { err = _("Required header Plural-Forms is missing."); } else { err = wxString::Format( _("Syntax error in Plural-Forms header (\"%s\")."), plForms.c_str()); } } delete plCalc; if ( !err.empty() ) { AttentionMessage msg ( "malformed-plural-forms", AttentionMessage::Error, err ); msg.AddAction(MSW_OR_OTHER(_("Fix the header"), _("Fix the Header")), [=]{ EditCatalogProperties(); }); m_attentionBar->ShowMessage(msg); } else // no error, check for warning-worthy stuff { if ( lang.IsValid() ) { // Check for unusual plural forms. Do some normalization to avoid unnecessary // complains when the only differences are in whitespace for example. wxString pl1 = plForms; wxString pl2 = lang.DefaultPluralFormsExpr(); if (!pl2.empty()) { pl1.Replace(" ", ""); pl2.Replace(" ", ""); if ( pl1 != pl2 ) { if (pl1.Find(";plural=(") == wxNOT_FOUND && pl1.Last() == ';') { pl1.Replace(";plural=", ";plural=("); pl1.RemoveLast(); pl1 += ");"; } } if ( pl1 != pl2 ) { AttentionMessage msg ( "unusual-plural-forms", AttentionMessage::Warning, wxString::Format ( // TRANSLATORS: %s is language name in its basic form (as you // would see e.g. in a list of supported languages). You may need // to rephrase it, e.g. to an equivalent of "for language %s". _("Plural forms expression used by the catalog is unusual for %s."), lang.DisplayName() ) ); // TRANSLATORS: A verb, shown as action button with ""Plural forms expression used by the catalog is unusual for %s.")" msg.AddAction(_("Review"), [=]{ EditCatalogProperties(); }); msg.AddDontShowAgain(); m_attentionBar->ShowMessage(msg); } } } } } } void PoeditFrame::NoteAsRecentFile() { wxFileName fn(GetFileName()); fn.Normalize(wxPATH_NORM_DOTS | wxPATH_NORM_ABSOLUTE); #ifdef __WXOSX__ [[NSDocumentController sharedDocumentController] noteNewRecentDocumentURL:[NSURL fileURLWithPath:str::to_NS(fn.GetFullPath())]]; #else FileHistory().AddFileToHistory(fn.GetFullPath()); #endif } void PoeditFrame::RefreshControls(int flags) { if (!m_catalog) return; m_hasObsoleteItems = false; if (!m_catalog->IsOk()) { wxLogError(_("Error loading message catalog file '%s'."), m_catalog->GetFileName()); m_fileExistsOnDisk = false; UpdateMenu(); UpdateTitle(); m_catalog.reset(); m_pendingHumanEditedItem.reset(); NotifyCatalogChanged(nullptr); return; } wxBusyCursor bcur; UpdateMenu(); if (m_list) { // update catalog view, this may involve reordering the items... 
if (!(flags & Refresh_NoCatalogChanged)) m_list->CatalogChanged(m_catalog); if (m_findWindow) m_findWindow->Reset(m_catalog); } UpdateTitle(); UpdateStatusBar(); Refresh(); } void PoeditFrame::NotifyCatalogChanged(const CatalogPtr& cat) { if (m_sidebar) m_sidebar->ResetCatalog(); if (m_list) m_list->CatalogChanged(cat); } void PoeditFrame::UpdateStatusBar() { auto bar = GetStatusBar(); if (m_catalog && bar) { int all, fuzzy, untranslated, errors, unfinished; m_catalog->GetStatistics(&all, &fuzzy, &errors, &untranslated, &unfinished); wxString text; if (m_catalog->HasCapability(Catalog::Cap::Translations)) { int percent = (all == 0) ? 0 : (100 * (all - unfinished) / all); text.Printf(_("Translated: %d of %d (%d %%)"), all - unfinished, all, percent); if (unfinished > 0) { text += L" • "; text += wxString::Format(_("Remaining: %d"), unfinished); } if (errors > 0) { text += L" • "; text += wxString::Format(wxPLURAL("%d error", "%d errors", errors), errors); } } else { text.Printf(wxPLURAL("%d entry", "%d entries", all), all); } bar->SetStatusText(text); } } void PoeditFrame::DoGiveHelp(const wxString& text, bool show) { if (show || !text.empty()) wxFrame::DoGiveHelp(text, show); else UpdateStatusBar(); } void PoeditFrame::UpdateTitle() { #ifdef __WXOSX__ OSXSetModified(IsModified()); #endif wxString title; auto fileName = GetFileName(); if ( !fileName.empty() ) { wxFileName fn(GetFileName()); wxString fpath = fn.GetFullName(); if (m_fileExistsOnDisk) SetRepresentedFilename(fileName); else fpath += _(" (unsaved)"); if ( !m_catalog->Header().Project.empty() ) { title.Printf( #ifdef __WXOSX__ L"%s — %s", #else L"%s • %s", #endif fpath, m_catalog->Header().Project); } else { title = fn.GetFullName(); } #ifndef __WXOSX__ if ( IsModified() ) title += _(" (modified)"); title += " - Poedit"; #endif } else { title = "Poedit"; } SetTitle(title); } void PoeditFrame::UpdateMenu() { wxMenuBar *menubar = GetMenuBar(); const bool hasCatalog = m_catalog != nullptr; const bool nonEmpty = hasCatalog && !m_catalog->empty(); const bool editable = nonEmpty && m_catalog->HasCapability(Catalog::Cap::Translations); menubar->Enable(XRCID("menu_compile_mo"), hasCatalog && m_catalog->GetFileType() == Catalog::Type::PO); menubar->Enable(XRCID("menu_export"), hasCatalog); menubar->Enable(XRCID("menu_references"), nonEmpty); menubar->Enable(wxID_FIND, nonEmpty); menubar->Enable(wxID_REPLACE, nonEmpty); menubar->Enable(XRCID("menu_auto_translate"), editable); menubar->Enable(XRCID("menu_purge_deleted"), editable); menubar->Enable(XRCID("menu_validate"), editable); menubar->Enable(XRCID("menu_catproperties"), hasCatalog); menubar->Enable(XRCID("menu_ids"), nonEmpty); menubar->Enable(XRCID("sort_by_order"), nonEmpty); menubar->Enable(XRCID("sort_by_source"), nonEmpty); menubar->Enable(XRCID("sort_by_translation"), editable); menubar->Enable(XRCID("sort_group_by_context"), nonEmpty); menubar->Enable(XRCID("sort_untrans_first"), editable); menubar->Enable(XRCID("sort_errors_first"), editable); if (m_textTrans) m_textTrans->Enable(editable); if (m_list) m_list->Enable(nonEmpty); menubar->Enable(XRCID("menu_purge_deleted"), editable && m_catalog->HasDeletedItems()); #ifdef __WXGTK__ if (!editable) { // work around a wxGTK bug: enabling wxTextCtrl makes it editable too // in wxGTK <= 2.8: if (m_textOrig) m_textOrig->SetEditable(false); if (m_textOrigPlural) m_textOrigPlural->SetEditable(false); } #endif auto goMenuPos = menubar->FindMenu(_("Go")); if (goMenuPos != wxNOT_FOUND) menubar->EnableTop(goMenuPos, editable); for (int i = 
0; i < 10; i++) { menubar->Enable(ID_BOOKMARK_SET + i, editable); menubar->Enable(ID_BOOKMARK_GO + i, editable && m_catalog->GetBookmarkIndex(Bookmark(i)) != -1); } } void PoeditFrame::WriteCatalog(const wxString& catalog) { WriteCatalog(catalog, [](bool){}); } template<typename TFunctor> void PoeditFrame::WriteCatalog(const wxString& catalog, TFunctor completionHandler) { wxBusyCursor bcur; concurrency_queue::future<void> tmUpdateThread; if (wxConfig::Get()->ReadBool("use_tm", true) && m_catalog->HasCapability(Catalog::Cap::Translations)) { tmUpdateThread = concurrency_queue::add([=]{ try { auto tm = TranslationMemory::Get().GetWriter(); tm->Insert(m_catalog); tm->Commit(); } catch ( const Exception& e ) { wxLogWarning(_("Failed to update translation memory: %s"), e.What()); } catch ( ... ) { wxLogWarning(_("Failed to update translation memory: %s"), "unknown error"); } }); } if (m_catalog->GetFileType() == Catalog::Type::PO) { Catalog::HeaderData& dt = m_catalog->Header(); dt.Translator = wxConfig::Get()->Read("translator_name", dt.Translator); dt.TranslatorEmail = wxConfig::Get()->Read("translator_email", dt.TranslatorEmail); } int validation_errors = 0; Catalog::CompilationStatus mo_compilation_status = Catalog::CompilationStatus::NotDone; if ( !m_catalog->Save(catalog, true, validation_errors, mo_compilation_status) ) { if (is_future_valid(tmUpdateThread)) tmUpdateThread.wait(); completionHandler(false); return; } m_modified = false; m_fileExistsOnDisk = true; #ifndef __WXOSX__ FileHistory().AddFileToHistory(GetFileName()); #endif UpdateTitle(); RefreshControls(); NoteAsRecentFile(); if (ManagerFrame::Get()) ManagerFrame::Get()->NotifyFileChanged(GetFileName()); if (is_future_valid(tmUpdateThread)) tmUpdateThread.wait(); if (validation_errors) { // Note: this may show window-modal window and because we may // be called from such window too, run this in the next // event loop iteration. CallAfter([=]{ ReportValidationErrors(validation_errors, mo_compilation_status, /*from_save=*/true, /*other_file_saved=*/true, [=]{ completionHandler(true); }); }); } else { completionHandler(true); } } void PoeditFrame::OnEditComment(wxCommandEvent& event) { auto firstItem = GetCurrentItem(); wxCHECK_RET( firstItem, "no entry selected" ); (void)event; wxWindow *parent = this; #ifndef __WXOSX__ // Find suitable parent window for the comment dialog (e.g. the button): parent = dynamic_cast<wxWindow*>(event.GetEventObject()); if (parent && dynamic_cast<wxToolBar*>(parent) != nullptr) parent = nullptr; if (!parent) parent = this; #endif wxWindowPtr<CommentDialog> dlg(new CommentDialog(parent, firstItem->GetComment())); dlg->ShowWindowModalThenDo([=](int retcode){ if (retcode == wxID_OK) { m_modified = true; UpdateTitle(); wxString comment = dlg->GetComment(); bool modified = false; m_list->ForSelectedCatalogItemsDo([&modified,comment](CatalogItem& item){ if (item.GetComment() != comment) { item.SetComment(comment); item.SetModified(true); modified = true; } }); if (modified && !IsModified()) { m_modified = true; UpdateTitle(); } // update comment window if (m_sidebar) m_sidebar->RefreshContent(); } }); } void PoeditFrame::OnPurgeDeleted(wxCommandEvent& WXUNUSED(event)) { const wxString title = _("Purge deleted translations"); const wxString main = _("Do you want to remove all translations that are no longer used?"); const wxString details = _("If you continue with purging, all translations marked as deleted will be permanently removed. 
You will have to translate them again if they are added back in the future."); wxWindowPtr<wxMessageDialog> dlg(new wxMessageDialog(this, main, title, wxYES_NO | wxICON_QUESTION)); dlg->SetExtendedMessage(details); dlg->SetYesNoLabels(_("Purge"), _("Keep")); dlg->ShowWindowModalThenDo([this,dlg](int retcode){ if (retcode == wxID_YES) { m_catalog->RemoveDeletedItems(); UpdateMenu(); } }); } void PoeditFrame::OnSuggestion(wxCommandEvent& event) { auto entry = GetCurrentItem(); if (!entry) return; entry->SetTranslation(event.GetString()); entry->SetFuzzy(false); entry->SetModified(true); // FIXME: instead of this mess, use notifications of catalog change m_modified = true; UpdateTitle(); UpdateStatusBar(); UpdateToTextCtrl(UndoableEdit); m_list->RefreshSelectedItems(); } void PoeditFrame::OnAutoTranslateAll(wxCommandEvent&) { wxWindowPtr<wxDialog> dlg(new wxDialog(this, wxID_ANY, _("Fill missing translations from TM"))); auto topsizer = new wxBoxSizer(wxVERTICAL); auto sizer = new wxBoxSizer(wxVERTICAL); auto onlyExact = new wxCheckBox(dlg.get(), wxID_ANY, _("Only fill in exact matches")); auto onlyExactE = new ExplanationLabel(dlg.get(), _("By default, inaccurate results are filled in as well and marked as fuzzy. Check this option to only include accurate matches.")); auto noFuzzy = new wxCheckBox(dlg.get(), wxID_ANY, _(L"Don’t mark exact matches as fuzzy")); auto noFuzzyE = new ExplanationLabel(dlg.get(), _("Only enable if you trust the quality of your TM. By default, all matches from the TM are marked as fuzzy and should be reviewed.")); #ifdef __WXOSX__ sizer->AddSpacer(PX(5)); sizer->Add(new HeadingLabel(dlg.get(), _("Fill missing translations from TM")), wxSizerFlags().Expand().PXDoubleBorder(wxBOTTOM)); #endif sizer->Add(onlyExact, wxSizerFlags().PXBorder(wxTOP)); sizer->AddSpacer(PX(1)); sizer->Add(onlyExactE, wxSizerFlags().Expand().Border(wxLEFT, ExplanationLabel::CHECKBOX_INDENT)); sizer->Add(noFuzzy, wxSizerFlags().PXDoubleBorder(wxTOP)); sizer->AddSpacer(PX(1)); sizer->Add(noFuzzyE, wxSizerFlags().Expand().Border(wxLEFT, ExplanationLabel::CHECKBOX_INDENT)); topsizer->Add(sizer, wxSizerFlags(1).Expand().PXDoubleBorderAll()); auto buttons = dlg->CreateButtonSizer(wxOK | wxCANCEL); auto ok = static_cast<wxButton*>(dlg->FindWindow(wxID_OK)); ok->SetLabel(_("Fill")); ok->SetDefault(); #ifdef __WXOSX__ topsizer->Add(buttons, wxSizerFlags().Expand()); #else topsizer->Add(buttons, wxSizerFlags().Expand().PXBorderAll()); topsizer->AddSpacer(PX(5)); #endif dlg->SetSizer(topsizer); dlg->SetMinSize(wxSize(PX(400), -1)); dlg->Layout(); dlg->Fit(); dlg->CenterOnParent(); dlg->ShowWindowModalThenDo([this,onlyExact,noFuzzy,dlg](int retcode) { if (retcode != wxID_OK) return; int matches = 0; int flags = 0; if (onlyExact->GetValue()) flags |= AutoTranslate_OnlyExact; if (noFuzzy->GetValue()) flags |= AutoTranslate_ExactNotFuzzy; if (m_list->HasMultipleSelection()) { if (!AutoTranslateCatalog(&matches, m_list->GetSelectedCatalogItems(), flags)) return; } else { if (!AutoTranslateCatalog(&matches, flags)) return; } wxString msg, details; if (matches) { msg = wxString::Format(wxPLURAL("%d entry was filled from the translation memory.", "%d entries were filled from the translation memory.", matches), matches); details = _("The translations were marked as fuzzy, because they may be inaccurate. You should review them for correctness."); } else { msg = _("No entries could be filled from the translation memory."); details = _(L"The TM doesn’t contain any strings similar to the content of this file. 
It is only effective for semi-automatic translations after Poedit learns enough from files that you translated manually."); } wxWindowPtr<wxMessageDialog> resultsDlg( new wxMessageDialog ( this, msg, _("Fill missing translations from TM"), wxOK | wxICON_INFORMATION ) ); resultsDlg->SetExtendedMessage(details); resultsDlg->ShowWindowModalThenDo([resultsDlg](int){}); }); } bool PoeditFrame::AutoTranslateCatalog(int *matchesCount, int flags) { return AutoTranslateCatalog(matchesCount, boost::counting_range(0, (int)m_catalog->GetCount()), flags); } template<typename T> bool PoeditFrame::AutoTranslateCatalog(int *matchesCount, const T& range, int flags) { if (matchesCount) *matchesCount = 0; if (range.empty()) return false; if (!wxConfig::Get()->ReadBool("use_tm", true)) return false; wxBusyCursor bcur; TranslationMemory& tm = TranslationMemory::Get(); auto srclang = m_catalog->GetSourceLanguage(); auto lang = m_catalog->GetLanguage(); // TODO: make this window-modal ProgressInfo progress(this, _("Translating")); progress.UpdateMessage(_("Filling missing translations from TM...")); // Function to apply fetched suggestions to a catalog item: auto process_results = [=](CatalogItemPtr dt, const SuggestionsList& results) -> bool { if (results.empty()) return false; auto& res = results.front(); if ((flags & AutoTranslate_OnlyExact) && !res.IsExactMatch()) return false; if ((flags & AutoTranslate_OnlyGoodQuality) && res.score < 0.80) return false; dt->SetTranslation(res.text); dt->SetAutomatic(true); dt->SetFuzzy(!res.IsExactMatch() || (flags & AutoTranslate_ExactNotFuzzy) == 0); return true; }; std::vector<concurrency_queue::future<bool>> operations; for (int i: range) { auto dt = (*m_catalog)[i]; if (dt->HasPlural()) continue; // can't handle yet (TODO?) if (dt->IsTranslated() && !dt->IsFuzzy()) continue; operations.push_back(concurrency_queue::add([=,&tm]{ auto results = tm.Search(srclang, lang, dt->GetString().ToStdWstring()); bool ok = process_results(dt, results); return ok; })); } progress.SetGaugeMax((int)operations.size()); int matches = 0; for (auto& op: operations) { if (!progress.UpdateGauge()) break; // TODO: cancel pending 'operations' futures if (op.get()) { matches++; progress.UpdateMessage(wxString::Format(wxPLURAL("Translated %u string", "Translated %u strings", matches), matches)); } } if (matchesCount) *matchesCount = matches; if (matches && !m_modified) { m_modified = true; UpdateTitle(); } RefreshControls(); return true; } wxMenu *PoeditFrame::GetPopupMenu(int item) { if (!m_catalog) return NULL; if (item < 0 || item >= (int)m_list->GetItemCount()) return NULL; const wxArrayString& refs = (*m_catalog)[item]->GetReferences(); wxMenu *menu = new wxMenu; menu->Append(XRCID("menu_copy_from_src"), #ifdef __WXMSW__ wxString(_("Copy from source text")) #else wxString(_("Copy from Source Text")) #endif + "\t" + _("Ctrl+") + "B"); menu->Append(XRCID("menu_clear"), #ifdef __WXMSW__ wxString(_("Clear translation")) #else wxString(_("Clear Translation")) #endif + "\t" + _("Ctrl+") + "K"); menu->Append(XRCID("menu_comment"), #ifdef __WXMSW__ wxString(_("Edit comment")) #else wxString(_("Edit Comment")) #endif #ifndef __WXOSX__ + "\t" + _("Ctrl+") + "M" #endif ); if ( !refs.empty() ) { menu->AppendSeparator(); wxMenuItem *it1 = new wxMenuItem(menu, ID_POPUP_DUMMY+0, _("References:")); #ifdef __WXMSW__ it1->SetFont(m_boldGuiFont); menu->Append(it1); #else menu->Append(it1); it1->Enable(false); #endif for (int i = 0; i < (int)refs.GetCount(); i++) menu->Append(ID_POPUP_REFS + i, " " + refs[i]); 
} return menu; } static inline void SetCtrlFont(wxWindow *win, const wxFont& font) { if (!win) return; #ifdef __WXMSW__ // Native wxMSW text control sends EN_CHANGE when the font changes, // producing a wxEVT_TEXT event as if the user changed the value. // Unfortunately the event seems to be used internally for sizing, // so we can't just filter it out completely. What we can do, however, // is to disable *our* handling of the event. EventHandlerDisabler disabler(win->GetEventHandler()); #endif win->SetFont(font); } void PoeditFrame::SetCustomFonts() { if (!m_list) return; wxConfigBase *cfg = wxConfig::Get(); static bool prevUseFontText = false; bool useFontList = (bool)cfg->Read("custom_font_list_use", (long)false); bool useFontText = (bool)cfg->Read("custom_font_text_use", (long)false); if (useFontList) { wxString name = cfg->Read("custom_font_list_name", wxEmptyString); if (!name.empty()) { wxNativeFontInfo fi; fi.FromString(name); wxFont font; font.SetNativeFontInfo(fi); m_list->SetCustomFont(font); } } else { m_list->SetCustomFont(wxNullFont); } if (useFontText) { wxString name = cfg->Read("custom_font_text_name", wxEmptyString); if (!name.empty()) { wxNativeFontInfo fi; fi.FromString(name); wxFont font; font.SetNativeFontInfo(fi); SetCtrlFont(m_textOrig, font); SetCtrlFont(m_textOrigPlural, font); SetCtrlFont(m_textTrans, font); for (size_t i = 0; i < m_textTransPlural.size(); i++) SetCtrlFont(m_textTransPlural[i], font); prevUseFontText = true; } } else if (prevUseFontText) { wxFont font(wxSystemSettings::GetFont(wxSYS_DEFAULT_GUI_FONT)); SetCtrlFont(m_textOrig, font); SetCtrlFont(m_textOrigPlural, font); SetCtrlFont(m_textTrans, font); for (size_t i = 0; i < m_textTransPlural.size(); i++) SetCtrlFont(m_textTransPlural[i], font); prevUseFontText = false; } } void PoeditFrame::OnSize(wxSizeEvent& event) { wxWindowUpdateLocker lock(this); event.Skip(); // see the comment in PoeditFrame ctor if ( m_setSashPositionsWhenMaximized && IsMaximized() ) { m_setSashPositionsWhenMaximized = false; // update sizes of child windows first: Layout(); // then set sash positions if (m_splitter) m_splitter->SetSashPosition((int)wxConfig::Get()->ReadLong("/splitter", PX(250))); } if (m_sidebarSplitter) { auto split = wxConfigBase::Get()->ReadDouble("/sidebar_splitter", 0.75); m_sidebarSplitter->SetSashPosition(split * event.GetSize().x); } } void PoeditFrame::ShowPluralFormUI(bool show) { if (show && (!m_catalog || m_catalog->GetPluralFormsCount() == 0)) show = false; wxSizer *origSizer = m_textOrig->GetContainingSizer(); origSizer->Show(m_labelSingular, show); origSizer->Show(m_labelPlural, show); origSizer->Show(m_textOrigPlural, show); origSizer->Layout(); if (m_textTrans && m_pluralNotebook) { wxSizer *textSizer = m_textTrans->GetContainingSizer(); textSizer->Show(m_textTrans, !show); textSizer->Show(m_pluralNotebook, show); textSizer->Layout(); } } void PoeditFrame::RecreatePluralTextCtrls() { if (!m_catalog || !m_list || !m_pluralNotebook) return; for (size_t i = 0; i < m_textTransPlural.size(); i++) m_textTransPlural[i]->PopEventHandler(true/*delete*/); m_textTransPlural.clear(); m_pluralNotebook->DeleteAllPages(); m_textTransSingularForm = NULL; PluralFormsCalculator *calc = PluralFormsCalculator::make( m_catalog->Header().GetHeader("Plural-Forms").ToAscii()); int formsCount = m_catalog->GetPluralFormsCount(); for (int form = 0; form < formsCount; form++) { // find example number that would use this plural form: static const int maxExamplesCnt = 5; wxString examples; int firstExample = -1; int 
examplesCnt = 0; if (calc && formsCount > 1) { for (int example = 0; example < 1000; example++) { if (calc->evaluate(example) == form) { if (++examplesCnt == 1) firstExample = example; if (examplesCnt == maxExamplesCnt) { examples += L'…'; break; } else if (examplesCnt == 1) examples += wxString::Format("%d", example); else examples += wxString::Format(", %d", example); } } } wxString desc; if (formsCount == 1) desc = _("Everything"); else if (examplesCnt == 0) desc.Printf(_("Form %i"), form); else if (examplesCnt == 1) { if (formsCount == 2 && firstExample == 1) // English-like { desc = _("Singular"); } else { if (firstExample == 0) desc = _("Zero"); else if (firstExample == 1) desc = _("One"); else if (firstExample == 2) desc = _("Two"); else desc.Printf(L"n = %s", examples); } } else if (formsCount == 2 && examplesCnt == 2 && firstExample == 0 && examples == "0, 1") { desc = _("Singular"); } else if (formsCount == 2 && firstExample != 1 && examplesCnt == maxExamplesCnt) { if (firstExample == 0 || firstExample == 2) desc = _("Plural"); else desc = _("Other"); } else desc.Printf(L"n → %s", examples); // create text control and notebook page for it: auto txt = new TranslationTextCtrl(m_pluralNotebook, wxID_ANY); txt->PushEventHandler(new TransTextctrlHandler(this)); m_textTransPlural.push_back(txt); m_pluralNotebook->AddPage(txt, desc); if (examplesCnt == 1 && firstExample == 1) // == singular m_textTransSingularForm = txt; } // as a fallback, assume 1st form for plural entries is the singular // (like in English and most real-life uses): if (!m_textTransSingularForm && !m_textTransPlural.empty()) m_textTransSingularForm = m_textTransPlural[0]; delete calc; SetCustomFonts(); UpdateTextLanguage(); UpdateToTextCtrl(ItemChanged); } void PoeditFrame::OnListRightClick(wxMouseEvent& event) { long item; int flags = wxLIST_HITTEST_ONITEM; auto list = static_cast<PoeditListCtrl*>(event.GetEventObject()); item = list->HitTest(event.GetPosition(), flags); if (item != -1 && (flags & wxLIST_HITTEST_ONITEM)) { list->SelectAndFocus(item); } wxMenu *menu = GetPopupMenu(m_list->ListIndexToCatalog(int(item))); if (menu) { list->PopupMenu(menu, event.GetPosition()); delete menu; } else event.Skip(); } void PoeditFrame::OnListFocus(wxFocusEvent& event) { if (g_focusToText && m_textTrans != nullptr) { if (m_textTrans->IsShown()) m_textTrans->SetFocus(); else if (!m_textTransPlural.empty()) (m_textTransPlural)[0]->SetFocus(); } else event.Skip(); } void PoeditFrame::OnSplitterSashMoving(wxSplitterEvent& event) { auto pos = event.GetSashPosition(); wxConfigBase::Get()->Write("/splitter", (long)pos); if (m_sidebar) m_sidebar->SetUpperHeight(pos); } void PoeditFrame::OnSidebarSplitterSashMoving(wxSplitterEvent& event) { auto split = (double)event.GetSashPosition() / (double)GetSize().x; wxConfigBase::Get()->Write("/sidebar_splitter", split); } void PoeditFrame::AddBookmarksMenu(wxMenu *parent) { wxMenu *menu = new wxMenu(); parent->AppendSeparator(); parent->AppendSubMenu(menu, _("&Bookmarks")); #ifdef __WXOSX__ // on Mac, Alt+something is used during normal typing, so we shouldn't // use it as shortcuts: #define BK_ACCEL_SET "Ctrl+rawctrl+%i" #define BK_ACCEL_GO "Ctrl+Alt+%i" #else // TRANSLATORS: This is the key shortcut used in menus on Windows, some languages call them differently #define BK_ACCEL_SET _("Alt+") + "%i" // TRANSLATORS: This is the key shortcut used in menus on Windows, some languages call them differently #define BK_ACCEL_GO _("Ctrl+") + _("Alt+") + "%i" #endif #ifdef __WXMSW__ #define 
BK_LABEL_SET _("Set bookmark %i") #define BK_LABEL_GO _("Go to bookmark %i") #else #define BK_LABEL_SET _("Set Bookmark %i") #define BK_LABEL_GO _("Go to Bookmark %i") #endif for (int i = 0; i < 10; i++) { auto label = BK_LABEL_SET + "\t" + BK_ACCEL_SET; menu->Append(ID_BOOKMARK_SET + i, wxString::Format(label, i, i)); } menu->AppendSeparator(); for (int i = 0; i < 10; i++) { auto label = BK_LABEL_GO + "\t" + BK_ACCEL_GO; menu->Append(ID_BOOKMARK_GO + i, wxString::Format(label, i, i)); } } void PoeditFrame::OnGoToBookmark(wxCommandEvent& event) { // Go to bookmark, if there is an item for it Bookmark bk = static_cast<Bookmark>(event.GetId() - ID_BOOKMARK_GO); int bkIndex = m_catalog->GetBookmarkIndex(bk); if (bkIndex != -1) { int listIndex = m_list->CatalogIndexToList(bkIndex); if (listIndex >= 0 && listIndex < m_list->GetItemCount()) { m_list->EnsureVisible(listIndex); m_list->SelectOnly(listIndex); } } } void PoeditFrame::OnSetBookmark(wxCommandEvent& event) { // Set bookmark if different from the current value for the item, // else unset it int bkIndex = -1; int selItemIndex = m_list->GetFirstSelectedCatalogItem(); if (selItemIndex == -1) return; Bookmark bk = static_cast<Bookmark>(event.GetId() - ID_BOOKMARK_SET); if (m_catalog->GetBookmarkIndex(bk) == selItemIndex) { m_catalog->SetBookmark(selItemIndex, NO_BOOKMARK); } else { bkIndex = m_catalog->SetBookmark(selItemIndex, bk); } // Refresh items m_list->RefreshSelectedItems(); if (bkIndex != -1) m_list->RefreshItem(m_list->CatalogIndexToList(bkIndex)); // Catalog has been modified m_modified = true; UpdateTitle(); UpdateMenu(); } void PoeditFrame::OnSortByFileOrder(wxCommandEvent&) { m_list->sortOrder.by = SortOrder::By_FileOrder; m_list->Sort(); } void PoeditFrame::OnSortBySource(wxCommandEvent&) { m_list->sortOrder.by = SortOrder::By_Source; m_list->Sort(); } void PoeditFrame::OnSortByTranslation(wxCommandEvent&) { m_list->sortOrder.by = SortOrder::By_Translation; m_list->Sort(); } void PoeditFrame::OnSortGroupByContext(wxCommandEvent& event) { m_list->sortOrder.groupByContext = event.IsChecked(); m_list->Sort(); } void PoeditFrame::OnSortUntranslatedFirst(wxCommandEvent& event) { m_list->sortOrder.untransFirst = event.IsChecked(); m_list->Sort(); } void PoeditFrame::OnSortErrorsFirst(wxCommandEvent& event) { m_list->sortOrder.errorsFirst = event.IsChecked(); m_list->Sort(); } void PoeditFrame::OnShowHideSidebar(wxCommandEvent&) { bool toShow = !m_sidebarSplitter->IsSplit(); if (toShow) { auto split = GetSize().x * wxConfigBase::Get()->ReadDouble("/sidebar_splitter", 0.75); m_sidebarSplitter->SplitVertically(m_splitter, m_sidebar, split); m_sidebar->RefreshContent(); } else { m_sidebarSplitter->Unsplit(m_sidebar); } wxConfigBase::Get()->Write("/sidebar_shown", toShow); } void PoeditFrame::OnUpdateShowHideSidebar(wxUpdateUIEvent& event) { event.Enable(m_sidebar != nullptr); if (!m_sidebar) return; bool shown = m_sidebarSplitter->IsSplit(); #ifdef __WXOSX__ auto shortcut = "\tCtrl+Alt+S"; if (shown) event.SetText(_("Hide Sidebar") + shortcut); else event.SetText(_("Show Sidebar") + shortcut); #else event.Check(shown); #endif } void PoeditFrame::OnShowHideStatusbar(wxCommandEvent&) { auto bar = GetStatusBar(); bool toShow = (bar == nullptr); if (toShow) { CreateStatusBar(1, wxST_SIZEGRIP); UpdateStatusBar(); } else { SetStatusBar(nullptr); bar->Destroy(); } wxConfigBase::Get()->Write("/statusbar_shown", toShow); } void PoeditFrame::OnUpdateShowHideStatusbar(wxUpdateUIEvent& event) { bool shown = GetStatusBar() != nullptr; #ifdef 
__WXOSX__ auto shortcut = "\tCtrl+/"; if (shown) event.SetText(_("Hide Status Bar") + shortcut); else event.SetText(_("Show Status Bar") + shortcut); #else event.Check(shown); #endif } void PoeditFrame::OnSelectionUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && m_list && m_list->HasSelection()); } void PoeditFrame::OnSelectionUpdateEditable(wxUpdateUIEvent& event) { event.Enable(m_catalog && m_list && m_list->HasSelection() && m_catalog->HasCapability(Catalog::Cap::Translations)); } void PoeditFrame::OnSingleSelectionUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && m_list && m_list->HasSingleSelection()); } void PoeditFrame::OnHasCatalogUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog != nullptr); } void PoeditFrame::OnIsEditableUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && !m_catalog->empty() && m_catalog->HasCapability(Catalog::Cap::Translations)); } void PoeditFrame::OnEditCommentUpdate(wxUpdateUIEvent& event) { event.Enable(m_catalog && m_list && m_list->HasSelection() && m_catalog->HasCapability(Catalog::Cap::UserComments)); } #if defined(__WXMSW__) || defined(__WXGTK__) // Emulate something like OS X's first responder: pass text editing commands to // the focused text control. void PoeditFrame::OnTextEditingCommand(wxCommandEvent& event) { #ifdef __WXGTK__ wxEventBlocker block(this, wxEVT_MENU); #endif wxWindow *w = wxWindow::FindFocus(); if (!w || w == this || !w->ProcessWindowEventLocally(event)) event.Skip(); } void PoeditFrame::OnTextEditingCommandUpdate(wxUpdateUIEvent& event) { #ifdef __WXGTK__ wxEventBlocker block(this, wxEVT_UPDATE_UI); #endif wxWindow *w = wxWindow::FindFocus(); if (!w || w == this || !w->ProcessWindowEventLocally(event)) event.Enable(false); } #endif // __WXMSW__ || __WXGTK__ // ------------------------------------------------------------------ // catalog navigation // ------------------------------------------------------------------ namespace { bool Pred_AnyItem(const CatalogItemPtr&) { return true; } bool Pred_UnfinishedItem(const CatalogItemPtr& item) { return !item->IsTranslated() || item->IsFuzzy() || item->GetValidity() == CatalogItem::Val_Invalid; } } // anonymous namespace long PoeditFrame::NavigateGetNextItem(const long start, int step, PoeditFrame::NavigatePredicate predicate, bool wrap, CatalogItemPtr *out_item) { const int count = m_list ? 
m_list->GetItemCount() : 0; if ( !count ) return -1; long i = start; for ( ;; ) { i += step; if ( i < 0 ) { if ( wrap ) i += count; else return -1; // nowhere to go } else if ( i >= count ) { if ( wrap ) i -= count; else return -1; // nowhere to go } if ( i == start ) return -1; // nowhere to go auto item = m_list->ListIndexToCatalogItem(i); if ( predicate(item) ) { if (out_item) *out_item = item; return i; } } } void PoeditFrame::Navigate(int step, NavigatePredicate predicate, bool wrap) { if (!m_list) return; auto i = NavigateGetNextItem(m_list->GetFirstSelected(), step, predicate, wrap, nullptr); if (i == -1) return; m_list->SelectOnly(i); } void PoeditFrame::OnPrev(wxCommandEvent&) { Navigate(-1, Pred_AnyItem, /*wrap=*/false); } void PoeditFrame::OnNext(wxCommandEvent&) { Navigate(+1, Pred_AnyItem, /*wrap=*/false); } void PoeditFrame::OnPrevUnfinished(wxCommandEvent&) { Navigate(-1, Pred_UnfinishedItem, /*wrap=*/false); } void PoeditFrame::OnNextUnfinished(wxCommandEvent&) { Navigate(+1, Pred_UnfinishedItem, /*wrap=*/false); } void PoeditFrame::OnDoneAndNext(wxCommandEvent&) { auto item = GetCurrentItem(); if (!item) return; // If the user is "done" with an item, it should be in its final approved state: if (item->IsFuzzy()) { item->SetFuzzy(false); item->SetAutomatic(false); item->SetModified(true); if (!IsModified()) { m_modified = true; UpdateTitle(); UpdateStatusBar(); } // do additional processing of finished translations, such as adding it to the TM: m_pendingHumanEditedItem = item; } // like "next unfinished", but wraps Navigate(+1, Pred_UnfinishedItem, /*wrap=*/true); } void PoeditFrame::OnPrevPage(wxCommandEvent&) { if (!m_list) return; auto pos = std::max(m_list->GetFirstSelected()-10, 0L); m_list->SelectOnly(pos); } void PoeditFrame::OnNextPage(wxCommandEvent&) { if (!m_list) return; auto pos = std::min(m_list->GetFirstSelected()+10, long(m_list->GetItemCount())-1); m_list->SelectOnly(pos); }
using Gadfly
using RDatasets

plot(data("datasets", "iris"), x="Sepal.Length", y="Sepal.Width", Geom.point)
age 30, marriage license to marry Ava Corley, age 20, both of Welch, Oklahoma. Marriage Records read: Married on Apr 19, 1920, by C. S. Wortman, County Judge. Witnesses were Rachel Adams and John Adams.
#----------------------------------------------------
# MIT License
#
# Copyright (c) 2017 Rishi Rai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#----------------------------------------------------

import tensorflow as tf
from PIL import Image
import numpy as np
import sys
import align.detect_face

# Initialize the TensorFlow graph and the MTCNN detector networks
with tf.Graph().as_default():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    with sess.as_default():
        pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

# Detect faces in in-memory image data
def test_src(image_src):
    image_np = np.array(image_src).astype(np.uint8)
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' thresholds
    factor = 0.709  # scale factor
    face_crop_margin = 32
    bounding_boxes, _ = align.detect_face.detect_face(image_np, minsize, pnet, rnet, onet, threshold, factor)
    result = np.empty(bounding_boxes.shape, np.float32)
    index = 0
    for bb in bounding_boxes:
        img_size = np.asarray(image_np.shape)[0:2]
        result[index, 0] = np.maximum(bb[0] - face_crop_margin / 2, 0)
        result[index, 1] = np.maximum(bb[1] - face_crop_margin / 2, 0)
        result[index, 2] = np.minimum(bb[2] + face_crop_margin / 2, img_size[1])
        result[index, 3] = np.minimum(bb[3] + face_crop_margin / 2, img_size[0])
        result[index, 4] = bb[4]
        index = index + 1
    return result

# Detect faces in an image file
def test_image(image_file):
    try:
        image = Image.open(image_file)
        print('>>> Run test on image:', image_file)
    except IOError:
        print('IOError: File is not accessible.')
        return
    bounding_boxes = test_src(image)
    print('bounding_boxes.shape =', bounding_boxes.shape)
    print('bounding_boxes.dtype =', bounding_boxes.dtype)
    print('bounding_boxes =', bounding_boxes)

# Warm up the GPU
test_image('image.jpg')

# MAIN
if __name__ == '__main__':
    test_image('image.jpg' if (1 == len(sys.argv)) else sys.argv[1])
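# --- Illustrative follow-up (not part of the original script) ---
# test_src() returns one row per detected face: columns 0-3 are the padded
# left/top/right/bottom pixel coordinates and column 4 is the detector's
# confidence score. As a minimal sketch under that assumption, the boxes can
# be used to crop the detected faces out of the image with Pillow; the helper
# name crop_faces and the output file naming below are hypothetical.
def crop_faces(image_file, out_prefix='face'):
    image = Image.open(image_file)
    boxes = test_src(image)
    for i, (left, top, right, bottom, score) in enumerate(boxes):
        face = image.crop((int(left), int(top), int(right), int(bottom)))
        face.save('%s_%d_%.2f.jpg' % (out_prefix, i, score))

# Example usage (assumes image.jpg exists next to the script):
# crop_faces('image.jpg')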
The preferred vehicle of The St. Regis Aspen Resort. Allow us to drive you anywhere within the city limits of Aspen in an LX 570 SUV. Take advantage of the house car and arrive at your dinner reservation stress-free and in style. Lexus' onsite fleet of vehicles will be available to guests for complimentary offsite transportation and test drives, providing easy access and a way to explore the surrounding mountains, valleys and beyond. Guests can cover more of the world-famous valley of Aspen than is possible on foot. Get behind the wheel of a classic GS 350 F Sport, an NX 200t F Sport crossover or a spacious GX 460 luxury utility vehicle for up to five hours without mileage restrictions. Note: Terms and conditions are non-negotiable. To reserve a vehicle, please see our Concierge, Porte Cochere or a Front Office Manager.
#strand_function plots delay, HL and intensity segments of both conditions. #strand_function plots events as pausing sites and iTSS_I. strand_function <- function(data, data_p, data_n, data_p_c, data_n_c, Strand, condition, frag, i, fontface, axis_text_y_size, axis_title_y_size ) { #first plot for intensity segments for (j in seq_along(Strand)) { df <- data.frame() if (Strand[j] == "+") { df <- data_p df_c <- data_p_c } else{ df <- data_n df_c <- data_n_c } if (nrow(df) != 0 & nrow(df_c) != 0) { p1 <- ggplot(df, aes(x = get('position'))) + scale_x_continuous(limits = c(frag[i], frag[i + 1])) + labs(y = "Intensity [log2FC]") + theme_bw() + background_grid(major = "xy", minor = "none") + theme( legend.title = element_blank(), axis.title.y = element_text(colour = 5, size = axis_title_y_size), axis.text.y = element_text( angle = 90, hjust = 1, size = axis_text_y_size ), axis.text.x = element_blank(), axis.title.x = element_blank(), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), legend.position = "none", plot.margin = margin(.1, .2, .1, .2, "cm"), panel.border = element_blank() ) #######################segment plot################### #first plot for half-life segments #select segments without outliers df <- indice_function(df, "HL_fragment") # increase the limit to 20 in case 3 or more probes/bins have a HL # above 10 Limit_h_df1 <- limit_function(df, "half_life", ind = 1) if (Limit_h_df1 == 20) { Breaks_h <- seq(0, Limit_h_df1, by = 4) } else{ Breaks_h <- seq(0, Limit_h_df1, by = 2) } df1.h <- secondaryAxis(df, "half_life", ind = 1) #in case only one bin is available and the HL is above 20 if (all(df$half_life > 20)) { df$half_life <- 20 } p2 <- ggplot(df, aes(x = get('position'))) + scale_x_continuous(limits = c(frag[i], frag[i + 1])) + labs(y = "Half-life [min]") + theme_bw() + background_grid(major = "xy", minor = "none") + theme( legend.title = element_blank(), legend.position = "none", axis.title.y = element_text(colour = 6, size = axis_title_y_size), axis.text.x = element_text(size = 6), axis.title.x = element_blank(), axis.text.y = element_text( angle = 90, hjust = 1, size = axis_text_y_size ), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), plot.margin = margin(.1, .2, .1, .2, "cm"), panel.border = element_blank() ) #add the second axis for half-life segments plot if (length(unique(df$half_life)) == 1) { if (is.na(unique(df$half_life))) { p2 <- p2 + geom_blank() + scale_y_continuous( limits = c(0, Limit_h_df1), breaks = Breaks_h, sec.axis = sec_axis( ~ . * 1, name = "Half-life [min]", breaks = Breaks_h) ) } } #select segments without outliers df <- indice_function(df, "delay_fragment") #increase the limit to 20 in case 3 or more probes/bins have a delay #above 10 Limit_df1 <- limit_function(df, "delay", ind = 1) if (Limit_df1 == 20) { Breaks_d <- seq(0, Limit_df1, by = 4) } else{ Breaks_d <- seq(0, Limit_df1, by = 2) } #first plot for delay segments df1.d <- secondaryAxis(df, "delay", ind = 1) #in case only one bin is available and the delay is above 20 if (all(df$delay > 20)) { df$delay <- 20 } p3 <- ggplot(df, aes( x = get('position'), y = get('delay'), col = cdt )) + scale_x_continuous(limits = c(frag[i], frag[i + 1])) + scale_y_continuous( limits = c(0, Limit_df1), breaks = Breaks_d, sec.axis = sec_axis( ~ . 
* 1, name = "Delay [min]", breaks = Breaks_d) ) + labs(y = "Delay [min]") + theme_bw() + background_grid(major = "xy", minor = "none") + theme( legend.title = element_blank(), axis.title.x = element_blank(), legend.position = "none", axis.title.y = element_text(colour = 4, size = axis_title_y_size), axis.text.y = element_text( angle = 90, hjust = 1, size = axis_text_y_size ), axis.text.x = element_text(size = 6), panel.grid.major.x = element_blank(), plot.title = element_blank(), plot.margin = margin(.1, .2, .2, .2, "cm"), panel.border = element_blank() ) #add the second axis for delay segments plot if (length(unique(df$delay)) == 1) { if (is.na(unique(df$delay))) { p3 <- p3 + scale_y_continuous( limits = c(0, Limit_df1), breaks = Breaks_d, sec.axis = sec_axis( ~ . * 1, name = "Delay [min]", breaks = Breaks_d) ) } } #add the segments to the plot base and check #intensity is plotted independently if delay/HL data are present or not. if (length(na.omit(df$delay)) == 0) { p1 <- p1 + geom_point(data = .data, aes(x = position, y = distance_int), size = .5) p2 <- p2 p3 <- p3 } else{ ######################intensity plot################ #add a reference to outliers probes or bins #plot distance and new fragments results of DP of both #conditions df_c <- indice_function(df_c, "intensity_comb_fragment") df1_wo <- meanPosition(df_c %>% filter(indice==1), "intensity_comb_fragment") if (nrow(df_c %>% filter(indice == 1)) != 0) { p1 <- p1 + geom_point( data = filter( df_c, indice == 1, distance_int < 0, distance_int > -5 ), aes(x = position, y = distance_int), col = 6, size = .5 ) + geom_point( data = filter( df_c, indice == 1, distance_int > 0, distance_int < 5 ), aes(x = position, y = distance_int), col = 4, size = .5 ) + geom_line(data = filter(df_c, indice == 1), aes( x = get('position'), y = get('intensity_mean_comb_fragment'), col = get('intensity_comb_fragment') ) ) + geom_text( data = df1_wo, aes( x = get('meanPosi'), y = get('intensity_mean_comb_fragment'), label = get('intensity_comb_fragment') ), size = 1.3, check_overlap = TRUE ) if (nrow(df_c %>% filter( indice == 1, p_value_distance_intensity < 0.05 )) != 0) { if (unique(df_c$strand) == "+") { df_c_mean <- arrange_byGroup( df_c %>% filter( indice == 1, p_value_distance_intensity < 0.05 ), "intensity_comb_fragment" ) } else{ df_c_mean <- df_c %>% filter(indice == 1, p_value_distance_intensity < 0.05) df_c_mean <- df_c_mean[!duplicated(df_c_mean$p_value_distance_intensity), ] } p1 <- p1 + geom_text( data = df_c_mean, aes( x = get('position'), y = get('intensity_mean_comb_fragment') ), label = "**", fontface = fontface, size = 2, check_overlap = TRUE ) } } if (nrow(df_c %>% filter(get('indice') == 2)) != 0) { p1 <- p1 + geom_point( data = df_c %>% filter( get('indice') == 2 & distance_int > 0 & distance_int < 10 ), aes(x = position, y = distance_int), col = 2, shape = 17, size = .5 ) + geom_point( data = df_c %>% filter( get('indice') == 2 & distance_int < 0 & distance_int > -10 ), aes(x = position, y = distance_int), col = 3, shape = 12, size = .5 ) } #######################HL plot################### #plot distance and new fragments results of DP of both #conditions #dismiss outliers df_c <- indice_function(df_c, "HL_comb_fragment") #add fragment label df1_wo <- meanPosition(df_c %>% filter(indice==1), "HL_comb_fragment") if (nrow(df_c %>% filter(indice == 1)) != 0) { p2 <- p2 + geom_point( data = filter( df_c, indice == 1, distance_HL < 0, distance_HL < 5 ), aes(x = position, y = distance_HL), col = 6, size = .5 ) + geom_point( data = 
filter( df_c, indice == 1, distance_HL > 0, distance_HL > -5 ), aes(x = position, y = distance_HL), col = 4, size = .5 ) + geom_line(data = filter(df_c, indice == 1), aes( x = get('position'), y = get('HL_mean_comb_fragment'), col = get('HL_comb_fragment') ) )+ geom_text( data = df1_wo, aes( x = get('meanPosi'), y = get('HL_mean_comb_fragment'), label = get('HL_comb_fragment') ), size = 1.3, check_overlap = TRUE ) if (nrow(df_c %>% filter(p_value_distance_HL < 0.05)) != 0) { if (unique(df_c$strand) == "+") { df_c_mean <- arrange_byGroup( df_c %>% filter( indice == 1, p_value_distance_HL < 0.05 ), "HL_comb_fragment" ) } else{ df_c_mean <- df_c %>% filter(indice == 1, p_value_distance_HL < 0.05) df_c_mean <- df_c_mean[!duplicated(df_c_mean$p_value_distance_HL), ] } p2 <- p2 + geom_text( data = df_c_mean, aes( x = get('position'), y = get('HL_mean_comb_fragment') ), label = "**", fontface = fontface, size = 2, check_overlap = TRUE ) } } if (nrow(df_c %>% filter(get('indice') == 2)) != 0) { p2 <- p2 + geom_point( data = df_c %>% filter( get('indice') == 2 & distance_HL > 0 & distance_HL < 10 ), aes(x = position, y = distance_HL), col = 2, shape = 17, size = .5 ) + geom_point( data = df_c %>% filter( get('indice') == 2 & distance_HL < 0 & distance_HL > -10 ), aes(x = position, y = distance_HL), col = 3, shape = 12, size = .5 ) } } } if (Strand[j] == "+") { p <- list(p1, p2, p3) } else{ p4 <- p1 + coord_trans(y = "reverse") p5 <- p2 + coord_trans(y = "reverse") p6 <- p3 + coord_trans(y = "reverse") p.1 <- list(p4, p5, p6) } } p <- c(p, p.1) return(p) }
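The helper above only assembles ggplot objects; nothing is drawn until the caller arranges the returned list. A minimal usage sketch follows (the data-frame names and everything not taken from the signature are hypothetical, chosen to match the columns referenced in the body):

## assumed inputs: segment tables carrying 'position', 'strand', 'cdt',
## 'distance_int', 'distance_HL' and the *_comb_fragment columns used above
frag <- c(0, 5000, 10000)                        # fragment boundaries (assumed)
panels <- strand_function(
    data     = seg_df,
    data_p   = subset(seg_df,  strand == "+"),
    data_n   = subset(seg_df,  strand == "-"),
    data_p_c = subset(comb_df, strand == "+"),
    data_n_c = subset(comb_df, strand == "-"),
    Strand = c("+", "-"), condition = c("cdt1", "cdt2"),
    frag = frag, i = 1, fontface = "bold",
    axis_text_y_size = 3, axis_title_y_size = 5
)
## 'panels' holds six plots: intensity, half-life and delay for the plus
## strand, then the same three panels with reversed y-axes for the minus strand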
------------------------------------------------------------------------ -- An up-to technique for CCS ------------------------------------------------------------------------ {-# OPTIONS --sized-types #-} open import Prelude hiding (step-→) module Bisimilarity.Weak.Up-to.CCS {ℓ} {Name : Type ℓ} where open import Equality.Propositional open import Logical-equivalence using (_⇔_) open import Prelude.Size open import Function-universe equality-with-J hiding (id; _∘_) open import Bisimilarity.Weak.CCS import Bisimilarity.Weak.Equational-reasoning-instances open import Equational-reasoning open import Indexed-container hiding (⟨_⟩) open import Labelled-transition-system.CCS Name open import Relation open import Bisimilarity.Weak CCS open import Bisimilarity.Weak.Up-to CCS import Labelled-transition-system.Equational-reasoning-instances CCS as Dummy -- Up to (non-degenerate) context for CCS (for polyadic, coinductive -- contexts). Up-to-context : Trans₂ ℓ (Proc ∞) Up-to-context R (p , q) = ∃ λ n → ∃ λ (C : Context ∞ n) → Non-degenerate ∞ C × ∃ λ ps → ∃ λ qs → p ≡ C [ ps ] × q ≡ C [ qs ] × ∀ x → R (ps x , qs x) -- Up to context is monotone. up-to-context-monotone : Monotone Up-to-context up-to-context-monotone R⊆S = Σ-map id $ Σ-map id $ Σ-map id $ Σ-map id $ Σ-map id $ Σ-map id $ Σ-map id (R⊆S ∘_) -- Up to context is size-preserving. up-to-context-size-preserving : Size-preserving Up-to-context up-to-context-size-preserving = _⇔_.from (monotone→⇔ up-to-context-monotone) (λ where (_ , C , D , ps , qs , refl , refl , ps∼qs) → C [ ps ] ∼⟨ D [ ps∼qs ]-cong ⟩■ C [ qs ]) -- Note that up to context is not compatible (assuming that Name is -- inhabited). -- -- This counterexample is a minor variant of one due to Pous and -- Sangiorgi, who state that up to context is contained in a larger -- function that is compatible for another, "one-sided" step function -- (see Section 6.5.3 in "Enhancements of the bisimulation proof -- method"). ¬-up-to-context-compatible : Name → ¬ Compatible Up-to-context ¬-up-to-context-compatible x comp = contradiction where a = x , true data R₀ : Rel₂ ℓ (Proc ∞) where base : R₀ (τ ∙ (a ∙) , a ∙) R : Rel₂ ℓ (Proc ∞) R = R₀ ⁼ !τa[R]!a : Up-to-context R (! τ ∙ (a ∙) , ! a ∙) !τa[R]!a = 1 , ! hole fzero , ! hole , (λ _ → τ ∙ (a ∙)) , (λ _ → a ∙) , refl , refl , (λ _ → inj₁ base) !a[τ]⇒̂→≡ : ∀ {P} → ! a ∙ [ τ ]⇒̂ P → P ≡ ! a ∙ !a[τ]⇒̂→≡ (non-silent ¬s _) = ⊥-elim $ ¬s _ !a[τ]⇒̂→≡ (silent _ done) = refl !a[τ]⇒̂→≡ (silent _ (step {μ = μ} s !a⟶ _)) = ⊥-elim $ name≢τ ( name a ≡⟨ !-only ·-only !a⟶ ⟩ μ ≡⟨ silent≡τ s ⟩∎ τ ∎) drop-[] : ∀ {P Q S} → Up-to-context R (P ∣ Q , ! S) → R (P ∣ Q , ! S) drop-[] (_ , hole i , _ , _ , _ , P∣Q≡Ps[i] , !S≡Qs[i] , PsRQs) = subst R (cong₂ _,_ (sym P∣Q≡Ps[i]) (sym !S≡Qs[i])) (PsRQs i) drop-[] (_ , ∅ , _ , _ , _ , () , _) drop-[] (_ , _ ∣ _ , _ , _ , _ , _ , () , _) drop-[] (_ , _ ⊕ _ , _ , _ , _ , () , _) drop-[] (_ , _ · _ , _ , _ , _ , () , _) drop-[] (_ , ⟨ν _ ⟩ _ , _ , _ , _ , () , _) drop-[] (_ , ! 
_ , _ , _ , _ , () , _) R⊆StepR : R ⊆ ⟦ StepC ⟧ R R⊆StepR (inj₁ base) = ⟨ lr , rl ⟩ where lr : ∀ {P μ} → τ ∙ (a ∙) [ μ ]⟶ P → ∃ λ Q → a ∙ [ μ ]⇒̂ Q × R (P , Q) lr action = _ , (a ∙ ■) , inj₂ refl rl : ∀ {Q μ} → a ∙ [ μ ]⟶ Q → ∃ λ P → τ ∙ (a ∙) [ μ ]⇒̂ P × R (P , Q) rl action = _ , (τ ∙ (a ∙) →⟨ ⟶: action ⟩ a ∙ →⟨ ⟶: action ⟩■ ∅) , inj₂ refl R⊆StepR {P , _} (inj₂ refl) = ⟨ Σ-map id (Σ-map id inj₂) ∘ lr , Σ-map id (Σ-map id (inj₂ ∘ sym)) ∘ lr ⟩ where lr : ∀ {P′ μ} → P [ μ ]⟶ P′ → ∃ λ Q′ → P [ μ ]⇒̂ Q′ × P′ ≡ Q′ lr P⟶P′ = _ , ⟶→⇒̂ P⟶P′ , refl -- Note the use of compatibility in [R]⊆Step[S]. [R]⊆Step[R] : Up-to-context R ⊆ ⟦ StepC ⟧ (Up-to-context R) [R]⊆Step[R] = Up-to-context R ⊆⟨ up-to-context-monotone (λ {x} → R⊆StepR {x}) ⟩ Up-to-context (⟦ StepC ⟧ R) ⊆⟨ comp ⟩∎ ⟦ StepC ⟧ (Up-to-context R) ∎ contradiction : ⊥ contradiction = $⟨ !τa[R]!a ⟩ Up-to-context R (! τ ∙ (a ∙) , ! a ∙) ↝⟨ [R]⊆Step[R] ⟩ ⟦ StepC ⟧ (Up-to-context R) (! τ ∙ (a ∙) , ! a ∙) ↝⟨ (λ s → StepC.left-to-right s (replication (par-right action))) ⟩ (∃ λ P → ! a ∙ [ τ ]⇒̂ P × Up-to-context R (! τ ∙ (a ∙) ∣ a ∙ , P)) ↝⟨ (λ { (_ , !a⟶ , hyp) → subst (Up-to-context R ∘ (_ ,_)) (!a[τ]⇒̂→≡ !a⟶) hyp }) ⟩ Up-to-context R (! τ ∙ (a ∙) ∣ a ∙ , ! a ∙) ↝⟨ drop-[] ⟩ R (! τ ∙ (a ∙) ∣ a ∙ , ! a ∙) ↝⟨ [ (λ ()) , (λ ()) ] ⟩□ ⊥ □
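For reference, the property that the counterexample above refutes is the standard Pous–Sangiorgi notion of compatibility: a monotone function F on binary relations is compatible with the step functional b when

  F (b R) ⊆ b (F R)   for every relation R,

i.e. F ∘ b ⊆ b ∘ F pointwise. Size-preservation, which the file does establish for Up-to-context, is a weaker soundness criterion; its precise sized-types formulation is the one from Bisimilarity.Weak.Up-to and is assumed here rather than restated.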
Caroline (Kat Dennings) is a sassy and cynical teenage girl who's mortified when her father moves them to a perpetually dreary Podunk in British Columbia where other students' drug-induced behavior consistently depresses her. Out of boredom, she initiates affairs with her teacher and a stoner classmate, all while a serial killer is terrorizing the town, but matters sour when the men develop intense feelings. Josh Lucas, Andie MacDowell, Reece Thompson, and Rachel Blanchard co-star. 94 min. Widescreen (Enhanced); Soundtrack: English Dolby Digital 5.1; Subtitles: English (SDH), Spanish; featurette.

(** * Combi.LRrule.freeSchur : Free Schur functions *) (******************************************************************************) (* Copyright (C) 2014-2018 Florent Hivert <[email protected]> *) (* *) (* Distributed under the terms of the GNU General Public License (GPL) *) (* *) (* This code is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *) (* General Public License for more details. *) (* *) (* The full text of the GPL is available at: *) (* *) (* http://www.gnu.org/licenses/ *) (******************************************************************************) (** * Free Schur functions This file is the second step of the proof of the Littewood-Richardson rule. We translate theorem [LRtriple_cat_equiv] in a algebraic setting. Specifically, the main goal of this file is to lift the multiplication of Schur multivariate polynomials to the non commutative setting. - [commword n R w] == the commutative image of the word [w] as a multivariate polynomial (of type [{mpoly R[n]}]). - [homlang n d] == the type of homogenous langage over ['I_n.+1] of degre [d]. that is [{set d.-tuple 'I_n}]. - [polylang n R s] == the commutative image of the langage [s] where s is of type [homlang n d]. - [catlang l1 l2] == the concatenation of homogeneous language: given [s1] of degree [d1] and [s2] of degree [d2] return an homogeneous language of degree [d1 + d2]. - [tabwordshape n sh] == the set of reading of tableaux over ['I_n.+1] of shape [sh], where [sh] is of type ['P_d] - [freeSchur n t] == the set of words whose recording tableau over ['I_n.+1] is [t], where [t] is of type [stdtabn] - [tabword_of_tuple w] == the bijection freeSchur -> tabwordshape as stated in Theorem [tabword_of_tuple_freeSchur]: [ forall Q : stdtabn d, [set tabword_of_tuple x | x in freeSchur n0 Q] = tabwordshape n0 (shape_deg Q) ] The free Littlewood-Richardson rule: - [LRsupport Q1 Q2] == the set of standard Littlewood-Richardson Q-tableau in the product of the free Schur function indexed by [Q1] and [Q2], that is the set of [Q] which forms a [LRtriple] with [Q1] and [Q2]. The main result here is the free LR rule [free_LR_rule]: [ catlang (freeSchur Q1) (freeSchur Q2) = \bigcup_(Q in LRsupport) freeSchur Q. ] We then go back to commutative Schur polynomials: - [hyper_stdtab sh] == the hyper standard tableau of shape sh as a [seq (seq nat)]. - [hyper_stdtabn sh] == the hyper standard tableau of shape sh as a [stdtabn d] where sh is a ['P_d]. - [LRtab_set Q1 Q2 Q] == the set of standard Littlewood-Richardson Q-tableau in the product of the free Schur function indexed by [Q1] and [Q2] of shape [Q]. - [LRtab_coeff Q1 Q2] == the Littlewood-Richardson coefficient defined as the cardinality of [LRtab_set Q1 Q2 Q]. Invariance with the choice of Q1 and Q2: - [bij_LRsupport Q1 Q2] == a bijection from [LRsupport T1 T2] to [LRsupport Q1 Q2] as long as [T1] and [Q1] have the same shape as well as [T2] and [Q2]. It is used to show Theorem [LRtab_coeff_shapeE]: [ shape T1 = P1 -> shape T2 = P2 -> LRtab_coeff P = #|[set Q in (LRsupport T1 T2) | (shape Q == P)]|. ] ****************************************************************************) Require Import mathcomp.ssreflect.ssreflect. From mathcomp Require Import ssrfun ssrbool eqtype ssrnat seq fintype. From mathcomp Require Import order tuple finfun bigop finset ssralg. From SsrMultinomials Require Import ssrcomplements freeg mpoly. 
Require Import tools ordtype partition Yamanouchi std tableau stdtab. Require Import Schensted congr plactic stdplact Yam_plact Greene_inv shuffle. Require Import Schur_mpoly. Set Implicit Arguments. Unset Strict Implicit. Unset Printing Implicit Defensive. Import Order.TTheory. Local Open Scope ring_scope. Import GRing.Theory. (** * Commutative image of an homogeneous langage *) Section CommutativeImage. Variable n : nat. Variable R : comRingType. Definition commword (w : seq 'I_n) : {mpoly R[n]} := \prod_(i <- w) 'X_i. Lemma perm_commword (u v : seq 'I_n) : perm_eq u v -> commword u = commword v. Proof using . exact: perm_big. Qed. Lemma commword_morph (u v : seq 'I_n) : commword (u ++ v) = (commword u) * (commword v). Proof using . by rewrite /commword big_cat. Qed. Lemma commtuple_morph d1 d2 (u : d1.-tuple 'I_n) (v : d2.-tuple 'I_n) : commword (cat_tuple u v) = (commword u) * (commword v). Proof using . by rewrite commword_morph. Qed. Definition homlang d := {set d.-tuple 'I_n}. Definition polylang d (s : homlang d) := \sum_(w in s) commword w. Definition catlang d1 d2 (s1 : homlang d1) (s2 : homlang d2) : homlang (d1 + d2) := [set cat_tuple w1 w2 | w1 in s1, w2 in s2]. Lemma cat_tuple_inj d1 d2 (u x : d1.-tuple 'I_n) (v y : d2.-tuple 'I_n) : cat_tuple u v = cat_tuple x y -> (u, v) = (x, y). Proof using . rewrite /cat_tuple => [] [/eqP]. rewrite eqseq_cat; last by rewrite !size_tuple. by move=> /andP [/eqP/val_inj -> /eqP/val_inj ->]. Qed. Lemma catlangM d1 d2 (s1 : homlang d1) (s2 : homlang d2) : polylang s1 * polylang s2 = polylang (catlang s1 s2). Proof using . rewrite /polylang /catlang mulr_suml. under eq_bigr do [rewrite mulr_sumr; under eq_bigr do rewrite -commword_morph]. rewrite pair_big /=. rewrite -(big_imset (h := fun p => cat_tuple p.1 p.2) commword) /=; last by move=> [u v] [x y] /= _ _; apply: cat_tuple_inj. apply: eq_bigl => w. apply/idP/idP. - move/imsetP => [] [u v] /=; rewrite unfold_in /= => /andP [Hu Hv] ->. exact: imset2_f. - move/imset2P => [u v Hu Hv -> {w}]. by apply/imsetP; exists (u, v) => //=; rewrite unfold_in /= Hu Hv. Qed. End CommutativeImage. (** ** Row reading of tableau *) Section TableauReading. Context {disp : unit} {A : inhOrderType disp}. Definition tabsh_reading_RS (sh : seq nat) (w : seq A) := (to_word (RS w) == w) && (shape (RS (w)) == sh). Lemma tabsh_reading_RSP (sh : seq nat) (w : seq A) : reflect (exists tab, [/\ is_tableau tab, shape tab = sh & to_word tab = w]) (tabsh_reading_RS sh w). Proof using . apply (iffP idP). - move=> /andP [/eqP HRS /eqP Hsh]. exists (RS w); split => //; exact: is_tableau_RS. - move=> [tab] [Htab Hsh Hw]; apply/andP. have:= RS_tabE Htab; rewrite Hw => ->. by rewrite Hw Hsh. Qed. Lemma tabsh_reading_RSE sh : tabsh_reading sh =1 tabsh_reading_RS sh. Proof using . move=> w. apply/idP/idP. - by move /tabsh_readingP/tabsh_reading_RSP. - by move /tabsh_reading_RSP/tabsh_readingP. Qed. End TableauReading. (** * Free Schur function : lifting Schur function is the free algebra *) Section FreeSchur. Variable R : comRingType. Variable n0 : nat. Local Notation n := (n0.+1). Local Notation Schur sh := (Schur n0 R sh). Local Notation homlang d := (homlang n d). Section Degree. Variable d : nat. (* set of tableaux words on 'I_n of a given shape *) Definition tabwordshape (sh : 'P_d) : homlang d := [set t : d.-tuple 'I_n | tabsh_reading sh t ]. (* set of tableaux words on 'I_n of a given Q-symbol *) Definition freeSchur (Q : stdtabn d) : homlang d := [set t : d.-tuple 'I_n | (RStabmap t).2 == Q]. 
Lemma freeSchurP Q (t : d.-tuple 'I_n) : t \in freeSchur Q = (val t \in langQ Q). Proof using . by rewrite /freeSchur /langQ !inE /=. Qed. Lemma size_RS_tuple (t : d.-tuple 'I_n) : size (to_word (RS t)) == d. Proof using . by rewrite size_to_word -{2}(size_tuple t) size_RS. Qed. (** Bijection freeSchur -> tabwordshape *) Definition tabword_of_tuple (t : d.-tuple 'I_n) : d.-tuple 'I_n := Tuple (size_RS_tuple t). Lemma perm_tabword_of_tuple (t : d.-tuple 'I_n) : perm_eq t (tabword_of_tuple t). Proof using . rewrite /tabword_of_tuple /=; exact: perm_RS. Qed. Lemma tabword_of_tuple_freeSchur_inj (Q : stdtabn d) : {in (freeSchur Q) &, injective tabword_of_tuple}. Proof using . move=> /= u v. rewrite /freeSchur !inE => /eqP Hu /eqP Hv /(congr1 (@tval _ _)) /= H. case: (bijRStab [inhOrderType of 'I_n]) => RSinv HK _. apply: val_inj; rewrite -[val u]HK -[val v]HK; congr (RSinv _). rewrite {RSinv HK} /RStab /=. apply: pqpair_inj => /=. have:= (is_tableau_RS u). have:= is_tableau_RS v. move: Hu Hv H; rewrite -!RStabmapE /RStabmap. case: RSmap => [pu qu] /= ->; case: RSmap => [pv qv] /= -> Heq Hv Hu. by rewrite -(RS_tabE Hu) -(RS_tabE Hv) Heq. Qed. Lemma tabword_of_tuple_freeSchur (Q : stdtabn d) : [set tabword_of_tuple x | x in freeSchur Q] = tabwordshape (shape_deg Q). Proof using . rewrite /freeSchur /tabwordshape /tabword_of_tuple. apply/setP/subset_eqP/andP; split; apply/subsetP => w; rewrite !inE tabsh_reading_RSE /tabsh_reading_RS. - move/imsetP => [t]; rewrite inE => /eqP HQ /(congr1 val) /= ->. rewrite (RS_tabE (is_tableau_RS t)) eq_refl /= {w}. by rewrite -HQ -!RStabmapE shape_RStabmapE. - move/andP => [/eqP Hw /eqP Hsh]; apply/imsetP. have Hpair : is_RStabpair (RS w, val Q). by rewrite /is_RStabpair is_tableau_RS stdtabnP Hsh eq_refl. have Hpr : is_RSpair (RS w, yam_of_stdtab Q). have:= Hpair; rewrite /is_RStabpair /= => /andP [-> /=]. move=> /andP [/yam_of_stdtabP -> /= /eqP ->]. by rewrite shape_yam_of_stdtab. pose imw := (RStabinv (RSTabPair Hpair)). have Hsz : size (imw) == d. rewrite /imw /RStabinv /= -size_RS -RSmapE. rewrite (RSmapinv2K Hpr) /=. by rewrite size_RS size_tuple. exists (Tuple Hsz). + rewrite inE /= /imw. by have/(congr1 val) := RStabinvK (RSTabPair Hpair) => /= ->. + apply: val_inj => /=. rewrite /imw /RStabinv /= -Hw /=; congr (to_word _). by rewrite Hw -[RS (RSmapinv _ _)]RSmapE RSmapinv2K. Qed. End Degree. (** ** Noncommutative lifting of Schur polynomials *) Lemma SchurE d (Q : stdtabn d) : Schur (shape_deg Q) = polylang R (tabwordshape (shape_deg Q)). Proof using . rewrite Schur_tabsh_readingE /polylang /commword; apply eq_bigl => i /=. by rewrite inE. Qed. (** ** Commutative image of freeSchur language *) Lemma Schur_freeSchurE d (Q : stdtabn d) : Schur (shape_deg Q) = polylang R (freeSchur Q). Proof using . rewrite SchurE -tabword_of_tuple_freeSchur. rewrite /polylang (big_imset _ (@tabword_of_tuple_freeSchur_inj _ Q)) /=. apply: eq_bigr => t _; apply: perm_commword. by rewrite perm_sym; exact: perm_RS. Qed. (** * The free Littlewood-Richardson rule *) Section FreeLRrule. Variables (d1 d2 : nat). Variables (Q1 : stdtabn d1) (Q2 : stdtabn d2). Definition LRsupport := [set Q : stdtabn (d1 + d2) | pred_LRtriple_fast Q1 Q2 Q ]. Lemma free_LR_rule : catlang (freeSchur Q1) (freeSchur Q2) = \bigcup_(Q in LRsupport) freeSchur Q. Proof using . rewrite /catlang. apply/setP/subset_eqP/andP; split; apply/subsetP=> t. - move/imset2P => [w1 w2]. rewrite !freeSchurP /= => Hw1 Hw2 ->. have:= conj Hw1 Hw2. rewrite LRtriple_cat_equiv // => [[H1 H2] [Q [Htriple /= Hcat]]]. 
have:= is_stdtab_of_n_LRtriple (stdtabnP Q1) (stdtabnP Q2) Htriple. rewrite !size_tab_stdtabn => HQ. apply/bigcupP; exists (StdtabN HQ). rewrite /LRsupport inE -LRtriple_fastE //. apply/LRtripleP => //; exact: Htriple. by rewrite freeSchurP. - move/bigcupP => [Q]; rewrite /LRsupport freeSchurP inE => Htriple /= Ht. have Hsz1 : size (take d1 t) == d1. by rewrite size_takel // size_tuple leq_addr. pose t1 := Tuple Hsz1. have Hsz2 : size (drop d1 t) == d2. by rewrite size_drop size_tuple addKn. pose t2 := Tuple Hsz2. have Hcat : t = cat_tuple t1 t2. by apply: val_inj => /=; rewrite cat_take_drop. have : val t1 \in langQ Q1 /\ val t2 \in langQ Q2. rewrite LRtriple_cat_equiv // !size_tuple !size_tab_stdtabn //. split; try by []. exists Q; split. + by apply/LRtripleP => //; rewrite LRtriple_fastE. + by rewrite /= cat_take_drop. move=> [/= Ht1 Ht2]. apply/imset2P; apply: (Imset2spec (x1 := t1) (x2 := t2)). + by rewrite freeSchurP. + by rewrite freeSchurP. + by apply: val_inj; rewrite /= cat_take_drop. Qed. (** Alternative proof from [LRrule_langQ] *) Lemma free_LR_rule_alternate : catlang (freeSchur Q1) (freeSchur Q2) = \bigcup_(Q in LRsupport) freeSchur Q. Proof using . rewrite /catlang. apply/setP/subset_eqP/andP; split; apply/subsetP=> /= t. - move/imset2P => [/= w1 w2]. rewrite !freeSchurP /= => Hw1 Hw2 ->. have : exists u v, [/\ w1 ++ w2 = u ++ v, u \in langQ Q1 & v \in langQ Q2]. by exists w1, w2. rewrite LRrule_langQ // => [] [w] [Htriple /= Hcat]. have:= is_stdtab_of_n_LRtriple (stdtabnP Q1) (stdtabnP Q2) Htriple. rewrite !size_tab_stdtabn => HQ. apply/bigcupP; exists (StdtabN HQ). rewrite /LRsupport inE -LRtriple_fastE //. apply/LRtripleP => //; exact: Htriple. by rewrite freeSchurP. - move/bigcupP => [/= Q]; rewrite /LRsupport freeSchurP inE -LRtriple_fastE //. move=> /(LRtripleP _ (stdtabnP Q1) (stdtabnP Q2)) Htriple /= Ht. have : exists Q, LRtriple Q1 Q2 Q /\ val t \in langQ Q by exists Q. rewrite -LRrule_langQ_alternate // => [] [/= u1] [/= u2] [Hcat Hu1 Hu2]. have:= Hu1 => /size_langQ; rewrite size_tab_stdtabn => /eqP Hsz1. pose t1 := Tuple Hsz1. have:= Hu2 => /size_langQ; rewrite size_tab_stdtabn => /eqP Hsz2. pose t2 := Tuple Hsz2. apply/imset2P; apply: (Imset2spec (x1 := t1) (x2 := t2)). + by rewrite freeSchurP. + by rewrite freeSchurP. + exact: val_inj. Qed. (** Passing to commutative image in the free LR rule *) Theorem LR_rule_tab : Schur (shape_deg Q1) * Schur (shape_deg Q2) = \sum_(Q in LRsupport) (Schur (shape_deg Q)). Proof using . rewrite !Schur_freeSchurE catlangM free_LR_rule. rewrite -cover_imset /polylang. rewrite big_trivIset /=; first last. apply/trivIsetP => S1 S2. move => /imsetP [/= T1]; rewrite inE => HT1 -> {S1}. move => /imsetP [/= T2]; rewrite inE => HT2 -> {S2}. rewrite /freeSchur => Hdiff. rewrite /disjoint; apply/pred0P => w /=. rewrite !inE; apply: negbTE; move: Hdiff; apply: contra. by move=> /andP [/eqP -> /eqP ->]. under [RHS]eq_bigr do rewrite Schur_freeSchurE. rewrite (big_setID [set set0]) /= big1 ?add0r; first last => [i|]. rewrite inE => /andP [_]; rewrite inE => /eqP ->. by rewrite big_set0. rewrite (big_setID [set x | freeSchur x == set0]) /=. rewrite [X in X + _]big1 ?add0r; first last => [i|]. rewrite inE => /andP [_]; rewrite inE => /eqP ->. by rewrite /polylang big_set0. rewrite -big_imset /=; first last => [T1 T2 /=|]. rewrite inE => /andP []; rewrite inE => /set0Pn [x1 Hx1] _ _. move: Hx1; rewrite inE => /eqP Hx1 /setP/(_ x1); rewrite !inE Hx1. rewrite eq_refl => /esym/eqP; exact: val_inj. apply: eq_bigl => s; rewrite !inE; apply/idP/idP. 
+ move=> /andP [Hn0 /imsetP [Q HQ Hs]]; subst s. by rewrite imset_f //= inE HQ inE Hn0. + move/imsetP => [Q]; rewrite 2!inE => /andP [H1 H2] ->. by rewrite H1 /= imset_f. Qed. End FreeLRrule. Definition hyper_stdtab sh := RS (std (hyper_yam sh)). Lemma hyper_stdtabP sh : is_stdtab (hyper_stdtab sh). Proof using . by rewrite /hyper_stdtab /= RSstdE std_is_std. Qed. Lemma hyper_stdtabnP d (P : 'P_d) : is_stdtab_of_n d (hyper_stdtab P). Proof using . rewrite /is_stdtab_of_n /= hyper_stdtabP /= size_RS. rewrite size_std -evalseq_eq_size (evalseq_hyper_yam (intpartnP P)). by rewrite sumn_intpartn. Qed. Canonical hyper_stdtabn d (P : 'P_d) := StdtabN (hyper_stdtabnP P). Lemma shape_hyper_stdtabnP d (P : 'P_d) : shape (hyper_stdtabn P) = P. Proof using . rewrite shape_RS_std (shape_RS_yam (hyper_yamP (intpartnP P))). by rewrite (evalseq_hyper_yam (intpartnP P)). Qed. Lemma shaped_hyper_stdtabnP d (P : 'P_d) : shape_deg (hyper_stdtabn P) = P. Proof using . apply: val_inj => /=; exact: shape_hyper_stdtabnP. Qed. Section Coeffs. Variables d1 d2 : nat. Variables (P1 : 'P_d1) (P2 : 'P_d2). Definition LRtab_set (P : 'P_(d1 + d2)) := [set Q in (LRsupport (hyper_stdtabn P1) (hyper_stdtabn P2)) | (shape Q == P)]. Definition LRtab_coeff (P : 'P_(d1 + d2)) := #|LRtab_set P|. Theorem LRtab_coeffP : Schur P1 * Schur P2 = \sum_P (Schur P) *+ LRtab_coeff P. Proof using . rewrite /LRtab_coeff /LRtab_set. have:= LR_rule_tab (hyper_stdtabn P1) (hyper_stdtabn P2). rewrite !shaped_hyper_stdtabnP => ->. move : (LRsupport _ _) => LR. rewrite (partition_big (@shape_deg (d1 + d2)) predT) //=. apply: eq_bigr => P _. under eq_bigr => T /andP [_ /eqP ->] do []. rewrite sumr_const; congr (_ *+ _). by apply: eq_card => i /=; rewrite unfold_in inE. Qed. Lemma size_RSmapinv2_yam d (disp : unit) (Typ : inhOrderType disp) (tab : seq (seq Typ)) (T : stdtabn d) : size (RSmapinv2 (tab, yam_of_stdtab T)) = d. Proof using . rewrite -{2}(size_tab_stdtabn T) -size_yam_of_stdtab // /RSmapinv2 /=. elim: (yam_of_stdtab _) tab => [//= | w0 w /= IHw] tab. case: (invinstabnrow _ _) => [tr lr]. by rewrite size_rcons IHw. Qed. (** ** Invariance with respect to choice of the Q-Tableau *) Section Bij_LRsupport. Section ChangeUT. Variable (U1 T1 : stdtabn d1) (U2 T2 : stdtabn d2). Hypothesis Hsh1 : shape U1 = shape T1. Hypothesis Hsh2 : shape U2 = shape T2. Section TakeDrop. Context {disp : unit} {T : inhOrderType disp}. Lemma RStabE (w : seq T) : (RStab w).1 = (RS w). Proof using . by rewrite RStabmapE. Qed. Definition changeUT T1 T2 (w : seq T) : seq T := (RSmapinv2 (RS (take d1 w), yam_of_stdtab T1)) ++ (RSmapinv2 (RS (drop d1 w), yam_of_stdtab T2)). Variable w : seq T. Hypothesis Htake : shape (RS (take d1 w)) = shape U1. Hypothesis Hdrop : shape (RS (drop d1 w)) = shape U2. Lemma changeUtakeP : is_RStabpair (RS (take d1 w), val U1). Proof using Htake. by rewrite /is_RStabpair is_tableau_RS Htake /= eq_refl andbT. Qed. Lemma changeUdropP : is_RStabpair (RS (drop d1 w), val U2). Proof using Hdrop. by rewrite /is_RStabpair is_tableau_RS Hdrop /= eq_refl andbT. Qed. Lemma changeTtakeP : is_RStabpair (RS (take d1 w), val T1). Proof using Hsh1 Htake. by rewrite /is_RStabpair is_tableau_RS Htake /= Hsh1 eq_refl andbT. Qed. Lemma changeTdropP : is_RStabpair (RS (drop d1 w), val T2). Proof using Hdrop Hsh2. by rewrite /is_RStabpair is_tableau_RS Hdrop /= Hsh2 eq_refl andbT. Qed. Lemma toDepRSPair (u : seq T) d (t : stdtabn d) : forall H : is_RStabpair (RS u, val t), RSmapinv2 (RS u, yam_of_stdtab t) = RStabinv (RSTabPair H). Proof using . by []. Qed. 
Lemma plact_changeUT_take : take d1 (changeUT T1 T2 w) =Pl take d1 w. Proof using Hsh1 Htake. rewrite /changeUT take_size_cat; last by rewrite /= size_RSmapinv2_yam. rewrite (toDepRSPair changeTtakeP). apply Sch_plact; apply/eqP. by rewrite -[LHS]RStabE RStabinvK //. Qed. Lemma plact_changeUT_drop : drop d1 (changeUT T1 T2 w) =Pl drop d1 w. Proof using Hdrop Hsh2. rewrite /changeUT drop_size_cat; last by rewrite /= size_RSmapinv2_yam. rewrite (toDepRSPair changeTdropP). apply Sch_plact; apply/eqP. by rewrite -[LHS]RStabE RStabinvK //. Qed. Lemma plact_changeUT : changeUT T1 T2 w =Pl w. Proof using Hdrop Hsh1 Hsh2 Htake. rewrite /changeUT -{3}(cat_take_drop d1 w). apply: plact_cat. - have:= plact_changeUT_take. by rewrite /changeUT take_size_cat // size_RSmapinv2_yam. - have:= plact_changeUT_drop. by rewrite /changeUT drop_size_cat // size_RSmapinv2_yam. Qed. End TakeDrop. Lemma changeUTK (disp : unit) (T : inhOrderType disp) (w : seq T) : (take d1 w) \in langQ U1 -> (drop d1 w) \in langQ U2 -> changeUT U1 U2 (changeUT T1 T2 w) = w. Proof using Hsh1 Hsh2. rewrite !inE /= /changeUT => /eqP Htake /eqP Hdrop. rewrite ?take_size_cat ?drop_size_cat ?size_RSmapinv2_yam //. have Htk : shape (RS (take d1 w)) = shape U1. by rewrite -RStabmapE shape_RStabmapE Htake. have Hdp : shape (RS (drop d1 w)) = shape U2. by rewrite -RStabmapE shape_RStabmapE Hdrop. have -> : RS (RSmapinv2 (RS (take d1 w), yam_of_stdtab T1)) = RS (take d1 w). by rewrite (toDepRSPair (changeTtakeP _)) -RStabE RStabinvK /=. have -> : RS (RSmapinv2 (RS (drop d1 w), yam_of_stdtab T2)) = RS (drop d1 w). by rewrite (toDepRSPair (changeTdropP _)) -RStabE RStabinvK /=. rewrite -{3}(cat_take_drop d1 w); congr (_ ++ _). - move: Htake; rewrite /RStabmap /= -!RSmapE. case H : (RSmap (take d1 w)) => [Pt Qt] <- /=. rewrite stdtab_of_yamK -/((Pt, Qt).2) -H; last exact: is_yam_RSmap2. by rewrite RSmapK. - move: Hdrop; rewrite /RStabmap /= -!RSmapE. case H : (RSmap (drop d1 w)) => [Pt Qt] <- /=. rewrite stdtab_of_yamK -/((Pt, Qt).2) -H; last exact: is_yam_RSmap2. by rewrite RSmapK. Qed. Section DefBij. Variable Q : stdtabn (d1 + d2). Hypothesis HTriple : pred_LRtriple U1 U2 Q. Let w := RSmapinv2 (yamtab (shape Q), yam_of_stdtab Q). Lemma RSpairyamQ : is_RSpair (yamtab (shape Q), yam_of_stdtab Q). Proof using . rewrite /= yamtabP /=; last by apply: is_part_sht; exact: stdtabP. by rewrite yam_of_stdtabP //= shape_yam_of_stdtab // shape_yamtab. Qed. Lemma bij_LRsupportP : is_stdtab_of_n (d1 + d2) (RStab (changeUT T1 T2 (RSmapinv2 (yamtab (shape Q), yam_of_stdtab Q)))).2. Proof using . rewrite /is_stdtab_of_n /=. apply/andP; split; first exact: is_stdtab_RStabmap2. rewrite /size_tab /= -shape_RStabmapE RStabmapE -/(size_tab _) size_RS size_cat. by rewrite !size_RSmapinv2_yam. Qed. Definition bij_LRsupport := StdtabN bij_LRsupportP. Lemma take_drop_langQ : ((take d1 w) \in langQ U1 /\ (drop d1 w) \in langQ U2). Proof using HTriple. have:= HTriple => /LRtripleP-/(_ (stdtabnP _) (stdtabnP _)) Htriple. have Hszw : size w = (d1 + d2)%N by rewrite /w size_RSmapinv2_yam. rewrite LRtriple_cat_equiv //; split. - rewrite size_take size_tab_stdtabn Hszw. case: d2 => [| n]; first by rewrite addn0 ltnn. by rewrite addnS ltnS leq_addr. - by rewrite size_drop size_tab_stdtabn Hszw addKn. - exists (val Q); split; first exact: Htriple. rewrite cat_take_drop /w inE /= /RStabmap RSmapinv2K; last exact: RSpairyamQ. by rewrite yam_of_stdtabK. Qed. Lemma shape_bij_LRsupport : shape bij_LRsupport = shape Q. Proof using HTriple Hsh1 Hsh2. 
have:= take_drop_langQ; rewrite /= -shape_RStabmapE RStabmapE. rewrite !inE => [] [/eqP HU1 /eqP HU2]. have -> : RS (changeUT T1 T2 w) = RS w. apply/eqP; rewrite -plactic_RS; by apply: plact_changeUT; rewrite -RStabmapE shape_RStabmapE ?HU1 ?HU2. rewrite /w -RSmapE shape_RSmap_eq /w RSmapinv2K; last exact: RSpairyamQ. by rewrite //= shape_yam_of_stdtab. Qed. Lemma shape_takeRS : shape (RS (take d1 w)) = shape U1. Proof using HTriple. have:= take_drop_langQ; rewrite -/w => /= [] [Htake _]. move: Htake; rewrite inE => /eqP <-. by rewrite -RStabmapE shape_RStabmapE. Qed. Lemma shape_dropRS : shape (RS (drop d1 w)) = shape U2. Proof using HTriple. have:= take_drop_langQ; rewrite -/w => /= [] [_ Hdrop]. move: Hdrop; rewrite inE => /eqP <-. by rewrite -RStabmapE shape_RStabmapE. Qed. Lemma predLR_bij_LRsupport : pred_LRtriple T1 T2 bij_LRsupport. Proof using HTriple Hsh1 Hsh2. apply/LRtripleP => //=. have:= take_drop_langQ; rewrite -/w => /= [] [Htake Hdrop]. apply LRtriple_cat_langQ => //. - have Hpair := changeTtakeP shape_takeRS. rewrite (toDepRSPair Hpair) inE. by have/(congr1 (fun p => (val p).2)) := RStabinvK (RSTabPair Hpair) => /= ->. - have Hpair := changeTdropP shape_dropRS. rewrite (toDepRSPair Hpair) inE. by have/(congr1 (fun p => (val p).2)) := RStabinvK (RSTabPair Hpair) => /= ->. Qed. End DefBij. Lemma card_LRtab_set_leq (P : seq nat) : #|[set Q in (LRsupport U1 U2) | (shape Q == P)]| <= #|[set Q in (LRsupport T1 T2) | (shape Q == P)]|. Proof using Hsh1 Hsh2. rewrite /LRsupport. have Hsimpl A B C : [set Q in (LRsupport A B) | (shape Q == C)] = [set Q : stdtabn (d1 + d2) | pred_LRtriple A B Q & (shape Q == C)]. apply/setP => Q; rewrite /LRsupport 2!inE [RHS]inE. by congr (_ && _); rewrite LRtriple_fastE. rewrite !{}Hsimpl. rewrite -(card_in_imset (f := bij_LRsupport)). - apply subset_leqif_cards; apply/subsetP => Qres /imsetP [Q]. rewrite inE => /andP [Hpred /eqP <-] -> {Qres}. rewrite inE; apply/andP; split. + exact: predLR_bij_LRsupport. + by rewrite shape_bij_LRsupport. - move=> Q1 Q2; rewrite inE => /andP [HQ1 /eqP HshQ1]. rewrite inE => /andP [HQ2 /eqP]; rewrite -HshQ1 {HshQ1} => Heqsh. move=>/(congr1 (@val _ _ _)); rewrite /=. set w1 := (X in changeUT _ _ X). set w2 := (X in _ = (RStab (changeUT _ _ X)).2) => Heq1. have : RS w1 = RS w2. rewrite -!RSmapE /w1 /w2 !RSmapinv2K; first last. + rewrite /is_RSpair yamtabP /=; last by apply: is_part_sht; exact: stdtabP. by rewrite yam_of_stdtabP //= shape_yamtab shape_yam_of_stdtab. + rewrite /is_RSpair yamtabP /=; last by apply: is_part_sht; exact: stdtabP. by rewrite yam_of_stdtabP //= shape_yamtab shape_yam_of_stdtab. + by rewrite /= Heqsh. have:= take_drop_langQ HQ1. have:= plact_changeUT (shape_takeRS HQ1) (shape_dropRS HQ1); rewrite -/w1. rewrite plactic_RS => /eqP <- [HQ1take HQ1drop]. have:= take_drop_langQ HQ2. have:= plact_changeUT (shape_takeRS HQ2) (shape_dropRS HQ2); rewrite -/w2. rewrite plactic_RS => /eqP <- [HQ2take HQ2drop]. rewrite -!RStabE => Heq2. have {Heq1 Heq2 HQ1take HQ1drop HQ2take HQ2drop} Heq : w1 = w2. rewrite -(changeUTK HQ1take HQ1drop) -(changeUTK HQ2take HQ2drop). congr changeUT. rewrite -(RStabK (changeUT T1 T2 w1)) -(RStabK (changeUT T1 T2 w2)). congr RStabinv. apply val_inj; move: Heq1 Heq2 => /=. case: (RStabmap (changeUT T1 T2 w1)) => A1 B1. by case: (RStabmap (changeUT T1 T2 w2)) => A2 B2 /= -> ->. apply val_inj. rewrite /= -(yam_of_stdtabK (stdtabnP Q1)) -(yam_of_stdtabK (stdtabnP Q2)). congr stdtab_of_yam. have:= RSmapinv2K (RSpairyamQ Q1); rewrite -/w1 Heq /w2. 
by rewrite (RSmapinv2K (RSpairyamQ Q2)) => [] [_ ->]. Qed. End ChangeUT. Lemma card_LRtab_set_shapeE P (U1 T1 : stdtabn d1) (U2 T2 : stdtabn d2) : shape T1 = shape U1 -> shape T2 = shape U2 -> #|[set Q in (LRsupport U1 U2) | (shape Q == P)]| = #|[set Q in (LRsupport T1 T2) | (shape Q == P)]|. Proof using . by move=> H1 H2; apply anti_leq; rewrite !card_LRtab_set_leq // H1 H2. Qed. Theorem LRtab_coeff_shapeE (T1 : stdtabn d1) (T2 : stdtabn d2) P : shape T1 = P1 -> shape T2 = P2 -> LRtab_coeff P = #|[set Q in (LRsupport T1 T2) | (shape Q == P)]|. Proof using . rewrite /LRtab_coeff /LRtab_set => H1 H2. by apply card_LRtab_set_shapeE; rewrite shape_hyper_stdtabnP ?H1 ?H2. Qed. End Bij_LRsupport. End Coeffs. End FreeSchur. (** ** Conjugating tableaux in the free LR rule *) Section Conj. Variables d1 d2 : nat. Lemma LRsupport_conj (T1 : stdtabn d1) (T2 : stdtabn d2): LRsupport (conj_stdtabn T1) (conj_stdtabn T2) = (@conj_stdtabn _) @: (LRsupport T1 T2). Proof using . apply/setP => T; rewrite inE. apply/idP/idP. - rewrite -LRtriple_fastE; try exact: is_stdtab_conj => //; last exact: stdtabnP. move=> H. apply/imsetP; exists (conj_stdtabn T). + rewrite inE -LRtriple_fastE //. rewrite pred_LRtriple_conj // conj_tabK; first exact H. * exact: stdtabP. * apply val_inj; rewrite /= conj_tabK //; exact: stdtabP. - move=> /imsetP [U]; rewrite inE -LRtriple_fastE //. rewrite pred_LRtriple_conj // => H -> {T}. rewrite -LRtriple_fastE; try exact: is_stdtab_conj. exact: H. Qed. Theorem LRtab_coeff_conj (P1 : 'P_d1) (P2 : 'P_d2) (P : 'P_(d1 + d2)) : LRtab_coeff P1 P2 P = LRtab_coeff (conj_intpartn P1) (conj_intpartn P2) (conj_intpartn P). Proof using . rewrite [RHS](LRtab_coeff_shapeE (T1 := conj_stdtabn (hyper_stdtabn P1)) (T2 := conj_stdtabn (hyper_stdtabn P2))); first last. - by rewrite shape_conj_tab shape_hyper_stdtabnP. - by rewrite shape_conj_tab shape_hyper_stdtabnP. rewrite /LRtab_coeff /LRtab_set LRsupport_conj. have Hinj : injective (conj_stdtabn (n:=d1 + d2)). apply inv_inj => T; apply val_inj; rewrite /= conj_tabK //; exact: stdtabP. rewrite -(@card_imset _ _ (@conj_stdtabn _)) //. rewrite !setIdE imsetI; last by move=> a b /= _ _; exact: Hinj. congr (card (mem (_ :&: _))). apply/setP => T; rewrite !inE. apply/idP/idP. - move/imsetP => [U]; rewrite inE => /eqP HU -> /=. by rewrite shape_conj_tab HU. - move/eqP => H; apply/imsetP; exists (conj_stdtabn T). + by rewrite inE /= shape_conj_tab H /= conj_partK. + apply val_inj => //=; rewrite conj_tabK //; exact: stdtabP. Qed. End Conj.
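As a concrete sanity check of LRtab_coeffP and LRtab_coeff_conj in the smallest nontrivial case d1 = d2 = 1 (so P1 = P2 = (1)), the rule specialises to

\[
  s_{(1)} \cdot s_{(1)} \;=\; s_{(2)} + s_{(1,1)},
\]

i.e. the coefficient LRtab_coeff P1 P2 P equals 1 for P ∈ {(2), (1,1)} and 0 otherwise; conjugation exchanges (2) and (1,1) while fixing (1), consistent with LRtab_coeff_conj.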
State Before: α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a : α inst✝ : Preorder β f : α → β h : ScottContinuous f ⊢ Monotone f State After: α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ f a ≤ f b Tactic: intro a b hab State Before: α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b e1 : IsLUB (f '' {a, b}) (f b) ⊢ f a ≤ f b State After: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b e1 : IsLUB (f '' {a, b}) (f b) ⊢ f a ∈ f '' {a, b} Tactic: apply e1.1 State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b e1 : IsLUB (f '' {a, b}) (f b) ⊢ f a ∈ f '' {a, b} State After: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b e1 : IsLUB (f '' {a, b}) (f b) ⊢ f a ∈ {f a, f b} Tactic: rw [Set.image_pair] State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b e1 : IsLUB (f '' {a, b}) (f b) ⊢ f a ∈ {f a, f b} State After: no goals Tactic: exact Set.mem_insert _ _ State Before: α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ IsLUB (f '' {a, b}) (f b) State After: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ Set.Nonempty {a, b} case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ DirectedOn (fun x x_1 => x ≤ x_1) {a, b} case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ IsLUB {a, b} b Tactic: apply h State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ Set.Nonempty {a, b} State After: no goals Tactic: exact Set.insert_nonempty _ _ State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ DirectedOn (fun x x_1 => x ≤ x_1) {a, b} State After: no goals Tactic: exact directedOn_pair le_refl hab State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ IsLUB {a, b} b State After: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : ScottContinuous f a b : α hab : a ≤ b ⊢ IsLeast (Set.Ici b) b Tactic: rw [IsLUB, upperBounds_insert, upperBounds_singleton, Set.inter_eq_self_of_subset_right (Set.Ici_subset_Ici.mpr hab)] State Before: case a α : Type u β : Type v ι : Sort w r r' s : α → α → Prop inst✝¹ : Preorder α a✝ : α inst✝ : Preorder β f : α → β h : 
ScottContinuous f a b : α hab : a ≤ b ⊢ IsLeast (Set.Ici b) b State After: no goals Tactic: exact isLeast_Ici
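Read in order, the proof states above determine a single tactic script. A consolidated reconstruction is sketched below; the lemma statement and exact Mathlib names are inferred from the goals shown and may differ slightly between Mathlib versions.

import Mathlib

example {α β : Type _} [Preorder α] [Preorder β] {f : α → β}
    (h : ScottContinuous f) : Monotone f := by
  intro a b hab
  -- f b is the least upper bound of f '' {a, b}, because {a, b} is a
  -- nonempty directed set whose least upper bound is b
  have e1 : IsLUB (f '' {a, b}) (f b) := by
    apply h
    · exact Set.insert_nonempty _ _
    · exact directedOn_pair le_refl hab
    · rw [IsLUB, upperBounds_insert, upperBounds_singleton,
        Set.inter_eq_self_of_subset_right (Set.Ici_subset_Ici.mpr hab)]
      exact isLeast_Ici
  -- f a belongs to f '' {a, b}, hence lies below its upper bound f b
  apply e1.1
  rw [Set.image_pair]
  exact Set.mem_insert _ _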
png("pdqr.png",width = 1250,height = 500) par(xpd = TRUE,mfrow = c(1,2), mar = c(4,2,0,3)) ## Normal dist. set.seed(1234) x <- sort(rnorm(1000)) plot(x,dnorm(x),type = "l",axes = FALSE,xlab = "",ylab = "",lwd = 3) n <- 200 p <- x[n] ## rnorm col <- grey.colors(1,alpha = 0.1) h <- hist(x,add = TRUE,freq = FALSE,border = col,cex = 2,col = col) text(h$breaks[14],h$density[10],cex = 2,"rnorm") arrows(h$breaks[13],h$density[10],h$breaks[11],h$density[10],col = 'lightgrey',lwd = 3) ## pnorm polygon(c(x[x <= p],rev(x[x <= p])),c(rep(0,length(x[x<=p])),dnorm(rev(x[x<=p]))),col = "grey") text(x[n/2],dnorm(x[n/2])/2,"pnorm",cex = 2) ## qnorm arrows(p,0,p,dnorm(p),lwd = 3,lty = 2,code = 0) text(p,0 - 0.025,"qnorm",cex = 2) ## dnorm arrows(min(x),dnorm(p),p,dnorm(p),code = 0, lwd = 3,lty = 3) text(min(x) - 0.5,dnorm(p),"dnorm",cex = 2,srt = 90) ## redo dnorm lines(x,dnorm(x),lwd = 3) ## Poisson set.seed(4321) lam <- 2.5 x <- sort(rpois(1000,lam)) b <- barplot(dpois(unique(x),lam),axes = FALSE,col = col,border = col, ylim = c(-0.03,max(dpois(unique(x),lam))) + 0.02) points(b[,1],dpois(unique(x),lam),cex = 3,pch = 20) ## rpois text(b[8,1],dpois(unique(x)[5],lam),"rpois",cex = 2) arrows(b[8,1],dpois(unique(x)[5],lam) - 0.01, b[6,1] + 0.5,dpois(unique(x)[7],lam) + 0.01,col = "lightgrey",lwd = 3) ## dpois text(b[8,1],dpois(unique(x)[4],lam),"dpois",cex = 2) arrows(b[7,1],dpois(unique(x)[4],lam),b[4,1],dpois(unique(x)[4],lam),code = 0,lwd = 3, lty = 3) ## qpois text(b[4,1],-0.01,"qpois",cex = 2) arrows(b[4,1],dpois(unique(x)[4],lam),b[4,1],0,code = 0,lwd = 3, lty = 2) ## ppois barplot(dpois(unique(x)[1:2],lam),col = "grey",add = TRUE,border = "grey",axes = FALSE) text(mean(b[1:2,1]),dpois(unique(x)[1],lam)/2,"ppois",cex = 2) ## redo dpois points(b[,1],dpois(unique(x),lam),cex = 3,pch = 20) dev.off()
""" BFGS Quasi-Newton descent method. """ mutable struct BFGS{T<:AbstractFloat, V<:AbstractVector{T}, M<:AbstractMatrix{T}} <: OptBuffer invH::M x::V g::V xpre::V gpre::V d::V xdiff::V gdiff::V y::T ypre::T end function BFGS(x::AbstractVector{T}) where {T} F = float(T) bfgs = BFGS( sqmatr(x, F), similar(x, F), similar(x, F), similar(x, F), similar(x, F), similar(x, F), similar(x, F), similar(x, F), F(NaN), F(NaN), ) reset!(bfgs) return bfgs end function init!( optfn!, M::BFGS{T}, x0; reset, constrain_step = infstep ) where {T} optfn!(x0, zero(T), x0) if reset M.xpre, M.x = M.x, M.xpre M.gpre, M.g = M.g, M.gpre M.ypre = M.y map!(-, M.d, M.gpre) αmax = constrain_step(M.xpre, M.d) α = strong_backtracking!( optfn!, M.xpre, M.d, M.ypre, M.gpre; αmax=αmax, β=one(T)/100, σ=one(T)/10 ) map!(-, M.xdiff, M.x, M.xpre) map!(-, M.gdiff, M.g, M.gpre) invH = M.invH nr, nc = size(invH) scale = dot(M.xdiff, M.gdiff) / dot(M.gdiff, M.gdiff) fill!(invH, 0) for i in 1:nc elt = M.xdiff[i] / M.gdiff[i] invH[i, i] = 1e-5 < elt / scale < 1e5 ? elt : scale end end return M end @inline function reset!(M::BFGS) invH = M.invH nr, nc = size(invH) for j in 1:nc, i in 1:nr invH[i, j] = i == j end return M end function reset!(M::BFGS, x0, scale::Real=1) copy!(M.x, x0) invH = M.invH nr, nc = size(invH) for j in 1:nc, i in 1:nr invH[i, j] = (i == j) * scale end return M end @inline function callfn!(fdf, M::BFGS, x, α, d) __update_arg!(M, x, α, d) y, g = fdf(M.x, M.g) __update_grad!(M, g) M.y = y return y, g end function __descent_dir!(M::BFGS) mul!(M.d, M.invH, M.gpre, -1, 0) return M.d end function step!(optfn!::F, M::BFGS; constrain_step::S=infstep) where {F,S} #= argument and gradient from the end of the last iteration are stored into `xpre` and `gpre` =# M.gpre, M.g = M.g, M.gpre M.xpre, M.x = M.x, M.xpre M.ypre = M.y x, xpre, g, gpre, invH = M.x, M.xpre, M.g, M.gpre, M.invH d = __descent_dir!(M) maxstep = constrain_step(xpre, d) α = strong_backtracking!(optfn!, xpre, d, M.ypre, gpre, αmax = maxstep, β = 0.01, σ = 0.9) if α > 0 #= BFGS update: δγ'B + Bγδ' ⌈ γ'Bγ ⌉ δδ' B <- B - ---------- + |1 + -----| --- δ'γ ⌊ δ'γ ⌋ δ'γ =# δ, γ = M.xdiff, M.gdiff map!(-, γ, g, gpre) map!(-, δ, x, xpre) denom = dot(δ, γ) δscale = 1 + dot(γ, invH, γ) / denom # d <- B * γ mul!(d, invH, γ, 1, 0) invH .= invH .- (δ .* d' .+ d .* δ') ./ denom .+ δscale .* δ .* δ' ./ denom else fill!(M.xdiff, 0) end return α end @inline function __update_arg!(M::BFGS, x, α, d) map!(M.x, d, x) do a, b muladd(α, a, b) end return M.x end @inline function __update_arg!(M::BFGS, x) if x !== M.x copy!(M.x, x) end return M.x end @inline function __update_grad!(M::BFGS, g) if M.g !== g copy!(M.g, g) end return M.g end
(*<*) theory LoweOntologicalArgument_6 imports QML begin nitpick_params[box=false, user_axioms=true, show_all, expect=genuine, format = 3, atoms e = a b c d] sledgehammer_params[verbose=true] (*>*) subsection \<open>Modified Modal Argument I\<close> text\<open>\noindent{In the following iterations we want to illustrate an approach in which we start our interpretive endeavor with no pre-understanding of the concepts involved. We start by taking all concepts as primitive without providing any definition or presupposing any interrelation between them. We see how we gradually improve our understanding of these concepts in the iterative process of adding and removing axioms and, therefore, by framing their inferential role in the argument.}\<close> consts Concrete::"e\<Rightarrow>wo" consts Abstract::"e\<Rightarrow>wo" consts Necessary::"e\<Rightarrow>wo" consts Contingent::"e\<Rightarrow>wo" consts dependence::"e\<Rightarrow>e\<Rightarrow>wo" (infix "dependsOn"(*<*)100(*>*)) consts explanation::"e\<Rightarrow>e\<Rightarrow>wo" (infix "explains"(*<*)100(*>*)) consts Dependent::"e\<Rightarrow>wo" abbreviation Independent::"e\<Rightarrow>wo" where "Independent x \<equiv> \<^bold>\<not>(Dependent x)" text\<open>\noindent{In order to honor the original intention of the author, i.e. providing a \emph{modal} variant of St. Anselm's ontological argument, we are required to make a change in Lowe's original formulation. In this variant we have restated the expressions "necessary abstract" and "necessary concrete" as "necessarily abstract" and "necessarily concrete" correspondingly. With this new adverbial reading of the former "necessary" predicate we are no longer talking about the concept of \emph{necessariness}, but of \emph{necessity} instead, so we use the modal box operator (\<open>\<box>\<close>) for its formalization. Note that in this variant we are not concerned with the interpretation of the original ontological argument anymore. We are interested, instead, in showing how our method can go beyond simple interpretation and foster a creative approach to assessing and improving philosophical arguments.}\<close> text\<open>\noindent{Premise P1 now reads: "God is, by definition, a necessari\emph{ly} concrete being."}\<close> abbreviation Godlike::"e\<Rightarrow>wo" where "Godlike x \<equiv> \<^bold>\<box>Concrete x" text\<open>\noindent{Premise P2 reads: "Some necessari\emph{ly} abstract beings exist". The rest of the premises remains unchanged.}\<close> axiomatization where P2: "\<lfloor>\<^bold>\<exists>x. \<^bold>\<box>Abstract x\<rfloor>" and P3: "\<lfloor>\<^bold>\<forall>x. Abstract x \<^bold>\<rightarrow> Dependent x\<rfloor>" and P4: "\<lfloor>\<^bold>\<forall>x. Dependent x \<^bold>\<rightarrow> (\<^bold>\<exists>y. Independent y \<^bold>\<and> x dependsOn y)\<rfloor>" and P5: "\<lfloor>\<^bold>\<not>(\<^bold>\<exists>x. \<^bold>\<exists>y. Contingent y \<^bold>\<and> Necessary x \<^bold>\<and> y explains x)\<rfloor>" text\<open>\noindent{Without postulating any additional axioms, C10 ("A \emph{necessarily} concrete being exists") can be falsified by Nitpick.}\<close> theorem C10: "\<lfloor>\<^bold>\<exists>x. Godlike x\<rfloor>" nitpick oops \<comment> \<open>Countermodel found\<close> text\<open>\noindent{An explication of the concepts of necessariness, contingency and explanation is provided below by axiomatizing their interrelation to other concepts. We regard necessariness as being \emph{necessarily abstract} or \emph{necessarily concrete}. 
We regard explanation as the inverse relation of dependence, as before.}\<close> axiomatization where Necessary_expl: "\<lfloor>\<^bold>\<forall>x. Necessary x \<^bold>\<leftrightarrow> (\<^bold>\<box>Abstract x \<^bold>\<or> \<^bold>\<box>Concrete x)\<rfloor>" and Contingent_expl: "\<lfloor>\<^bold>\<forall>x. Contingent x \<^bold>\<leftrightarrow> \<^bold>\<not>Necessary x\<rfloor>" and Explanation_expl: "\<lfloor>\<^bold>\<forall>x y. y explains x \<^bold>\<leftrightarrow> x dependsOn y\<rfloor>" text\<open>\noindent{Without any further constraints, C10 becomes falsified by Nitpick.}\<close> theorem C10: "\<lfloor>\<^bold>\<exists>x. Godlike x\<rfloor>" nitpick oops \<comment> \<open>Countermodel found\<close> text\<open>\noindent{We postulate further modal axioms (using the \emph{Sahlqvist correspondence}) and ask Isabelle's \emph{Sledgehammer} for a proof. Sledgehammer is able to find a proof for C10 which only relies on the modal axiom T (\<open>\<box>\<phi> \<^bold>\<rightarrow> \<phi>\<close>).}\<close> axiomatization where T_axiom: "reflexive R" and \<comment> \<open>@{text "\<box>\<phi> \<rightarrow> \<phi>"}\<close> B_axiom: "symmetric R" and \<comment> \<open>@{text "\<phi> \<rightarrow> \<box>\<diamond>\<phi>"}\<close> IV_axiom: "transitive R" \<comment> \<open>@{text "\<box>\<phi> \<rightarrow> \<box>\<box>\<phi>"}\<close> theorem C10: "\<lfloor>\<^bold>\<exists>x. Godlike x\<rfloor>" using Contingent_expl Explanation_expl Necessary_expl P2 P3 P4 P5 T_axiom by metis (*<*) (* We carry out our `sanity checks' as usual.*) lemma True nitpick[satisfy, user_axioms] oops (* model found: axioms are consistent *) lemma "\<lfloor>Necessary x\<rfloor>" nitpick[user_axioms] oops (* axioms do not trivialize argument *) lemma "\<lfloor>\<phi> \<^bold>\<rightarrow> \<^bold>\<box>\<phi>\<rfloor>" nitpick[user_axioms] oops (* counter-model found: modal collapse is not valid *) end (*>*)
using Oceananigans.BoundaryConditions: ContinuousBoundaryFunction function test_boundary_condition(arch, FT, topo, side, field_name, boundary_condition) grid = RegularCartesianGrid(FT, size=(1, 1, 1), extent=(1, π, 42), topology=topo) boundary_condition_kwarg = Dict(side => boundary_condition) field_boundary_conditions = TracerBoundaryConditions(grid; boundary_condition_kwarg...) bcs = NamedTuple{(field_name,)}((field_boundary_conditions,)) model = IncompressibleModel(grid=grid, architecture=arch, float_type=FT, boundary_conditions=bcs) success = try time_step!(model, 1e-16, euler=true) true catch err @warn "test_boundary_condition errored with " * sprint(showerror, err) false end return success end function test_flux_budget(arch, FT, fldname) N, κ, Lz = 16, 1, 0.7 grid = RegularCartesianGrid(FT, size=(N, N, N), extent=(1, 1, Lz)) bottom_flux = FT(0.3) flux_bc = BoundaryCondition(Flux, bottom_flux) if fldname == :u field_bcs = UVelocityBoundaryConditions(grid, bottom=flux_bc) elseif fldname == :v field_bcs = VVelocityBoundaryConditions(grid, bottom=flux_bc) else field_bcs = TracerBoundaryConditions(grid, bottom=flux_bc) end model_bcs = NamedTuple{(fldname,)}((field_bcs,)) closure = IsotropicDiffusivity(FT, ν=κ, κ=κ) model = IncompressibleModel(grid=grid, closure=closure, architecture=arch, tracers=(:T, :S), float_type=FT, buoyancy=nothing, boundary_conditions=model_bcs) field = get_model_field(fldname, model) @. field.data = 0 τκ = Lz^2 / κ # Diffusion time-scale Δt = 1e-6 * τκ # Time step much less than diffusion time-scale Nt = 10 # Number of time steps for n in 1:Nt time_step!(model, Δt, euler= n==1) end # budget: Lz*∂<ϕ>/∂t = -Δflux = -top_flux/Lz (left) + bottom_flux/Lz (right) # therefore <ϕ> = bottom_flux * t / Lz return mean(interior(field)) ≈ bottom_flux * model.clock.time / Lz end function fluxes_with_diffusivity_boundary_conditions_are_correct(arch, FT) Lz = 1 κ₀ = FT(exp(-3)) bz = FT(π) flux = - κ₀ * bz grid = RegularCartesianGrid(FT, size=(16, 16, 16), extent=(1, 1, Lz)) buoyancy_bcs = TracerBoundaryConditions(grid, bottom=BoundaryCondition(Gradient, bz)) κₑ_bcs = DiffusivityBoundaryConditions(grid, bottom=BoundaryCondition(Value, κ₀)) model_bcs = (b=buoyancy_bcs, κₑ=(b=κₑ_bcs,)) model = IncompressibleModel( grid=grid, architecture=arch, float_type=FT, tracers=:b, buoyancy=BuoyancyTracer(), closure=AnisotropicMinimumDissipation(), boundary_conditions=model_bcs ) b₀(x, y, z) = z * bz set!(model, b=b₀) b = model.tracers.b mean_b₀ = mean(interior(b)) τκ = Lz^2 / κ₀ # Diffusion time-scale Δt = 1e-6 * τκ # Time step much less than diffusion time-scale Nt = 10 # Number of time steps for n in 1:Nt time_step!(model, Δt, euler= n==1) end # budget: Lz*∂<ϕ>/∂t = -Δflux = -top_flux/Lz (left) + bottom_flux/Lz (right) # therefore <ϕ> = bottom_flux * t / Lz # # Use an atol of 1e-6 so test passes with Float32 as there's a big cancellation # error due to buoyancy order of magnitude. 
# # Float32: # mean_b₀ = -1.5707965f0 # mean(interior(b)) = -1.5708286f0 # mean(interior(b)) - mean_b₀ = -3.20673f-5 # (flux * model.clock.time) / Lz = -3.141593f-5 # # Float64 # mean_b₀ = -1.5707963267949192 # mean(interior(b)) = -1.57082774272148 # mean(interior(b)) - mean_b₀ = -3.141592656086267e-5 # (flux * model.clock.time) / Lz = -3.141592653589793e-5 return isapprox(mean(interior(b)) - mean_b₀, flux * model.clock.time / Lz, atol=1e-6) end test_boundary_conditions(C, FT, ArrayType) = (integer_bc(C, FT, ArrayType), float_bc(C, FT, ArrayType), irrational_bc(C, FT, ArrayType), array_bc(C, FT, ArrayType), simple_function_bc(C, FT, ArrayType), parameterized_function_bc(C, FT, ArrayType), field_dependent_function_bc(C, FT, ArrayType), parameterized_field_dependent_function_bc(C, FT, ArrayType), discrete_function_bc(C, FT, ArrayType), parameterized_discrete_function_bc(C, FT, ArrayType) ) @testset "Boundary condition integration tests" begin @info "Testing boundary condition integration into IncompressibleModel..." @testset "Boundary condition regularization" begin @info " Testing boundary condition regularization in IncompressibleModel constructor..." FT = Float64 arch = first(archs) # We use Periodic, Bounded, Bounded here because triply Bounded domains don't work on the GPU # yet. grid = RegularCartesianGrid(FT, size=(1, 1, 1), extent=(1, π, 42), topology=(Periodic, Bounded, Bounded)) u_boundary_conditions = UVelocityBoundaryConditions(grid; bottom = simple_function_bc(Value), top = simple_function_bc(Value), north = simple_function_bc(Value), south = simple_function_bc(Value)) v_boundary_conditions = VVelocityBoundaryConditions(grid; bottom = simple_function_bc(Value), top = simple_function_bc(Value), north = simple_function_bc(NormalFlow), south = simple_function_bc(NormalFlow)) w_boundary_conditions = VVelocityBoundaryConditions(grid; bottom = simple_function_bc(NormalFlow), top = simple_function_bc(NormalFlow), north = simple_function_bc(Value), south = simple_function_bc(Value)) T_boundary_conditions = TracerBoundaryConditions(grid; bottom = simple_function_bc(Value), top = simple_function_bc(Value), north = simple_function_bc(Value), south = simple_function_bc(Value)) boundary_conditions = (u=u_boundary_conditions, v=v_boundary_conditions, w=w_boundary_conditions, T=T_boundary_conditions) model = IncompressibleModel(architecture = arch, grid = grid, float_type = FT, boundary_conditions = boundary_conditions) @test location(model.velocities.u.boundary_conditions.bottom.condition) == (Face, Cell, Nothing) @test location(model.velocities.u.boundary_conditions.top.condition) == (Face, Cell, Nothing) @test location(model.velocities.u.boundary_conditions.north.condition) == (Face, Nothing, Cell) @test location(model.velocities.u.boundary_conditions.south.condition) == (Face, Nothing, Cell) @test location(model.velocities.v.boundary_conditions.bottom.condition) == (Cell, Face, Nothing) @test location(model.velocities.v.boundary_conditions.top.condition) == (Cell, Face, Nothing) @test location(model.velocities.v.boundary_conditions.north.condition) == (Cell, Nothing, Cell) @test location(model.velocities.v.boundary_conditions.south.condition) == (Cell, Nothing, Cell) @test location(model.velocities.w.boundary_conditions.bottom.condition) == (Cell, Cell, Nothing) @test location(model.velocities.w.boundary_conditions.top.condition) == (Cell, Cell, Nothing) @test location(model.velocities.w.boundary_conditions.north.condition) == (Cell, Nothing, Face) @test 
location(model.velocities.w.boundary_conditions.south.condition) == (Cell, Nothing, Face) @test location(model.tracers.T.boundary_conditions.bottom.condition) == (Cell, Cell, Nothing) @test location(model.tracers.T.boundary_conditions.top.condition) == (Cell, Cell, Nothing) @test location(model.tracers.T.boundary_conditions.north.condition) == (Cell, Nothing, Cell) @test location(model.tracers.T.boundary_conditions.south.condition) == (Cell, Nothing, Cell) end @testset "Boudnary condition time-stepping works" begin for arch in archs, FT in (Float64,) #float_types @info " Testing that time-stepping with boundary conditions works [$(typeof(arch)), $FT]..." topo = arch isa CPU ? (Bounded, Bounded, Bounded) : (Periodic, Bounded, Bounded) for C in (Gradient, Flux, Value), boundary_condition in test_boundary_conditions(C, FT, array_type(arch)) arch isa CPU && @test test_boundary_condition(arch, FT, topo, :east, :T, boundary_condition) @test test_boundary_condition(arch, FT, topo, :south, :T, boundary_condition) @test test_boundary_condition(arch, FT, topo, :top, :T, boundary_condition) end for boundary_condition in test_boundary_conditions(NormalFlow, FT, array_type(arch)) arch isa CPU && @test test_boundary_condition(arch, FT, topo, :east, :u, boundary_condition) @test test_boundary_condition(arch, FT, topo, :south, :v, boundary_condition) @test test_boundary_condition(arch, FT, topo, :top, :w, boundary_condition) end end end @testset "Budgets with Flux boundary conditions" begin for arch in archs, FT in float_types @info " Testing budgets with Flux boundary conditions on u, v, T [$(typeof(arch)), $FT]..." for field_name in (:u, :v, :T) @test test_flux_budget(arch, FT, field_name) end end end @testset "Custom diffusivity boundary conditions" begin for arch in archs, FT in (Float64,) #float_types @info " Testing flux budgets with diffusivity boundary conditions [$(typeof(arch)), $FT]..." @test fluxes_with_diffusivity_boundary_conditions_are_correct(arch, FT) end end end
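Distilled from the tests above, here is a minimal stand-alone sketch of attaching a bottom flux boundary condition to a tracer and stepping the model; it reuses only constructors that already appear in the test file (older Oceananigans API), and the specific numbers are illustrative:

using Oceananigans

grid  = RegularCartesianGrid(size=(16, 16, 16), extent=(1, 1, 1))
# a positive bottom flux increases the domain-mean tracer (cf. test_flux_budget above)
T_bcs = TracerBoundaryConditions(grid, bottom=BoundaryCondition(Flux, 0.3))
model = IncompressibleModel(grid=grid, architecture=CPU(),
                            boundary_conditions=(T=T_bcs,))
time_step!(model, 1e-6, euler=true)   # boundary fluxes are applied during the step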
#!/usr/bin/Rscript # Bhishan Poudel # Jan 8, 2016 # clear; Rscript rlist.r; rm *~ # ref: http://www.programiz.com/r-programming/list # 5 data types in R : vectors, matrix, list, data frame and a factor ######################################################################################################### # LIST CREATION ######################################################################################################### cat("\nCreating a List") cat("\nList can be created using the list() function. \n") x <- list("a"=2.5, "b"=TRUE, "c"=1:3) x # $a $b $c typeof(x) # [1] "list" length(x) # [1] 3 str(x) # List of 3 $ a: num 2.5 $ b: logi TRUE $ c: int [1:3] 1 2 3 cat("\nIn this example, a, b and c are called tags which makes it easier to reference the components \n") cat("\nWe can create the same list without the tags as follows \n") x <- list(2.5,TRUE,1:3) x # [[1]] [[2]] [[3]] ######################################################################################################### # LIST ELEMENTS ACCESSING ######################################################################################################### #cat("\nAccessing Components in List\n") x <- list("name"="Bhishan", "age"=30, "speaks"="English","Nepalese") x[c(1:2)] # index using integer vector x[-2] # using negative integer to exclude second components x[c(T,F,F)] # index using logical vector #cat("\nUse of [""] operator to access elements\n") cat("\n \n")
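Continuing the indexing examples above: single components are extracted with $ or [[ ]], while single-bracket indexing always returns another (possibly one-element) list.

cat("\nExtracting single components with $ and [[ ]]\n")
x$name        # [1] "Bhishan"
x[["age"]]    # [1] 30
x["age"]      # a list of length 1 that contains 30
x[[4]]        # [1] "Nepalese" (the unnamed fourth component)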
#ifndef AMGIF_H #define AMGIF_H #include <memory> #include <utility> #include <vector> #include <gsl/gsl_math.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_linalg.h> #include "constants.h" #include "operators_buffer.h" using std::pair; using std::shared_ptr; using std::tuple; using std::vector; // Smart pointer to matrix // Shorthand for template types using VecTriplet = tuple<gsl_vector *, gsl_vector *, gsl_vector *>; using OpBuf = OperatorBuffer; // Compute the AM-GIF estimates VecTriplet computeAMGIF( OpBuf& Cbuf, gsl_vector *me, gsl_vector *pe, const gsl_matrix *L, const double alpha, const double beta, const double tau, const double eps ); #endif // AMGIF_H
import Std.Classes.BEq namespace MWE_eq1 example [BEq α] [LawfulBEq α] {x y : α} (h : ¬(x = y)) : (match x == y with | true => 0 | false => 1) = 1 := by have : (x == y) = false := by simp [beq_eq_false_iff_ne, h] simp [this] end MWE_eq1
/*============================================================================= Copyright (c) 2010-2016 Bolero MURAKAMI https://github.com/bolero-MURAKAMI/Sprig Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) =============================================================================*/ #ifndef SPRIG_TYPE_TRAITS_IS_WCHAR_TYPE_HPP #define SPRIG_TYPE_TRAITS_IS_WCHAR_TYPE_HPP #include <sprig/config/config.hpp> #ifdef SPRIG_USING_PRAGMA_ONCE # pragma once #endif // #ifdef SPRIG_USING_PRAGMA_ONCE #include <boost/type_traits/is_same.hpp> #include <boost/type_traits/remove_cv.hpp> namespace sprig { // // is_wchar_type // template<typename T> struct is_wchar_type : public boost::is_same<typename boost::remove_cv<T>::type, wchar_t> {}; } // namespace sprig #endif // #ifndef SPRIG_TYPE_TRAITS_IS_WCHAR_TYPE_HPP
Formal statement is: lemma coeff_minus [simp]: "coeff (- p) n = - coeff p n" Informal statement is: The coefficient of $x^n$ in $-p$ is the negative of the coefficient of $x^n$ in $p$.
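A concrete instance (editorial illustration, not part of the source entry): taking $p = x^2 + 3x - 5$, we have $-p = -x^2 - 3x + 5$, so the coefficient of $x^1$ in $-p$ is $-3 = -\operatorname{coeff}(p, 1)$, and the same holds for every exponent $n$.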
[GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬∃ s, Nonempty (Basis { x // x ∈ s } R S) x : S ⊢ ↑(norm R) x = 1 [PROOFSTEP] rw [norm_apply, LinearMap.det] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬∃ s, Nonempty (Basis { x // x ∈ s } R S) x : S ⊢ ↑(if H : ∃ s, Nonempty (Basis { x // x ∈ s } R S) then detAux (Trunc.mk (Nonempty.some (_ : Nonempty (Basis { x // x ∈ Exists.choose H } R S)))) else 1) (↑(lmul R S) x) = 1 [PROOFSTEP] split_ifs [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬∃ s, Nonempty (Basis { x // x ∈ s } R S) x : S h✝ : ∃ s, Nonempty (Basis { x // x ∈ s } R S) ⊢ ↑(detAux (Trunc.mk (Nonempty.some (_ : Nonempty (Basis { x // x ∈ Exists.choose h✝ } R S))))) (↑(lmul R S) x) = 1 [PROOFSTEP] trivial [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬∃ s, Nonempty (Basis { x // x ∈ s } R S) x : S h✝ : ¬∃ s, Nonempty (Basis { x // x ∈ s } R S) ⊢ ↑1 (↑(lmul R S) x) = 1 [PROOFSTEP] trivial [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬Module.Finite R S x : S ⊢ ↑(norm R) x = 1 [PROOFSTEP] refine norm_eq_one_of_not_exists_basis _ (mt ?_ h) _ [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬Module.Finite R S x : S ⊢ (∃ s, Nonempty (Basis { x // x ∈ s } R S)) → Module.Finite R S [PROOFSTEP] rintro ⟨s, ⟨b⟩⟩ [GOAL] case intro.intro R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w h : ¬Module.Finite R S x : S s : Finset S b : Basis { x // x ∈ s } R S ⊢ Module.Finite R S [PROOFSTEP] exact Module.Finite.of_basis b [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : Fintype ι inst✝ : DecidableEq ι b : Basis ι R S s : S ⊢ ↑(norm R) s = det (↑(leftMulMatrix b) s) [PROOFSTEP] rw [norm_apply, ← LinearMap.det_toMatrix b, ← toMatrix_lmul_eq] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : Fintype ι 
inst✝ : DecidableEq ι b : Basis ι R S s : S ⊢ det (↑(toMatrix b b) (↑(lmul R S) s)) = det (↑(toMatrix b b) (mulLeft R s)) [PROOFSTEP] rfl [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R ⊢ ↑(norm R) (↑(algebraMap R S) x) = x ^ Fintype.card ι [PROOFSTEP] haveI := Classical.decEq ι [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R this : DecidableEq ι ⊢ ↑(norm R) (↑(algebraMap R S) x) = x ^ Fintype.card ι [PROOFSTEP] rw [norm_apply, ← det_toMatrix b, lmul_algebraMap] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R this : DecidableEq ι ⊢ det (↑(toMatrix b b) (↑(lsmul R R ((fun x => S) x)) x)) = x ^ Fintype.card ι [PROOFSTEP] convert @det_diagonal _ _ _ _ _ fun _ : ι => x [GOAL] case h.e'_2.h.e'_6 R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R this : DecidableEq ι ⊢ ↑(toMatrix b b) (↑(lsmul R R ((fun x => S) x)) x) = diagonal fun x_1 => x [PROOFSTEP] ext (i j) [GOAL] case h.e'_2.h.e'_6.a.h R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R this : DecidableEq ι i j : ι ⊢ ↑(toMatrix b b) (↑(lsmul R R ((fun x => S) x)) x) i j = diagonal (fun x_1 => x) i j [PROOFSTEP] rw [toMatrix_lsmul, Matrix.diagonal] [GOAL] case h.e'_3 R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Fintype ι b : Basis ι R S x : R this : DecidableEq ι ⊢ x ^ Fintype.card ι = ∏ i : ι, x [PROOFSTEP] rw [Finset.prod_const, Finset.card_univ] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L✝ : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L✝ inst✝⁴ : Field F inst✝³ : Algebra K L✝ inst✝² : Algebra K F ι : Type w L : Type u_7 inst✝¹ : Ring L inst✝ : Algebra K L x : K ⊢ ↑(norm K) (↑(algebraMap K L) x) = x ^ finrank K L [PROOFSTEP] by_cases H : ∃ s : Finset L, Nonempty (Basis s K L) [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L✝ : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L✝ inst✝⁴ : Field F inst✝³ : Algebra K L✝ inst✝² : Algebra K F ι : Type w L : Type u_7 inst✝¹ : Ring L inst✝ : Algebra K L x : K H : ∃ s, Nonempty (Basis { x // x ∈ s } K L) ⊢ ↑(norm K) (↑(algebraMap K L) x) = x ^ finrank K L [PROOFSTEP] rw 
[norm_algebraMap_of_basis H.choose_spec.some, finrank_eq_card_basis H.choose_spec.some] [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L✝ : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L✝ inst✝⁴ : Field F inst✝³ : Algebra K L✝ inst✝² : Algebra K F ι : Type w L : Type u_7 inst✝¹ : Ring L inst✝ : Algebra K L x : K H : ¬∃ s, Nonempty (Basis { x // x ∈ s } K L) ⊢ ↑(norm K) (↑(algebraMap K L) x) = x ^ finrank K L [PROOFSTEP] rw [norm_eq_one_of_not_exists_basis K H, finrank_eq_zero_of_not_exists_basis, pow_zero] [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L✝ : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L✝ inst✝⁴ : Field F inst✝³ : Algebra K L✝ inst✝² : Algebra K F ι : Type w L : Type u_7 inst✝¹ : Ring L inst✝ : Algebra K L x : K H : ¬∃ s, Nonempty (Basis { x // x ∈ s } K L) ⊢ ¬∃ s, Nonempty (Basis (↑↑s) K L) [PROOFSTEP] rintro ⟨s, ⟨b⟩⟩ [GOAL] case neg.intro.intro R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L✝ : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L✝ inst✝⁴ : Field F inst✝³ : Algebra K L✝ inst✝² : Algebra K F ι : Type w L : Type u_7 inst✝¹ : Ring L inst✝ : Algebra K L x : K H : ¬∃ s, Nonempty (Basis { x // x ∈ s } K L) s : Finset L b : Basis (↑↑s) K L ⊢ False [PROOFSTEP] exact H ⟨s, ⟨b⟩⟩ [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w pb : PowerBasis R S ⊢ ↑(norm R) pb.gen = (-1) ^ pb.dim * coeff (minpoly R pb.gen) 0 [PROOFSTEP] rw [norm_eq_matrix_det pb.basis, det_eq_sign_charpoly_coeff, charpoly_leftMulMatrix, Fintype.card_fin] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Algebra R F pb : PowerBasis R S hf : Splits (algebraMap R F) (minpoly R pb.gen) ⊢ ↑(algebraMap R F) (↑(norm R) pb.gen) = Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) [PROOFSTEP] haveI := Module.nontrivial R F [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Algebra R F pb : PowerBasis R S hf : Splits (algebraMap R F) (minpoly R pb.gen) this : Nontrivial R ⊢ ↑(algebraMap R F) (↑(norm R) pb.gen) = Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) [PROOFSTEP] have := minpoly.monic pb.isIntegral_gen [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Algebra R F pb : PowerBasis R S hf : Splits (algebraMap R F) (minpoly R pb.gen) this✝ : Nontrivial R this : Monic (minpoly R pb.gen) ⊢ ↑(algebraMap R F) (↑(norm R) pb.gen) = Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) [PROOFSTEP] rw [PowerBasis.norm_gen_eq_coeff_zero_minpoly, ← pb.natDegree_minpoly, RingHom.map_mul, ← coeff_map, 
prod_roots_eq_coeff_zero_of_monic_of_split (this.map _) ((splits_id_iff_splits _).2 hf), this.natDegree_map, map_pow, ← mul_assoc, ← mul_pow] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁸ : CommRing R inst✝⁷ : Ring S inst✝⁶ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁵ : Field K inst✝⁴ : Field L inst✝³ : Field F inst✝² : Algebra K L inst✝¹ : Algebra K F ι : Type w inst✝ : Algebra R F pb : PowerBasis R S hf : Splits (algebraMap R F) (minpoly R pb.gen) this✝ : Nontrivial R this : Monic (minpoly R pb.gen) ⊢ (↑(algebraMap R F) (-1) * -1) ^ natDegree (minpoly R pb.gen) * Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) = Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) [PROOFSTEP] simp only [map_neg, _root_.map_one, neg_mul, neg_neg, one_pow, one_mul] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w inst✝³ : Finite ι inst✝² : Nontrivial S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S ⊢ ↑(norm R) 0 = 0 [PROOFSTEP] nontriviality [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w inst✝³ : Finite ι inst✝² : Nontrivial S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S ✝ : Nontrivial ((fun x => R) 0) ⊢ ↑(norm R) 0 = 0 [PROOFSTEP] rw [norm_apply, coe_lmul_eq_mul, map_zero, LinearMap.det_zero' (Module.Free.chooseBasis R S)] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S ⊢ ↑(norm R) x = 0 ↔ x = 0 [PROOFSTEP] constructor [GOAL] case mp R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S ⊢ ↑(norm R) x = 0 → x = 0 case mpr R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S ⊢ x = 0 → ↑(norm R) x = 0 [PROOFSTEP] let b := Module.Free.chooseBasis R S [GOAL] case mp R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S ⊢ ↑(norm R) x = 0 → x = 0 case mpr R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : 
Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S ⊢ x = 0 → ↑(norm R) x = 0 [PROOFSTEP] swap [GOAL] case mpr R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S ⊢ x = 0 → ↑(norm R) x = 0 [PROOFSTEP] rintro rfl [GOAL] case mpr R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S ⊢ ↑(norm R) 0 = 0 [PROOFSTEP] exact norm_zero [GOAL] case mp R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S ⊢ ↑(norm R) x = 0 → x = 0 [PROOFSTEP] letI := Classical.decEq (Module.Free.ChooseBasisIndex R S) [GOAL] case mp R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) ⊢ ↑(norm R) x = 0 → x = 0 [PROOFSTEP] rw [norm_eq_matrix_det b, ← Matrix.exists_mulVec_eq_zero_iff] [GOAL] case mp R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) ⊢ (∃ v x_1, mulVec (↑(leftMulMatrix b) x) v = 0) → x = 0 [PROOFSTEP] rintro ⟨v, v_ne, hv⟩ [GOAL] case mp.intro.intro R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq 
(Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x v_ne : v ≠ 0 hv : mulVec (↑(leftMulMatrix b) x) v = 0 ⊢ x = 0 [PROOFSTEP] rw [← b.equivFun.apply_symm_apply v, b.equivFun_symm_apply, b.equivFun_apply, leftMulMatrix_mulVec_repr] at hv [GOAL] case mp.intro.intro R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x v_ne : v ≠ 0 hv : ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) = 0 ⊢ x = 0 [PROOFSTEP] refine (mul_eq_zero.mp (b.ext_elem fun i => ?_)).resolve_right (show ∑ i, v i • b i ≠ 0 from ?_) [GOAL] case mp.intro.intro.refine_1 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x v_ne : v ≠ 0 hv : ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) = 0 i : Module.Free.ChooseBasisIndex R S ⊢ ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) i = ↑(↑b.repr 0) i [PROOFSTEP] simpa only [LinearEquiv.map_zero, Pi.zero_apply] using congr_fun hv i [GOAL] case mp.intro.intro.refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x v_ne : v ≠ 0 hv : ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) = 0 ⊢ ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i ≠ 0 [PROOFSTEP] contrapose! 
v_ne with sum_eq [GOAL] case mp.intro.intro.refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x hv : ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) = 0 sum_eq : ∑ x : Module.Free.ChooseBasisIndex R S, v x • ↑(Module.Free.chooseBasis R S) x = 0 ⊢ v = 0 [PROOFSTEP] apply b.equivFun.symm.injective [GOAL] case mp.intro.intro.refine_2.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w inst✝⁴ : Finite ι inst✝³ : IsDomain R inst✝² : IsDomain S inst✝¹ : Module.Free R S inst✝ : Module.Finite R S x : S b : Basis (Module.Free.ChooseBasisIndex R S) R S := Module.Free.chooseBasis R S this : DecidableEq (Module.Free.ChooseBasisIndex R S) := Classical.decEq (Module.Free.ChooseBasisIndex R S) v : Module.Free.ChooseBasisIndex R S → (fun x => R) x hv : ↑(↑b.repr (x * ∑ i : Module.Free.ChooseBasisIndex R S, v i • ↑b i)) = 0 sum_eq : ∑ x : Module.Free.ChooseBasisIndex R S, v x • ↑(Module.Free.chooseBasis R S) x = 0 ⊢ ↑(LinearEquiv.symm (Basis.equivFun b)) v = ↑(LinearEquiv.symm (Basis.equivFun b)) 0 [PROOFSTEP] rw [b.equivFun_symm_apply, sum_eq, LinearEquiv.map_zero] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w inst✝² : Finite ι inst✝¹ : IsDomain R inst✝ : IsDomain S b : Basis ι R S x : S ⊢ ↑(norm R) x = 0 ↔ x = 0 [PROOFSTEP] haveI : Module.Free R S := Module.Free.of_basis b [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w inst✝² : Finite ι inst✝¹ : IsDomain R inst✝ : IsDomain S b : Basis ι R S x : S this : Module.Free R S ⊢ ↑(norm R) x = 0 ↔ x = 0 [PROOFSTEP] haveI : Module.Finite R S := Module.Finite.of_basis b [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w inst✝² : Finite ι inst✝¹ : IsDomain R inst✝ : IsDomain S b : Basis ι R S x : S this✝ : Module.Free R S this : Module.Finite R S ⊢ ↑(norm R) x = 0 ↔ x = 0 [PROOFSTEP] exact norm_eq_zero_iff [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L ⊢ ↑(norm K) x = ↑(norm K) (AdjoinSimple.gen K x) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L [PROOFSTEP] letI := 
isSeparable_tower_top_of_isSeparable K K⟮x⟯ L [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L ⊢ ↑(norm K) x = ↑(norm K) (AdjoinSimple.gen K x) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L [PROOFSTEP] let pbL := Field.powerBasisOfFiniteOfSeparable K⟮x⟯ L [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L pbL : PowerBasis { x_1 // x_1 ∈ K⟮x⟯ } L := Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L ⊢ ↑(norm K) x = ↑(norm K) (AdjoinSimple.gen K x) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L [PROOFSTEP] let pbx := IntermediateField.adjoin.powerBasis (IsSeparable.isIntegral K x) [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L pbL : PowerBasis { x_1 // x_1 ∈ K⟮x⟯ } L := Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L pbx : PowerBasis K { x_1 // x_1 ∈ K⟮x⟯ } := adjoin.powerBasis (_ : IsIntegral K x) ⊢ ↑(norm K) x = ↑(norm K) (AdjoinSimple.gen K x) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L [PROOFSTEP] rw [← AdjoinSimple.algebraMap_gen K x, norm_eq_matrix_det (pbx.basis.smul pbL.basis) _, smul_leftMulMatrix_algebraMap, det_blockDiagonal, norm_eq_matrix_det pbx.basis] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L pbL : PowerBasis { x_1 // x_1 ∈ K⟮x⟯ } L := Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L pbx : PowerBasis K { x_1 // x_1 ∈ K⟮x⟯ } := adjoin.powerBasis (_ : IsIntegral K x) ⊢ ∏ k : Fin pbL.dim, det (↑(leftMulMatrix pbx.basis) (AdjoinSimple.gen K x)) = det (↑(leftMulMatrix pbx.basis) (AdjoinSimple.gen K (↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x)))) ^ finrank { x_1 // x_1 ∈ K⟮↑(algebraMap { x_2 // x_2 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x)⟯ } L [PROOFSTEP] simp only [Finset.card_fin, Finset.prod_const] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L pbL : PowerBasis { 
x_1 // x_1 ∈ K⟮x⟯ } L := Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L pbx : PowerBasis K { x_1 // x_1 ∈ K⟮x⟯ } := adjoin.powerBasis (_ : IsIntegral K x) ⊢ det (↑(leftMulMatrix (adjoin.powerBasis (_ : IsIntegral K x)).basis) (AdjoinSimple.gen K x)) ^ (Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L).dim = det (↑(leftMulMatrix (adjoin.powerBasis (_ : IsIntegral K x)).basis) (AdjoinSimple.gen K (↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x)))) ^ finrank { x_1 // x_1 ∈ K⟮↑(algebraMap { x_2 // x_2 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x)⟯ } L [PROOFSTEP] congr [GOAL] case e_a R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁹ : CommRing R inst✝⁸ : Ring S inst✝⁷ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁶ : Field K inst✝⁵ : Field L inst✝⁴ : Field F inst✝³ : Algebra K L inst✝² : Algebra K F ι : Type w inst✝¹ : FiniteDimensional K L inst✝ : IsSeparable K L x : L this : IsSeparable { x_1 // x_1 ∈ K⟮x⟯ } L := isSeparable_tower_top_of_isSeparable K { x_1 // x_1 ∈ K⟮x⟯ } L pbL : PowerBasis { x_1 // x_1 ∈ K⟮x⟯ } L := Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L pbx : PowerBasis K { x_1 // x_1 ∈ K⟮x⟯ } := adjoin.powerBasis (_ : IsIntegral K x) ⊢ (Field.powerBasisOfFiniteOfSeparable { x_1 // x_1 ∈ K⟮x⟯ } L).dim = finrank { x_1 // x_1 ∈ K⟮↑(algebraMap { x_2 // x_2 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x)⟯ } L [PROOFSTEP] rw [← PowerBasis.finrank, AdjoinSimple.algebraMap_gen K x] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hx : ¬IsIntegral K x ⊢ ↑(norm K) (AdjoinSimple.gen K x) = 1 [PROOFSTEP] rw [norm_eq_one_of_not_exists_basis] [GOAL] case h R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hx : ¬IsIntegral K x ⊢ ¬∃ s, Nonempty (Basis { x_1 // x_1 ∈ s } K { x_1 // x_1 ∈ K⟮x⟯ }) [PROOFSTEP] contrapose! 
hx [GOAL] case h R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hx : ∃ s, Nonempty (Basis { x_1 // x_1 ∈ s } K { x_1 // x_1 ∈ K⟮x⟯ }) ⊢ IsIntegral K x [PROOFSTEP] obtain ⟨s, ⟨b⟩⟩ := hx [GOAL] case h.intro.intro R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L s : Finset { x_1 // x_1 ∈ K⟮x⟯ } b : Basis { x_1 // x_1 ∈ s } K { x_1 // x_1 ∈ K⟮x⟯ } ⊢ IsIntegral K x [PROOFSTEP] refine isIntegral_of_mem_of_FG K⟮x⟯.toSubalgebra ?_ x ?_ [GOAL] case h.intro.intro.refine_1 R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L s : Finset { x_1 // x_1 ∈ K⟮x⟯ } b : Basis { x_1 // x_1 ∈ s } K { x_1 // x_1 ∈ K⟮x⟯ } ⊢ Submodule.FG (↑Subalgebra.toSubmodule K⟮x⟯.toSubalgebra) [PROOFSTEP] exact (Submodule.fg_iff_finiteDimensional _).mpr (of_fintype_basis b) [GOAL] case h.intro.intro.refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L s : Finset { x_1 // x_1 ∈ K⟮x⟯ } b : Basis { x_1 // x_1 ∈ s } K { x_1 // x_1 ∈ K⟮x⟯ } ⊢ x ∈ K⟮x⟯.toSubalgebra [PROOFSTEP] exact IntermediateField.subset_adjoin K _ (Set.mem_singleton x) [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] have injKxL := (algebraMap K⟮x⟯ L).injective [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] by_cases hx : IsIntegral K x [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι 
: Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : ¬IsIntegral K x ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] swap [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : ¬IsIntegral K x ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] simp [minpoly.eq_zero hx, IntermediateField.AdjoinSimple.norm_gen_eq_one hx] [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] have hx' : IsIntegral K (AdjoinSimple.gen K x) := by rwa [← isIntegral_algebraMap_iff injKxL, AdjoinSimple.algebraMap_gen] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x ⊢ IsIntegral K (AdjoinSimple.gen K x) [PROOFSTEP] rwa [← isIntegral_algebraMap_iff injKxL, AdjoinSimple.algebraMap_gen] [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ ↑(algebraMap K F) (↑(norm K) (AdjoinSimple.gen K x)) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] rw [← adjoin.powerBasis_gen hx, PowerBasis.norm_gen_eq_prod_roots] [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K (adjoin.powerBasis hx).gen))) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) [PROOFSTEP] rw [adjoin.powerBasis_gen hx, minpoly.eq_of_algebraMap_eq injKxL hx'] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L 
inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Splits (algebraMap K F) (minpoly K (adjoin.powerBasis hx).gen) [PROOFSTEP] rw [adjoin.powerBasis_gen hx, minpoly.eq_of_algebraMap_eq injKxL hx'] [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ x = ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x) [PROOFSTEP] try simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ x = ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x) [PROOFSTEP] simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Splits (algebraMap K F) (minpoly K ?m.762940) [PROOFSTEP] try simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Splits (algebraMap K F) (minpoly K ?m.762940) [PROOFSTEP] simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ ?m.762940 = ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x) [PROOFSTEP] try simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : 
IsIntegral K (AdjoinSimple.gen K x) ⊢ ?m.762940 = ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) (AdjoinSimple.gen K x) [PROOFSTEP] simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ L [PROOFSTEP] try simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ L [PROOFSTEP] simp only [AdjoinSimple.algebraMap_gen _ _] [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Splits (algebraMap K F) (minpoly K ?m.762940) case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ ?m.762940 = x R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ L [PROOFSTEP] try exact hf [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ Splits (algebraMap K F) (minpoly K ?m.762940) case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ ?m.762940 = x R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : 
Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ L [PROOFSTEP] exact hf [GOAL] case pos.hf R : Type u_1 S : Type u_2 T : Type u_3 inst✝⁷ : CommRing R inst✝⁶ : Ring S inst✝⁵ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁴ : Field K inst✝³ : Field L inst✝² : Field F inst✝¹ : Algebra K L inst✝ : Algebra K F ι : Type w x : L hf : Splits (algebraMap K F) (minpoly K x) injKxL : Function.Injective ↑(algebraMap { x_1 // x_1 ∈ K⟮x⟯ } L) hx : IsIntegral K x hx' : IsIntegral K (AdjoinSimple.gen K x) ⊢ x = x [PROOFSTEP] rfl [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) ⊢ ↑(algebraMap R F) (↑(norm R) pb.gen) = ∏ σ : S →ₐ[R] F, ↑σ pb.gen [PROOFSTEP] letI := Classical.decEq F [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ ↑(algebraMap R F) (↑(norm R) pb.gen) = ∏ σ : S →ₐ[R] F, ↑σ pb.gen [PROOFSTEP] rw [PowerBasis.norm_gen_eq_prod_roots pb hE] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) = ∏ σ : S →ₐ[R] F, ↑σ pb.gen [PROOFSTEP] rw [@Fintype.prod_equiv (S →ₐ[R] F) _ _ (PowerBasis.AlgHom.fintype pb) _ _ pb.liftEquiv' (fun σ => σ pb.gen) (fun x => x) ?_] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ Multiset.prod (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) = ∏ x : { y // y ∈ roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen)) }, ↑x R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : 
Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ ∀ (x : S →ₐ[R] F), (fun σ => ↑σ pb.gen) x = (fun x => ↑x) (↑(PowerBasis.liftEquiv' pb) x) [PROOFSTEP] rw [Finset.prod_mem_multiset, Finset.prod_eq_multiset_prod, Multiset.toFinset_val, Multiset.dedup_eq_self.mpr, Multiset.map_id] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ Multiset.Nodup (roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen))) [PROOFSTEP] exact nodup_roots hfx.map [GOAL] case hfg R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ ∀ (x : { x // x ∈ roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen)) }), ↑x = _root_.id ↑x [PROOFSTEP] intro x [GOAL] case hfg R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F x : { x // x ∈ roots (Polynomial.map (algebraMap R F) (minpoly R pb.gen)) } ⊢ ↑x = _root_.id ↑x [PROOFSTEP] rfl [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F ⊢ ∀ (x : S →ₐ[R] F), (fun σ => ↑σ pb.gen) x = (fun x => ↑x) (↑(PowerBasis.liftEquiv' pb) x) [PROOFSTEP] intro σ [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁰ : CommRing R inst✝⁹ : Ring S inst✝⁸ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁷ : Field K inst✝⁶ : Field L inst✝⁵ : Field F inst✝⁴ : Algebra K L inst✝³ : Algebra K F ι : Type w E : Type u_7 inst✝² : Field E inst✝¹ : Algebra K E inst✝ : Algebra R F pb : PowerBasis R S hE : Splits (algebraMap R F) (minpoly R pb.gen) hfx : Separable (minpoly R pb.gen) this : DecidableEq F := Classical.decEq F σ : S →ₐ[R] F ⊢ (fun σ => ↑σ pb.gen) σ = (fun x => ↑x) (↑(PowerBasis.liftEquiv' pb) σ) [PROOFSTEP] simp only [PowerBasis.liftEquiv'_apply_coe] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 
inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hF : Splits (algebraMap K F) (minpoly K x) ⊢ ↑(algebraMap K F) (↑(norm K) x) = Multiset.prod (roots (Polynomial.map (algebraMap K F) (minpoly K x))) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L [PROOFSTEP] rw [norm_eq_norm_adjoin K x, map_pow, IntermediateField.AdjoinSimple.norm_gen_eq_prod_roots _ hF] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L ⊢ ∏ σ : F →ₐ[K] E, ↑σ (↑(algebraMap L F) pb.gen) = (∏ σ : L →ₐ[K] E, ↑σ pb.gen) ^ finrank L F [PROOFSTEP] haveI : FiniteDimensional L F := FiniteDimensional.right K L F [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this : FiniteDimensional L F ⊢ ∏ σ : F →ₐ[K] E, ↑σ (↑(algebraMap L F) pb.gen) = (∏ σ : L →ₐ[K] E, ↑σ pb.gen) ^ finrank L F [PROOFSTEP] haveI : IsSeparable L F := isSeparable_tower_top_of_isSeparable K L F [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝ : FiniteDimensional L F this : IsSeparable L F ⊢ ∏ σ : F →ₐ[K] E, ↑σ (↑(algebraMap L F) pb.gen) = (∏ σ : L →ₐ[K] E, ↑σ pb.gen) ^ finrank L F [PROOFSTEP] letI : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝¹ : FiniteDimensional L F this✝ : IsSeparable L F this : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb ⊢ ∏ σ : F →ₐ[K] E, ↑σ (↑(algebraMap L F) pb.gen) = (∏ σ : L →ₐ[K] E, ↑σ pb.gen) ^ finrank L F [PROOFSTEP] letI : ∀ f : L →ₐ[K] E, Fintype (@AlgHom L F E _ _ _ _ f.toRingHom.toAlgebra) := ?_ [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L 
this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 ⊢ ∏ σ : F →ₐ[K] E, ↑σ (↑(algebraMap L F) pb.gen) = (∏ σ : L →ₐ[K] E, ↑σ pb.gen) ^ finrank L F case refine_1 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝¹ : FiniteDimensional L F this✝ : IsSeparable L F this : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb ⊢ (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) [PROOFSTEP] rw [Fintype.prod_equiv algHomEquivSigma (fun σ : F →ₐ[K] E => _) fun σ => σ.1 pb.gen, ← Finset.univ_sigma_univ, Finset.prod_sigma, ← Finset.prod_pow] [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 ⊢ ∏ a : L →ₐ[K] E, ∏ s : F →ₐ[L] E, ↑{ fst := a, snd := s }.fst pb.gen = ∏ x : L →ₐ[K] E, ↑x pb.gen ^ finrank L F case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 ⊢ ∀ (x : F →ₐ[K] E), ↑x (↑(algebraMap L F) pb.gen) = ↑(↑algHomEquivSigma x).fst pb.gen case refine_1 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝¹ : FiniteDimensional L F this✝ : IsSeparable L F this : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb ⊢ (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) [PROOFSTEP] refine Finset.prod_congr rfl fun σ _ => ?_ [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : 
IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 σ : L →ₐ[K] E x✝ : σ ∈ Finset.univ ⊢ ∏ s : F →ₐ[L] E, ↑{ fst := σ, snd := s }.fst pb.gen = ↑σ pb.gen ^ finrank L F [PROOFSTEP] letI : Algebra L E := σ.toRingHom.toAlgebra [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝³ : FiniteDimensional L F this✝² : IsSeparable L F this✝¹ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this✝ : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 σ : L →ₐ[K] E x✝ : σ ∈ Finset.univ this : Algebra L E := RingHom.toAlgebra ↑σ ⊢ ∏ s : F →ₐ[L] E, ↑{ fst := σ, snd := s }.fst pb.gen = ↑σ pb.gen ^ finrank L F [PROOFSTEP] simp_rw [Finset.prod_const] [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝³ : FiniteDimensional L F this✝² : IsSeparable L F this✝¹ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this✝ : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 σ : L →ₐ[K] E x✝ : σ ∈ Finset.univ this : Algebra L E := RingHom.toAlgebra ↑σ ⊢ ↑σ pb.gen ^ Finset.card Finset.univ = ↑σ pb.gen ^ finrank L F [PROOFSTEP] congr [GOAL] case refine_2.e_a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝³ : FiniteDimensional L F this✝² : IsSeparable L F this✝¹ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this✝ : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := ?refine_1 σ : L →ₐ[K] E x✝ : σ ∈ Finset.univ this : Algebra L E := RingHom.toAlgebra ↑σ ⊢ Finset.card Finset.univ = finrank L F [PROOFSTEP] exact AlgHom.card L F E [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := fun σ => minpoly.AlgHom.fintype L F E ⊢ ∀ (x : F →ₐ[K] 
E), ↑x (↑(algebraMap L F) pb.gen) = ↑(↑algHomEquivSigma x).fst pb.gen [PROOFSTEP] intro σ [GOAL] case refine_2 R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra L F inst✝³ : IsScalarTower K L F inst✝² : IsAlgClosed E inst✝¹ : IsSeparable K F inst✝ : FiniteDimensional K F pb : PowerBasis K L this✝² : FiniteDimensional L F this✝¹ : IsSeparable L F this✝ : Fintype (L →ₐ[K] E) := PowerBasis.AlgHom.fintype pb this : (f : L →ₐ[K] E) → Fintype (F →ₐ[L] E) := fun σ => minpoly.AlgHom.fintype L F E σ : F →ₐ[K] E ⊢ ↑σ (↑(algebraMap L F) pb.gen) = ↑(↑algHomEquivSigma σ).fst pb.gen [PROOFSTEP] simp only [algHomEquivSigma, Equiv.coe_fn_mk, AlgHom.restrictDomain, AlgHom.comp_apply, IsScalarTower.coe_toAlgHom'] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : FiniteDimensional K L inst✝¹ : IsSeparable K L inst✝ : IsAlgClosed E x : L ⊢ ↑(algebraMap K E) (↑(norm K) x) = ∏ σ : L →ₐ[K] E, ↑σ x [PROOFSTEP] have hx := IsSeparable.isIntegral K x [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : FiniteDimensional K L inst✝¹ : IsSeparable K L inst✝ : IsAlgClosed E x : L hx : IsIntegral K x ⊢ ↑(algebraMap K E) (↑(norm K) x) = ∏ σ : L →ₐ[K] E, ↑σ x [PROOFSTEP] rw [norm_eq_norm_adjoin K x, RingHom.map_pow, ← adjoin.powerBasis_gen hx, norm_eq_prod_embeddings_gen E (adjoin.powerBasis hx) (IsAlgClosed.splits_codomain _)] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : FiniteDimensional K L inst✝¹ : IsSeparable K L inst✝ : IsAlgClosed E x : L hx : IsIntegral K x ⊢ (∏ σ : { x_1 // x_1 ∈ K⟮x⟯ } →ₐ[K] E, ↑σ (adjoin.powerBasis hx).gen) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L = ∏ σ : L →ₐ[K] E, ↑σ x [PROOFSTEP] exact (prod_embeddings_eq_finrank_pow L (L := K⟮x⟯) E (adjoin.powerBasis hx)).symm [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : FiniteDimensional K L inst✝¹ : IsSeparable K L inst✝ : IsAlgClosed E x : L hx : IsIntegral K x ⊢ Separable (minpoly K (adjoin.powerBasis hx).gen) [PROOFSTEP] haveI := isSeparable_tower_bot_of_isSeparable K K⟮x⟯ L [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : 
FiniteDimensional K L inst✝¹ : IsSeparable K L inst✝ : IsAlgClosed E x : L hx : IsIntegral K x this : IsSeparable K { x_1 // x_1 ∈ K⟮x⟯ } ⊢ Separable (minpoly K (adjoin.powerBasis hx).gen) [PROOFSTEP] exact IsSeparable.separable K _ [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(algebraMap K L) (↑(norm K) x) = ∏ σ : L ≃ₐ[K] L, ↑σ x [PROOFSTEP] apply NoZeroSMulDivisors.algebraMap_injective L (AlgebraicClosure L) [GOAL] case a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(algebraMap L (AlgebraicClosure L)) (↑(algebraMap K L) (↑(norm K) x)) = ↑(algebraMap L (AlgebraicClosure L)) (∏ σ : L ≃ₐ[K] L, ↑σ x) [PROOFSTEP] rw [map_prod (algebraMap L (AlgebraicClosure L))] [GOAL] case a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(algebraMap L (AlgebraicClosure L)) (↑(algebraMap K L) (↑(norm K) x)) = ∏ x_1 : L ≃ₐ[K] L, ↑(algebraMap L (AlgebraicClosure L)) (↑x_1 x) [PROOFSTEP] rw [← Fintype.prod_equiv (Normal.algHomEquivAut K (AlgebraicClosure L) L)] [GOAL] case a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(algebraMap L (AlgebraicClosure L)) (↑(algebraMap K L) (↑(norm K) x)) = ∏ x : L →ₐ[K] AlgebraicClosure L, ?a.f✝ x [PROOFSTEP] rw [← norm_eq_prod_embeddings] [GOAL] case a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(algebraMap L (AlgebraicClosure L)) (↑(algebraMap K L) (↑(norm K) x)) = ↑(algebraMap K (AlgebraicClosure L)) (↑(norm K) ?a.x✝) case a.x R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ L [PROOFSTEP] simp only [algebraMap_eq_smul_one, smul_one_smul] [GOAL] case a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ 
: Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ↑(norm K) x • 1 = ↑(norm K) ?a.x✝ • 1 case a.x R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ L [PROOFSTEP] rfl [GOAL] case a.h R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L ⊢ ∀ (x_1 : L →ₐ[K] AlgebraicClosure L), ↑x_1 x = ↑(algebraMap L (AlgebraicClosure L)) (↑(↑(Normal.algHomEquivAut K (AlgebraicClosure L) L) x_1) x) [PROOFSTEP] intro σ [GOAL] case a.h R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹¹ : CommRing R inst✝¹⁰ : Ring S inst✝⁹ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁸ : Field K inst✝⁷ : Field L inst✝⁶ : Field F inst✝⁵ : Algebra K L inst✝⁴ : Algebra K F ι : Type w E : Type u_7 inst✝³ : Field E inst✝² : Algebra K E inst✝¹ : FiniteDimensional K L inst✝ : IsGalois K L x : L σ : L →ₐ[K] AlgebraicClosure L ⊢ ↑σ x = ↑(algebraMap L (AlgebraicClosure L)) (↑(↑(Normal.algHomEquivAut K (AlgebraicClosure L) L) σ) x) [PROOFSTEP] simp only [Normal.algHomEquivAut, AlgHom.restrictNormal', Equiv.coe_fn_mk, AlgEquiv.coe_ofBijective, AlgHom.restrictNormal_commutes, id.map_eq_id, RingHom.id_apply] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x ⊢ IsIntegral R (↑(norm K) x) [PROOFSTEP] have hx' : IsIntegral K x := isIntegral_of_isScalarTower hx [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x ⊢ IsIntegral R (↑(norm K) x) [PROOFSTEP] rw [← isIntegral_algebraMap_iff (algebraMap K (AlgebraicClosure L)).injective, norm_eq_prod_roots] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x ⊢ IsIntegral R (Multiset.prod (roots (Polynomial.map (algebraMap K (AlgebraicClosure L)) (minpoly K x))) ^ finrank { x_1 // x_1 ∈ K⟮x⟯ } L) 
[PROOFSTEP] refine' (IsIntegral.multiset_prod fun y hy => _).pow _ [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x y : (fun x => AlgebraicClosure L) (↑(norm K) x) hy : y ∈ roots (Polynomial.map (algebraMap K (AlgebraicClosure L)) (minpoly K x)) ⊢ IsIntegral R y [PROOFSTEP] rw [mem_roots_map (minpoly.ne_zero hx')] at hy [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x y : (fun x => AlgebraicClosure L) (↑(norm K) x) hy : eval₂ (algebraMap K (AlgebraicClosure L)) y (minpoly K x) = 0 ⊢ IsIntegral R y [PROOFSTEP] use minpoly R x, minpoly.monic hx [GOAL] case right R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x y : (fun x => AlgebraicClosure L) (↑(norm K) x) hy : eval₂ (algebraMap K (AlgebraicClosure L)) y (minpoly K x) = 0 ⊢ eval₂ (algebraMap R ((fun x => AlgebraicClosure L) (↑(norm K) x))) y (minpoly R x) = 0 [PROOFSTEP] rw [← aeval_def] at hy ⊢ [GOAL] case right R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x y : (fun x => AlgebraicClosure L) (↑(norm K) x) hy : ↑(aeval y) (minpoly K x) = 0 ⊢ ↑(aeval y) (minpoly R x) = 0 [PROOFSTEP] exact minpoly.aeval_of_isScalarTower R x y hy [GOAL] case hF R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹⁴ : CommRing R inst✝¹³ : Ring S inst✝¹² : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝¹¹ : Field K inst✝¹⁰ : Field L inst✝⁹ : Field F inst✝⁸ : Algebra K L inst✝⁷ : Algebra K F ι : Type w E : Type u_7 inst✝⁶ : Field E inst✝⁵ : Algebra K E inst✝⁴ : Algebra R L inst✝³ : Algebra R K inst✝² : IsScalarTower R K L inst✝¹ : IsSeparable K L inst✝ : FiniteDimensional K L x : L hx : IsIntegral R x hx' : IsIntegral K x ⊢ Splits (algebraMap K (AlgebraicClosure L)) (minpoly K x) [PROOFSTEP] apply IsAlgClosed.splits_codomain [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L 
inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F ⊢ ↑(norm K) (↑(norm L) x) = ↑(norm K) x [PROOFSTEP] by_cases hKF : FiniteDimensional K F [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F ⊢ ↑(norm K) (↑(norm L) x) = ↑(norm K) x [PROOFSTEP] let A := AlgebraicClosure K [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K ⊢ ↑(norm K) (↑(norm L) x) = ↑(norm K) x [PROOFSTEP] apply (algebraMap K A).injective [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] haveI : FiniteDimensional L F := FiniteDimensional.right K L F [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this : FiniteDimensional L F ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] haveI : FiniteDimensional K L := FiniteDimensional.left K L F [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝ : FiniteDimensional L F this : FiniteDimensional K L ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] haveI : IsSeparable K L := isSeparable_tower_bot_of_isSeparable K L F [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K 
E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝¹ : FiniteDimensional L F this✝ : FiniteDimensional K L this : IsSeparable K L ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] haveI : IsSeparable L F := isSeparable_tower_top_of_isSeparable K L F [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝² : FiniteDimensional L F this✝¹ : FiniteDimensional K L this✝ : IsSeparable K L this : IsSeparable L F ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] letI : ∀ σ : L →ₐ[K] A, haveI := σ.toRingHom.toAlgebra Fintype (F →ₐ[L] A) := fun _ => inferInstance [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝³ : FiniteDimensional L F this✝² : FiniteDimensional K L this✝¹ : IsSeparable K L this✝ : IsSeparable L F this : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ↑(algebraMap K A) (↑(norm K) x) [PROOFSTEP] rw [norm_eq_prod_embeddings K A (_ : F), Fintype.prod_equiv algHomEquivSigma (fun σ : F →ₐ[K] A => σ x) (fun π : Σ f : L →ₐ[K] A, _ => (π.2 : F → A) x) fun _ => rfl] [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝³ : FiniteDimensional L F this✝² : FiniteDimensional K L this✝¹ : IsSeparable K L this✝ : IsSeparable L F this : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ∏ x_1 : (f : L →ₐ[K] A) × (F →ₐ[L] A), ↑x_1.snd x [PROOFSTEP] suffices ∀ σ : L →ₐ[K] A, haveI := σ.toRingHom.toAlgebra ∏ π : F →ₐ[L] A, π x = σ (norm L x) by simp_rw [← Finset.univ_sigma_univ, Finset.prod_sigma, this, norm_eq_prod_embeddings] [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝⁴ : FiniteDimensional L F this✝³ : FiniteDimensional K L this✝² : IsSeparable K L this✝¹ : IsSeparable L F this✝ : (σ : L →ₐ[K] A) → 
Fintype (F →ₐ[L] A) := fun x => inferInstance this : ∀ (σ : L →ₐ[K] A), ∏ π : F →ₐ[L] A, ↑π x = ↑σ (↑(norm L) x) ⊢ ↑(algebraMap K A) (↑(norm K) (↑(norm L) x)) = ∏ x_1 : (f : L →ₐ[K] A) × (F →ₐ[L] A), ↑x_1.snd x [PROOFSTEP] simp_rw [← Finset.univ_sigma_univ, Finset.prod_sigma, this, norm_eq_prod_embeddings] [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝³ : FiniteDimensional L F this✝² : FiniteDimensional K L this✝¹ : IsSeparable K L this✝ : IsSeparable L F this : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance ⊢ ∀ (σ : L →ₐ[K] A), ∏ π : F →ₐ[L] A, ↑π x = ↑σ (↑(norm L) x) [PROOFSTEP] intro σ [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝³ : FiniteDimensional L F this✝² : FiniteDimensional K L this✝¹ : IsSeparable K L this✝ : IsSeparable L F this : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance σ : L →ₐ[K] A ⊢ ∏ π : F →ₐ[L] A, ↑π x = ↑σ (↑(norm L) x) [PROOFSTEP] letI : Algebra L A := σ.toRingHom.toAlgebra [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝⁴ : FiniteDimensional L F this✝³ : FiniteDimensional K L this✝² : IsSeparable K L this✝¹ : IsSeparable L F this✝ : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance σ : L →ₐ[K] A this : Algebra L A := RingHom.toAlgebra ↑σ ⊢ ∏ π : F →ₐ[L] A, ↑π x = ↑σ (↑(norm L) x) [PROOFSTEP] rw [← norm_eq_prod_embeddings L A (_ : F)] [GOAL] case pos.a R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : FiniteDimensional K F A : Type u_4 := AlgebraicClosure K this✝⁴ : FiniteDimensional L F this✝³ : FiniteDimensional K L this✝² : IsSeparable K L this✝¹ : IsSeparable L F this✝ : (σ : L →ₐ[K] A) → Fintype (F →ₐ[L] A) := fun x => inferInstance σ : L →ₐ[K] A this : Algebra L A := RingHom.toAlgebra ↑σ ⊢ ↑(algebraMap L A) (↑(norm L) x) = ↑σ (↑(norm L) x) [PROOFSTEP] rfl [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra 
K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F ⊢ ↑(norm K) (↑(norm L) x) = ↑(norm K) x [PROOFSTEP] rw [norm_eq_one_of_not_module_finite hKF] [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F ⊢ ↑(norm K) (↑(norm L) x) = 1 [PROOFSTEP] by_cases hKL : FiniteDimensional K L [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F hKL : FiniteDimensional K L ⊢ ↑(norm K) (↑(norm L) x) = 1 [PROOFSTEP] have hLF : ¬FiniteDimensional L F := by refine' (mt _) hKF intro hKF exact FiniteDimensional.trans K L F [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F hKL : FiniteDimensional K L ⊢ ¬FiniteDimensional L F [PROOFSTEP] refine' (mt _) hKF [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F hKL : FiniteDimensional K L ⊢ FiniteDimensional L F → FiniteDimensional K F [PROOFSTEP] intro hKF [GOAL] R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF✝ : ¬FiniteDimensional K F hKL : FiniteDimensional K L hKF : FiniteDimensional L F ⊢ FiniteDimensional K F [PROOFSTEP] exact FiniteDimensional.trans K L F [GOAL] case pos R : Type u_1 S : Type u_2 T : Type u_3 inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F hKL : FiniteDimensional K L hLF : ¬FiniteDimensional L F ⊢ ↑(norm K) (↑(norm L) x) = 1 [PROOFSTEP] rw [norm_eq_one_of_not_module_finite hLF, _root_.map_one] [GOAL] case neg R : Type u_1 S : Type u_2 T : Type u_3 
inst✝¹² : CommRing R inst✝¹¹ : Ring S inst✝¹⁰ : Algebra R S K : Type u_4 L : Type u_5 F : Type u_6 inst✝⁹ : Field K inst✝⁸ : Field L inst✝⁷ : Field F inst✝⁶ : Algebra K L inst✝⁵ : Algebra K F ι : Type w E : Type u_7 inst✝⁴ : Field E inst✝³ : Algebra K E inst✝² : Algebra L F inst✝¹ : IsScalarTower K L F inst✝ : IsSeparable K F x : F hKF : ¬FiniteDimensional K F hKL : ¬FiniteDimensional K L ⊢ ↑(norm K) (↑(norm L) x) = 1 [PROOFSTEP] rw [norm_eq_one_of_not_module_finite hKL]
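The Lean goals above are all instances of two classical facts about the field norm; stated informally in standard notation (an editorial summary for orientation, not text taken from Mathlib): for a finite separable extension L/K and an algebraically closed field E over K, the norm becomes a product over all K-embeddings of L into E, and the norm is transitive in a tower K ⊆ L ⊆ F:

\[
  \operatorname{N}_{L/K}(x) \;=\; \prod_{\sigma \,:\, L \hookrightarrow_K E} \sigma(x),
  \qquad
  \operatorname{N}_{L/K}\bigl(\operatorname{N}_{F/L}(x)\bigr) \;=\; \operatorname{N}_{F/K}(x).
\]

The final `norm_norm` goals also handle the degenerate case: when the extension is not finite-dimensional the norm is defined to be 1 (via `norm_eq_one_of_not_module_finite`), which is why the proof splits on `FiniteDimensional`.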
#install.packages('rJava', type='mac.binary') #install.packages('xlsx', type='mac.binary') { library("xlsx") #Устанавливаем директорию setwd("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/analytics") #Цикл по каждому товару. Надеемся на то, что во всех магазах одинаковые товары goods.table <- read.table(file = 'store1_price.txt', head = TRUE) goods <- goods.table[, 1] ############ Работаем только с таблицей .csv и .xlsx ######################### for (prod in goods) { #Индекс продукта element_index <- which(goods.table == prod) #Цена продажи product_price <- goods.table[element_index, 3] #Цена поставки supply_price <- goods.table[element_index, 2] #Цена утилизации util_price <- goods.table[element_index, 4] #Названия магазинов shop_names <- c() #Выручка shop_revenues <- c() #Прибыль shop_profits <- c() #Реализация shop_sales <- c() #Списание shop_writeoffs <- c() #Равномерность продаж shop_sr <- c() #Продажи макс shop_sales_max <- c() #День продажи макс shop_sales_maxdays <- c() #Продажи мин shop_sales_min <- c() #День продажи мин shop_sales_mindays <- c() #Списание макс shop_writeoff_max <- c() #День списания макс shop_writeoff_maxdays <- c() #Цикл по каждому магазину for (i in 1:10) { in1 <- read.table(file = paste0('store', as.character(i), '_in.txt'), head = TRUE) out1 <- read.table(file = paste0('store', as.character(i), '_out.txt'), head = TRUE) # Название магазина shop_names <- append(shop_names, paste0('shop', as.character(i))) # Списание buf_writeoff <- sum(in1[, prod]) - sum(out1[, prod]) shop_writeoffs <- append(shop_writeoffs, buf_writeoff) # Выручка buf_shoprevenue <- product_price * sum(out1[, prod]) shop_revenues <- append(shop_revenues, buf_shoprevenue) # Затраты buf_cost <- (sum(in1[, prod]) * supply_price) + (buf_writeoff * util_price) # Прибыль shop_profits <- append(shop_profits, buf_shoprevenue - buf_cost) # Реализация shop_sales <- append(shop_sales, sum(out1[, prod])) # Равномерность продаж shop_sr <- append(shop_sr, sd(out1[, prod])) # Продажи макс shop_sales_max <- append(shop_sales_max, max(out1[, prod])) # День продажи макс shop_sales_maxdays <- append(shop_sales_maxdays, out1[which.max(out1[, prod]), 1]) # Продажи мин shop_sales_min <- append(shop_sales_min, min(out1[, prod])) # День продажи мин shop_sales_mindays <- append(shop_sales_mindays, out1[which.min(out1[, prod]), 1]) # Списание макс shop_writeoff_max <- append(shop_writeoff_max, max(c(in1[, prod] - out1[, prod]))) # День списания макс shop_writeoff_maxdays <- append(shop_writeoff_maxdays, in1[which.max(c(in1[, prod] - out1[, prod])), 1]) } #Высчитываем итог и среднее для выручки, прибыли, реализации, списании, равномерности shop_names <- c(shop_names, c("Итог", "Среднее")) shop_revenues <- c(shop_revenues, c(sum(shop_revenues), mean(shop_revenues))) shop_profits <- c(shop_profits, c(sum(shop_profits), mean(shop_profits))) shop_sales <- c(shop_sales, c(sum(shop_sales), mean(shop_sales))) shop_writeoffs <- c(shop_writeoffs, c(sum(shop_writeoffs), mean(shop_writeoffs))) shop_sr <- c(shop_sr, c(sum(shop_sr), mean(shop_sr))) shop_sales_max <- c(shop_sales_max, c("", "")) shop_sales_maxdays <- c(shop_sales_maxdays, c("", "")) shop_sales_min <- c(shop_sales_min, c("", "")) shop_sales_mindays <- c(shop_sales_mindays, c("", "")) shop_writeoff_max <- c(shop_writeoff_max, c("", "")) shop_writeoff_maxdays <- c(shop_writeoff_maxdays, c("", "")) #Формируем датафрейм table <- data.frame( shop_names, shop_revenues, shop_profits, shop_sales , shop_writeoffs, shop_sr, shop_sales_max, shop_sales_maxdays, 
shop_sales_min, shop_sales_mindays, shop_writeoff_max, shop_writeoff_maxdays ) #Проставляем заголовки col_headings <- c( "Магазин" , "Выручка, руб" , "Прибыль", "Реализация" , "Списание, конт.", "Равномерность продаж" , "Продажи макс", "День продажи макс", "Продажи мин", "День продажи мин" , "Списание макс", "День макс списания" ) names(table) <- col_headings # Запись в .csv write.table( table, file = paste0( "/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/таблица_", prod, ".csv" ), col.names = TRUE, row.names = FALSE, sep = ';', dec = ',', fileEncoding = 'UTF-8' ) # Запись в .xlsx (это чтоб на маке отображалось корректно) write.xlsx( table, file = paste0( "/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/таблица_", prod, ".xlsx" ), sheetName = "DATA", col.names = TRUE, row.names = FALSE, append = FALSE ) } ######################## Формируем графики ################################### # Вектор всех возможных цветов для построения товаров plot_colors <- c("red3","forestgreen", "steelblue", "darkgreen","darkolivegreen3", "darkorange1","firebrick1","gold1", "lightcoral","mediumvioletred","navyblue", "tan1","turquoise1","chocolate1","blue","black","brown", "darkseagreen" ) # Вектор всех возможных значков для товаров plot_pchs <- seq(15,25) #Общая выручка со всех магазинов и со всех продуктов super_summ_shoprevenue <- rep(0,7) #Общая прибыль со всех магазинов и со всех продуктов super_summ_shopprofits <- rep(0,7) #Общее списание со всех магазинов и со всех продуктов super_summ_writeoffs <- rep(0,7) #Датафрейм с прибылью super_df_shopprofits <- data.frame(buf=rep(0,7)) #Датафрейм с рентабельностью super_df_profitability <- data.frame(buf=rep(0,7)) #Список датафреймов всего товара по магазинам. Нужен для динамики продаж всех товаров по всем магазина goods_list <- list() #Цикл по каждому магазу for (i in 1:10) { in1 <- read.table(file = paste0('store',as.character(i),'_in.txt'), head = TRUE) out1 <- read.table(file = paste0('store',as.character(i),'_out.txt'), head = TRUE) price1 <- read.table(file = paste0('store',as.character(i),'_price.txt'), head = TRUE) #Общая выручка со всех продуктов summ_shopprofits <- rep(0,7) summ_shoprevenue <- rep(0,7) summ_writeoffs <- rep(0,7) #Датафреймы по каждому продукту для вывода единых графиков с несколькими продуктами #объем продаж df_salesvolume <- data.frame(buf=rep(0,7)) #выручка df_shoprevenue <- data.frame(buf=rep(0,7)) #прибыль df_shopprofits <- data.frame(buf=rep(0,7)) #Списание df_writeoffs <- data.frame(buf=rep(0,7)) #Рентабельность df_profitability <- data.frame(buf=rep(0,7)) #Цикл по каждому продукту в каждом магазине for (prod in goods){ element_index <- which(goods.table == prod) #Цена продажи product_price <- goods.table[element_index, 3] #Цена поставки supply_price <- goods.table[element_index, 2] #Цена утилизации util_price <- goods.table[element_index, 4] # Выручка buf_shoprevenue <- product_price * out1[, prod] png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Выручка магазин ",as.character(i)," (",prod,").png"),width=600, height=450) plot(buf_shoprevenue, main=paste0('Выручка по дням в магазине ',as.character(i),' (',prod,')'), xlab='День', ylab=paste0("Выручка по товару '",prod,"', руб."),type='o') dev.off() # Списание buf_writeoff <- in1[, prod] - out1[, prod] xrange <- range(seq(1,7)) yrange <- range(buf_writeoff) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Списание магазин ",as.character(i)," 
(",prod,").png"),width=600, height=450) plot(xrange, yrange, main=paste0('Списание ',prod,' в ',as.character(i),' магазине'), xlab="День", ylab="Списание, шт.", type = "n" ) points(seq(1,7), buf_writeoff, pch=19, col="red") lines(seq(1,7), buf_writeoff, pch=19, col="black") dev.off() # Затраты buf_cost <- (in1[, prod] * supply_price) + (buf_writeoff * util_price) # Прибыль shop_profits <- buf_shoprevenue - buf_cost png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Прибыль магазин ",as.character(i)," (",prod,").png"),width=600, height=450) plot(shop_profits, main=paste0('Прибыль по дням в ',as.character(i),' магазине (',prod,')'), xlab='День', ylab='Прибыль, .руб.',type='S') dev.off() #Добавляем данные во фреймы для построения графиков ниже #Объём продаж df_salesvolume <- data.frame(df_salesvolume, out1[, prod]) #выручка df_shoprevenue <- data.frame(df_shoprevenue, buf_shoprevenue) #прибыль df_shopprofits <- data.frame(df_shopprofits, shop_profits) #Списание df_writeoffs <- data.frame(df_writeoffs, buf_writeoff) #Рентабельность df_profitability <- data.frame(df_profitability, floor((shop_profits/buf_shoprevenue) * 100)) #Прибавляем к сумме выручки summ_shoprevenue <- summ_shoprevenue + buf_shoprevenue #Прибавляем к сумме прибыли summ_shopprofits <- summ_shopprofits + shop_profits #Прибавляем к сумме списаний summ_writeoffs <- summ_writeoffs + buf_writeoff } ############################################Графики с несколькими товарами на одном графике########################## #График объёма продаж товарав в магазине по дням df_salesvolume <- subset(df_salesvolume, select = -c(buf)) names(df_salesvolume) <- goods #Закидываем в list goods_list[[paste0("shop",as.character(i))]] <- df_salesvolume xrange <- range(seq(1,7)) yrange <- range(df_salesvolume) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Объём продаж магазин ",as.character(i),".png"),width=600, height=450) graph <- plot(xrange, yrange, main=paste0('Объём продаж в магазине ',as.character(i)," по товарам"), xlab="День недели", ylab="Количество проданного товара, шт", type = "n", ) for (j in 1:length(goods)){ points(seq(1,7),df_salesvolume[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) lines(seq(1,7), df_salesvolume[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) } legend("topright", legend=goods,col=plot_colors, pch=plot_pchs) dev.off() #График выручки от товарав по дням df_shoprevenue <- subset(df_shoprevenue, select = -c(buf)) names(df_shoprevenue) <- goods xrange <- range(seq(1,7)) yrange <- range(df_shoprevenue) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Выручка магазин ",as.character(i),".png"),width=600, height=450) graph <- plot(xrange, yrange, main=paste0('Выручка в магазине ',as.character(i)," по товарам"), xlab="День недели", ylab="Выручка, руб", type = "n", ) for (j in 1:length(goods)){ points(seq(1,7),df_shoprevenue[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) lines(seq(1,7), df_shoprevenue[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) } legend("topright", legend=goods,col=plot_colors, pch=plot_pchs) dev.off() # График прибыли от товарав по дням df_shopprofits <- subset(df_shopprofits, select = -c(buf)) names(df_shopprofits) <- goods xrange <- range(seq(1,7)) yrange <- range(df_shopprofits) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Прибыль магазин 
",as.character(i),".png"),width=600, height=450) graph <- plot(xrange, yrange, main=paste0('Прибыль в магазине ',as.character(i)," по товарам"), xlab="День недели", ylab="Прибыль, руб", type = "n", ) for (j in 1:length(goods)){ points(seq(1,7),df_shopprofits[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) lines(seq(1,7), df_shopprofits[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) } legend("topright", legend=goods,col=plot_colors, pch=plot_pchs) dev.off() # График списания товарав по дням df_writeoffs <- subset(df_writeoffs, select = -c(buf)) names(df_writeoffs) <- goods xrange <- range(seq(1,7)) yrange <- range(df_writeoffs) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Списания магазин ",as.character(i),".png"),width=600, height=450) graph <- plot(xrange, yrange, main=paste0('Списания в магазине ',as.character(i)," по товарам"), xlab="День недели", ylab="Количество списанного товара, шт", type = "n", ) for (j in 1:length(goods)){ points(seq(1,7),df_writeoffs[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) lines(seq(1,7), df_writeoffs[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) } legend("topright", legend=goods,col=plot_colors, pch=plot_pchs) dev.off() # График рентабельности товарав по дням df_profitability <- subset(df_profitability, select = -c(buf)) names(df_profitability) <- goods xrange <- range(seq(1,7)) yrange <- range(df_profitability) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Рентабельность магазин ",as.character(i),".png"),width=600, height=450) graph <- plot(xrange, yrange, main=paste0('Рентабельность в магазине ',as.character(i)," по товарам"), xlab="День", ylab="Рентабельность, %", type = "n" ) for (j in 1:length(goods)){ points(seq(1,7),df_profitability[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) lines(seq(1,7), df_profitability[, goods[j]], pch=plot_pchs[j], col=plot_colors[j]) } legend("topright", legend=goods,col=plot_colors, pch=plot_pchs) dev.off() #Строим общий график выручки по дням png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Общая выручка магазин ",as.character(i),".png"),width=600, height=450) plot(summ_shoprevenue, main=paste0('Выручка по дням в магазине ',as.character(i)), xlab='День', ylab=paste0("Общая выручка, руб."),type='o') dev.off() #Строим общий график прибыли по дням png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Общая прибыль магазин ",as.character(i),".png"),width=600, height=450) plot(summ_shopprofits, main=paste0('Прибыль по дням в магазине ',as.character(i)), xlab='День', ylab='Общая прибыль, руб.',type='S') dev.off() #Строим общий график списаний по дням xrange <- range(seq(1,7)) yrange <- range(summ_writeoffs) png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/shop",as.character(i),"/Общее списание магазин ",as.character(i),".png"),width=600, height=450) plot(xrange,yrange,main=paste0('Списания по дням в ',as.character(i),' магазине'), xlab="День", ylab="Списание, шт.", type = "n") points(seq(1,7), summ_writeoffs, pch=19, col="red") lines(seq(1,7), summ_writeoffs, pch=19, col="black") dev.off() #Строим график рентабельности для магазина summ_profitability <- floor((summ_shopprofits/summ_shoprevenue) * 100) xrange <- range(seq(1,7)) yrange <- range(summ_profitability) png(file=paste0("/Users/georgiydemo/Projects/FA/Course 
II/R/Diksi/result/graph/shop",as.character(i),"/Общая рентабельность магазин ",as.character(i),".png"),width=600, height=450) plot(xrange, yrange, main=paste("Рентабельность по дням в",as.character(i),"магазине"), xlab="День", ylab="Рентабельность, %", type = "n" ) lines(seq(1,7), summ_profitability, pch=20, col="red3",lwd = 3, lty = 2) dev.off() #Прибавляем выручку к общей выручке super_summ_shoprevenue <- super_summ_shoprevenue + summ_shoprevenue #Прибавляем прибыль к общей прибыли super_summ_shopprofits <- super_summ_shopprofits + summ_shopprofits #Прибавляем списания к общим списаниям super_summ_writeoffs <- super_summ_writeoffs + summ_writeoffs #Прибавляем к датафрейму рентабельности super_df_profitability <- data.frame(super_df_profitability, summ_profitability) #Прибавляем к датафрейму прибыли super_df_shopprofits <- data.frame(super_df_shopprofits, summ_shopprofits) } #Строим график общей выручки super_summ_shoprevenue1 <- super_summ_shoprevenue / 1000 png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общая выручка.png", width=600, height=450) plot(super_summ_shoprevenue1, main='Выручка во всех магазинах по дням', xlab='День', ylab="Общая выручка, тыс руб.",type='o') dev.off() #Строим график общей прибыли super_summ_shopprofits1 <- super_summ_shopprofits / 1000 png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общая прибыль.png", width=600, height=450) plot(super_summ_shopprofits1, main='Прибыль во всех магазинах по дням', xlab='День', ylab='Общая прибыль, тыс руб.',type='S') dev.off() #Строим график общих списаний xrange <- range(seq(1,7)) yrange <- range(super_summ_writeoffs) png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общее списание.png",width=600, height=450) plot(xrange,yrange,main='Списание во всех магазинах по дням', xlab="День", ylab="Списание, шт.", type = "n") points(seq(1,7), super_summ_writeoffs, pch=19, col="red") lines(seq(1,7), super_summ_writeoffs, pch=19, col="black") dev.off() #Строим график рентабельности super_summ_profitability <- floor((super_summ_shopprofits/super_summ_shoprevenue) * 100) xrange <- range(seq(1,7)) yrange <- range(super_summ_profitability) png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общая рентабельность.png",width=600, height=450) plot(xrange, yrange, main="Рентабельность по дням общая", xlab="День", ylab="Рентабельность, %", type = "n" ) lines(seq(1,7), super_summ_profitability, pch=20, col="red3",lwd = 3, lty = 2) dev.off() ########################Строим сложный график рентабельности########################## #Выкидываем нулевой столбец super_df_profitability <- subset(super_df_profitability, select = -c(buf)) #Присваиваем имена столбцам для обращения по ним names(super_df_profitability) <- paste0("shop",as.character(seq(1,10))) xrange <- range(seq(1,7)) yrange <- range(super_df_profitability) png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общая рентабельность подробно.png",width=716, height=630) plot(xrange, yrange, main='Рентабельность по дням в магазинах', xlab="День", ylab="Рентабельность, %", type = "n" ) for (i in 1:length(super_df_profitability)){ points(seq(1,7), super_df_profitability[,paste0("shop",as.character(i))], pch=19, col=plot_colors[i]) } legend("bottomleft", legend=paste("Магазин", seq(1,10)),col=plot_colors,pch=c(19)) dev.off() ########################Строим сложный график прибыли########################## #Выкидываем нулевой столбец super_df_shopprofits <- subset(super_df_shopprofits, select = 
-c(buf)) #Присваиваем имена столбцам для обращения по ним names(super_df_shopprofits) <- paste0("shop",as.character(seq(1,10))) super_df_shopprofits <- super_df_shopprofits / 1000 xrange <- range(seq(1,7)) yrange <- range(super_df_shopprofits) png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Общая прибыль подробно.png",width=716, height=630) plot(xrange, yrange, main='Прибыль по дням в магазинах', xlab="День", ylab="Прибыль, тыс руб", type = "n" ) for (i in 1:length(super_df_shopprofits)){ points(seq(1,7), super_df_shopprofits[,paste0("shop",as.character(i))], pch=18, col=plot_colors[i], cex=2.0) } legend("bottomleft", legend=paste("Магазин", seq(1,10)),col=plot_colors,pch=c(18)) dev.off() ############################График динамики продаж товаров по всем магазинам##################### #Каждый магазин выделять своим цветом #Каждый товар выделять своим значком. xrange <- range(seq(1,7)) yrange <- range(goods_list) png(file="/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Динамика продаж товаров.png",width=800, height=1000) plot(xrange, yrange, main='Динамика продаж товаров по всем магазинам', xlab="День", ylab="Продажа, шт.", type = "n" ) for (i in 1:length(goods_list)){ for (j in 1:length(goods_list[[paste0("shop",as.character(i))]])){ points(seq(1,7), goods_list[[paste0("shop",as.character(i))]][j][, 1], pch=plot_pchs[j], col=plot_colors[i]) } } legend("bottomright", legend=paste("Магазин", seq(1,10)),col=plot_colors,pch=19) legend("topright", legend=goods,col="black",pch=plot_pchs) dev.off() #Подготовить диаграмму, на которой будет представлены объемы #продаж одного товара сразу по всем магазинам. Каждый магазин #выделять своим цветом. for (good in goods){ #Значения good_values <- c() for (i in 1:length(goods_list)){ value <- goods_list[[paste0("shop",as.character(i))]][, good] #Записываем сумму товара за неделю good_values <- append(good_values, sum(value)) } png(file=paste0("/Users/georgiydemo/Projects/FA/Course II/R/Diksi/result/graph/Объём продаж ",good,".png"),width=650, height=500) barplot(height = good_values, names=seq(1,10),main=paste0('Объём продаж товара ', good, " по магазинам"), col="#69b3a2",xlab="№ магазина", ylab="Объём продаж, шт") dev.off() } print("Завершение работы скрипта") }
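The store-analytics script above derives everything from the daily delivery (`in`) and sales (`out`) tables plus a price list: write-offs are deliveries minus sales, revenue is price times units sold, cost combines supply and disposal prices, and profit and margin follow from those. A minimal self-contained R sketch of that core per-product computation (the seven daily figures and the three prices below are made-up illustrative values, not data from the script's files):

# one product in one store, seven days (illustrative numbers only)
in_qty  <- c(30, 25, 28, 35, 40, 45, 32)   # units delivered per day
out_qty <- c(27, 24, 26, 33, 38, 44, 30)   # units sold per day
sale_price   <- 55                         # selling price per unit
supply_price <- 40                         # purchase price per unit
util_price   <- 3                          # disposal cost per written-off unit

writeoff <- in_qty - out_qty                          # unsold units per day
revenue  <- sale_price * out_qty                      # daily revenue
cost     <- supply_price * in_qty + util_price * writeoff
profit   <- revenue - cost
margin   <- floor(100 * profit / revenue)             # daily profitability, %

print(c(revenue = sum(revenue), profit = sum(profit),
        sold = sum(out_qty), writeoff = sum(writeoff)))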
[STATEMENT] lemma backwards_wdr_res_stab: "(ts \<^bold>\<midarrow>wdr(d,n) \<^bold>\<rightarrow> ts') \<and> (ts',v \<Turnstile> re(c)) \<longrightarrow> (ts,v \<Turnstile> re(c))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (ts \<^bold>\<midarrow>wdr( d, n ) \<^bold>\<rightarrow> ts') \<and> 0 < \<parallel>ext v\<parallel> \<and> len v ts' c = ext v \<and> restrict v (res ts') c = lan v \<and> |lan v| = 1 \<longrightarrow> 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (res ts) c = lan v \<and> |lan v| = 1 [PROOF STEP] by (metis inf.absorb1 order_trans regular_sensors.withdraw_reservation_length_stable restrict_def' restriction.restrict_res traffic.withdraw_res_subseteq)
module Main import Data.Vect ---------------------------------------------------------------------- -- Helper function remove : (x : a) -> (xs : Vect (S n) a) -> { auto prf : Elem x xs } -> Vect n a remove {prf = Here} x (x :: ys) = ys remove {prf = (There Here)} x (y :: (x :: xs)) = y :: xs remove {prf = (There (There later))} x (y :: (z :: xs)) = y :: remove x (z::xs) ---------------------------------------------------------------------- -- Game state data GameState : (guessesRemaining : Nat) -> (letters : Nat) -> Type where MkGameState : (word : String) -> (missing : Vect letters Char) -> GameState guessesRemaining letters data Finished : Type where Lost : (game : GameState 0 (S letters)) -> Finished Won : (game : GameState (S guesses) 0) -> Finished ---------------------------------------------------------------------- -- Validate player input data ValidInput : List Char -> Type where Letter : (c : Char) -> ValidInput [c] toFew : ValidInput [] -> Void toFew (Letter _) impossible toMuch : ValidInput (c::c'::cs) -> Void toMuch (Letter _) impossible isValidInput : (cs : List Char) -> Dec (ValidInput cs) isValidInput [] = No toFew isValidInput [c] = Yes (Letter c) isValidInput (c::c'::cs) = No toMuch isValidString : (s : String) -> Dec (ValidInput (unpack s)) isValidString s = isValidInput _ ---------------------------------------------------------------------- -- Input readGuess : IO (c ** ValidInput c) readGuess = do putStrLn "Letter? " input <- getLine case isValidString (toUpper input) of Yes prf => pure (_ ** prf) No _ => do print "invalid input" readGuess ---------------------------------------------------------------------- -- Processing processGuess : (letter : Char) -> GameState (S guesses) (S letters) -> Either (GameState guesses (S letters)) (GameState (S guesses) letters) processGuess letter (MkGameState word missing) = case isElem letter missing of Yes ind => Right (MkGameState word (remove letter missing)) No _ => Left (MkGameState word missing) ---------------------------------------------------------------------- -- Game function game : GameState (S guesses) (S letters) -> IO Finished game {guesses} {letters} gameState = do (_ ** Letter letter) <- readGuess case processGuess letter gameState of Left nope => do putStrLn "wrong guess" case guesses of Z => pure (Lost nope) S k => game nope Right yeah => do putStrLn "correct guess" case letters of Z => pure (Won yeah) S k => game yeah ---------------------------------------------------------------------- -- Main main : IO () main = do ergebnis <- game {guesses=5} (MkGameState "DevOpenSpace" ['D','E','V','O','P','N','S','A','C']) case ergebnis of Lost (MkGameState word _) => putStrLn ("Lost - the word was " ++ word) Won _ => putStrLn "Won!"
// Copyright (c) 2001-2008 Hartmut Kaiser // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(BOOST_SPIRIT_KARMA_BINARY_MAY_04_2007_0904AM) #define BOOST_SPIRIT_KARMA_BINARY_MAY_04_2007_0904AM #if defined(_MSC_VER) && (_MSC_VER >= 1020) #pragma once // MS compatible compilers support #pragma once #endif #include <boost/spirit/home/support/component.hpp> #include <boost/spirit/home/support/detail/integer/endian.hpp> #include <boost/spirit/home/support/attribute_of.hpp> #include <boost/spirit/home/karma/domain.hpp> #include <boost/spirit/home/karma/detail/generate_to.hpp> #include <boost/spirit/home/karma/delimit.hpp> namespace boost { namespace spirit { namespace karma { namespace detail { template <int bits> struct integer { #ifdef BOOST_HAS_LONG_LONG BOOST_MPL_ASSERT_MSG( bits == 8 || bits == 16 || bits == 32 || bits == 64, not_supported_binary_size, ()); #else BOOST_MPL_ASSERT_MSG( bits == 8 || bits == 16 || bits == 32, not_supported_binary_size, ()); #endif }; template <> struct integer<8> { typedef uint_least8_t type; }; template <> struct integer<16> { typedef uint_least16_t type; }; template <> struct integer<32> { typedef uint_least32_t type; }; #ifdef BOOST_HAS_LONG_LONG template <> struct integer<64> { typedef uint_least64_t type; }; #endif /////////////////////////////////////////////////////////////////////// template <boost::integer::endianness bits> struct what; template <> struct what<boost::integer::native> { static std::string is() { return "native-endian binary"; } }; template <> struct what<boost::integer::little> { static char const* is() { return "little-endian binary"; } }; template <> struct what<boost::integer::big> { static char const* is() { return "big-endian binary"; } }; } /////////////////////////////////////////////////////////////////////////// template <integer::endianness endian, int bits> struct any_binary_director { template <typename Component, typename Context, typename Unused> struct attribute { typedef boost::integer::endian< endian, typename karma::detail::integer<bits>::type, bits > type; }; template <typename Component, typename OutputIterator, typename Context, typename Delimiter, typename Parameter> static bool generate(Component const& /*component*/, OutputIterator& sink, Context& /*ctx*/, Delimiter const& d, Parameter const& param) { typename traits::attribute_of< karma::domain, Component, Context>::type p (param); unsigned char const* bytes = reinterpret_cast<unsigned char const*>(&p); for (unsigned int i = 0; i < sizeof(p); ++i) detail::generate_to(sink, *bytes++); karma::delimit(sink, d); // always do post-delimiting return true; } // this any_byte_director has no parameter attached, it needs to have // been initialized from a direct literal template <typename Component, typename OutputIterator, typename Context, typename Delimiter> static bool generate(Component const&, OutputIterator&, Context&, Delimiter const&, unused_type) { BOOST_MPL_ASSERT_MSG(false, binary_generator_not_usable_without_attribute, ()); return false; } template <typename Component, typename Context> static std::string what(Component const& component, Context const& ctx) { return karma::detail::what<endian>::is(); } }; /////////////////////////////////////////////////////////////////////////// template <integer::endianness endian, int bits> struct binary_lit_director { template <typename Component, typename Context, typename Unused> struct attribute { 
typedef unused_type type; }; template <typename Component, typename OutputIterator, typename Context, typename Delimiter, typename Parameter> static bool generate(Component const& component, OutputIterator& sink, Context& /*ctx*/, Delimiter const& d, Parameter const& /*param*/) { boost::integer::endian< endian, typename karma::detail::integer<bits>::type, bits > p (fusion::at_c<0>(component.elements)); unsigned char const* bytes = reinterpret_cast<unsigned char const*>(&p); for (unsigned int i = 0; i < sizeof(p); ++i) detail::generate_to(sink, *bytes++); karma::delimit(sink, d); // always do post-delimiting return true; } template <typename Component, typename Context> static std::string what(Component const& component, Context const& ctx) { return karma::detail::what<endian>::is(); } }; }}} #endif
module viscosity_module use amrex_fort_module, only : rt => amrex_real use network use eos_type_module use eos_module implicit none contains subroutine viscous_coeff(eos_state, visc_coeff) use extern_probin_module, only: const_viscosity type (eos_t), intent(in) :: eos_state real (rt), intent(inout) :: visc_coeff visc_coeff = const_viscosity end subroutine viscous_coeff end module viscosity_module
# Goal: Associative arrays (as in awk) or hashes (as in perl). # Or, more generally, adventures in R addressing. # Here's a plain R vector: x <- c(2,3,7,9) # But now I tag every elem with labels: names(x) <- c("kal","sho","sad","aja") # Associative array operations: x["kal"] <- 12 # Pretty printing the entire associative array: x # This works for matrices too: m <- matrix(runif(10), nrow=5) rownames(m) <- c("violet","indigo","blue","green","yellow") colnames(m) <- c("Asia","Africa") # The full matrix -- m # Or even better -- library(xtable) xtable(m) # Now address symbolically -- m[,"Africa"] m["indigo",] m["indigo","Africa"] # The "in" operator, as in awk -- for (colour in c("yellow", "orange", "red")) { if (colour %in% rownames(m)) { cat("For Africa and ", colour, " we have ", m[colour, "Africa"], "\n") } else { cat("Colour ", colour, " does not exist in the hash.\n") } } # This works for data frames also -- D <- data.frame(m) D # Look closely at what happened -- str(D) # The colours are the rownames(D). # Operations -- D$Africa D[,"Africa"] D["yellow",] # or subset(D, rownames(D)=="yellow") colnames(D) <- c("Antarctica","America") D D$America
We welcome you to visit with us over complimentary coffee, tea, donuts, cereal bars and fruit. We have tables set up for fellowship time before and after each service! Newborns and children up to four years of age are welcome in our church nursery located in the back of our hallway by our church offices. The nursery is only available at the 10:10am service until August 2018. We have trained staff available to welcome you and your child. If you need assistance in finding the nursery, please ask an usher. Our contemporary services include electric guitars, drums, violin, electric keyboard and more. Our songs are upbeat, biblically based and serve to express our love and gratitude to the Lord. Lyrics are projected on worship screens so they are easy to follow. Some of the songs you may know. Others, we hope, will be uplifting and serve to praise and give glory to God. Singing and clapping is highly encouraged and welcomed! We welcome you to come in whatever clothing is comfortable to you. At Peace With Christ people wear anything from casual clothes (even shorts) to business suits. For us at Peace With Christ, it’s not about what you look like or what you wear. Are you new to Peace with Christ? Or maybe you have been around here for a little while and want to know more. If so, your timing could not be more perfect because we are going to be holding a “Connect with Peace” class beginning Feb. 17th at 9am! “Connect with Peace” is our step toward church membership but coming to the class in no way means that you have to become a member! To be involved simply show up! Questions? Email Pastor Josh at [email protected].
import tactic data.set import prop.language prop.semantics prop.syntax namespace prop theorem completeness (v φ) : v ⊨ φ → ⊢ₗ φ := begin intro hm, sorry end end prop
section {* Domain Semirings *} theory Domain_Semiring imports "$AFP/Kleene_Algebra/Kleene_Algebra" begin subsection {* Domain Semirings *} text {* It is important to prove a dual statement for range semirings with each statement for domain semirings! Perhaps, in the future, these dual statements will be generated\<dots> *} text{* We only consider domain semirings over semirings (and dioids) with one and zero. A more refined hierarchy can of course be obtained. We have already defined domain over near semirings and pre-semirings with and without units. All this should eventually be included. *} class nabla_op = fixes nabla :: "'a \<Rightarrow> 'a" ("\<nabla>_" [90] 95) class fdiamond_op = fixes fdiamond :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("\<bar> _ \<rangle> _" [50,90] 95) class fbox_op = fixes fbox :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("\<bar> _ ] _" [50,90] 95) class bdiamond_op = fixes bdiamond :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("\<langle> _ \<bar> _" [50,90] 95) class bbox_op = fixes bbox :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("[ _ \<bar> _" [50,90] 95) class a_op = fixes a :: "'a \<Rightarrow> 'a" class d_op = fixes d :: "'a \<Rightarrow> 'a" class r_op = fixes r :: "'a \<Rightarrow> 'a" class ar_op = fixes ar :: "'a \<Rightarrow> 'a" class domain_semiring = semiring_one_zero + plus_ord + d_op + assumes d1: "x+(d(x)\<cdot>x) = d(x)\<cdot>x" and d2: "d(x\<cdot>y) = d(x\<cdot>d(y))" and d3: "d(x)+1 = 1" and d4 [simp]: "d(0) = 0" and d5: "d(x+y) = d(x)+d(y)" begin text {* We first show that every domain semiring is "automatically" idempotent, hence a dioid *} subclass dioid_one_zero proof fix x y z :: 'a show "x+x = x" by (metis add_commute local.d1 local.d3 local.distrib_right' local.mult_1_left) qed lemma d1_eq: "x = d(x)\<cdot>x" by (metis d1 d3 eq_iff less_eq_def mult_isor mult_onel) lemma domain_invol: "d(d(x)) = d(x)" by (metis d1_eq d2 mult_assoc) text {* The next lemma formulates the fixed point lemma without sets. It states that $x$ is a domain element (of some element $y$ if and only if $x=d(x)$. *} lemma domain_fixed_point: "(\<exists>y.(x = d(y))) \<longleftrightarrow> x = d(x)" by (metis domain_invol) text {* One can now use $x=d(x)$ for typing domain elements. The next lemma shows that two different ways of typing domain elements are equivalent *} lemma type_conv: "\<forall>P.(\<forall>x.(x = d(x) \<longrightarrow> P(x))) \<longleftrightarrow> (\<forall>x.(P(d(x))))" by (metis domain_invol) text {* We now continue proving properties. 
*} lemma domain_1: "d(x\<cdot>y) \<le> d(x)" by (metis local.add_ub1 local.d2 local.d3 local.d5 local.distrib_left local.mult_oner) lemma domain_subid: "x \<le> 1 \<longrightarrow> x \<le> d(x)" by (metis d1_eq mult_isol mult_oner) lemma domain_very_strict: "d(x) = 0 \<longleftrightarrow> x = 0" by (metis d1_eq local.annil local.d4) lemma domain_one [simp]: "d(1) = 1" by (metis d1_eq mult_oner) lemma dom_subid: "d(x) \<le> 1" by (metis domain_1 domain_one mult_onel) lemma domain_iso: "x \<le> y \<longrightarrow> d(x) \<le> d(y)" by (metis d5 less_eq_def) lemma domain_subdist: "d(x) \<le> d(x+y)" by (metis domain_iso order_prop) lemma domain_export: "d(d(x)\<cdot>y) = d(x)\<cdot>d(y)" proof - have "d(d(x)\<cdot>y) \<le> d(x)\<cdot>d(y)" by (metis d1_eq dom_subid domain_1 domain_invol domain_iso local.mult_isol_var local.mult_isor local.mult_onel) thus ?thesis by (metis dom_subid domain_subid local.antisym_conv local.d2 local.dual_order.trans local.mult_isol local.mult_oner) qed text {* Metis takes very long *} lemma dom_el_idemp: "d(x)\<cdot>d(x) = d(x)" by (metis d1_eq domain_export) lemma dom_el_comm: "d(x)\<cdot>d(y) = d(y)\<cdot>d(x)" by (metis d1_eq d2 domain_1 domain_export mult_assoc mult_isor mult_onel antisym_conv) text {* The next lemma shows that domain is a least left preserver; it is the leas (domain) element for which the left hand side holds *} lemma dom_llp: "x \<le> d(y)\<cdot>x \<longleftrightarrow> d(x) \<le> d(y)" by (metis add_comm d1_eq d3 domain_1 domain_invol less_eq_def mult_isor mult_onel) lemma dom_weakly_local: "x\<cdot>y = 0 \<longleftrightarrow> x\<cdot>d(y) = 0" by (metis annil d1_eq d2 d4) text {* We can now show that domain elements are closed under addition and multiplication. This means that they form a subalgebra of the domain semiring. *} lemma dom_add_closed: "d(d(x)+d(y)) = d(x)+d(y)" by (metis d5 domain_invol) lemma dom_mult_closed: "d(d(x)\<cdot>d(y)) = d(x)\<cdot>d(y)" by (metis d2 domain_export) lemma dom_lb: "d(x)\<cdot>d(y) \<le> d(x)" by (metis domain_1 domain_export domain_invol) lemma dom_glb: "d(x) \<le> d(y)\<cdot>d(z) \<longleftrightarrow> (d(x) \<le> d(y) \<and> d(x) \<le> d(z))" apply auto apply (metis dom_lb local.order.trans) apply (metis dom_el_comm dom_lb local.dual_order.trans) by (metis dom_el_idemp local.mult_isol_var) text {* We have already shown that domain elements form a semilattice under multiplication, and they form a semilattice under addition by definition of dioids. We now show that the absorption laws hold. *} lemma domain_absorption_1: "d(x)\<cdot>(d(x)+d(y)) = d(x)" by (metis add_comm d1_eq d3 distrib_left domain_export mult_oner) lemma domain_absorption_2: "d(x)+(d(x)\<cdot>d(y)) = d(x)" by (metis d1_eq distrib_left domain_absorption_1 domain_export) text {* This proves that domain elements form a lattice. It follows immediately from the semiring distributivity law that domain elements form a distributive lattice. In every lattice, one of the distributivity laws suffices for that. We now show the other distributivity law explicitly. 
*} lemma domain_distributivity: "d(x)+(d(y)\<cdot>d(z)) = (d(x)+d(y))\<cdot>(d(x)+d(z))" proof - have "d(x)+(d(y)\<cdot>d(z)) \<le> (d(x)+d(y))\<cdot>(d(x)+d(z))" by (metis domain_absorption_1 local.add_comm local.add_idem' local.add_iso_var local.distrib_right local.subdistl) thus ?thesis by (smt add_assoc' d1_eq distrib_left distrib_right dom_el_comm domain_absorption_2 domain_export order_refl) qed text {* This should probably be made explicit with a sublocale statement *} text {* Finally we relate domain elements with the order the meet semilattice way *} lemma domain_order: "d(x) \<le> d(y) \<longleftrightarrow> d(x) = d(x)\<cdot>d(y)" by (metis dom_glb domain_export eq_iff) end section {* Antidomain Semirings *} text {* In this setting, domain can be defined *} class antidomain_semiring = semiring_one_zero + plus_ord + a_op + assumes a1: "(a x) \<cdot> x = 0" and a2: "a(x\<cdot>y)+a(x\<cdot>a(a(y))) = a(x\<cdot>a(a(y)))" and a3: "a(a(x))+a(x) = 1" begin text {* Definition of domain. *} definition (in antidomain_semiring) antidomain_semiring_domain :: "'a \<Rightarrow> 'a" ("d") where "d(x) = a(a(x))" text {* Again, every antidomain semiring is a dioid *} subclass dioid proof fix x y z :: 'a show "x+x = x" by (metis local.a1 local.a2 local.a3 local.add_zerol local.annir local.distrib_left local.mult_oner) qed lemma a_fixed_point: "\<forall>x.(a(x) = x \<longrightarrow> (\<forall>y.(y = 0)))" by (metis a1 a3 add_idem annil mult_onel) lemma a_subid: "a(x) \<le> 1" by (metis a3 add_comm add_ub1) text {* The next lemma shows that antidomain elements are greatest left annihilators *} lemma a_gla: "a(x)\<cdot>y = 0 \<longleftrightarrow> a(x) \<le> a(y)" proof - have "a(x)\<cdot>y = 0 \<longrightarrow> a(x)\<cdot>d(y) = 0" by (metis a1 a2 a3 add_comm add_zerol antidomain_semiring_domain_def less_eq_def mult_onel mult_oner order_prop) hence "a(x)\<cdot>y = 0 \<longrightarrow> a(x) \<le> a(y)" by (metis a3 a_subid add_zerol antidomain_semiring_domain_def distrib_left mult_isor mult_onel mult_oner) thus ?thesis by (metis a1 a3 add_lub distrib_right eq_iff less_eq_def mult_onel mult_oner order_prop) qed lemma a2_eq: "a(x\<cdot>y) = a(x\<cdot>d(y))" sorry (* by (metis a1 a3 a_gla add_zerol antidomain_semiring_domain_def distrib_right' mult_assoc mult_onel a2 add_comm less_eq_def *) lemma a_closure: "d(a(x)) = a(x)" by (metis a2_eq antidomain_semiring_domain_def mult_onel) lemma a_subdist: "a(x+y) \<le> a(x)" by (metis a_gla local.add_ub1 local.add_zeror distrib_left local.less_eq_def) lemma a_idem: "a(x)\<cdot>a(x) = a(x)" by (metis a1 a3 add_zerol distrib_right mult_onel) lemma a_1: "a(x) = 1 \<longrightarrow> x = 0" by (metis a1 mult_onel) lemma a_3: "a(x)\<cdot>a(y)\<cdot>d(x+y) = 0" by (metis a2_eq a_gla a_subid local.add_comm local.distrib_left local.distrib_right local.less_eq_def local.mult_onel local.order_prop mult_assoc) lemma a_add: "a(x)\<cdot>a(y) = a(x+y)" proof - have "a(x)\<cdot>a(y) = a(x)\<cdot>a(y)\<cdot>a(x+y)" sorry hence "a(x)\<cdot>a(y) \<le> a(x+y)" by (metis a_subid mult_isor mult_onel order_trans) thus ?thesis by (metis a_idem a_subdist add_comm order_trans mult_isol mult_isor eq_iff) qed lemma a_export: "a(a(x)\<cdot>y) = d(x)+a(y)" proof - have "a(a(x)\<cdot>y) = (a(a(x)\<cdot>y)\<cdot>d(y))+(a(a(x)\<cdot>y)\<cdot>a(y))" by (metis a3 add_comm antidomain_semiring_domain_def distrib_left mult_oner) hence "a(a(x)\<cdot>y) \<le> (a(a(x)\<cdot>y)\<cdot>d(y))+a(y)" by (metis a_add a_subdist add_commute local.add_iso) hence "a(a(x)\<cdot>y) \<le> 
(a(a(x)\<cdot>y)\<cdot>(a(x)+d(x))\<cdot>d(y))+a(y)" by (metis a3 add_comm antidomain_semiring_domain_def mult_oner) hence one: "a(a(x)\<cdot>y) \<le> (a(a(x)\<cdot>y)\<cdot>a(x)\<cdot>d(y))+(a(a(x)\<cdot>y)\<cdot>d(x)\<cdot>d(y))+a(y)" by (metis add_comm distrib_left distrib_right mult_assoc) have two: "(a(a(x)\<cdot>y)\<cdot>a(x)\<cdot>d(y)) = 0" by (metis a1 a2_eq mult_assoc) from one two have three:"a(a(x)\<cdot>y) \<le> (a(a(x)\<cdot>y)\<cdot>d(x)\<cdot>d(y))+a(y)" by (metis add_zerol) have four: "\<dots> \<le> d(x)+a(y)" by (metis a_add a_subdist add_assoc add_comm add_ub1 antidomain_semiring_domain_def less_eq_def) from three four have "a(a(x)\<cdot>y) \<le> d(x)+a(y)" by (metis order_trans) thus ?thesis sorry qed end sublocale antidomain_semiring \<subseteq> domain_semiring "op +" "op \<cdot>" "(1\<Colon>'a)" "(0\<Colon>'a)" "d" "op \<le>" "op <" proof fix x y :: 'a have "x = d(x)\<cdot>x" by (metis a1 a3 add_comm add_zerol antidomain_semiring_domain_def distrib_right mult_onel) thus "x+d(x)\<cdot>x = d(x)\<cdot>x" by (metis add_idem) show "d(x\<cdot>y) = d(x\<cdot>d(y))" by (metis a2_eq antidomain_semiring_domain_def) show "d(x)+1 = 1" by (metis a_subid antidomain_semiring_domain_def less_eq_def) show "d(0) = 0" by (metis a1 a3 a_export antidomain_semiring_domain_def mult_oner) show "d(x+y) = d(x)+d(y)" by (metis a_add a_export antidomain_semiring_domain_def) qed text {* together with a-closure it now follows that antidomain elements form distributive lattices *} context antidomain_semiring begin lemma a_zero: "a(0) = 1" by (metis a3 add_zerol d4 antidomain_semiring_domain_def) lemma a_one: "a(1) = 0" by (metis a1 mult_oner) lemma a_comp_1: "d(x)\<cdot>a(x) = 0" by (metis a1 antidomain_semiring_domain_def) lemma a_comp_2: "a(x)\<cdot>d(x) = 0" by (metis a1 dom_weakly_local) text {* By the previous two lemmas it is now clear that antidomain algebras form a Boolean subalgebra *} text {* perhaps we should make this explict with a sublocale statement *} lemma a_2_var: "a(x)\<cdot>d(y) = 0 \<longleftrightarrow> a(x) \<le> a(y)" by (metis a_gla dom_weakly_local) lemma a_antitone: "x \<le> y \<longrightarrow> a(y) \<le> a(x)" by (metis a_subdist less_eq_def) lemma a_de_morgan: "a(a(x)\<cdot>a(y)) = d(x+y)" by (metis a_add antidomain_semiring_domain_def) lemma a_de_morgan_var_1: "a(a(x)\<cdot>a(y)) = d(x)+d(y)" by (metis a_export antidomain_semiring_domain_def) lemma a_de_morgan_var_2: "a(a(x)+a(y)) = d(x)\<cdot>d(y)" by (metis a_add antidomain_semiring_domain_def) lemma a_de_morgan_var_3: "a(d(x)+d(y)) =a(x)\<cdot>a(y)" by (metis a_add a_closure antidomain_semiring_domain_def) lemma a_de_morgan_var_4: "a(d(x)\<cdot>d(y))=a(x)+a(y)" by (metis a_closure a_export antidomain_semiring_domain_def) lemma a_comm: "a(x)\<cdot>a(y) = a(y)\<cdot>a(x)" by (metis a_add add_comm) lemma a_4: "a(x) \<le> a(x\<cdot>y)" by (metis a1 a_gla annir mult_assoc) lemma a_5: "a(d(x)) = a(x)" by (metis a_closure antidomain_semiring_domain_def) lemma a_6: "a(d(x)\<cdot>y) = a(x)+a(y)" by (metis a_closure a_export antidomain_semiring_domain_def) lemma a_7: "d(x)\<cdot>a(d(y)+d(z)) = d(x)\<cdot>a(y)\<cdot>a(z)" by (metis a_5 a_add d5 mult_assoc) text {* The following two lemmas give the Galois connection of Heyting algebras *} lemma d_a_galois1: "d(x)\<cdot>a(y) \<le> d(z) \<longleftrightarrow> d(x) \<le> d(z)+d(y)" by (metis a_add a_gla add_assoc add_comm d5 antidomain_semiring_domain_def) lemma d_a_galois2: "d(x)\<cdot>d(y) \<le> d(z) \<longleftrightarrow> d(x) \<le> d(z)+a(y)" by (metis a_closure d_a_galois1 
antidomain_semiring_domain_def) lemma d_cancellation_1: "d(x) \<le> d(y)+(d(x)\<cdot>a(y))" by (metis a1 a_add a_export a_gla antidomain_semiring_domain_def mult_assoc) lemma d_cancellation_2: "(d(z)+d(y))\<cdot>a(y) \<le> d(z)" by (metis a_3 a_add a_export a_gla add_assoc' add_comm antidomain_semiring_domain_def) text {* The next lemmas explicitly show that antidomain elements are closed under addition and multiplication. *} lemma a_d_add_closure: "d(a(x)+a(y))=a(x)+a(y)" by (metis a_6 a_closure) lemma a_d_mult_closure: "d(a(x)\<cdot>a(y))=a(x)\<cdot>a(y)" by (metis a_add a_closure) lemma d_a_zero: "d(x)\<cdot>a(y) = 0 \<longleftrightarrow> d(x) \<le> d(y)" by (metis a_gla antidomain_semiring_domain_def) lemma d_d_zero: "d(x)\<cdot>d(y) = 0 \<longleftrightarrow> d(x) \<le> a(y)" by (metis a_2_var antidomain_semiring_domain_def) lemma d_6: "d(x)+d(y) = d(x)+a(x)\<cdot>d(y)" by (metis a1 a_add a_export add_zerol antidomain_semiring_domain_def distrib_left) lemma d_7: "a(x)+a(y) = a(x)+d(x)\<cdot>a(y)" proof - have "a(x)+a(y) = a(x)\<cdot>a(x)+a(x)\<cdot>a(y)+d(x)\<cdot>a(x)+d(x)\<cdot>a(y)" by (metis a3 add_comm antidomain_semiring_domain_def mult_onel distrib_left distrib_right add_assoc) thus ?thesis by (metis a_add a_gla a_subid add_comm add_idem add_zerol antidomain_semiring_domain_def domain_subid domain_absorption_2 add_assoc a_closure) qed lemma kat_1: "d(x)\<cdot>y \<le> y\<cdot>d(z) \<longrightarrow> y\<cdot>a(z) \<le> a(x)\<cdot>y" proof assume hyp: "d(x)\<cdot>y \<le> y\<cdot>d(z)" hence "d(x)\<cdot>y\<cdot>a(z)+a(x)\<cdot>y\<cdot>a(z) \<le> a(x)\<cdot>y\<cdot>a(z)" sorry hence "d(x)\<cdot>y\<cdot>a(z)+a(x)\<cdot>y\<cdot>a(z) \<le> a(x)\<cdot>y" by (metis a_subid mult_isol mult_oner order_trans) thus "y\<cdot>a(z) \<le> a(x)\<cdot>y" by (metis a1 a_export a_zero distrib_right mult_onel) qed lemma kat_2: "y\<cdot>a(z) \<le> a(x)\<cdot>y \<longrightarrow> d(x)\<cdot>y\<cdot>a(z) = 0" by (metis a_4 local.a1 local.antidomain_semiring_domain_def local.mult_isol_var local.zero_unique mult_assoc) lemma kat_3: "d(x)\<cdot>y\<cdot>a(z) = 0 \<longrightarrow> d(x)\<cdot>y = d(x)\<cdot>y\<cdot>d(z)" by (metis a3 antidomain_semiring_domain_def mult_oner distrib_left add_zeror) lemma kat_4: "d(x)\<cdot>y = d(x)\<cdot>y\<cdot>d(z) \<longrightarrow> d(x)\<cdot>y \<le> y\<cdot>d(z)" by (metis dom_subid mult_isor mult_onel) lemma kat_1_equiv: "d(x)\<cdot>y \<le> y\<cdot>d(z) \<longleftrightarrow> y\<cdot>a(z) \<le> a(x)\<cdot>y" by (metis kat_1 kat_2 kat_3 kat_4) lemma kat_2_equiv: "y\<cdot>a(z) \<le> a(x)\<cdot>y \<longleftrightarrow> d(x)\<cdot>y\<cdot>a(z) = 0" by (metis kat_1_equiv kat_2 kat_3 kat_4) lemma kat_3_equiv: "d(x)\<cdot>y\<cdot>a(z) = 0 \<longleftrightarrow> d(x)\<cdot>y = d(x)\<cdot>y\<cdot>d(z)" by (metis kat_1_equiv kat_2_equiv kat_3 kat_4) lemma kat_4_equiv: "d(x)\<cdot>y = d(x)\<cdot>y\<cdot>d(z) \<longleftrightarrow> d(x)\<cdot>y \<le> y\<cdot>d(z)" by (metis kat_1_equiv kat_2_equiv kat_3_equiv) lemma kat_1_equiv_opp: "y\<cdot>d(x) \<le> d(z)\<cdot>y \<longleftrightarrow> a(z)\<cdot>y \<le> y\<cdot>a(x)" by (metis a_closure antidomain_semiring_domain_def kat_1_equiv) lemma kat_2_equiv_opp: "a(z)\<cdot>y \<le> y\<cdot>a(x) \<longleftrightarrow> a(z)\<cdot>y\<cdot>d(x) = 0" by (metis a_closure antidomain_semiring_domain_def kat_1_equiv_opp kat_2_equiv) lemma kat_3_equiv_opp: "a(z)\<cdot>y\<cdot>d(x) = 0 \<longleftrightarrow> y\<cdot>d(x) = d(z)\<cdot>y\<cdot>d(x)" apply auto apply (metis a_zero d_7 local.add_zerol local.distrib_right local.mult_onel mult_assoc) by (metis a_4 
a_comp_2 local.a_gla mult_assoc) lemma kat_4_equiv_opp: "y\<cdot>d(x) = d(z)\<cdot>y\<cdot>d(x) \<longleftrightarrow> y\<cdot>d(x) \<le> d(z)\<cdot>y" by (metis a_closure antidomain_semiring_domain_def kat_2_equiv kat_3_equiv_opp) end section{* Antidomain Kleene Algebras *} class antidomain_kleene_algebra = antidomain_semiring + kleene_algebra begin lemma fdom_star: "d(x\<^sup>\<star>) = 1" apply (rule antisym) apply (metis local.dom_subid) by (metis local.domain_iso local.domain_one local.star_ref) end section{* Forward Diamond *} text {* In this section we define a forward diamond operator over an antidomain semiring *} class fdiamond_semiring = antidomain_semiring + fdiamond_op + assumes fdiamond_def: "(\<bar>x\<rangle>y) = d(x\<cdot>y)" (* context antidomain_semiring begin definition fdiamond :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("( \<bar> _ \<rangle> _)" 90) where fdiamond_def: "(\<bar>x\<rangle>y) = d(x\<cdot>y)" *) begin lemma fdia_simp: "\<bar>x\<rangle>p = \<bar>x\<rangle>d(p)" by (metis d2 fdiamond_def) lemma fdia_simp_2: "\<bar>x\<rangle>p =d(\<bar>x\<rangle>p)" by (metis domain_invol fdiamond_def) lemma fdia_dom: "d(x) =\<bar>x\<rangle>1" by (metis fdiamond_def mult_oner) lemma fdia_add1: "\<bar>x\<rangle>(y+z) = (\<bar>x\<rangle>y)+(\<bar>x\<rangle>z)" by (metis a_add a_export antidomain_semiring_domain_def fdiamond_def distrib_left) lemma fdia_add2: "\<bar>x+y\<rangle>z = (\<bar>x\<rangle>z)+(\<bar>y\<rangle>z)" by (metis a_add a_export antidomain_semiring_domain_def fdiamond_def distrib_right) lemma fdia_mult: "\<bar>x\<cdot>y\<rangle>z = \<bar>x\<rangle>(\<bar>y\<rangle>z)" by (metis a2_eq antidomain_semiring_domain_def fdiamond_def mult_assoc) lemma fdia_one: "\<bar>1\<rangle>x = d(x)" by (metis antidomain_semiring_domain_def fdiamond_def mult_onel) lemma fdia_zero: "\<bar>x\<rangle>0 = 0" by (metis a_one a_zero annir antidomain_semiring_domain_def fdiamond_def) lemma fdemodalisation1: "d(z)\<cdot>(\<bar>x\<rangle>y) = 0 \<longleftrightarrow> d(z)\<cdot>x\<cdot>d(y) = 0" by (metis dom_weakly_local fdiamond_def mult_assoc) lemma fdemodalisation2: "\<bar>x\<rangle>y \<le> d(z) \<longleftrightarrow> a(z)\<cdot>x\<cdot>d(y) = 0" by (metis a2_eq a_gla fdiamond_def kat_1_equiv mult_assoc mult_onel mult_oner) lemma fdemodalisation3: "\<bar>x\<rangle>y \<le> d(z) \<longleftrightarrow> x\<cdot>d(y) \<le> d(z)\<cdot>x" by (metis fdemodalisation2 kat_1_equiv_opp kat_2_equiv_opp) lemma fdia_iso: "d(x) \<le> d(y) \<longrightarrow> \<bar>z\<rangle>x \<le> \<bar>z\<rangle>y" by (metis d2 d5 fdia_add1 fdiamond_def less_eq_def) lemma dia_iso_var: "x \<le> y \<longrightarrow> \<bar>x\<rangle>p \<le> \<bar>y\<rangle>p" by (metis a_add a_export antidomain_semiring_domain_def distrib_right fdiamond_def less_eq_def) lemma fdia_zero_var: "\<bar>0\<rangle>x = 0" by (metis a_one a_zero annil antidomain_semiring_domain_def fdiamond_def) lemma fdia_subdist_1: "\<bar>x\<rangle>p \<le> \<bar>x\<rangle>(p+q)" by (metis fdia_add1 add_lub order_refl) lemma fdia_subdist_2: "\<bar>x\<rangle>(d(p)\<cdot>d(q)) \<le> \<bar>x\<rangle>d(p)" by (metis a2_eq a_add a_d_add_closure a_subdist antidomain_semiring_domain_def fdia_iso fdiamond_def) lemma fdia_subdist: "\<bar>x\<rangle>(d(y)\<cdot>d(z)) \<le> (\<bar>x\<rangle>d(y))\<cdot>\<bar>x\<rangle>d(z)" by (metis fdia_subdist_2 dom_el_comm dom_glb fdia_simp_2) lemma dia_diff_var: "\<bar>x\<rangle>d(p) \<le> (\<bar>x\<rangle>(d(p)\<cdot>a(q)))+\<bar>x\<rangle>d(q)" by (metis fdia_simp antidomain_semiring_domain_def a1 a_add a_closure a_gla add_comm fdia_iso 
mult_assoc fdia_add1) lemma dia_diff: "(\<bar>x\<rangle>p)\<cdot>a(\<bar>x\<rangle>q) \<le> \<bar>x\<rangle>(d(p)\<cdot>a(q))" by (metis dia_diff_var fdia_add1 fdia_simp d5 d_a_galois1 fdiamond_def domain_invol) lemma fdia_export_1: "d(y)\<cdot>\<bar>x\<rangle>p = \<bar>d(y)\<cdot>x\<rangle>p" by (metis domain_export fdia_mult fdia_simp fdiamond_def) lemma fdia_export_2: "a(y)\<cdot>\<bar>x\<rangle>p = \<bar>a(y)\<cdot>x\<rangle>p" by (metis a_closure fdiamond_def domain_export mult_assoc) lemma fdia_split: "\<bar>x\<rangle>y = d(z)\<cdot>(\<bar>x\<rangle>y)+a(z)\<cdot>(\<bar>x\<rangle>y)" by (smt mult_onel a3 antidomain_semiring_domain_def distrib_right) end section {* Forward Box *} class fmodal_semiring = fdiamond_semiring + fbox_op + assumes fbox_def: "\<bar>x]y = a(x\<cdot>a(y))" (* definition fbox :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("( \<bar> _ ] _)" 90) where "(\<bar>x]y) = a(x\<cdot>a(y))" *) begin text {* the next lemmas establish the De Morgan duality between boxes and diamonds *} lemma fbox_fdia: "\<bar>x]y = a(\<bar>x\<rangle>a(y))" by (metis a_5 fdiamond_def fbox_def) lemma fdia_fbox: "\<bar>x\<rangle>y = a(\<bar>x]a(y))" by (metis antidomain_semiring_domain_def fdia_mult fdia_one fbox_fdia mult_onel mult_oner) lemma fbox_fdia_de_morgan_1: "a(\<bar>x]y) = \<bar>x\<rangle>a(y)" by (metis antidomain_semiring_domain_def fbox_def fdiamond_def) lemma fdia_fbox_de_morgan_2: "a(\<bar>x\<rangle>y) = \<bar>x]a(y)" by (metis a2_eq a_closure antidomain_semiring_domain_def fbox_def fdiamond_def) lemma fbox_simp: "\<bar>x]p = \<bar>x]d(p)" by (metis fbox_fdia a_5) lemma fbox_simp_2: "\<bar>x]p =d(\<bar>x]p)" by (metis a_closure fbox_fdia) text {* I use the following set of hypothesis for dualising statements. All box statements can then be obtained from the dual diamond statement *} (* sledgehammer (fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def **dual_statement*** ) *) lemma fbox_dom: "a(x) =\<bar>x]0" by (metis a_5 a_zero fbox_fdia fdia_dom) lemma fbox_add1: "\<bar>x](d(y)\<cdot>d(z)) = (\<bar>x]y)\<cdot>(\<bar>x]z)" by (smt fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fdia_add1) text {* Interestingly sledgehammer couldn't do this one\<dots> *} lemma fbox_add2: "\<bar>x+y]z = (\<bar>x]z)\<cdot>(\<bar>y]z)" by (metis a_de_morgan_var_3 fbox_fdia fdia_add2 fdia_simp_2) lemma fbox_mult: "\<bar>x\<cdot>y]z = \<bar>x](\<bar>y]z)" by (metis fbox_fdia fbox_fdia_de_morgan_1 fdia_mult) lemma fbox_one: "\<bar>1]x = d(x)" by (metis a_closure fbox_fdia_de_morgan_1 fbox_simp_2 fdia_fbox fdia_one) lemma fbox_one_1: "\<bar>x]1 = 1" by (metis a_one a_zero fbox_fdia fdia_zero) lemma fbox_demodalisation3: "d(y) \<le> \<bar>x]d(z) \<longleftrightarrow> d(y)\<cdot>x \<le> x\<cdot>d(z)" by (metis a_gla antidomain_semiring_domain_def fbox_def kat_2_equiv_opp mult_assoc) text {* Duality did not work here. 
*} lemma fbox_iso: "d(x) \<le> d(y) \<longrightarrow> \<bar>z]x \<le> \<bar>z]y" by (metis a_5 a_antitone a_closure fbox_fdia fdia_iso) lemma fbox_antitone_var: "x \<le> y \<longrightarrow> \<bar>y]p \<le> \<bar>x]p" by (metis a_antitone dia_iso_var fbox_fdia) lemma fbox_subdist_1: "\<bar>x](d(p)\<cdot>d(q)) \<le> \<bar>x]d(p)" by (metis a_antitone a_de_morgan_var_4 fbox_fdia fbox_simp fdia_subdist_1) lemma fbox_subdist_2: "\<bar>x]d(p) \<le>\<bar>x](d(p)+d(q))" by (metis a_antitone a_closure a_de_morgan_var_3 fbox_fdia fbox_simp fdia_subdist_2) lemma fbox_diff_var: "(\<bar>x](d(p)+a(q)))\<cdot>(\<bar>x]d(q)) \<le> \<bar>x]d(p)" by (smt a_antitone a_de_morgan_var_3 a_de_morgan_var_4 fbox_fdia fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def dia_diff_var) lemma fbox_diff: "\<bar>x](d(p)+a(q)) \<le> (\<bar>x]p)+a(\<bar>x]q)" by (smt a_antitone a_de_morgan_var_4 fbox_fdia fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def dia_diff) end class fmodal_kleene_algebra = fmodal_semiring + kleene_algebra begin lemma a_star: "a(x\<^sup>\<star>) = 0" by (metis a_gla a_subdist mult_oner star_plus_one) lemma dom_star: "d(x\<^sup>\<star>) = 1" by (metis a_star a_zero antidomain_semiring_domain_def) lemma fdia_star_unfold: "(\<bar>1\<rangle>z)+\<bar>x\<rangle>(\<bar>x\<^sup>\<star>\<rangle>z) = \<bar>x\<^sup>\<star>\<rangle>z" by (metis fdia_mult fdia_add2 star_unfoldl_eq) lemma fbox_star_unfold: "(\<bar>1]z)\<cdot>\<bar>x](\<bar>x\<^sup>\<star>]z) = \<bar>x\<^sup>\<star>]z" by (smt fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def fdia_star_unfold ) lemma fdia_star_unfold_var: "d(z)+\<bar>x\<rangle>(\<bar>x\<^sup>\<star>\<rangle>z) = \<bar>x\<^sup>\<star>\<rangle>z" by (metis fdia_one fdia_star_unfold) lemma fbox_star_unfold_var: "d(z)\<cdot>\<bar>x](\<bar>x\<^sup>\<star>]z) = \<bar>x\<^sup>\<star>]z" by (metis a_closure a_de_morgan_var_2 antidomain_semiring_domain_def fbox_fdia_de_morgan_1 fbox_simp_2 fdia_star_unfold_var) lemma fdia_star_unfold_var_2: "d(z)+\<bar>x\<rangle>(\<bar>x\<^sup>\<star>\<rangle>d(z)) = \<bar>x\<^sup>\<star>\<rangle>d(z)" by (metis d2 fdia_star_unfold_var fdiamond_def) lemma fbox_star_unfold_var_2: "d(z)\<cdot>\<bar>x](\<bar>x\<^sup>\<star>]d(z)) = \<bar>x\<^sup>\<star>]d(z)" by (metis a_closure a_de_morgan_var_2 antidomain_semiring_domain_def fbox_fdia_de_morgan_1 fbox_simp_2 fdia_star_unfold_var_2) lemma fdia_star_unfoldr: "(\<bar>1\<rangle>z)+(\<bar>x\<^sup>\<star>\<rangle>(\<bar>x\<rangle>z)) = \<bar>x\<^sup>\<star>\<rangle>z" by (metis fdia_mult fdia_one fdia_star_unfold_var star_slide_var) lemma fbox_star_unfoldr: "(\<bar>1]z)\<cdot>(\<bar>x\<^sup>\<star>](\<bar>x]z)) = \<bar>x\<^sup>\<star>]z" by (smt fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def fdia_star_unfoldr) lemma fdia_star_unfoldr_var: "d(z)+(\<bar>x\<^sup>\<star>\<rangle>(\<bar>x\<rangle>z)) = \<bar>x\<^sup>\<star>\<rangle>z" by (metis fdia_mult fdia_star_unfold_var star_slide_var) lemma fbox_star_unfoldr_var: "d(z)\<cdot>(\<bar>x\<^sup>\<star>](\<bar>x]z)) = \<bar>x\<^sup>\<star>]z" by (metis a_closure 
a_de_morgan_var_2 antidomain_semiring_domain_def fbox_fdia_de_morgan_1 fbox_simp_2 fdia_star_unfoldr_var) lemma fdia_star_induct_var: "\<bar>x\<rangle>d(y) \<le> d(y) \<longrightarrow> \<bar>x\<^sup>\<star>\<rangle>d(y) \<le> d(y)" by (metis d2 fdemodalisation3 fdiamond_def star_sim1) lemma fbox_star_induct_var: "d(y) \<le> \<bar>x]d(y) \<longrightarrow> d(y) \<le> \<bar>x\<^sup>\<star>]d(y)" by (metis a_antitone a_closure antidomain_semiring_domain_def fbox_fdia_de_morgan_1 fbox_simp_2 fdia_star_induct_var) lemma fdia_star_induct: "d(z)+(\<bar>x\<rangle>d(y)) \<le> d(y) \<Longrightarrow> \<bar>x\<^sup>\<star>\<rangle>d(z) \<le> d(y)" by (metis fdia_star_induct_var local.add_lub local.fdia_iso local.fdia_simp local.order_trans) lemma fbox_star_induct: "d(y) \<le> d(z)\<cdot>\<bar>x]d(y) \<longrightarrow> d(y) \<le> \<bar>x\<^sup>\<star>]d(z)" by (smt fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def fdia_star_induct) lemma fdia_star_induct_eq: "d(z)+(\<bar>x\<rangle>d(y)) = d(y) \<longrightarrow> \<bar>x\<^sup>\<star>\<rangle>d(z) \<le> d(y)" by (metis fdia_star_induct order_refl) lemma fbox_star_induct_eq: "d(z)\<cdot>(\<bar>x]d(y)) = d(y) \<longrightarrow> d(y) \<le> \<bar>x\<^sup>\<star>]d(z)" by (smt fdia_simp fdia_simp_2 fbox_simp fbox_simp_2 a_closure a_5 dom_add_closed dom_mult_closed a_antitone a_de_morgan_var_1 a_de_morgan_var_2 a_de_morgan_var_3 a_de_morgan_var_4 fdia_fbox fbox_fdia a_one a_zero fbox_fdia_de_morgan_1 fdia_fbox_de_morgan_2 antidomain_semiring_domain_def fdia_star_induct_eq) lemma fbox_export_1: "a(p)+\<bar>x]d(p) = \<bar>d(p)\<cdot>x]d(p)" by (metis a_6 a_closure antidomain_semiring_domain_def fbox_fdia fdiamond_def mult_assoc) lemma fbox_export_2: "d(p)+\<bar>x]d(p) = \<bar>a(p)\<cdot>x]d(p)" by (metis a_closure a_export antidomain_semiring_domain_def fbox_fdia fdiamond_def mult_assoc) end end (* lemma a_a2_0: "a(x)\<cdot>a(a(x)) = 0" by (metis a_comp_2 antidomain_semiring_domain_def) lemma fbox_test: "a(p)\<cdot>\<bar>a(p)]a(q) = a(p)\<cdot>a(q)" by (metis a_export fbox_dom fbox_mult antidomain_semiring_domain_def mult_compl_intro pre_def) lemma d_restrict: "(d(x)\<cdot>y \<le> z) = (d(x)\<cdot>y \<le> d(x)\<cdot>z)" by (metis mult_isol dom_el_idemp mult_assoc dom_subid mult_isor mult_onel order_trans) *)
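As a hedged aside, not part of the theory above: the intended model behind the domain-semiring axiom x + d(x)·x = d(x)·x is the algebra of binary relations, where d maps a relation to the subidentity on its domain. The following standalone Isabelle sketch (the theory name Domain_Rel_Sketch and the constant rel_dom are my own, not from the source) checks the relational counterpart d(R) O R = R of that law.

theory Domain_Rel_Sketch imports Main begin

(* Hypothetical helper: the domain of a relation, viewed as a subidentity. *)
definition rel_dom :: "('a \<times> 'a) set \<Rightarrow> ('a \<times> 'a) set" where
  "rel_dom R = {(x, x) | x. \<exists>y. (x, y) \<in> R}"

(* The relational counterpart of the domain semiring law x = d(x)\<cdot>x. *)
lemma rel_dom_restrict: "rel_dom R O R = R"
  by (auto simp: rel_dom_def relcomp_unfold)

end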
*----------------------------------------------------------------------* integer function idx_oplist(opname,ops,nops) *----------------------------------------------------------------------* * given an operator name, search ops(nops) and return index of * of corresponding operator *----------------------------------------------------------------------* implicit none include 'stdunit.h' include 'def_operator.h' integer, parameter :: & ntest = 00 character, intent(in) :: & opname*(*) integer, intent(in) :: & nops type(operator), intent(in) :: & ops(nops) integer :: & iop if (ntest.ge.100) then write(lulog,*) '------------------' write(lulog,*) 'this is idx_oplist' write(lulog,*) '------------------' write(lulog,*) ' looking for: "',trim(opname),'"' end if idx_oplist = -1 do iop = 1, nops if (trim(opname).eq.trim(ops(iop)%name)) then idx_oplist = iop exit end if end do if (ntest.ge.100) then write(lulog,*) 'result: ',idx_oplist end if return end
Require Import Coq.Strings.Ascii. Require Import Coq.Lists.List. Open Scope char_scope. Import ListNotations. Require Import Turing.Util. Require Import Turing.Lang. (* Length: *) Goal length ["c"; "a"; "r"] = 3. Proof. reflexivity. Qed. (* Concatenation *) Goal ["c"] ++ ["a"; "r"] = ["c"; "a"; "r"]. Proof. reflexivity. Qed. (* Power *) Goal pow ["c"; "a"; "r"] 3 = ["c"; "a"; "r"; "c"; "a"; "r"; "c"; "a"; "r"]. Proof. reflexivity. Qed. Goal pow ["c"; "a"; "r"] 1 = ["c"; "a"; "r"]. Proof. reflexivity. Qed. Goal pow ["c"; "a"; "r"] 0 = []. Proof. reflexivity. Qed. Definition L1 (w:word) := w = ["c"; "a"; "r"]. Goal In ["c"; "a"; "r"] L1. Proof. unfold L1. unfold In. reflexivity. Qed. Goal ~ In [] L1. Proof. unfold In, L1. intros N. inversion N. Qed. Lemma l1_inv: forall w, In w L1 -> w = ["c"; "a"; "r"]. Proof. unfold In, L1. intros. assumption. Qed. Definition L2 w := Char "a" w \/ Char "e" w \/ Char "i" w \/ Char "o" w \/ Char "u" w. Goal In ["i"] L2. Proof. unfold In, L2. unfold Char. right. right. left. reflexivity. Qed. Lemma aa_not_in_vowel: ~ In ["a"; "a"] L2. Proof. unfold In, L2. intros N. destruct N as [N|[N|[N|[N|N]]]]; inversion N. Qed. Goal In [] Nil. Proof. reflexivity. Qed. (* Coercion Char: ascii >-> language. *) Import LangNotations. Print Union. Infix "U" := Union. Goal forall (w:word), In w L1 -> In w (L1 U L2). Proof. unfold In. intros. unfold Union. left. assumption. Qed. Definition App L1 L2 w := exists w1 w2, In w1 L1 /\ In w2 L2 /\ w = w1 ++ w2. Definition L1' := App (Char "c") (App (Char "a") (Char "r")). Goal In ["c"; "a"; "r"] L1'. Proof. intros. Qed.
Midway through the poem, there is a split between the two actions of the poem: the first attempts to identify with the nightingale and its song, and the second discusses the convergence of the past with the future while experiencing the present. This second theme is reminiscent of Keats's view of human progression through the Mansion of Many Apartments and how man develops from experiencing and wanting only pleasure to understanding truth as a mixture of both pleasure and pain. The Elysian fields and the nightingale's song in the first half of the poem represent the pleasurable moments that overwhelm the individual like a drug. However, the experience does not last forever, and the body is left desiring it until the narrator feels helpless without the pleasure. Instead of embracing the coming truth, the narrator clings to poetry to hide from the loss of pleasure. Poetry does not bring about the pleasure that the narrator originally asks for, but it does liberate him from his desire for only pleasure.
%%%%%%%%%%%%%%%%%%%% SHOW EXTRINSIC RESULTS %%%%%%%%%%%%%%%%%%%%%%%% if ~exist('show_camera'), show_camera = 1; end; if ~exist('n_ima')|~exist('fc'), fprintf(1,'No calibration data available.\n'); return; end; check_active_images; if ~exist(['omc_' num2str(ind_active(1))]), fprintf(1,'No calibration data available.\n'); return; end; %if ~exist('no_grid'), no_grid = 0; %end; if ~exist(['n_sq_x_' num2str(ind_active(1))]), no_grid = 1; end; if ~exist('alpha_c'), alpha_c = 0; end; if 0, err_std = std(ex'); fprintf(1,'\n\nCalibration results without principal point estimation:\n\n'); fprintf(1,'Focal Length: fc = [ %3.5f %3.5f]\n',fc); fprintf(1,'Principal point: cc = [ %3.5f %3.5f]\n',cc); fprintf(1,'Distortion: kc = [ %3.5f %3.5f %3.5f %3.5f]\n',kc); fprintf(1,'Pixel error: err = [ %3.5f %3.5f]\n\n',err_std); end; % Color code for each image: colors = 'brgkcm'; %%% Show the extrinsic parameters if ~exist('dX'), eval(['dX = norm(Tc_' num2str(ind_active(1)) ')/10;']); dY = dX; end; IP = 2*dX*[1 -alpha_c 0;0 1 0;0 0 1]*[1/fc(1) 0 0;0 1/fc(2) 0;0 0 1]*[1 0 -cc(1);0 1 -cc(2);0 0 1]*[0 nx-1 nx-1 0 0 ; 0 0 ny-1 ny-1 0;1 1 1 1 1]; BASE = 2*(.9)*dX*([0 1 0 0 0 0;0 0 0 1 0 0;0 0 0 0 0 1]); IP = reshape([IP;BASE(:,1)*ones(1,5);IP],3,15); POS = [[6*dX;0;0] [0;6*dX;0] [-dX;0;5*dX] [-dX;-dX;-dX] [0;0;-dX]]; if ishandle(4), figure(4); [a,b] = view; else figure(4); a = 50; b = 20; end; figure(4); clf; hold on; for kk = 1:n_ima, if active_images(kk); if exist(['X_' num2str(kk)]) & exist(['omc_' num2str(kk)]), eval(['XX_kk = X_' num2str(kk) ';']); if ~isnan(XX_kk(1,1)) eval(['omc_kk = omc_' num2str(kk) ';']); eval(['Tc_kk = Tc_' num2str(kk) ';']); N_kk = size(XX_kk,2); if ~exist(['n_sq_x_' num2str(kk)]), no_grid = 1; else eval(['n_sq_x = n_sq_x_' num2str(kk) ';']); if isnan(n_sq_x(1)), no_grid = 1; end; end; if ~no_grid, eval(['n_sq_x = n_sq_x_' num2str(kk) ';']); eval(['n_sq_y = n_sq_y_' num2str(kk) ';']); if (N_kk ~= ((n_sq_x+1)*(n_sq_y+1))), no_grid = 1; end; end; if ~isnan(omc_kk(1,1)), R_kk = rodrigues(omc_kk); BASEk = R_kk'*(BASE - Tc_kk * ones(1,6)); IPk = R_kk'*(IP - Tc_kk * ones(1,15)); POSk = R_kk'*(POS - Tc_kk * ones(1,5)); YY_kk = XX_kk; if ~no_grid, YYx = zeros(n_sq_x+1,n_sq_y+1); YYy = zeros(n_sq_x+1,n_sq_y+1); YYz = zeros(n_sq_x+1,n_sq_y+1); YYx(:) = YY_kk(1,:); YYy(:) = YY_kk(2,:); YYz(:) = YY_kk(3,:); figure(4); if show_camera, p1 = struct('vertices',IPk','faces',[1 4 2;2 4 7;2 7 10;2 10 1]); h1 = patch(p1); set(h1,'facecolor',[52 217 160]/255,'EdgeColor', 'r'); p2 = struct('vertices',IPk','faces',[1 10 7;7 4 1]); h2 = patch(p2); %set(h2,'facecolor',[236 171 76]/255,'EdgeColor', 'none'); set(h2,'facecolor',[247 239 7]/255,'EdgeColor', 'none'); plot3(BASEk(1,:),BASEk(2,:),BASEk(3,:),'b-','linewidth',1'); plot3(IPk(1,:),IPk(2,:),IPk(3,:),'r-','linewidth',1); text(POSk(1,5),POSk(2,5),POSk(3,5),num2str(kk),'fontsize',10,'color','k','FontWeight','bold'); end; hhh= mesh(YYx,YYy,YYz); set(hhh,'edgecolor',colors(rem(kk-1,6)+1),'linewidth',1); %,'facecolor','none'); else figure(4); if show_camera, p1 = struct('vertices',IPk','faces',[1 4 2;2 4 7;2 7 10;2 10 1]); h1 = patch(p1); set(h1,'facecolor',[52 217 160]/255,'EdgeColor', 'r'); p2 = struct('vertices',IPk','faces',[1 10 7;7 4 1]); h2 = patch(p2); %set(h2,'facecolor',[236 171 76]/255,'EdgeColor', 'none'); set(h2,'facecolor',[247 239 7]/255,'EdgeColor', 'none'); plot3(BASEk(1,:),BASEk(2,:),BASEk(3,:),'b-','linewidth',1'); plot3(IPk(1,:),IPk(2,:),IPk(3,:),'r-','linewidth',1); hww = 
text(POSk(1,5),POSk(2,5),POSk(3,5),num2str(kk),'fontsize',10,'color','k','FontWeight','bold'); end; plot3(YY_kk(1,:),YY_kk(2,:),YY_kk(3,:),['.' colors(rem(kk-1,6)+1)]); end; end; end; end; end; end; figure(4);rotate3d on; axis('equal'); title('Extrinsic parameters (world-centered)'); %view(60,30); xlabel('X_{world}') ylabel('Y_{world}') zlabel('Z_{world}') view(a,b); axis vis3d; axis tight; grid on; plot3(3*dX*[1 0 0 0 0],3*dX*[0 0 1 0 0],3*dX*[0 0 0 0 1],'r-','linewidth',3); hold off; set(4,'color',[1 1 1]); set(4,'Name','3D','NumberTitle','off'); %hh = axis; %hh(5) = 0; %axis(hh); %fprintf(1,'To generate the complete movie associated to the optimization loop, try: check_convergence;\n'); if exist('h_switch2')==1, if ishandle(h_switch2), delete(h_switch2); end; end; if n_ima ~= 0, if show_camera, h_switch2 = uicontrol('Parent',4,'Units','normalized', 'Callback','show_camera=0;ext_calib2;', 'Position',[1-.30 0.04 .30 .04],'String','Remove camera reference frames','fontsize',8,'fontname','clean','Tag','Pushbutton1'); else h_switch2 = uicontrol('Parent',4,'Units','normalized', 'Callback','show_camera=1;ext_calib2;', 'Position',[1-.30 0.04 .30 .04],'String','Add camera reference frames','fontsize',8,'fontname','clean','Tag','Pushbutton1'); end; end; if exist('h_switch')==1, if ishandle(h_switch), delete(h_switch); end; end; h_switch = uicontrol('Parent',4,'Units','normalized', 'Callback','ext_calib', 'Position',[1-.30 0 .30 .04],'String','Switch to camera-centered view','fontsize',8,'fontname','clean','Tag','Pushbutton1'); figure(4); rotate3d on;
The function scaleR is the same as the function scale.
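The sentence above reads like a remark from Isabelle/HOL's real-vector-space development. A small, hedged illustration follows (the theory name Scale_Sketch and the concrete lemmas are mine, not from the source): on the type real itself, scaleR behaves as ordinary multiplication, and scaling by 1 is the identity.

theory Scale_Sketch imports Complex_Main begin

(* Assumption: scaleR here is Isabelle/HOL's scalar multiplication from
   Real_Vector_Spaces; on the reals it coincides with multiplication. *)
lemma "scaleR (2::real) (3::real) = (6::real)"
  by simp

(* Scaling by 1 is the identity in any real vector space. *)
lemma "scaleR (1::real) (v::'a::real_vector) = v"
  by simp

end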
(** SimpleSepTheory.v ** ** A simple implementation of a separation theory based on functional ** extensionality. **) Require Import Coq.Classes.RelationClasses. Require Import MirrorShard.SepTheory. Set Implicit Arguments. Set Strict Implicit. Module SimpleSepLog_Kernel <: SepTheory_Kernel. Require Import FunctionalExtensionality. Definition heap := nat -> option nat. Definition hprop := heap -> Prop. Definition himp (l r : hprop) : Prop := forall h, l h -> r h. Global Instance Refl_himp : Reflexive himp. Proof. repeat red; intuition. Qed. Global Instance Trans_himp : Transitive himp. Proof. repeat red; intuition. Qed. Definition inj (p : Prop) : hprop := fun m => p /\ forall p, m p = None. Definition emp : hprop := inj True. Definition split (h hl hr : heap) : Prop := forall p, (hl p = None \/ hr p = None) /\ h p = match hl p with | None => hr p | Some x => Some x end. Definition star (l r : hprop) : hprop := fun h => exists h1 h2, split h h1 h2 /\ l h1 /\ r h2. Definition ex (T : Type) (p : T -> hprop) : hprop := fun h => exists x, p x h. Ltac doIt := unfold himp, star, emp, split, ex, inj; intros; repeat match goal with | [ H : exists x, _ |- _ ] => destruct H | [ H : _ /\ _ |- _ ] => destruct H end. Theorem himp_star_comm : forall P Q : hprop, himp (star P Q) (star Q P). Proof. doIt. do 2 eexists; doIt. split. 2: eauto. intro. specialize (H p). intuition; rewrite H in *. destruct (x0 p); auto. destruct (x p); auto. Qed. Theorem himp_star_assoc : forall P Q R : hprop, himp (star (star P Q) R) (star P (star Q R)). Proof. doIt; intuition; doIt. exists x1. exists (fun p => match x2 p with | None => x0 p | Some x => Some x end); intuition. specialize (H p); specialize (H0 p); intuition; doIt. rewrite H0 in *. rewrite H4 in *. destruct (x1 p); eauto. rewrite H0 in *; rewrite H4 in *. auto. specialize (H p); specialize (H0 p); intuition; rewrite H0 in *; rewrite H4 in *; eauto. rewrite <- H6 in *; auto. destruct (x1 p); auto; congruence. rewrite H6 in *; auto. destruct (x1 p); auto. destruct (x p); eauto; try congruence. rewrite H6 in *; auto. exists x2. exists x0. intuition. specialize (H p); specialize (H0 p); intuition; doIt. rewrite H0 in *; rewrite H4 in *; auto. Qed. Theorem himp_star_emp_p : forall P : hprop, himp (star emp P) P. Proof. doIt. cutrewrite (h = x0); auto. eapply functional_extensionality; intros. specialize (H x1). specialize (H2 x1). rewrite H2 in *. intuition. Qed. Theorem himp_star_emp_c : forall P : hprop, himp P (star emp P). Proof. doIt. exists (fun _ => None). exists h. intuition. Qed. Theorem himp_star_frame : forall P Q R S, himp P Q -> himp R S -> himp (star P R) (star Q S). Proof. doIt. exists x; exists x0. intuition. Qed. Theorem himp_star_pure_p : forall (P Q : hprop) (F : Prop), himp (star (inj F) P) Q -> F -> himp P Q. Proof. doIt. eapply H. exists (fun _ => None). exists h. intuition. Qed. Theorem himp_star_pure_c : forall (P Q : hprop) (F : Prop), (F -> himp P Q) -> himp (star (inj F) P) Q. Proof. doIt. eapply H; auto. cutrewrite (h = x0); auto. eapply functional_extensionality; intros. specialize (H3 x1). specialize (H0 x1). rewrite H3 in *. intuition. Qed. Theorem himp_star_pure_cc : forall (P Q : hprop) (p : Prop), p -> himp P Q -> himp P (star (inj p) Q). Proof. doIt. eapply H0 in H1; clear H0. exists (fun _ => None). exists h. intuition. Qed. Theorem himp_ex_p : forall (T : Type) (P : T -> hprop) (Q : hprop), (forall v : T, himp (P v) Q) -> himp (ex P) Q. Proof. doIt. eauto. Qed. 
Theorem himp_ex_c : forall (T : Type) (P : T -> hprop) (Q : hprop), (exists v : T, himp Q (P v)) -> himp Q (ex P). Proof. doIt; eauto. Qed. Theorem himp_ex_star : forall T (P : T -> _) Q, himp (star (ex P) Q) (ex (fun x => star (P x) Q)). Proof. doIt. exists x1. exists x. exists x0. intuition. Qed. Theorem himp_star_ex : forall T (P : T -> _) Q, himp (ex (fun x => star (P x) Q)) (star (ex P) Q). Proof. doIt. exists x0; exists x1. intuition eauto. Qed. End SimpleSepLog_Kernel. Module SimpleSepLog := SepTheory_From_Kernel SimpleSepLog_Kernel.
/* * testCSP.cpp * @brief develop code for CSP solver * @date Feb 5, 2012 * @author Frank Dellaert */ #include <gtsam_unstable/discrete/CSP.h> #include <gtsam_unstable/discrete/Domain.h> #include <boost/assign/std/map.hpp> using boost::assign::insert; #include <CppUnitLite/TestHarness.h> #include <iostream> #include <fstream> using namespace std; using namespace gtsam; /* ************************************************************************* */ TEST_UNSAFE( BinaryAllDif, allInOne) { // Create keys and ordering size_t nrColors = 2; // DiscreteKey ID("Idaho", nrColors), UT("Utah", nrColors), AZ("Arizona", nrColors); DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); // Check construction and conversion BinaryAllDiff c1(ID, UT); DecisionTreeFactor f1(ID & UT, "0 1 1 0"); EXPECT(assert_equal(f1,c1.toDecisionTreeFactor())); // Check construction and conversion BinaryAllDiff c2(UT, AZ); DecisionTreeFactor f2(UT & AZ, "0 1 1 0"); EXPECT(assert_equal(f2,c2.toDecisionTreeFactor())); DecisionTreeFactor f3 = f1*f2; EXPECT(assert_equal(f3,c1*f2)); EXPECT(assert_equal(f3,c2*f1)); } /* ************************************************************************* */ TEST_UNSAFE( CSP, allInOne) { // Create keys and ordering size_t nrColors = 2; DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); // Create the CSP CSP csp; csp.addAllDiff(ID,UT); csp.addAllDiff(UT,AZ); // Check an invalid combination, with ID==UT==AZ all same color DiscreteFactor::Values invalid; invalid[ID.first] = 0; invalid[UT.first] = 0; invalid[AZ.first] = 0; EXPECT_DOUBLES_EQUAL(0, csp(invalid), 1e-9); // Check a valid combination DiscreteFactor::Values valid; valid[ID.first] = 0; valid[UT.first] = 1; valid[AZ.first] = 0; EXPECT_DOUBLES_EQUAL(1, csp(valid), 1e-9); // Just for fun, create the product and check it DecisionTreeFactor product = csp.product(); // product.dot("product"); DecisionTreeFactor expectedProduct(ID & AZ & UT, "0 1 0 0 0 0 1 0"); EXPECT(assert_equal(expectedProduct,product)); // Solve CSP::sharedValues mpe = csp.optimalAssignment(); CSP::Values expected; insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1); EXPECT(assert_equal(expected,*mpe)); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); } /* ************************************************************************* */ TEST_UNSAFE( CSP, WesternUS) { // Create keys size_t nrColors = 4; DiscreteKey // Create ordering according to example in ND-CSP.lyx WA(0, nrColors), OR(3, nrColors), CA(1, nrColors),NV(2, nrColors), ID(8, nrColors), UT(9, nrColors), AZ(10, nrColors), MT(4, nrColors), WY(5, nrColors), CO(7, nrColors), NM(6, nrColors); // Create the CSP CSP csp; csp.addAllDiff(WA,ID); csp.addAllDiff(WA,OR); csp.addAllDiff(OR,ID); csp.addAllDiff(OR,CA); csp.addAllDiff(OR,NV); csp.addAllDiff(CA,NV); csp.addAllDiff(CA,AZ); csp.addAllDiff(ID,MT); csp.addAllDiff(ID,WY); csp.addAllDiff(ID,UT); csp.addAllDiff(ID,NV); csp.addAllDiff(NV,UT); csp.addAllDiff(NV,AZ); csp.addAllDiff(UT,WY); csp.addAllDiff(UT,CO); csp.addAllDiff(UT,NM); csp.addAllDiff(UT,AZ); csp.addAllDiff(AZ,CO); csp.addAllDiff(AZ,NM); csp.addAllDiff(MT,WY); csp.addAllDiff(WY,CO); csp.addAllDiff(CO,NM); // Solve Ordering ordering; ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7),Key(8),Key(9),Key(10); CSP::sharedValues mpe = csp.optimalAssignment(ordering); // GTSAM_PRINT(*mpe); CSP::Values expected; insert(expected) (WA.first,1)(CA.first,1)(NV.first,3)(OR.first,0) (MT.first,1)(WY.first,0)(NM.first,3)(CO.first,2) (ID.first,2)(UT.first,1)(AZ.first,0); // TODO: Fix me! 
mpe result seems to be right. (See the printing) // It has the same prob as the expected solution. // Is mpe another solution, or the expected solution is unique??? EXPECT(assert_equal(expected,*mpe)); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); // Write out the dual graph for hmetis #ifdef DUAL VariableIndexOrdered index(csp); index.print("index"); ofstream os("/Users/dellaert/src/hmetis-1.5-osx-i686/US-West-dual.txt"); index.outputMetisFormat(os); #endif } /* ************************************************************************* */ TEST_UNSAFE( CSP, AllDiff) { // Create keys and ordering size_t nrColors = 3; DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); // Create the CSP CSP csp; vector<DiscreteKey> dkeys; dkeys += ID,UT,AZ; csp.addAllDiff(dkeys); csp.addSingleValue(AZ,2); // GTSAM_PRINT(csp); // Check construction and conversion SingleValue s(AZ,2); DecisionTreeFactor f1(AZ,"0 0 1"); EXPECT(assert_equal(f1,s.toDecisionTreeFactor())); // Check construction and conversion AllDiff alldiff(dkeys); DecisionTreeFactor actual = alldiff.toDecisionTreeFactor(); // GTSAM_PRINT(actual); // actual.dot("actual"); DecisionTreeFactor f2(ID & AZ & UT, "0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0"); EXPECT(assert_equal(f2,actual)); // Check an invalid combination, with ID==UT==AZ all same color DiscreteFactor::Values invalid; invalid[ID.first] = 0; invalid[UT.first] = 1; invalid[AZ.first] = 0; EXPECT_DOUBLES_EQUAL(0, csp(invalid), 1e-9); // Check a valid combination DiscreteFactor::Values valid; valid[ID.first] = 0; valid[UT.first] = 1; valid[AZ.first] = 2; EXPECT_DOUBLES_EQUAL(1, csp(valid), 1e-9); // Solve CSP::sharedValues mpe = csp.optimalAssignment(); CSP::Values expected; insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2); EXPECT(assert_equal(expected,*mpe)); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); // Arc-consistency vector<Domain> domains; domains += Domain(ID), Domain(AZ), Domain(UT); SingleValue singleValue(AZ,2); EXPECT(singleValue.ensureArcConsistency(1,domains)); EXPECT(alldiff.ensureArcConsistency(0,domains)); EXPECT(!alldiff.ensureArcConsistency(1,domains)); EXPECT(alldiff.ensureArcConsistency(2,domains)); LONGS_EQUAL(2,domains[0].nrValues()); LONGS_EQUAL(1,domains[1].nrValues()); LONGS_EQUAL(2,domains[2].nrValues()); // Parial application, version 1 DiscreteFactor::Values known; known[AZ.first] = 2; DiscreteFactor::shared_ptr reduced1 = alldiff.partiallyApply(known); DecisionTreeFactor f3(ID & UT, "0 1 1 1 0 1 1 1 0"); EXPECT(assert_equal(f3,reduced1->toDecisionTreeFactor())); DiscreteFactor::shared_ptr reduced2 = singleValue.partiallyApply(known); DecisionTreeFactor f4(AZ, "0 0 1"); EXPECT(assert_equal(f4,reduced2->toDecisionTreeFactor())); // Parial application, version 2 DiscreteFactor::shared_ptr reduced3 = alldiff.partiallyApply(domains); EXPECT(assert_equal(f3,reduced3->toDecisionTreeFactor())); DiscreteFactor::shared_ptr reduced4 = singleValue.partiallyApply(domains); EXPECT(assert_equal(f4,reduced4->toDecisionTreeFactor())); // full arc-consistency test csp.runArcConsistency(nrColors); } /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */
theory Induction imports Main begin section {* induction *} lemma induction: "\<forall> n::nat. n + 0 = n" using [[simp_trace]] apply (simp) done end
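A hedged companion sketch (theory and lemma names are mine): the sample above is titled induction but discharges its goal with a single simp call; the same statement can also be proved by an explicit structural induction on n.

theory Induction_Sketch imports Main begin

(* Same goal as above, but proved by structural induction instead of simp. *)
lemma all_add_zero: "\<forall>n::nat. n + 0 = n"
proof
  fix n :: nat
  show "n + 0 = n"
    by (induct n) simp_all
qed

end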
(* ********************************************************************* Theory Computations.thy is part of a framework for modelling, verification and transformation of concurrent imperative programs. Copyright (c) 2021 M. Bortin The framework is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For more details see the license agreement (LICENSE) you should have received along with the framework. ******************************************************************* *) theory Computations imports SmallSteps begin text "A Boolean value amends configurations to indicate what kind of transition (i.e. env. or program) leads to it. For instance, if (cf2, b2) follows (cf1, b1) then cf1 -p-> cf2 when b2 is True and cf1 -e-> cf2 otherwise. " definition stepR :: "(nat \<Rightarrow> 's LA) \<Rightarrow> (('s config \<times> bool) \<times> ('s config \<times> bool)) set" where "stepR \<rho> = {(((p, s), tk), ((p', t), tk')) | p s tk p' t tk'. if tk' then \<rho> \<turnstile> (p, s) -p\<rightarrow> (p', t) else \<turnstile> (p, s) -e\<rightarrow> (p', t)}" lemma stepR_D1 : "(((p, s), tk), ((p', t), True)) \<in> stepR \<rho> \<Longrightarrow> \<rho> \<turnstile> (p, s) -p\<rightarrow> (p', t)" by(simp add: stepR_def, fastforce) lemma stepR_D2 : "(((p, s), tk), ((p', t), False)) \<in> stepR \<rho> \<Longrightarrow> p = p'" by(simp add: stepR_def, fastforce) lemma stepR_exch : "(cf, cf') \<in> stepR \<rho> \<Longrightarrow> fst cf = fst cf'' \<Longrightarrow> (cf'', cf') \<in> stepR \<rho>" by(case_tac cf, case_tac cf', case_tac cf'', clarsimp simp: stepR_def) section "Infinite computations" text \<open>An infinite computation is an infinite sequence of tuples (config, bool) successively related by @{term "stepR"}.\<close> definition iCOMP :: "(nat \<Rightarrow> 's LA) \<Rightarrow> (nat \<Rightarrow> 's config \<times> bool) set" where "iCOMP \<rho> = {sq. \<forall>i. (sq i, sq(i+1)) \<in> stepR \<rho>}" lemma iCOMP_D : "sq \<in> iCOMP \<rho> \<Longrightarrow> (sq i, sq(i+1)) \<in> stepR \<rho>" by(simp add: iCOMP_def) lemma iCOMP_jumpfree : "sq \<in> iCOMP \<rho> \<Longrightarrow> jumpfree(progOf(sq j)) \<Longrightarrow> \<forall>i>j. jumpfree(progOf(sq i))" apply(rule allI) apply(induct_tac i, simp) apply clarify apply(subgoal_tac "jumpfree (progOf(sq n))") apply(clarsimp simp: iCOMP_def) apply(drule_tac x=n in spec) apply(case_tac "sq n") apply(case_tac "sq(n+1)", clarsimp) apply(rename_tac tk) apply(case_tac tk, simp) apply(drule stepR_D1) apply(erule jumpfree_pstep, assumption) apply simp apply(drule stepR_D2, simp) apply(case_tac "j=n", simp_all) done lemma iCOMP_Skip : "sq \<in> iCOMP \<rho> \<Longrightarrow> progOf(sq j) = Skip \<Longrightarrow> \<forall>i>j. progOf(sq i) = Skip \<and> \<not> snd(sq i)" apply(rule allI) apply(induct_tac i, simp) apply clarify apply(subgoal_tac "progOf(sq n) = Skip") apply(clarsimp simp: iCOMP_def) apply(drule_tac x=n in spec) apply(case_tac "sq n") apply(case_tac "sq(n+1)", clarsimp) apply(rename_tac tk) apply(case_tac tk, simp) apply(drule stepR_D1) apply(erule Skip_pstep) apply simp apply(drule stepR_D2, simp) apply(case_tac "j=n", simp_all) done lemma iCOMP_Skip' : "sq \<in> iCOMP \<rho> \<Longrightarrow> progOf(sq j) = Skip \<Longrightarrow> \<forall>i\<ge>j. progOf(sq i) = Skip" apply(drule iCOMP_Skip, assumption) apply clarsimp apply(case_tac "i=j", simp) by fastforce lemma iCOMP_estepsG : "sq \<in> iCOMP \<rho> \<Longrightarrow> \<forall>k<j. 
i < k \<longrightarrow> progOf(sq(k-1)) = progOf(sq i) \<longrightarrow> \<not>tkOf(sq k) \<Longrightarrow> \<forall>k<j. i \<le> k \<longrightarrow> progOf(sq k) = progOf(sq i)" apply(rule allI) apply(induct_tac k, simp) apply clarsimp apply(case_tac "i=n+1", clarsimp) apply(drule mp, simp) apply(drule spec, drule mp, assumption) apply(drule mp, simp) apply(drule mp, simp) apply(erule subst) apply(case_tac "sq n") apply(case_tac "sq(n+1)", clarsimp simp: iCOMP_def) apply(drule_tac x=n in spec, simp) apply(drule stepR_D2) by(erule sym) lemma iCOMP_esteps : "sq \<in> iCOMP \<rho> \<Longrightarrow> \<forall>k<j. i < k \<longrightarrow> \<not>tkOf(sq k) \<Longrightarrow> \<forall>k<j. i \<le> k \<longrightarrow> progOf(sq k) = progOf(sq i)" by(drule_tac i=i and j=j in iCOMP_estepsG, simp, assumption) fun fprefix :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a list" where "fprefix sq 0 = [sq 0]" | "fprefix sq (Suc n) = fprefix sq n @ [sq (Suc n)]" lemma fprefix_length : "length(fprefix sq n) = n + 1" by(induct n, simp_all) lemma fprefix_nth : "i \<le> n \<Longrightarrow> (fprefix sq n)!i = sq i" apply(induct n, simp_all) apply(case_tac "i = Suc n", simp) apply(subst nth_append, simp add: fprefix_length) apply simp apply(subst nth_append, simp add: fprefix_length) done definition "isuffix sq d = (\<lambda>(i::nat). sq (i + d))" lemma isuffix_add : "isuffix(isuffix sq d) d' = isuffix sq (d' + d)" apply(simp add: isuffix_def, rule ext) apply(subst add.assoc, rule refl) done lemma iCOMP_isuffix : "sq \<in> iCOMP \<rho> \<Longrightarrow> isuffix sq d \<in> iCOMP \<rho>" by(simp add: iCOMP_def isuffix_def) text "Relating consecutive computation steps." definition cstep_cond :: "bool \<Rightarrow> 's staterel \<Rightarrow> 's config \<times> bool \<Rightarrow> 's config \<times> bool \<Rightarrow> bool" where "cstep_cond K R = (\<lambda>((p, s), tk) ((p', t), tk'). tk' = K \<longrightarrow> (s, t) \<in> R)" lemma cstep_cond_D : "cstep_cond K R cf cf' \<Longrightarrow> cf = ((p, s), tk) \<Longrightarrow> cf' = ((p', t), K) \<Longrightarrow> (s, t) \<in> R" by(simp add: cstep_cond_def) lemma not_cstep_cond_D : "\<not> cstep_cond K R cf cf' \<Longrightarrow> \<exists>p s tk p' t. cf = ((p, s), tk) \<and> cf' = ((p', t), K) \<and> (s, t) \<notin> R" by(clarsimp simp add: cstep_cond_def) lemma cstep_cond_mono : "cstep_cond K R cf cf' \<Longrightarrow> R \<subseteq> R' \<Longrightarrow> cstep_cond K R' cf cf'" by(clarsimp simp add: cstep_cond_def, force) definition EnvCond_i :: "(nat \<Rightarrow> 's LA) \<Rightarrow> 's staterel \<Rightarrow> (nat \<Rightarrow> 's config \<times> bool) set" where "EnvCond_i \<rho> R = {sq |sq. sq \<in> iCOMP \<rho> \<and> (\<forall>i. 0 < i \<longrightarrow> cstep_cond False R (sq (i-1)) (sq i))}" definition ProgCond_i :: "(nat \<Rightarrow> 's LA) \<Rightarrow> 's staterel \<Rightarrow> (nat \<Rightarrow> 's config \<times> bool) set" where "ProgCond_i \<rho> R = {sq |sq. sq \<in> iCOMP \<rho> \<and> (\<forall>i. 0 < i \<longrightarrow> cstep_cond True R (sq (i-1)) (sq i))}" definition "InitCond_i \<rho> P = {sq |sq c s tk. sq \<in> iCOMP \<rho> \<and> sq 0 = ((c, s), tk) \<and> s \<in> P}" definition "TermCond_i \<rho> Q = {sq |sq. sq \<in> iCOMP \<rho> \<and> (\<forall>j. progOf(sq j) = Skip \<longrightarrow> (\<exists>i s tk. 
i \<le> j \<and> sq i = ((Skip, s), tk) \<and> s \<in> Q))}" lemma EnvCond_i_suffix : "sq \<in> EnvCond_i \<rho> R \<Longrightarrow> isuffix sq d \<in> EnvCond_i \<rho> R" apply(clarsimp simp: EnvCond_i_def) apply(rule conjI) apply(erule iCOMP_isuffix) apply(clarsimp simp: isuffix_def) done lemma ProgCond_i_suffix : "sq \<in> ProgCond_i \<rho> R \<Longrightarrow> isuffix sq d \<in> ProgCond_i \<rho> R" apply(clarsimp simp: ProgCond_i_def) apply(rule conjI) apply(erule iCOMP_isuffix) apply(clarsimp simp: isuffix_def) done section "Finite computations" definition COMP :: "(nat \<Rightarrow> 's LA) \<Rightarrow> ('s config \<times> bool) list set" where "COMP \<rho> = {sq. sq \<noteq> [] \<and> (\<forall>i<length sq - 1. (sq!i, sq!(i+1)) \<in> stepR \<rho>)}" lemma COMP_eq : "(sq \<in> COMP \<rho>) = (sq \<noteq> [] \<and> (\<forall>i<length sq - 1. (sq!i, sq!(i+1)) \<in> stepR \<rho>))" by(simp add: COMP_def) lemma COMP_fprefix : "COMP \<rho> = {fprefix sq n |sq n. sq \<in> iCOMP \<rho>}" apply(rule set_eqI) apply(rename_tac sq) apply(subst COMP_eq) apply(rule iffI, clarsimp) apply(rule_tac x="\<lambda>i. if i<length sq then sq!i else (fst(sq!(length sq - 1)), False)" in exI) apply(rule conjI) apply(rule_tac x="length sq - 1" in exI) apply(rule nth_equalityI, simp add: fprefix_length, simp add: fprefix_nth) apply(clarsimp simp: iCOMP_def) apply(case_tac "sq!(length sq - 1)") apply(rule conjI, clarsimp) apply(drule leI) apply(subgoal_tac "i = length sq - 1") apply(subst stepR_def, clarsimp, rule_tac x=False in exI, simp) apply fastforce apply(subst stepR_def, clarsimp, rule_tac x=False in exI, clarsimp+) apply(rule conjI, clarsimp, drule_tac f=length in arg_cong, (clarsimp simp: fprefix_length fprefix_nth iCOMP_def)+) done corollary fprefix_COMP : "sq \<in> iCOMP \<rho> \<Longrightarrow> fprefix sq n \<in> COMP \<rho>" by(subst COMP_fprefix, fast) text "Finite potential computations of a program" definition pcs :: "'s LA \<Rightarrow> (nat \<Rightarrow> 's LA) \<Rightarrow> ('s config \<times> bool) list set" ("\<lbrakk>_\<rbrakk>\<^sub>_" [100,101] 100) where "\<lbrakk>p\<rbrakk>\<^sub>\<rho> = {sq |sq s tk. sq \<in> COMP \<rho> \<and> hd sq = ((p, s), tk)}" text "Picking actual computations, i.e. those where no env. transitions occur" definition acs :: "'s LA \<Rightarrow> (nat \<Rightarrow> 's LA) \<Rightarrow> ('s config \<times> bool) list set" ("\<^sup>A\<lbrakk>_\<rbrakk>\<^sub>_" [100,101] 100) where "\<^sup>A\<lbrakk>p\<rbrakk>\<^sub>\<rho> = {sq. sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<and> (\<forall>i < length sq. 0 < i \<longrightarrow> snd(sq!i))}" section "Properties" corollary COMP_noNil : "sq \<in> COMP \<rho> \<Longrightarrow> sq \<noteq> []" by(subst (asm) COMP_eq, simp) corollary COMP_nth : "sq \<in> COMP \<rho> \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> (sq!(i-1), sq!i) \<in> stepR \<rho>" by(subst (asm) COMP_eq, clarsimp, drule_tac x="i-1" in spec, simp) corollary nth_COMP[rule_format] : "\<forall>i<length sq. 
0 < i \<longrightarrow> (sq!(i-1), sq!i) \<in> stepR \<rho> \<Longrightarrow> sq \<noteq> [] \<Longrightarrow> sq \<in> COMP \<rho>" by(subst COMP_eq, clarsimp, drule_tac x="i+1" in spec, simp) lemma COMP_compose' : "sq \<in> COMP \<rho> \<Longrightarrow> sq' \<in> COMP \<rho> \<Longrightarrow> fst(last sq) = fst(hd sq') \<Longrightarrow> (sq @ tl sq') \<in> COMP \<rho>" apply(case_tac "tl sq' = []", simp) apply(clarsimp simp: COMP_eq) apply(case_tac "i<length sq - 1", clarsimp simp: nth_append, fastforce) apply(drule leI) apply(case_tac "i=length sq - 1", clarsimp simp: nth_append) apply(subst (asm) last_conv_nth, assumption) apply(subst (asm) hd_conv_nth, assumption) apply(subst nth_tl, simp) apply(case_tac "sq!(length sq - 1)", clarsimp) apply(drule_tac x=0 in spec)+ apply(drule mp, fastforce) apply(clarsimp simp: stepR_def, fastforce) apply(clarsimp simp: nth_append) apply(rule conjI, fastforce) apply(subst nth_tl, simp)+ apply(drule_tac x="i - length sq + 1" in spec)+ apply(drule mp, fastforce) apply clarsimp apply(subgoal_tac "i - length sq + 1 = i + 1 - length sq", simp) by simp lemma COMP_compose2' : "sq \<in> COMP \<rho> \<Longrightarrow> (fst(last sq), tk) # sq' \<in> COMP \<rho> \<Longrightarrow> (sq @ sq') \<in> COMP \<rho>" by(drule COMP_compose', assumption, simp+) lemma COMP_compose : "sq \<in> COMP \<rho> \<Longrightarrow> sq' \<in> COMP \<rho> \<Longrightarrow> last sq = hd sq' \<Longrightarrow> (sq @ tl sq') \<in> COMP \<rho>" by(erule COMP_compose', simp_all) lemma COMP_compose2 : "sq \<in> COMP \<rho> \<Longrightarrow> last sq # sq' \<in> COMP \<rho> \<Longrightarrow> (sq @ sq') \<in> COMP \<rho>" by(drule COMP_compose, assumption, simp+) lemma iCOMP_pow : "sq \<in> iCOMP \<rho> \<Longrightarrow> (sq 0, sq i) \<in> stepR \<rho> ^^ i" apply(induct_tac i, simp_all) apply(erule relcompI) by(simp add: iCOMP_def) lemma COMP_pow : "sq \<in> COMP \<rho> \<Longrightarrow> \<forall>i<length sq. (sq!0, sq!i) \<in> (stepR \<rho>)^^i" apply(subst (asm) COMP_fprefix, clarsimp simp: fprefix_length fprefix_nth) by(erule iCOMP_pow) lemma pow_COMP[rule_format] : "\<forall>cf cf'. (cf, cf') \<in> (stepR \<rho>)^^i \<longrightarrow> (\<exists>sq. length sq = Suc i \<and> sq \<in> COMP \<rho> \<and> sq!0 = cf \<and> sq!i = cf')" apply(induct_tac i) apply clarsimp apply(rename_tac p s tk) apply(rule_tac x="[((p, s), tk)]" in exI, simp add: COMP_eq) apply clarsimp apply((drule spec)+, drule mp, assumption) apply clarify apply(rename_tac p s tk sq) apply(rule_tac x="sq@[((p, s), tk)]" in exI, simp) apply(frule COMP_noNil) apply(subst nth_append, simp) apply(subst nth_append, simp) apply(erule COMP_compose2) apply(subst last_conv_nth, assumption) by(clarsimp simp: COMP_eq) lemma pstep_pow_COMP[rule_format] : "\<forall>cf cf'. ((pstep \<rho>)^^i) cf cf' \<longrightarrow> (\<exists>sq. length sq = Suc i \<and> sq \<in> COMP \<rho> \<and> fst(sq!0) = cf \<and> fst(sq!i) = cf' \<and> (\<forall>j<length sq. 
snd(sq!j)))" apply(induct_tac i) apply clarsimp apply(rename_tac p s) apply(rule_tac x="[((p, s), True)]" in exI, simp add: COMP_eq) apply clarsimp apply((drule spec)+, drule mp, assumption) apply clarify apply(rename_tac p' t p s sq) apply(rule_tac x="sq@[((p', t), True)]" in exI, simp) apply(frule COMP_noNil) apply(subst nth_append, simp) apply(subst nth_append, simp) apply(rule conjI) apply(erule COMP_compose2) apply(subst last_conv_nth, assumption) apply(case_tac "sq!n", clarsimp simp: COMP_eq) apply(simp add: stepR_def, rule_tac x=True in exI, simp (no_asm)) apply clarsimp apply(case_tac "j = Suc n", clarsimp) apply(subst nth_append, simp) apply(subst nth_append, simp) done lemma COMP_pstep_pow[rule_format] : "sq \<in> COMP \<rho> \<Longrightarrow> \<forall>j<length sq. 0 < j \<longrightarrow> snd(sq!j) \<Longrightarrow> \<forall>i<length sq. ((pstep \<rho>)^^i) (fst(sq!0)) (fst(sq!i))" apply(rule allI) apply(induct_tac i, clarsimp+) apply(erule relcomppI) apply(drule spec, drule mp, assumption) apply(case_tac "sq!n", case_tac "sq!(n+1)", clarsimp) apply(drule_tac i="n+1" in COMP_nth, clarsimp+) by(erule stepR_D1) lemma esteps_COMP[rule_format] : "sq \<in> COMP \<rho> \<Longrightarrow> \<forall>i < length sq. 0 < i \<longrightarrow> \<not>tkOf(sq!i) \<and> (stateOf(sq!(i - 1)) \<in> S \<longrightarrow> stateOf(sq!i) \<in> S) \<Longrightarrow> stateOf(sq!0) \<in> S \<Longrightarrow> i < length sq \<Longrightarrow> progOf(sq!i) = progOf(sq!0) \<and> stateOf(sq!i) \<in> S" apply(induct i, simp_all) apply(drule spec, drule mp, assumption) apply clarsimp apply(drule_tac i="i+1" in COMP_nth, simp, simp) apply(clarsimp simp: stepR_def) done lemma Skip_COMP[rule_format] : "sq \<in> COMP \<rho> \<Longrightarrow> \<forall>i < length sq. \<forall>s t p tk. 0 < i \<longrightarrow> sq!(i - 1) = ((Skip, s), tk) \<longrightarrow> sq!i = ((p, t), False) \<longrightarrow> s \<in> Q \<longrightarrow> t \<in> Q \<Longrightarrow> progOf(sq!0) = Skip \<Longrightarrow> stateOf(sq!0) \<in> Q \<Longrightarrow> i < length sq \<Longrightarrow> progOf(sq!i) = Skip \<and> stateOf(sq!i) \<in> Q" apply(induct i, simp_all) apply(drule_tac x="i+1" in spec, simp) apply(case_tac "sq!i") apply(case_tac "sq!(i+1)", clarsimp) apply(drule_tac i="i+1" in COMP_nth, simp, simp) apply(rename_tac tk) apply(case_tac tk) apply clarsimp apply(drule stepR_D1) apply(erule Skip_pstep) apply clarsimp apply(drule stepR_D2, simp) done lemma Skip_COMP_pstep : "sq \<in> COMP \<rho> \<Longrightarrow> progOf(sq!0) = Skip \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> tkOf(sq!i) \<Longrightarrow> P" apply(frule_tac i="i-1" and Q=UNIV in Skip_COMP, clarsimp, assumption, simp, simp) apply(case_tac "sq!(i-1)", clarsimp) apply(case_tac "sq!i", clarsimp) apply(drule_tac i=i in COMP_nth, assumption+) apply clarsimp apply(drule stepR_D1) by(erule Skip_pstep) section "Prefixes and suffixes of finite computations" definition "prefix pr sq = (\<exists>s. 
sq = pr@s)" lemma prefix_length : "prefix pr sq \<Longrightarrow> length pr \<le> length sq" by(clarsimp simp add: prefix_def) lemma prefix_take : "prefix (take i sq) sq" apply(simp add: prefix_def) apply(rule_tac x="drop i sq" in exI) by(rule sym, rule append_take_drop_id) lemma prefix_nth : "prefix pr sq \<Longrightarrow> i < length pr \<Longrightarrow> pr!i = sq!i" apply(clarsimp simp add: prefix_def) apply(subst nth_append, simp) done lemma prefix_appendI : "prefix pr xs \<Longrightarrow> prefix pr (xs @ zs)" apply(simp add: prefix_def, fastforce) done lemma prefix_appendD : "prefix pr (xs @ zs) \<Longrightarrow> prefix pr xs \<or> (\<exists>pr1. pr1 \<noteq> [] \<and> pr = xs @ pr1 \<and> prefix pr1 zs)" apply(simp add: prefix_def) apply(erule exE) apply(subst (asm) append_eq_append_conv2) apply(erule exE) apply(erule disjE, fastforce) apply fastforce done corollary prefix_snoc : "prefix pr (xs @ [a]) \<Longrightarrow> pr = (xs @ [a]) \<or> prefix pr xs" by(drule prefix_appendD, erule disjE, clarsimp+, case_tac pr1, (clarsimp simp: prefix_def)+) lemma prefix_length_eqD : "prefix pr xs \<Longrightarrow> length pr = length xs \<Longrightarrow> pr = xs" by(clarsimp simp: prefix_def) lemma COMP_prefix_cls : "sq \<in> COMP \<rho> \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> pr \<in> COMP \<rho>" apply(clarsimp simp: COMP_eq) apply(subst prefix_nth, assumption, simp)+ apply(drule_tac x=i in spec) apply(drule mp, erule less_le_trans, drule prefix_length) by fastforce definition "suffix su sq = (\<exists>x. sq = x@su)" lemma suffix_length : "suffix su sq \<Longrightarrow> length su \<le> length sq" by(clarsimp simp add: suffix_def) lemma suffix_drop : "suffix (drop i sq) sq" apply(simp add: suffix_def) apply(rule_tac x="take i sq" in exI) by(rule sym, rule append_take_drop_id) lemma suffix_nth : "suffix su sq \<Longrightarrow> i < length su \<Longrightarrow> su!i = sq!(i + (length sq - length su))" apply(clarsimp simp add: suffix_def) apply(subst nth_append, simp) done lemma COMP_suffix_cls : "sq \<in> COMP \<rho> \<Longrightarrow> suffix su sq \<Longrightarrow> su \<noteq> [] \<Longrightarrow> su \<in> COMP \<rho>" apply(clarsimp simp: COMP_eq) apply(subst suffix_nth, assumption, simp)+ apply(drule_tac x="i + (length sq - length su)" in spec, simp) apply(drule mp, drule suffix_length) by fastforce lemma COMP_decomp : "(sq @ sq') \<in> COMP \<rho> \<Longrightarrow> sq \<noteq> [] \<Longrightarrow> sq \<in> COMP \<rho> \<and> (last sq # sq') \<in> COMP \<rho>" apply(rule conjI) apply(erule COMP_prefix_cls) apply(simp add: prefix_def) apply assumption apply(erule COMP_suffix_cls) apply(simp add: suffix_def) apply(rule_tac x="butlast sq" in exI) by simp+ lemma Skip_COMP'[rule_format] : "sq \<in> COMP \<rho> \<Longrightarrow> (\<forall>i < length sq. \<forall>s t p tk. 0 < i \<longrightarrow> sq!(i - 1) = ((Skip, s), tk) \<longrightarrow> sq!i = ((p, t), False) \<longrightarrow> s \<in> Q \<longrightarrow> t \<in> Q) \<Longrightarrow> progOf(sq!i) = Skip \<Longrightarrow> stateOf(sq!i) \<in> Q \<Longrightarrow> i < length sq \<Longrightarrow> \<forall>j<length sq. 
i\<le>j \<longrightarrow> progOf(sq!j) = Skip \<and> stateOf(sq!j) \<in> Q" apply(drule_tac su="drop i sq" in COMP_suffix_cls) apply(rule suffix_drop, simp) apply clarsimp apply(drule_tac Q=Q and i="j-i" in Skip_COMP) apply clarsimp apply(rename_tac k s t p tk) apply(drule_tac x="i+k" in spec, simp) apply clarsimp+ apply fastforce apply clarsimp done subsection "Properties of finite potential computations of a program" lemma pcs_noNil : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> sq \<noteq> []" apply(clarsimp simp add: pcs_def) by(drule COMP_noNil, clarify) lemma pcsD : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> sq \<in> COMP \<rho>" by(simp add: pcs_def) lemma pcs_nth : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> (sq!(i-1), sq!i) \<in> stepR \<rho>" apply(clarsimp simp: pcs_def) by(erule COMP_nth[simplified], assumption+) lemma pcs0 : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<exists>s tk. sq!0 = ((p, s), tk)" apply(clarsimp simp add: pcs_def) apply(drule COMP_noNil) apply(subst (asm) hd_conv_nth, simp) by fastforce lemma esteps_pcs : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<forall>i<length sq. 0 < i \<longrightarrow> \<not>tkOf(sq!i) \<and> (stateOf(sq!(i - 1)) \<in> P \<longrightarrow> stateOf(sq!i) \<in> P) \<Longrightarrow> stateOf(sq!0) \<in> P \<Longrightarrow> \<forall>i < length sq. progOf(sq!i) = p \<and> stateOf(sq!i) \<in> P" apply(clarsimp simp add: pcs_def) apply(frule COMP_noNil) apply(subst (asm) hd_conv_nth, assumption) apply(drule_tac i=i and S=P in esteps_COMP, simp, assumption+) by simp lemma Skip_pcs : "sq \<in> \<lbrakk>Skip\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<forall>i < length sq. \<forall>s t p' tk. 0 < i \<longrightarrow> sq!(i - 1) = ((Skip, s), tk) \<longrightarrow> sq!i = ((p', t), False) \<longrightarrow> s \<in> Q \<longrightarrow> t \<in> Q \<Longrightarrow> stateOf(sq!0) \<in> Q \<Longrightarrow> \<forall>i < length sq. 
progOf(sq!i) = Skip \<and> stateOf(sq!i) \<in> Q" apply(clarsimp simp add: pcs_def) apply(frule COMP_noNil) apply(subst (asm) hd_conv_nth, assumption) apply(drule_tac i=i and Q=Q in Skip_COMP) apply force apply simp_all done lemma Skip_pcs_pstep[rule_format] : "sq \<in> \<lbrakk>Skip\<rbrakk>\<^sub>\<rho> \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> snd(sq!i) \<Longrightarrow> P" apply(clarsimp simp: pcs_def) apply(subst (asm) hd_conv_nth, erule COMP_noNil) apply(drule Skip_COMP_pstep, simp, assumption+) done lemma pcs_prefix_cls : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> pr \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho>" apply(clarsimp simp add: pcs_def) apply(drule COMP_prefix_cls, assumption+) apply(simp add: prefix_def) apply clarify apply(rule_tac x=s in exI) apply(rule_tac x=tk in exI) apply(case_tac pr, simp_all) done lemma pcs_suffix_cls : "sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> suffix su sq \<Longrightarrow> su \<noteq> [] \<Longrightarrow> su \<in> \<lbrakk>progOf(sq!(length sq - length su))\<rbrakk>\<^sub>\<rho>" apply(clarsimp simp add: pcs_def) apply(drule COMP_suffix_cls, assumption+) apply(drule_tac i=0 in suffix_nth) apply simp apply clarsimp apply(erule_tac s="su!0" in subst) apply(case_tac su, clarify) apply clarsimp done lemma acs_noNil : "[] \<in> \<^sup>A\<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> P" by(simp add: acs_def, drule pcs_noNil, clarify) lemma acs_prefix_cls : "sq \<in> \<^sup>A\<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> pr \<in> \<^sup>A\<lbrakk>p\<rbrakk>\<^sub>\<rho>" apply(clarsimp simp add: acs_def) apply(rule conjI) apply(erule pcs_prefix_cls, assumption+) apply(clarsimp simp: prefix_def) apply(drule_tac x=i in spec, simp) apply(subst (asm) nth_append, simp) done section "Pcs, constrained by conditions" text "Computations where all environment transitions satisfy a state relation" definition EnvCond :: "(nat \<Rightarrow> 's LA) \<Rightarrow> 's staterel \<Rightarrow> ('s config \<times> bool) list set" where "EnvCond \<rho> R = {sq |sq p. sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<and> (\<forall>i. i < length sq \<longrightarrow> 0 < i \<longrightarrow> cstep_cond False R (sq!(i-1)) (sq!i))}" definition EnvCond_br :: "(nat \<Rightarrow> 's LA)\<Rightarrow> 's staterel \<Rightarrow> ('s config \<times> bool) list \<Rightarrow> nat \<Rightarrow> bool" where "EnvCond_br \<rho> R sq i = (\<exists>c s c' t tk. sq!(i - 1) = ((c, s), tk) \<and> sq!i = ((c', t), False) \<and> (s, t) \<notin> R)" lemma EnvCond_D : "sq \<in> EnvCond \<rho> R \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> sq!(i - Suc 0) = ((c, s), tk) \<Longrightarrow> sq!i = ((c', t), False) \<Longrightarrow> (s, t) \<in> R" apply(clarsimp simp add: EnvCond_def) apply(drule_tac x=i in spec, simp) by(erule cstep_cond_D, rule refl, rule refl) lemma not_EnvCond_D : "sq \<notin> EnvCond \<rho> R \<Longrightarrow> sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<exists>i. i < length sq \<and> 0 < i \<and> \<not> cstep_cond False R (sq!(i-1)) (sq!i)" by(clarsimp simp add: EnvCond_def) lemma not_EnvCond_D' : "sq \<notin> EnvCond \<rho> R \<Longrightarrow> sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<exists>i c s c' t tk. 
i < length sq \<and> 0 < i \<and> sq!(i - 1) = ((c, s), tk) \<and> sq!i = ((c', t), False) \<and> (s, t) \<notin> R" apply(clarsimp simp add: EnvCond_def) apply(erule disjE, simp) apply clarsimp apply(rule_tac x=i in exI, simp) apply(case_tac "sq!i", case_tac "sq!(i-1)") apply(clarsimp simp add: cstep_cond_def) done lemma EnvCond_prefix_cls : "sq \<in> EnvCond \<rho> R' \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> R' \<subseteq> R \<Longrightarrow> pr \<in> EnvCond \<rho> R" apply(clarsimp simp add: EnvCond_def) apply(rule conjI) apply(rule exI, erule pcs_prefix_cls, assumption+) apply clarsimp apply(subst prefix_nth, assumption, simp)+ apply(drule prefix_length) apply(drule_tac x=i in spec, simp) by(erule cstep_cond_mono) lemma EnvCond_suffix_cls : "sq \<in> EnvCond \<rho> R' \<Longrightarrow> suffix sf sq \<Longrightarrow> sf \<noteq> [] \<Longrightarrow> R' \<subseteq> R \<Longrightarrow> sf \<in> EnvCond \<rho> R" apply(clarsimp simp add: EnvCond_def) apply(rule conjI) apply(rule exI, erule pcs_suffix_cls, assumption+) apply clarsimp apply(subst suffix_nth, assumption, simp)+ apply(drule suffix_length) apply(drule_tac x="i + (length sq - length sf)" in spec, simp) apply(drule mp, force) by(erule cstep_cond_mono, assumption) lemma EnvCond_compose : "sq \<in> EnvCond \<rho> R \<Longrightarrow> sq' \<in> EnvCond \<rho> R \<Longrightarrow> fst(last sq) = fst(hd sq') \<Longrightarrow> (sq @ tl sq') \<in> EnvCond \<rho> R" apply(case_tac "length sq' = 1") apply(case_tac sq', clarsimp, clarsimp) apply(subgoal_tac "0 < length sq \<and> 1 < length sq'") prefer 2 apply(rule ccontr, simp) apply(clarsimp simp: EnvCond_def) apply(drule pcs_noNil)+ apply(case_tac sq', clarsimp, clarsimp) apply(subst EnvCond_def, simp) apply(rule conjI) apply(clarsimp simp: EnvCond_def pcs_def) apply(frule COMP_noNil) apply(drule COMP_compose', assumption, simp) apply simp apply(clarsimp simp: cstep_cond_def) apply(drule_tac t="(sq @ tl sq') ! (i - Suc 0)" in sym) apply(subst (asm) nth_append)+ apply(simp split: if_splits) apply(erule_tac i=i in EnvCond_D, assumption+) apply(subgoal_tac "i = length sq", clarsimp) apply(subst (asm) last_conv_nth, simp) apply(subst (asm) hd_conv_nth, fastforce) apply(subst (asm) nth_tl, simp) apply(case_tac "sq'!0", clarsimp) apply(erule_tac i=1 and sq=sq' in EnvCond_D, simp, simp (no_asm), simp, simp) apply fastforce apply(drule leI)+ apply(subst (asm) nth_tl, simp)+ apply(subgoal_tac " Suc (i - Suc (length sq)) = i - length sq", simp) apply(erule_tac sq=sq' and i="i - length sq + 1" in EnvCond_D, simp+) done text "Computations where all program transitions satisfy a state relation" definition ProgCond :: "(nat \<Rightarrow> 's LA) \<Rightarrow> 's staterel \<Rightarrow> ('s config \<times> bool) list set" where "ProgCond \<rho> R = {sq |sq p. sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<and> (\<forall>i. i < length sq \<longrightarrow> 0 < i \<longrightarrow> cstep_cond True R (sq!(i-1)) (sq!i))}" lemma ProgCond_D : "sq \<in> ProgCond \<rho> R \<Longrightarrow> i < length sq \<Longrightarrow> 0 < i \<Longrightarrow> sq!(i - Suc 0) = ((c, s), tk) \<Longrightarrow> sq!i = ((c', t), True) \<Longrightarrow> (s, t) \<in> R" apply(clarsimp simp add: ProgCond_def) apply(drule_tac x=i in spec, simp) by(erule cstep_cond_D, rule refl, rule refl) lemma not_ProgCond_D : "sq \<notin> ProgCond \<rho> R \<Longrightarrow> sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<exists>i c s c' t tk. 
i < length sq \<and> 0 < i \<and> \<not> cstep_cond True R (sq!(i-1)) (sq!i)" by(clarsimp simp add: ProgCond_def) lemma not_ProgCond_D' : "sq \<notin> ProgCond \<rho> R \<Longrightarrow> sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<Longrightarrow> \<exists>i c s c' t tk. i < length sq \<and> 0 < i \<and> sq!(i - 1) = ((c, s), tk) \<and> sq!i = ((c', t), True) \<and> (s, t) \<notin> R" apply(simp add: ProgCond_def) apply(erule disjE, simp) apply clarsimp apply(rule_tac x=i in exI, simp) apply(case_tac "sq!i", case_tac "sq!(i-1)") apply(clarsimp simp add: cstep_cond_def) done lemma ProgCond_prefix_cls : "sq \<in> ProgCond \<rho> R' \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> R' \<subseteq> R \<Longrightarrow> pr \<in> ProgCond \<rho> R" apply(clarsimp simp add: ProgCond_def) apply(rule conjI) apply(rule exI, erule pcs_prefix_cls, assumption+) apply clarsimp apply(subst prefix_nth, assumption, simp)+ apply(drule prefix_length) apply(drule_tac x=i in spec, simp) by(erule cstep_cond_mono) lemma ProgCond_suffix_cls : "sq \<in> ProgCond \<rho> R' \<Longrightarrow> suffix su sq \<Longrightarrow> su \<noteq> [] \<Longrightarrow> R' \<subseteq> R \<Longrightarrow> su \<in> ProgCond \<rho> R" apply(clarsimp simp add: ProgCond_def) apply(rule conjI) apply(rule exI, erule pcs_suffix_cls, assumption+) apply clarsimp apply(subst suffix_nth, assumption, simp)+ apply(drule suffix_length) apply(drule_tac x="i + (length sq - length su)" in spec, simp) apply(drule mp, fastforce) by(erule cstep_cond_mono) lemma ProgCond_compose : "sq \<in> ProgCond \<rho> R \<Longrightarrow> sq' \<in> ProgCond \<rho> R \<Longrightarrow> fst(last sq) = fst(hd sq') \<Longrightarrow> (sq @ tl sq') \<in> ProgCond \<rho> R" apply(case_tac "length sq' = 1") apply(case_tac sq', clarsimp, clarsimp) apply(subgoal_tac "0 < length sq \<and> 1 < length sq'") prefer 2 apply(rule ccontr, simp) apply(clarsimp simp: ProgCond_def) apply(drule pcs_noNil)+ apply(case_tac sq', clarsimp, clarsimp) apply(subst ProgCond_def, simp) apply(rule conjI) apply(clarsimp simp: ProgCond_def pcs_def) apply(frule COMP_noNil) apply(drule COMP_compose', assumption, simp) apply simp apply(clarsimp simp: cstep_cond_def) apply(drule_tac t="(sq @ tl sq') ! 
(i - Suc 0)" in sym) apply(subst (asm) nth_append)+ apply(simp split: if_splits) apply(erule_tac i=i in ProgCond_D, assumption+) apply(subgoal_tac "i = length sq", clarsimp) apply(subst (asm) last_conv_nth, simp) apply(subst (asm) hd_conv_nth, fastforce) apply(subst (asm) nth_tl, simp) apply(case_tac "sq'!0", clarsimp) apply(erule_tac i=1 and sq=sq' in ProgCond_D, simp, simp (no_asm), simp, simp) apply fastforce apply(drule leI)+ apply(subst (asm) nth_tl, simp)+ apply(subgoal_tac " Suc (i - Suc (length sq)) = i - length sq", simp) apply(erule_tac sq=sq' and i="i - length sq + 1" in ProgCond_D, simp+) done lemma ProgCond_decompose : "(sq @ tl sq') \<in> ProgCond \<rho> R \<Longrightarrow> 0 < length sq \<Longrightarrow> 0 < length sq' \<Longrightarrow> fst(last sq) = fst(hd sq') \<Longrightarrow> sq \<in> ProgCond \<rho> R \<and> sq' \<in> ProgCond \<rho> R" apply(rule conjI) apply(erule ProgCond_prefix_cls, simp add: prefix_def, fast, rule subset_refl) apply(subst ProgCond_def, simp) apply(rule conjI) apply(rule_tac x="progOf(sq'!0)" in exI) apply(clarsimp simp: ProgCond_def pcs_def) apply(rule conjI) apply(drule COMP_decomp, assumption) apply(subst (asm) hd_conv_nth, assumption)+ apply(subst (asm) last_conv_nth, assumption)+ apply(clarsimp simp: COMP_eq) apply(case_tac i, clarsimp) apply(drule_tac x=0 in spec)+ apply clarsimp apply(subst (asm) nth_tl, simp) apply(erule stepR_exch, assumption) apply(drule_tac x=i in spec, drule mp, assumption) apply clarsimp apply(subst (asm) nth_tl, simp)+ apply assumption apply(subst hd_conv_nth, assumption) apply(case_tac "sq'!0", clarsimp) apply(clarsimp simp: cstep_cond_def) apply(drule_tac t="sq' ! (i - Suc 0)" in sym) apply(case_tac "i = 1", clarsimp) apply(subst (asm) last_conv_nth, assumption) apply(subst (asm) hd_conv_nth, assumption) apply(case_tac "sq!(length sq - 1)", clarsimp) apply(drule_tac i="length sq" in ProgCond_D, simp, simp) apply(subst nth_append, simp) apply(subst nth_append, simp) apply(subst nth_tl, simp, assumption+) apply(drule_tac i="length sq + i - 1" in ProgCond_D, simp, simp) apply(subst nth_append, simp) apply(rule conjI, fastforce) apply clarsimp apply(subst nth_tl, simp) apply(case_tac i, simp, simp) apply(subst nth_append, simp) apply(rule conjI, fastforce) apply clarsimp apply(subst nth_tl, simp) apply(case_tac i, simp, simp) by assumption text "Computations where the initial state satisfies a state predicate" definition "InitCond \<rho> A = {sq |sq p c s tk. sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<and> hd sq = ((c, s), tk) \<and> s \<in> A}" lemma InitCond_mono : "sq \<in> InitCond \<rho> A' \<Longrightarrow> A' \<subseteq> A \<Longrightarrow> sq \<in> InitCond \<rho> A" by(clarsimp simp add: InitCond_def, fastforce) lemma InitCond_prefix_cls : "sq \<in> InitCond \<rho> A' \<Longrightarrow> prefix pr sq \<Longrightarrow> pr \<noteq> [] \<Longrightarrow> A' \<subseteq> A \<Longrightarrow> pr \<in> InitCond \<rho> A" apply(clarsimp simp add: InitCond_def) apply(rule conjI) apply(rule exI, erule pcs_prefix_cls, assumption+) apply(subst hd_conv_nth, assumption) apply(subst prefix_nth, assumption, simp) apply(subst hd_conv_nth[THEN sym]) apply(drule prefix_length, force) apply force done text "condition upon termination" definition "TermCond \<rho> Q = {sq |sq p. sq \<in> \<lbrakk>p\<rbrakk>\<^sub>\<rho> \<and> (\<forall>j<length sq. progOf(sq!j) = Skip \<longrightarrow> (\<exists>i t tk. 
i \<le> j \<and> sq!i = ((Skip, t), tk) \<and> t \<in> Q))}" lemma TermCond_D : "sq \<in> TermCond \<rho> Q \<Longrightarrow> (\<forall>j < length sq. progOf(sq!j) = Skip \<longrightarrow> (\<exists>i t tk. i \<le> j \<and> sq!i = ((Skip, t), tk) \<and> t \<in> Q))" by(simp add: TermCond_def) end
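To make the bookkeeping of Computations.thy above concrete: a finite computation there is a non-empty list of pairs (config, bool), where the boolean records how an entry was reached (True for a program step, False for an environment step), and EnvCond/ProgCond constrain the state change across environment/program steps by a relation. The following is a minimal executable sketch of that idea in plain Lean 4; it is not part of the Isabelle theory, and all names in it (Config, stepOk, isComp, envCond, trace) are illustrative only.

-- Minimal sketch (plain Lean 4, no Mathlib) of tagged computations as in
-- Computations.thy above; names are illustrative, not the theory's.

/-- A configuration pairs a program (here just a label) with a state. -/
structure Config (Prog State : Type) where
  prog  : Prog
  state : State

/-- One tagged step, mirroring stepR: a `true` tag demands a program step,
    a `false` tag is an environment step, which may change the state but
    leaves the program component unchanged (cf. stepR_D2). -/
def stepOk {Prog State : Type} [DecidableEq Prog]
    (pstep : Config Prog State → Config Prog State → Bool)
    (c c' : Config Prog State) (tag : Bool) : Bool :=
  if tag then pstep c c' else decide (c.prog = c'.prog)

/-- A finite computation: consecutive entries are related by stepOk (cf. COMP). -/
def isComp {Prog State : Type} [DecidableEq Prog]
    (pstep : Config Prog State → Config Prog State → Bool) :
    List (Config Prog State × Bool) → Bool
  | x :: y :: rest => stepOk pstep x.1 y.1 y.2 && isComp pstep (y :: rest)
  | _ => true

/-- Every environment step (tag `false`) satisfies the rely relation R on
    states (cf. EnvCond); program steps are unconstrained here. -/
def envCond {Prog State : Type} (R : State → State → Bool) :
    List (Config Prog State × Bool) → Bool
  | x :: y :: rest => (y.2 || R x.1.state y.1.state) && envCond R (y :: rest)
  | _ => true

-- Tiny usage example: a trace whose middle entry is an environment step
-- that only changes the state, followed by a program step.
def trace : List (Config String Nat × Bool) :=
  [ (⟨"p", 0⟩, true),     -- initial entry; its tag plays no role in the conditions
    (⟨"p", 5⟩, false),    -- environment step: program unchanged, state 0 → 5
    (⟨"q", 5⟩, true) ]    -- program step

#eval envCond (fun (s t : Nat) => decide (s ≤ t)) trace   -- true: the env. step only increases the state

As in the theory, the tag of the first entry is never consulted; only how each later entry was reached matters.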
open import Agda.Builtin.List foldr : {A B : Set} → (A → B → B) → B → List A → B foldr _⊕_ ε [] = ε foldr _⊕_ ε (x ∷ xs) = x ⊕ foldr _⊕_ ε xs infixr 5 _++_ _++_ : {A : Set} → List A → List A → List A xs ++ ys = foldr _∷_ ys xs record R (F : Set → Set) : Set₁ where field f : {A : Set} → A → F A → F A open R ⦃ … ⦄ public postulate D : {A : Set} → List A → Set easy : {A : Set} {@0 xs : List A} → D xs record Q (A : Set) : Set where field @0 index : List A d : D index g : {A : Set} → A → Q A → Q A g x q .Q.index = q .Q.index ++ x ∷ [] g x q .Q.d = easy record _×_ (A B : Set) : Set where constructor _,_ field proj₁ : A proj₂ : B data Maybe (A : Set) : Set where nothing : Maybe A just : A → Maybe A postulate m₁ : {A : Set} {B : Maybe A → Set} → ((x : A) → B (just x)) → (x : Maybe A) → B x m₂ : {A B : Set} → (A → Maybe B) → Maybe A → Maybe B m₂ f x = m₁ f x postulate P : {A : Set} → A → Set p : {A : Set} (x : A) → P x A : Set x : Maybe (A × Q A) instance _ : R Q _ = record { f = g } _ : P (m₂ (λ { (x , q) → just (f x q) }) x) _ = p (m₁ (λ { (x , q) → just (f x q) }) x)
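The Agda fragment above defines list append by folding _∷_ over the first list (xs ++ ys = foldr _∷_ ys xs). A quick self-contained illustration of the same technique, sketched in Lean 4 with the illustrative name append' (not taken from the Agda source):

-- Append via foldr, mirroring the Agda `xs ++ ys = foldr _∷_ ys xs` above.
def append' {α : Type} (xs ys : List α) : List α :=
  xs.foldr List.cons ys

#eval append' [1, 2] [3, 4]                        -- [1, 2, 3, 4]
example : append' [1, 2] [3, 4] = [1, 2, 3, 4] := rfl

Folding cons over xs replaces its terminal [] with ys, so append' traverses only xs and leaves ys untouched.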
[GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst ⊢ ContinuousAt (fun y => ∡ y.fst y.snd.fst y.snd.snd) x [PROOFSTEP] let f : P × P × P → V × V := fun y => (y.1 -ᵥ y.2.1, y.2.2 -ᵥ y.2.1) [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst f : P × P × P → V × V := fun y => (y.fst -ᵥ y.snd.fst, y.snd.snd -ᵥ y.snd.fst) ⊢ ContinuousAt (fun y => ∡ y.fst y.snd.fst y.snd.snd) x [PROOFSTEP] have hf1 : (f x).1 ≠ 0 := by simp [hx12] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst f : P × P × P → V × V := fun y => (y.fst -ᵥ y.snd.fst, y.snd.snd -ᵥ y.snd.fst) ⊢ (f x).fst ≠ 0 [PROOFSTEP] simp [hx12] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst f : P × P × P → V × V := fun y => (y.fst -ᵥ y.snd.fst, y.snd.snd -ᵥ y.snd.fst) hf1 : (f x).fst ≠ 0 ⊢ ContinuousAt (fun y => ∡ y.fst y.snd.fst y.snd.snd) x [PROOFSTEP] have hf2 : (f x).2 ≠ 0 := by simp [hx32] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst f : P × P × P → V × V := fun y => (y.fst -ᵥ y.snd.fst, y.snd.snd -ᵥ y.snd.fst) hf1 : (f x).fst ≠ 0 ⊢ (f x).snd ≠ 0 [PROOFSTEP] simp [hx32] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) x : P × P × P hx12 : x.fst ≠ x.snd.fst hx32 : x.snd.snd ≠ x.snd.fst f : P × P × P → V × V := fun y => (y.fst -ᵥ y.snd.fst, y.snd.snd -ᵥ y.snd.fst) hf1 : (f x).fst ≠ 0 hf2 : (f x).snd ≠ 0 ⊢ ContinuousAt (fun y => ∡ y.fst y.snd.fst y.snd.snd) x [PROOFSTEP] exact (o.continuousAt_oangle hf1 hf2).comp ((continuous_fst.vsub continuous_snd.fst).prod_mk (continuous_snd.snd.vsub continuous_snd.fst)).continuousAt [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ : P ⊢ ∡ p₁ p₁ p₂ = 0 [PROOFSTEP] simp [oangle] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ : P ⊢ ∡ p₁ p₂ p₂ = 0 [PROOFSTEP] simp [oangle] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : 
P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₁ ≠ p₂ [PROOFSTEP] rw [← @vsub_ne_zero V] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₁ -ᵥ p₂ ≠ 0 [PROOFSTEP] exact o.left_ne_zero_of_oangle_ne_zero h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₃ ≠ p₂ [PROOFSTEP] rw [← @vsub_ne_zero V] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₃ -ᵥ p₂ ≠ 0 [PROOFSTEP] exact o.right_ne_zero_of_oangle_ne_zero h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₁ ≠ p₃ [PROOFSTEP] rw [← (vsub_left_injective p₂).ne_iff] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ ≠ 0 ⊢ p₁ -ᵥ p₂ ≠ p₃ -ᵥ p₂ [PROOFSTEP] exact o.ne_of_oangle_ne_zero h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = 1 ⊢ 1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = 1 ⊢ 1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = 1 ⊢ 1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = -1 ⊢ -1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = -1 ⊢ -1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Real.Angle.sign (∡ p₁ p₂ p₃) = -1 ⊢ -1 ≠ 0 [PROOFSTEP] decide [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ 
p₂ p₃ ≠ 0 ∧ ∡ p₁ p₂ p₃ ≠ ↑π ↔ AffineIndependent ℝ ![p₁, p₂, p₃] [PROOFSTEP] rw [oangle, o.oangle_ne_zero_and_ne_pi_iff_linearIndependent, affineIndependent_iff_linearIndependent_vsub ℝ _ (1 : Fin 3), ← linearIndependent_equiv (finSuccAboveEquiv (1 : Fin 3)).toEquiv] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ LinearIndependent ℝ ![p₁ -ᵥ p₂, p₃ -ᵥ p₂] ↔ LinearIndependent ℝ ((fun i => Matrix.vecCons p₁ ![p₂, p₃] ↑i -ᵥ Matrix.vecCons p₁ ![p₂, p₃] 1) ∘ ↑(finSuccAboveEquiv 1).toEquiv) [PROOFSTEP] convert Iff.rfl [GOAL] case h.e'_2.h.e'_4 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ (fun i => Matrix.vecCons p₁ ![p₂, p₃] ↑i -ᵥ Matrix.vecCons p₁ ![p₂, p₃] 1) ∘ ↑(finSuccAboveEquiv 1).toEquiv = ![p₁ -ᵥ p₂, p₃ -ᵥ p₂] [PROOFSTEP] ext i [GOAL] case h.e'_2.h.e'_4.h V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P i : Fin 2 ⊢ ((fun i => Matrix.vecCons p₁ ![p₂, p₃] ↑i -ᵥ Matrix.vecCons p₁ ![p₂, p₃] 1) ∘ ↑(finSuccAboveEquiv 1).toEquiv) i = Matrix.vecCons (p₁ -ᵥ p₂) ![p₃ -ᵥ p₂] i [PROOFSTEP] fin_cases i [GOAL] case h.e'_2.h.e'_4.h.head V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ((fun i => Matrix.vecCons p₁ ![p₂, p₃] ↑i -ᵥ Matrix.vecCons p₁ ![p₂, p₃] 1) ∘ ↑(finSuccAboveEquiv 1).toEquiv) { val := 0, isLt := (_ : 0 < 2) } = Matrix.vecCons (p₁ -ᵥ p₂) ![p₃ -ᵥ p₂] { val := 0, isLt := (_ : 0 < 2) } [PROOFSTEP] rfl [GOAL] case h.e'_2.h.e'_4.h.tail.head V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ((fun i => Matrix.vecCons p₁ ![p₂, p₃] ↑i -ᵥ Matrix.vecCons p₁ ![p₂, p₃] 1) ∘ ↑(finSuccAboveEquiv 1).toEquiv) { val := 1, isLt := (_ : (fun a => a < 2) 1) } = Matrix.vecCons (p₁ -ᵥ p₂) ![p₃ -ᵥ p₂] { val := 1, isLt := (_ : (fun a => a < 2) 1) } [PROOFSTEP] rfl [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ p₂ p₃ = 0 ∨ ∡ p₁ p₂ p₃ = ↑π ↔ Collinear ℝ {p₁, p₂, p₃} [PROOFSTEP] rw [← not_iff_not, not_or, oangle_ne_zero_and_ne_pi_iff_affineIndependent, affineIndependent_iff_not_collinear_set] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h : 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ ⊢ AffineIndependent ℝ ![p₁, p₂, p₃] ↔ AffineIndependent ℝ ![p₄, p₅, p₆] [PROOFSTEP] simp_rw [← oangle_ne_zero_and_ne_pi_iff_affineIndependent, ← Real.Angle.two_zsmul_ne_zero_iff, h] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact 
(finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h : 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ ⊢ Collinear ℝ {p₁, p₂, p₃} ↔ Collinear ℝ {p₄, p₅, p₆} [PROOFSTEP] simp_rw [← oangle_eq_zero_or_eq_pi_iff_collinear, ← Real.Angle.two_zsmul_eq_zero_iff, h] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h₁₂₄₅ : vectorSpan ℝ {p₁, p₂} = vectorSpan ℝ {p₄, p₅} h₃₂₆₅ : vectorSpan ℝ {p₃, p₂} = vectorSpan ℝ {p₆, p₅} ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ [PROOFSTEP] simp_rw [vectorSpan_pair] at h₁₂₄₅ h₃₂₆₅ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h₁₂₄₅ : Submodule.span ℝ {p₁ -ᵥ p₂} = Submodule.span ℝ {p₄ -ᵥ p₅} h₃₂₆₅ : Submodule.span ℝ {p₃ -ᵥ p₂} = Submodule.span ℝ {p₆ -ᵥ p₅} ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ [PROOFSTEP] exact o.two_zsmul_oangle_of_span_eq_of_span_eq h₁₂₄₅ h₃₂₆₅ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h₁₂₄₅ : affineSpan ℝ {p₁, p₂} ∥ affineSpan ℝ {p₄, p₅} h₃₂₆₅ : affineSpan ℝ {p₃, p₂} ∥ affineSpan ℝ {p₆, p₅} ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ [PROOFSTEP] rw [AffineSubspace.affineSpan_pair_parallel_iff_vectorSpan_eq] at h₁₂₄₅ h₃₂₆₅ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ p₆ : P h₁₂₄₅ : vectorSpan ℝ {p₁, p₂} = vectorSpan ℝ {p₄, p₅} h₃₂₆₅ : vectorSpan ℝ {p₃, p₂} = vectorSpan ℝ {p₆, p₅} ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₄ p₅ p₆ [PROOFSTEP] exact two_zsmul_oangle_of_vectorSpan_eq h₁₂₄₅ h₃₂₆₅ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : dist p₁ p₂ = dist p₁ p₃ ⊢ ∡ p₁ p₂ p₃ = ∡ p₂ p₃ p₁ [PROOFSTEP] simp_rw [dist_eq_norm_vsub V] at h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ ∡ p₁ p₂ p₃ = ∡ p₂ p₃ p₁ [PROOFSTEP] rw [oangle, oangle, ← vsub_sub_vsub_cancel_left p₃ p₂ p₁, ← vsub_sub_vsub_cancel_left p₂ p₃ p₁, o.oangle_sub_eq_oangle_sub_rev_of_norm_eq h] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : dist p₁ p₂ = dist p₁ p₃ ⊢ ∡ p₃ p₁ p₂ = ↑π - 2 • ∡ p₁ p₂ p₃ [PROOFSTEP] simp_rw [dist_eq_norm_vsub V] at h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ ∡ p₃ p₁ p₂ = ↑π - 2 • ∡ p₁ p₂ p₃ [PROOFSTEP] rw [oangle, oangle] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : 
NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ Orientation.oangle o (p₃ -ᵥ p₁) (p₂ -ᵥ p₁) = ↑π - 2 • Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂) [PROOFSTEP] convert o.oangle_eq_pi_sub_two_zsmul_oangle_sub_of_norm_eq _ h using 1 [GOAL] case h.e'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ Orientation.oangle o (p₃ -ᵥ p₁) (p₂ -ᵥ p₁) = Orientation.oangle o (p₁ -ᵥ p₃) (p₁ -ᵥ p₂) [PROOFSTEP] rw [← neg_vsub_eq_vsub_rev p₁ p₃, ← neg_vsub_eq_vsub_rev p₁ p₂, o.oangle_neg_neg] [GOAL] case h.e'_3 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ ↑π - 2 • Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂) = ↑π - 2 • Orientation.oangle o (p₁ -ᵥ p₃ - (p₁ -ᵥ p₂)) (p₁ -ᵥ p₃) [PROOFSTEP] rw [← o.oangle_sub_eq_oangle_sub_rev_of_norm_eq h] [GOAL] case h.e'_3 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ ↑π - 2 • Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂) = ↑π - 2 • Orientation.oangle o (p₁ -ᵥ p₂) (p₁ -ᵥ p₂ - (p₁ -ᵥ p₃)) [PROOFSTEP] simp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hn : p₂ ≠ p₃ h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ p₁ -ᵥ p₂ ≠ p₁ -ᵥ p₃ [PROOFSTEP] simpa using hn [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : dist p₁ p₂ = dist p₁ p₃ ⊢ |Real.Angle.toReal (∡ p₁ p₂ p₃)| < π / 2 [PROOFSTEP] simp_rw [dist_eq_norm_vsub V] at h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ |Real.Angle.toReal (∡ p₁ p₂ p₃)| < π / 2 [PROOFSTEP] rw [oangle, ← vsub_sub_vsub_cancel_left p₃ p₂ p₁] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ‖p₁ -ᵥ p₂‖ = ‖p₁ -ᵥ p₃‖ ⊢ |Real.Angle.toReal (Orientation.oangle o (p₁ -ᵥ p₂) (p₁ -ᵥ p₂ - (p₁ -ᵥ p₃)))| < π / 2 [PROOFSTEP] exact o.abs_oangle_sub_right_toReal_lt_pi_div_two h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p p₁ p₂ : P h : Real.Angle.sign (∡ p₁ p p₂) = 0 ⊢ p₁ = p ∨ p₂ = p ∨ ∠ p₁ p p₂ = 0 ∨ ∠ p₁ p p₂ = π [PROOFSTEP] convert o.eq_zero_or_angle_eq_zero_or_pi_of_sign_oangle_eq_zero h [GOAL] 
case h.e'_1.a V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p p₁ p₂ : P h : Real.Angle.sign (∡ p₁ p p₂) = 0 ⊢ p₁ = p ↔ p₁ -ᵥ p = 0 [PROOFSTEP] simp [GOAL] case h.e'_2.h.e'_1.a V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p p₁ p₂ : P h : Real.Angle.sign (∡ p₁ p p₂) = 0 ⊢ p₂ = p ↔ p₂ -ᵥ p = 0 [PROOFSTEP] simp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(π / 2) ⊢ ∠ p₁ p₂ p₃ = π / 2 [PROOFSTEP] rw [angle, ← InnerProductGeometry.inner_eq_zero_iff_angle_eq_pi_div_two] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(π / 2) ⊢ inner (p₁ -ᵥ p₂) (p₃ -ᵥ p₂) = 0 [PROOFSTEP] exact o.inner_eq_zero_of_oangle_eq_pi_div_two h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(π / 2) ⊢ ∠ p₃ p₂ p₁ = π / 2 [PROOFSTEP] rw [angle_comm] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(π / 2) ⊢ ∠ p₁ p₂ p₃ = π / 2 [PROOFSTEP] exact angle_eq_pi_div_two_of_oangle_eq_pi_div_two h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(-π / 2) ⊢ ∠ p₁ p₂ p₃ = π / 2 [PROOFSTEP] rw [angle, ← InnerProductGeometry.inner_eq_zero_iff_angle_eq_pi_div_two] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(-π / 2) ⊢ inner (p₁ -ᵥ p₂) (p₃ -ᵥ p₂) = 0 [PROOFSTEP] exact o.inner_eq_zero_of_oangle_eq_neg_pi_div_two h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(-π / 2) ⊢ ∠ p₃ p₂ p₁ = π / 2 [PROOFSTEP] rw [angle_comm] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ∡ p₁ p₂ p₃ = ↑(-π / 2) ⊢ ∠ p₁ p₂ p₃ = π / 2 [PROOFSTEP] exact angle_eq_pi_div_two_of_oangle_eq_neg_pi_div_two h [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ -Real.Angle.sign (∡ p₁ 
p₂ p₃) = Real.Angle.sign (∡ p₂ p₁ p₃) [PROOFSTEP] rw [eq_comm, oangle, oangle, ← o.oangle_neg_neg, neg_vsub_eq_vsub_rev, neg_vsub_eq_vsub_rev, ← vsub_sub_vsub_cancel_left p₁ p₃ p₂, ← neg_vsub_eq_vsub_rev p₃ p₂, sub_eq_add_neg, neg_vsub_eq_vsub_rev p₂ p₁, add_comm, ← @neg_one_smul ℝ] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (p₁ -ᵥ p₂ + -1 • (p₃ -ᵥ p₂))) = -Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂)) [PROOFSTEP] nth_rw 2 [← one_smul ℝ (p₁ -ᵥ p₂)] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (1 • (p₁ -ᵥ p₂) + -1 • (p₃ -ᵥ p₂))) = -Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂)) [PROOFSTEP] rw [o.oangle_sign_smul_add_smul_right] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ↑SignType.sign (-1) * Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂)) = -Real.Angle.sign (Orientation.oangle o (p₁ -ᵥ p₂) (p₃ -ᵥ p₂)) [PROOFSTEP] simp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ -Real.Angle.sign (∡ p₁ p₂ p₃) = Real.Angle.sign (∡ p₃ p₂ p₁) [PROOFSTEP] rw [oangle_rev, Real.Angle.sign_neg, neg_neg] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ -Real.Angle.sign (∡ p₁ p₂ p₃) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] rw [oangle_swap₁₃_sign, ← oangle_swap₁₂_sign, oangle_swap₁₃_sign] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ Real.Angle.sign (∡ p₂ p₃ p₁) = Real.Angle.sign (∡ p₁ p₂ p₃) [PROOFSTEP] rw [← oangle_swap₁₂_sign, oangle_swap₁₃_sign] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ p₂ p₃ = ↑π ↔ Sbtw ℝ p₁ p₂ p₃ [PROOFSTEP] rw [oangle_eq_pi_iff_angle_eq_pi, angle_eq_pi_iff_sbtw] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Sbtw ℝ p₁ p₂ p₃ ⊢ ∡ p₃ p₂ p₁ = ↑π [PROOFSTEP] rw [oangle_eq_pi_iff_oangle_rev_eq_pi, ← h.oangle₁₂₃_eq_pi] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ ⊢ ∡ p₂ p₁ p₃ = 0 [PROOFSTEP] by_cases hp₂p₁ : p₂ = p₁ [GOAL] case pos V : Type 
u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ hp₂p₁ : p₂ = p₁ ⊢ ∡ p₂ p₁ p₃ = 0 [PROOFSTEP] simp [hp₂p₁] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ hp₂p₁ : ¬p₂ = p₁ ⊢ ∡ p₂ p₁ p₃ = 0 [PROOFSTEP] by_cases hp₃p₁ : p₃ = p₁ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ hp₂p₁ : ¬p₂ = p₁ hp₃p₁ : p₃ = p₁ ⊢ ∡ p₂ p₁ p₃ = 0 [PROOFSTEP] simp [hp₃p₁] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ hp₂p₁ : ¬p₂ = p₁ hp₃p₁ : ¬p₃ = p₁ ⊢ ∡ p₂ p₁ p₃ = 0 [PROOFSTEP] rw [oangle_eq_zero_iff_angle_eq_zero hp₂p₁ hp₃p₁] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ hp₂p₁ : ¬p₂ = p₁ hp₃p₁ : ¬p₃ = p₁ ⊢ ∠ p₂ p₁ p₃ = 0 [PROOFSTEP] exact h.angle₂₁₃_eq_zero_of_ne hp₂p₁ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : Wbtw ℝ p₁ p₂ p₃ ⊢ ∡ p₃ p₁ p₂ = 0 [PROOFSTEP] rw [oangle_eq_zero_iff_oangle_rev_eq_zero, h.oangle₂₁₃_eq_zero] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ p₂ p₃ = 0 ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] by_cases hp₁p₂ : p₁ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hp₁p₂ : p₁ = p₂ ⊢ ∡ p₁ p₂ p₃ = 0 ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] simp [hp₁p₂] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hp₁p₂ : ¬p₁ = p₂ ⊢ ∡ p₁ p₂ p₃ = 0 ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] by_cases hp₃p₂ : p₃ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hp₁p₂ : ¬p₁ = p₂ hp₃p₂ : p₃ = p₂ ⊢ ∡ p₁ p₂ p₃ = 0 ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] simp [hp₃p₂] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P 
hp₁p₂ : ¬p₁ = p₂ hp₃p₂ : ¬p₃ = p₂ ⊢ ∡ p₁ p₂ p₃ = 0 ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] rw [oangle_eq_zero_iff_angle_eq_zero hp₁p₂ hp₃p₂, angle_eq_zero_iff_ne_and_wbtw] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P hp₁p₂ : ¬p₁ = p₂ hp₃p₂ : ¬p₃ = p₂ ⊢ p₁ ≠ p₂ ∧ Wbtw ℝ p₂ p₁ p₃ ∨ p₃ ≠ p₂ ∧ Wbtw ℝ p₂ p₃ p₁ ↔ Wbtw ℝ p₂ p₁ p₃ ∨ Wbtw ℝ p₂ p₃ p₁ [PROOFSTEP] simp [hp₁p₂, hp₃p₂] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Wbtw ℝ p₂ p₁ p₁' hp₁p₂ : p₁ ≠ p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] by_cases hp₃p₂ : p₃ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Wbtw ℝ p₂ p₁ p₁' hp₁p₂ : p₁ ≠ p₂ hp₃p₂ : p₃ = p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] simp [hp₃p₂] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Wbtw ℝ p₂ p₁ p₁' hp₁p₂ : p₁ ≠ p₂ hp₃p₂ : ¬p₃ = p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] by_cases hp₁'p₂ : p₁' = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Wbtw ℝ p₂ p₁ p₁' hp₁p₂ : p₁ ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hp₁'p₂ : p₁' = p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] rw [hp₁'p₂, wbtw_self_iff] at h [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : p₁ = p₂ hp₁p₂ : p₁ ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hp₁'p₂ : p₁' = p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] exact False.elim (hp₁p₂ h) [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Wbtw ℝ p₂ p₁ p₁' hp₁p₂ : p₁ ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hp₁'p₂ : ¬p₁' = p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ [PROOFSTEP] rw [← oangle_add hp₁'p₂ hp₁p₂ hp₃p₂, h.oangle₃₁₂_eq_zero, zero_add] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₃' : P h : Wbtw ℝ p₂ p₃ p₃' hp₃p₂ : p₃ ≠ p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁ p₂ p₃' [PROOFSTEP] rw [oangle_rev, h.oangle_eq_left hp₃p₂, ← oangle_rev] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ (midpoint ℝ p₁ p₂) p₂ p₃ = ∡ p₁ p₂ p₃ [PROOFSTEP] by_cases h : p₁ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : 
NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : p₁ = p₂ ⊢ ∡ (midpoint ℝ p₁ p₂) p₂ p₃ = ∡ p₁ p₂ p₃ [PROOFSTEP] simp [h] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ¬p₁ = p₂ ⊢ ∡ (midpoint ℝ p₁ p₂) p₂ p₃ = ∡ p₁ p₂ p₃ [PROOFSTEP] exact (sbtw_midpoint_of_ne ℝ h).symm.oangle_eq_left [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ (midpoint ℝ p₂ p₁) p₂ p₃ = ∡ p₁ p₂ p₃ [PROOFSTEP] rw [midpoint_comm, oangle_midpoint_left] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ p₂ (midpoint ℝ p₃ p₂) = ∡ p₁ p₂ p₃ [PROOFSTEP] by_cases h : p₃ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : p₃ = p₂ ⊢ ∡ p₁ p₂ (midpoint ℝ p₃ p₂) = ∡ p₁ p₂ p₃ [PROOFSTEP] simp [h] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P h : ¬p₃ = p₂ ⊢ ∡ p₁ p₂ (midpoint ℝ p₃ p₂) = ∡ p₁ p₂ p₃ [PROOFSTEP] exact (sbtw_midpoint_of_ne ℝ h).symm.oangle_eq_right [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ : P ⊢ ∡ p₁ p₂ (midpoint ℝ p₂ p₃) = ∡ p₁ p₂ p₃ [PROOFSTEP] rw [midpoint_comm, oangle_midpoint_right] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Sbtw ℝ p₁ p₂ p₁' hp₃p₂ : p₃ ≠ p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃ + ↑π [PROOFSTEP] rw [← h.oangle₁₂₃_eq_pi, oangle_add_swap h.left_ne h.right_ne hp₃p₂] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₃' : P h : Sbtw ℝ p₃ p₂ p₃' hp₁p₂ : p₁ ≠ p₂ ⊢ ∡ p₁ p₂ p₃ = ∡ p₁ p₂ p₃' + ↑π [PROOFSTEP] rw [← h.oangle₃₂₁_eq_pi, oangle_add hp₁p₂ h.right_ne h.left_ne] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ p₃' : P h₁ : Sbtw ℝ p₁ p₂ p₁' h₃ : Sbtw ℝ p₃ p₂ p₃' ⊢ ∡ p₁ p₂ p₃ = ∡ p₁' p₂ p₃' [PROOFSTEP] rw [h₁.oangle_eq_add_pi_left h₃.left_ne, h₃.oangle_eq_add_pi_right h₁.right_ne, add_assoc, Real.Angle.coe_pi_add_coe_pi, add_zero] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : 
Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] by_cases hp₃p₂ : p₃ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : p₃ = p₂ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] simp [hp₃p₂] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : ¬p₃ = p₂ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] rcases h.wbtw_or_wbtw_or_wbtw with (hw | hw | hw) [GOAL] case neg.inl V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hw : Wbtw ℝ p₁ p₂ p₁' ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] have hw' : Sbtw ℝ p₁ p₂ p₁' := ⟨hw, hp₁p₂.symm, hp₁'p₂.symm⟩ [GOAL] case neg.inl V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hw : Wbtw ℝ p₁ p₂ p₁' hw' : Sbtw ℝ p₁ p₂ p₁' ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] rw [hw'.oangle_eq_add_pi_left hp₃p₂, smul_add, Real.Angle.two_zsmul_coe_pi, add_zero] [GOAL] case neg.inr.inl V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hw : Wbtw ℝ p₂ p₁' p₁ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] rw [hw.oangle_eq_left hp₁'p₂] [GOAL] case neg.inr.inr V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₁' p₂ p₃ : P h : Collinear ℝ {p₁, p₂, p₁'} hp₁p₂ : p₁ ≠ p₂ hp₁'p₂ : p₁' ≠ p₂ hp₃p₂ : ¬p₃ = p₂ hw : Wbtw ℝ p₁' p₁ p₂ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁' p₂ p₃ [PROOFSTEP] rw [hw.symm.oangle_eq_left hp₁p₂] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₃' : P h : Collinear ℝ {p₃, p₂, p₃'} hp₃p₂ : p₃ ≠ p₂ hp₃'p₂ : p₃' ≠ p₂ ⊢ 2 • ∡ p₁ p₂ p₃ = 2 • ∡ p₁ p₂ p₃' [PROOFSTEP] rw [oangle_rev, smul_neg, h.two_zsmul_oangle_eq_left hp₃p₂ hp₃'p₂, ← smul_neg, ← oangle_rev] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ ⊢ dist p₁ p = dist p₂ p ↔ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] refine' ⟨fun hd => _, fun hr => _⟩ 
[GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p ⊢ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] have hi : ⟪p₂ -ᵥ p₁, p -ᵥ midpoint ℝ p₁ p₂⟫ = 0 := by rw [@dist_eq_norm_vsub' V, @dist_eq_norm_vsub' V, ← mul_self_inj (norm_nonneg _) (norm_nonneg _), ← real_inner_self_eq_norm_mul_norm, ← real_inner_self_eq_norm_mul_norm] at hd simp_rw [vsub_midpoint, ← vsub_sub_vsub_cancel_left p₂ p₁ p, inner_sub_left, inner_add_right, inner_smul_right, hd, real_inner_comm (p -ᵥ p₁)] abel [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p ⊢ inner (p₂ -ᵥ p₁) (p -ᵥ midpoint ℝ p₁ p₂) = 0 [PROOFSTEP] rw [@dist_eq_norm_vsub' V, @dist_eq_norm_vsub' V, ← mul_self_inj (norm_nonneg _) (norm_nonneg _), ← real_inner_self_eq_norm_mul_norm, ← real_inner_self_eq_norm_mul_norm] at hd [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd✝¹ : ‖p -ᵥ p₁‖ * ‖p -ᵥ p₁‖ = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd✝ : inner (p -ᵥ p₁) (p -ᵥ p₁) = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd : inner (p -ᵥ p₁) (p -ᵥ p₁) = inner (p -ᵥ p₂) (p -ᵥ p₂) ⊢ inner (p₂ -ᵥ p₁) (p -ᵥ midpoint ℝ p₁ p₂) = 0 [PROOFSTEP] simp_rw [vsub_midpoint, ← vsub_sub_vsub_cancel_left p₂ p₁ p, inner_sub_left, inner_add_right, inner_smul_right, hd, real_inner_comm (p -ᵥ p₁)] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd✝¹ : ‖p -ᵥ p₁‖ * ‖p -ᵥ p₁‖ = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd✝ : inner (p -ᵥ p₁) (p -ᵥ p₁) = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd : inner (p -ᵥ p₁) (p -ᵥ p₁) = inner (p -ᵥ p₂) (p -ᵥ p₂) ⊢ ⅟2 * inner (p -ᵥ p₂) (p -ᵥ p₂) + ⅟2 * inner (p -ᵥ p₁) (p -ᵥ p₂) - (⅟2 * inner (p -ᵥ p₁) (p -ᵥ p₂) + ⅟2 * inner (p -ᵥ p₂) (p -ᵥ p₂)) = 0 [PROOFSTEP] abel [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd✝¹ : ‖p -ᵥ p₁‖ * ‖p -ᵥ p₁‖ = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd✝ : inner (p -ᵥ p₁) (p -ᵥ p₁) = ‖p -ᵥ p₂‖ * ‖p -ᵥ p₂‖ hd : inner (p -ᵥ p₁) (p -ᵥ p₁) = inner (p -ᵥ p₂) (p -ᵥ p₂) ⊢ ⅟2 * inner (p -ᵥ p₂) (p -ᵥ p₂) + ⅟2 * inner (p -ᵥ p₁) (p -ᵥ p₂) - (⅟2 * inner (p -ᵥ p₁) (p -ᵥ p₂) + ⅟2 * inner (p -ᵥ p₂) (p -ᵥ p₂)) = 0 [PROOFSTEP] abel [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p hi : inner (p₂ -ᵥ p₁) (p -ᵥ midpoint ℝ p₁ p₂) = 0 ⊢ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] rw [@Orientation.inner_eq_zero_iff_eq_zero_or_eq_smul_rotation_pi_div_two V _ _ _ o, or_iff_right (vsub_ne_zero.2 h.symm)] at hi [GOAL] case refine'_1 V : Type u_1 P : 
Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p hi : ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) = p -ᵥ midpoint ℝ p₁ p₂ ⊢ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] rcases hi with ⟨r, hr⟩ [GOAL] case refine'_1.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p r : ℝ hr : r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) = p -ᵥ midpoint ℝ p₁ p₂ ⊢ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] rw [eq_comm, ← eq_vadd_iff_vsub_eq] at hr [GOAL] case refine'_1.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hd : dist p₁ p = dist p₂ p r : ℝ hr : p = r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ ⊢ ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p [PROOFSTEP] exact ⟨r, hr.symm⟩ [GOAL] case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p : P h : p₁ ≠ p₂ hr : ∃ r, r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂ = p ⊢ dist p₁ p = dist p₂ p [PROOFSTEP] rcases hr with ⟨r, rfl⟩ [GOAL] case refine'_2.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ : P h : p₁ ≠ p₂ r : ℝ ⊢ dist p₁ (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂) = dist p₂ (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁) +ᵥ midpoint ℝ p₁ p₂) [PROOFSTEP] simp_rw [@dist_eq_norm_vsub V, vsub_vadd_eq_vsub_sub, left_vsub_midpoint, right_vsub_midpoint, invOf_eq_inv, ← neg_vsub_eq_vsub_rev p₂ p₁, ← mul_self_inj (norm_nonneg _) (norm_nonneg _), ← real_inner_self_eq_norm_mul_norm, inner_sub_sub_self] [GOAL] case refine'_2.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ : P h : p₁ ≠ p₂ r : ℝ ⊢ inner (2⁻¹ • -(p₂ -ᵥ p₁)) (2⁻¹ • -(p₂ -ᵥ p₁)) - inner (2⁻¹ • -(p₂ -ᵥ p₁)) (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) - inner (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) (2⁻¹ • -(p₂ -ᵥ p₁)) + inner (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) = inner (2⁻¹ • (p₂ -ᵥ p₁)) (2⁻¹ • (p₂ -ᵥ p₁)) - inner (2⁻¹ • (p₂ -ᵥ p₁)) (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) - inner (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) (2⁻¹ • (p₂ -ᵥ p₁)) + inner (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) (r • ↑(Orientation.rotation o ↑(π / 2)) (p₂ -ᵥ p₁)) [PROOFSTEP] simp [-neg_vsub_eq_vsub_rev] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ 
: NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] by_cases hc₅₁₂ : Collinear ℝ ({ p₅, p₁, p₂ } : Set P) [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₅, p₁, p₂} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hc₅₁₂₃₄ : Collinear ℝ ({ p₅, p₁, p₂, p₃, p₄ } : Set P) := (hc.collinear_insert_iff_of_ne (Set.mem_insert _ _) (Set.mem_insert_of_mem _ (Set.mem_insert _ _)) hp₁p₂).2 hc₅₁₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₅, p₁, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hc₅₃₄ : Collinear ℝ ({ p₅, p₃, p₄ } : Set P) := (hc.collinear_insert_iff_of_ne (Set.mem_insert_of_mem _ (Set.mem_insert_of_mem _ (Set.mem_insert _ _))) (Set.mem_insert_of_mem _ (Set.mem_insert_of_mem _ (Set.mem_insert_of_mem _ (Set.mem_singleton _)))) hp₃p₄).1 hc₅₁₂₃₄ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₅, p₁, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} hc₅₃₄ : Collinear ℝ {p₅, p₃, p₄} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] rw [Set.insert_comm] at hc₅₁₂ hc₅₃₄ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₁, p₅, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} hc₅₃₄ : Collinear ℝ {p₃, p₅, p₄} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hs₁₅₂ := oangle_eq_zero_or_eq_pi_iff_collinear.2 hc₅₁₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₁, p₅, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} hc₅₃₄ : Collinear ℝ {p₃, p₅, p₄} hs₁₅₂ : ∡ p₁ p₅ p₂ = 0 ∨ ∡ p₁ p₅ p₂ = ↑π ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hs₃₅₄ := oangle_eq_zero_or_eq_pi_iff_collinear.2 hc₅₃₄ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : 
NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₁, p₅, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} hc₅₃₄ : Collinear ℝ {p₃, p₅, p₄} hs₁₅₂ : ∡ p₁ p₅ p₂ = 0 ∨ ∡ p₁ p₅ p₂ = ↑π hs₃₅₄ : ∡ p₃ p₅ p₄ = 0 ∨ ∡ p₃ p₅ p₄ = ↑π ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] rw [← Real.Angle.sign_eq_zero_iff] at hs₁₅₂ hs₃₅₄ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : Collinear ℝ {p₁, p₅, p₂} hc₅₁₂₃₄ : Collinear ℝ {p₅, p₁, p₂, p₃, p₄} hc₅₃₄ : Collinear ℝ {p₃, p₅, p₄} hs₁₅₂ : Real.Angle.sign (∡ p₁ p₅ p₂) = 0 hs₃₅₄ : Real.Angle.sign (∡ p₃ p₅ p₄) = 0 ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] rw [hs₁₅₂, hs₃₅₄] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] let s : Set (P × P × P) := (fun x : line[ℝ, p₁, p₂] × V => (x.1, p₅, x.2 +ᵥ (x.1 : P))) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hco : IsConnected s := haveI : ConnectedSpace line[ℝ, p₁, p₂] := AddTorsor.connectedSpace _ _ (isConnected_univ.prod (isConnected_setOf_sameRay_and_ne_zero (vsub_ne_zero.2 hp₁p₂.symm))).image _ (continuous_fst.subtype_val.prod_mk (continuous_const.prod_mk (continuous_snd.vadd continuous_fst.subtype_val))).continuousOn [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hf : ContinuousOn (fun p : P × P × P => ∡ p.1 p.2.1 p.2.2) s := by refine' ContinuousAt.continuousOn fun p hp => continuousAt_oangle _ _ all_goals simp_rw [Set.mem_image, Set.mem_prod, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp obtain ⟨q₁, q₅, 
q₂⟩ := p dsimp only at hp ⊢ obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ obtain ⟨hvr, -⟩ := hv rintro rfl refine' hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan _).2 (collinear_pair _ _ _)) · exact hq · refine' vadd_mem_of_mem_direction _ hq rw [← exists_nonneg_left_iff_sameRay (vsub_ne_zero.2 hp₁p₂.symm)] at hvr obtain ⟨r, -, rfl⟩ := hvr rw [direction_affineSpan] exact smul_vsub_rev_mem_vectorSpan_pair _ _ _ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s [PROOFSTEP] refine' ContinuousAt.continuousOn fun p hp => continuousAt_oangle _ _ [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : p ∈ s ⊢ p.fst ≠ p.snd.fst case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : p ∈ s ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] all_goals simp_rw [Set.mem_image, Set.mem_prod, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp obtain ⟨q₁, q₅, q₂⟩ := p dsimp only at hp ⊢ obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ obtain ⟨hvr, -⟩ := hv rintro rfl refine' hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan _).2 (collinear_pair _ _ _)) [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : p ∈ s ⊢ p.fst ≠ p.snd.fst [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_prod, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ 
{p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = p.fst ∧ p₅ = p.snd.fst ∧ x.snd +ᵥ ↑x.fst = p.snd.snd ⊢ p.fst ≠ p.snd.fst [PROOFSTEP] obtain ⟨q₁, q₅, q₂⟩ := p [GOAL] case refine'_1.mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s q₁ q₅ q₂ : P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = (q₁, q₅, q₂).fst ∧ p₅ = (q₁, q₅, q₂).snd.fst ∧ x.snd +ᵥ ↑x.fst = (q₁, q₅, q₂).snd.snd ⊢ (q₁, q₅, q₂).fst ≠ (q₁, q₅, q₂).snd.fst [PROOFSTEP] dsimp only at hp ⊢ [GOAL] case refine'_1.mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s q₁ q₅ q₂ : P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = q₁ ∧ p₅ = q₅ ∧ x.snd +ᵥ ↑x.fst = q₂ ⊢ q₁ ≠ q₅ [PROOFSTEP] obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp [GOAL] case refine'_1.mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : ({ val := q, property := hq }, v).snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ⊢ ↑({ val := q, property := hq }, v).fst ≠ p₅ [PROOFSTEP] dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ [GOAL] case refine'_1.mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : v ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ⊢ q ≠ p₅ [PROOFSTEP] obtain ⟨hvr, -⟩ := hv [GOAL] case refine'_1.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : 
NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v ⊢ q ≠ p₅ [PROOFSTEP] rintro rfl [GOAL] case refine'_1.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hc₅₁₂ : ¬Collinear ℝ {q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ False [PROOFSTEP] refine' hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan _).2 (collinear_pair _ _ _)) [GOAL] case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : p ∈ s ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_prod, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp [GOAL] case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s p : P × P × P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = p.fst ∧ p₅ = p.snd.fst ∧ x.snd +ᵥ ↑x.fst = p.snd.snd ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] obtain ⟨q₁, q₅, q₂⟩ := p [GOAL] case refine'_2.mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s q₁ q₅ q₂ : P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = (q₁, q₅, q₂).fst ∧ p₅ = (q₁, q₅, q₂).snd.fst ∧ x.snd +ᵥ ↑x.fst = (q₁, q₅, q₂).snd.snd ⊢ (q₁, q₅, q₂).snd.snd ≠ (q₁, q₅, q₂).snd.fst [PROOFSTEP] dsimp only at hp ⊢ [GOAL] case refine'_2.mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) 
inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s q₁ q₅ q₂ : P hp : ∃ x, x.snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ∧ ↑x.fst = q₁ ∧ p₅ = q₅ ∧ x.snd +ᵥ ↑x.fst = q₂ ⊢ q₂ ≠ q₅ [PROOFSTEP] obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : ({ val := q, property := hq }, v).snd ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ⊢ ({ val := q, property := hq }, v).snd +ᵥ ↑({ val := q, property := hq }, v).fst ≠ p₅ [PROOFSTEP] dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : v ∈ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} ⊢ v +ᵥ q ≠ p₅ [PROOFSTEP] obtain ⟨hvr, -⟩ := hv [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v ⊢ v +ᵥ q ≠ p₅ [PROOFSTEP] rintro rfl [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hc₅₁₂ : ¬Collinear ℝ {v +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, v +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ False [PROOFSTEP] refine' hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan _).2 (collinear_pair _ _ _)) [GOAL] case refine'_1.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : 
NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hc₅₁₂ : ¬Collinear ℝ {q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ q ∈ affineSpan ℝ {p₁, p₂} [PROOFSTEP] exact hq [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hc₅₁₂ : ¬Collinear ℝ {v +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, v +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ v +ᵥ q ∈ affineSpan ℝ {p₁, p₂} [PROOFSTEP] refine' vadd_mem_of_mem_direction _ hq [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hc₅₁₂ : ¬Collinear ℝ {v +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, v +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ v ∈ direction (affineSpan ℝ {p₁, p₂}) [PROOFSTEP] rw [← exists_nonneg_left_iff_sameRay (vsub_ne_zero.2 hp₁p₂.symm)] at hvr [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : ∃ r, 0 ≤ r ∧ r • (p₂ -ᵥ p₁) = v hc₅₁₂ : ¬Collinear ℝ {v +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, v +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ v ∈ direction (affineSpan ℝ {p₁, p₂}) [PROOFSTEP] obtain ⟨r, -, rfl⟩ := hvr [GOAL] case refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) q : P hq : q ∈ affineSpan ℝ {p₁, p₂} r : ℝ hc₅₁₂ : ¬Collinear ℝ {r • (p₂ -ᵥ p₁) +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, r • (p₂ -ᵥ p₁) +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ r • (p₂ -ᵥ p₁) ∈ direction (affineSpan ℝ {p₁, p₂}) [PROOFSTEP] rw [direction_affineSpan] [GOAL] case 
refine'_2.mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) q : P hq : q ∈ affineSpan ℝ {p₁, p₂} r : ℝ hc₅₁₂ : ¬Collinear ℝ {r • (p₂ -ᵥ p₁) +ᵥ q, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, r • (p₂ -ᵥ p₁) +ᵥ q, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s ⊢ r • (p₂ -ᵥ p₁) ∈ vectorSpan ℝ {p₁, p₂} [PROOFSTEP] exact smul_vsub_rev_mem_vectorSpan_pair _ _ _ [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hsp : ∀ p : P × P × P, p ∈ s → ∡ p.1 p.2.1 p.2.2 ≠ 0 ∧ ∡ p.1 p.2.1 p.2.2 ≠ π := by intro p hp simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp obtain ⟨q₁, q₅, q₂⟩ := p dsimp only at hp ⊢ obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ obtain ⟨hvr, hv0⟩ := hv rw [← exists_nonneg_left_iff_sameRay (vsub_ne_zero.2 hp₁p₂.symm)] at hvr obtain ⟨r, -, rfl⟩ := hvr change q ∈ line[ℝ, p₁, p₂] at hq rw [oangle_ne_zero_and_ne_pi_iff_affineIndependent] refine' affineIndependent_of_ne_of_mem_of_not_mem_of_mem _ hq (fun h => hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan h).2 (collinear_pair _ _ _))) _ · rwa [← @vsub_ne_zero V, vsub_vadd_eq_vsub_sub, vsub_self, zero_sub, neg_ne_zero] · refine' vadd_mem_of_mem_direction _ hq rw [direction_affineSpan] exact smul_vsub_rev_mem_vectorSpan_pair _ _ _ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s ⊢ ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] intro p hp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s p : P × P × P hp : p ∈ s ⊢ ∡ p.fst 
p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] at hp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s p : P × P × P hp : ∃ x, (SameRay ℝ (p₂ -ᵥ p₁) x.snd ∧ x.snd ≠ 0) ∧ ↑x.fst = p.fst ∧ p₅ = p.snd.fst ∧ x.snd +ᵥ ↑x.fst = p.snd.snd ⊢ ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] obtain ⟨q₁, q₅, q₂⟩ := p [GOAL] case mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q₁ q₅ q₂ : P hp : ∃ x, (SameRay ℝ (p₂ -ᵥ p₁) x.snd ∧ x.snd ≠ 0) ∧ ↑x.fst = (q₁, q₅, q₂).fst ∧ p₅ = (q₁, q₅, q₂).snd.fst ∧ x.snd +ᵥ ↑x.fst = (q₁, q₅, q₂).snd.snd ⊢ ∡ (q₁, q₅, q₂).fst (q₁, q₅, q₂).snd.fst (q₁, q₅, q₂).snd.snd ≠ 0 ∧ ∡ (q₁, q₅, q₂).fst (q₁, q₅, q₂).snd.fst (q₁, q₅, q₂).snd.snd ≠ ↑π [PROOFSTEP] dsimp only at hp ⊢ [GOAL] case mk.mk V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q₁ q₅ q₂ : P hp : ∃ x, (SameRay ℝ (p₂ -ᵥ p₁) x.snd ∧ x.snd ≠ 0) ∧ ↑x.fst = q₁ ∧ p₅ = q₅ ∧ x.snd +ᵥ ↑x.fst = q₂ ⊢ ∡ q₁ q₅ q₂ ≠ 0 ∧ ∡ q₁ q₅ q₂ ≠ ↑π [PROOFSTEP] obtain ⟨⟨⟨q, hq⟩, v⟩, hv, rfl, rfl, rfl⟩ := hp [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : SameRay ℝ (p₂ -ᵥ p₁) ({ val := q, property := hq }, v).snd ∧ ({ val := q, property := hq }, v).snd ≠ 0 ⊢ ∡ (↑({ val := q, property := hq }, v).fst) p₅ (({ val := q, property := hq }, v).snd +ᵥ ↑({ val := q, property := hq }, v).fst) ≠ 0 ∧ ∡ (↑({ val := q, 
property := hq }, v).fst) p₅ (({ val := q, property := hq }, v).snd +ᵥ ↑({ val := q, property := hq }, v).fst) ≠ ↑π [PROOFSTEP] dsimp only [Subtype.coe_mk, Set.mem_setOf] at hv ⊢ [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hv : SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0 ⊢ ∡ q p₅ (v +ᵥ q) ≠ 0 ∧ ∡ q p₅ (v +ᵥ q) ≠ ↑π [PROOFSTEP] obtain ⟨hvr, hv0⟩ := hv [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : SameRay ℝ (p₂ -ᵥ p₁) v hv0 : v ≠ 0 ⊢ ∡ q p₅ (v +ᵥ q) ≠ 0 ∧ ∡ q p₅ (v +ᵥ q) ≠ ↑π [PROOFSTEP] rw [← exists_nonneg_left_iff_sameRay (vsub_ne_zero.2 hp₁p₂.symm)] at hvr [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s v : V q : P hq : q ∈ affineSpan ℝ {p₁, p₂} hvr : ∃ r, 0 ≤ r ∧ r • (p₂ -ᵥ p₁) = v hv0 : v ≠ 0 ⊢ ∡ q p₅ (v +ᵥ q) ≠ 0 ∧ ∡ q p₅ (v +ᵥ q) ≠ ↑π [PROOFSTEP] obtain ⟨r, -, rfl⟩ := hvr [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P hq : q ∈ affineSpan ℝ {p₁, p₂} r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 ⊢ ∡ q p₅ (r • (p₂ -ᵥ p₁) +ᵥ q) ≠ 0 ∧ ∡ q p₅ (r • (p₂ -ᵥ p₁) +ᵥ q) ≠ ↑π [PROOFSTEP] change q ∈ line[ℝ, p₁, p₂] at hq [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : 
NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ ∡ q p₅ (r • (p₂ -ᵥ p₁) +ᵥ q) ≠ 0 ∧ ∡ q p₅ (r • (p₂ -ᵥ p₁) +ᵥ q) ≠ ↑π [PROOFSTEP] rw [oangle_ne_zero_and_ne_pi_iff_affineIndependent] [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ AffineIndependent ℝ ![q, p₅, r • (p₂ -ᵥ p₁) +ᵥ q] [PROOFSTEP] refine' affineIndependent_of_ne_of_mem_of_not_mem_of_mem _ hq (fun h => hc₅₁₂ ((collinear_insert_iff_of_mem_affineSpan h).2 (collinear_pair _ _ _))) _ [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro.refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ q ≠ r • (p₂ -ᵥ p₁) +ᵥ q [PROOFSTEP] rwa [← @vsub_ne_zero V, vsub_vadd_eq_vsub_sub, vsub_self, zero_sub, neg_ne_zero] [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro.refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ r • (p₂ -ᵥ p₁) +ᵥ q ∈ affineSpan ℝ {p₁, p₂} [PROOFSTEP] refine' vadd_mem_of_mem_direction _ hq [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro.refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ 
p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ r • (p₂ -ᵥ p₁) ∈ direction (affineSpan ℝ {p₁, p₂}) [PROOFSTEP] rw [direction_affineSpan] [GOAL] case mk.mk.intro.mk.mk.intro.intro.intro.intro.intro.intro.refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s q : P r : ℝ hv0 : r • (p₂ -ᵥ p₁) ≠ 0 hq : q ∈ affineSpan ℝ {p₁, p₂} ⊢ r • (p₂ -ᵥ p₁) ∈ vectorSpan ℝ {p₁, p₂} [PROOFSTEP] exact smul_vsub_rev_mem_vectorSpan_pair _ _ _ [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hp₁p₂s : (p₁, p₅, p₂) ∈ s := by simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] refine' ⟨⟨⟨p₁, left_mem_affineSpan_pair _ _ _⟩, p₂ -ᵥ p₁⟩, ⟨SameRay.rfl, vsub_ne_zero.2 hp₁p₂.symm⟩, _⟩ simp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π ⊢ (p₁, p₅, p₂) ∈ s [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ 
s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π ⊢ ∃ x, (SameRay ℝ (p₂ -ᵥ p₁) x.snd ∧ x.snd ≠ 0) ∧ ↑x.fst = p₁ ∧ True ∧ x.snd +ᵥ ↑x.fst = p₂ [PROOFSTEP] refine' ⟨⟨⟨p₁, left_mem_affineSpan_pair _ _ _⟩, p₂ -ᵥ p₁⟩, ⟨SameRay.rfl, vsub_ne_zero.2 hp₁p₂.symm⟩, _⟩ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π ⊢ ↑({ val := p₁, property := (_ : p₁ ∈ affineSpan ℝ {p₁, p₂}) }, p₂ -ᵥ p₁).fst = p₁ ∧ True ∧ ({ val := p₁, property := (_ : p₁ ∈ affineSpan ℝ {p₁, p₂}) }, p₂ -ᵥ p₁).snd +ᵥ ↑({ val := p₁, property := (_ : p₁ ∈ affineSpan ℝ {p₁, p₂}) }, p₂ -ᵥ p₁).fst = p₂ [PROOFSTEP] simp [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₁p₂s : (p₁, p₅, p₂) ∈ s ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] have hp₃p₄s : (p₃, p₅, p₄) ∈ s := by simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] refine' ⟨⟨⟨p₃, hc.mem_affineSpan_of_mem_of_ne (Set.mem_insert _ _) (Set.mem_insert_of_mem _ (Set.mem_insert _ _)) (Set.mem_insert_of_mem _ (Set.mem_insert_of_mem _ (Set.mem_insert _ _))) hp₁p₂⟩, p₄ -ᵥ p₃⟩, ⟨hr, vsub_ne_zero.2 hp₃p₄.symm⟩, _⟩ simp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₁p₂s : (p₁, p₅, p₂) ∈ s ⊢ (p₃, p₅, p₄) ∈ s [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P 
× P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₁p₂s : (p₁, p₅, p₂) ∈ s ⊢ ∃ x, (SameRay ℝ (p₂ -ᵥ p₁) x.snd ∧ x.snd ≠ 0) ∧ ↑x.fst = p₃ ∧ True ∧ x.snd +ᵥ ↑x.fst = p₄ [PROOFSTEP] refine' ⟨⟨⟨p₃, hc.mem_affineSpan_of_mem_of_ne (Set.mem_insert _ _) (Set.mem_insert_of_mem _ (Set.mem_insert _ _)) (Set.mem_insert_of_mem _ (Set.mem_insert_of_mem _ (Set.mem_insert _ _))) hp₁p₂⟩, p₄ -ᵥ p₃⟩, ⟨hr, vsub_ne_zero.2 hp₃p₄.symm⟩, _⟩ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₁p₂s : (p₁, p₅, p₂) ∈ s ⊢ ↑({ val := p₃, property := (_ : p₃ ∈ affineSpan ℝ {p₁, p₂}) }, p₄ -ᵥ p₃).fst = p₃ ∧ True ∧ ({ val := p₃, property := (_ : p₃ ∈ affineSpan ℝ {p₁, p₂}) }, p₄ -ᵥ p₃).snd +ᵥ ↑({ val := p₃, property := (_ : p₃ ∈ affineSpan ℝ {p₁, p₂}) }, p₄ -ᵥ p₃).fst = p₄ [PROOFSTEP] simp [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ p₅ : P hp₁p₂ : p₁ ≠ p₂ hp₃p₄ : p₃ ≠ p₄ hc : Collinear ℝ {p₁, p₂, p₃, p₄} hr : SameRay ℝ (p₂ -ᵥ p₁) (p₄ -ᵥ p₃) hc₅₁₂ : ¬Collinear ℝ {p₅, p₁, p₂} s : Set (P × P × P) := (fun x => (↑x.fst, p₅, x.snd +ᵥ ↑x.fst)) '' Set.univ ×ˢ {v | SameRay ℝ (p₂ -ᵥ p₁) v ∧ v ≠ 0} hco : IsConnected s hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) s hsp : ∀ (p : P × P × P), p ∈ s → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₁p₂s : (p₁, p₅, p₂) ∈ s hp₃p₄s : (p₃, p₅, p₄) ∈ s ⊢ Real.Angle.sign (∡ p₁ p₅ p₂) = Real.Angle.sign (∡ p₃ p₅ p₄) [PROOFSTEP] convert Real.Angle.sign_eq_of_continuousOn hco hf hsp hp₃p₄s hp₁p₂s [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P h : Sbtw ℝ p₁ p₂ p₃ ⊢ Collinear ℝ {p₁, p₂, p₂, p₃} [PROOFSTEP] simpa using h.wbtw.collinear [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P h : Wbtw ℝ p₁ p₂ p₃ hne : p₁ ≠ p₂ ⊢ Collinear ℝ {p₁, p₂, p₁, p₃} [PROOFSTEP] simpa [Set.insert_comm p₂] using h.collinear [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) p₁ p₂ p₃ p₄ : P h : Wbtw ℝ p₁ p₂ p₃ hne : p₂ ≠ p₃ ⊢ Real.Angle.sign (∡ p₂ p₄ p₃) = Real.Angle.sign (∡ p₁ p₄ p₃) [PROOFSTEP] simp_rw [oangle_rev p₃, Real.Angle.sign_neg, 
h.symm.oangle_sign_eq_of_ne_left _ hne.symm] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] by_cases h : p₁ = p₂ [GOAL] case pos V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : p₁ = p₂ ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] simp [h] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] let sp : Set (P × P × P) := (fun p : P => (p₁, p, p₂)) '' {p | s.SSameSide p₃ p} [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hc : IsConnected sp := (isConnected_setOf_sSameSide hp₃p₄.2.1 hp₃p₄.nonempty).image _ (continuous_const.prod_mk (Continuous.Prod.mk_left _)).continuousOn [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hf : ContinuousOn (fun p : P × P × P => ∡ p.1 p.2.1 p.2.2) sp := by refine' ContinuousAt.continuousOn fun p hp => continuousAt_oangle _ _ all_goals simp_rw [Set.mem_image, Set.mem_setOf] at hp obtain ⟨p', hp', rfl⟩ := hp dsimp only rintro rfl · exact hp'.2.2 hp₁ · exact hp'.2.2 hp₂ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp ⊢ ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp [PROOFSTEP] refine' ContinuousAt.continuousOn fun p hp => continuousAt_oangle _ _ [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : 
p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : p ∈ sp ⊢ p.fst ≠ p.snd.fst case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : p ∈ sp ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] all_goals simp_rw [Set.mem_image, Set.mem_setOf] at hp obtain ⟨p', hp', rfl⟩ := hp dsimp only rintro rfl [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : p ∈ sp ⊢ p.fst ≠ p.snd.fst [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_setOf] at hp [GOAL] case refine'_1 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : ∃ x, SSameSide s p₃ x ∧ (p₁, x, p₂) = p ⊢ p.fst ≠ p.snd.fst [PROOFSTEP] obtain ⟨p', hp', rfl⟩ := hp [GOAL] case refine'_1.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p' : P hp' : SSameSide s p₃ p' ⊢ (p₁, p', p₂).fst ≠ (p₁, p', p₂).snd.fst [PROOFSTEP] dsimp only [GOAL] case refine'_1.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p' : P hp' : SSameSide s p₃ p' ⊢ p₁ ≠ p' [PROOFSTEP] rintro rfl [GOAL] case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : p ∈ sp ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_setOf] at hp [GOAL] case refine'_2 V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : 
NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p : P × P × P hp : ∃ x, SSameSide s p₃ x ∧ (p₁, x, p₂) = p ⊢ p.snd.snd ≠ p.snd.fst [PROOFSTEP] obtain ⟨p', hp', rfl⟩ := hp [GOAL] case refine'_2.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p' : P hp' : SSameSide s p₃ p' ⊢ (p₁, p', p₂).snd.snd ≠ (p₁, p', p₂).snd.fst [PROOFSTEP] dsimp only [GOAL] case refine'_2.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp p' : P hp' : SSameSide s p₃ p' ⊢ p₂ ≠ p' [PROOFSTEP] rintro rfl [GOAL] case refine'_1.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hp' : SSameSide s p₃ p₁ ⊢ False [PROOFSTEP] exact hp'.2.2 hp₁ [GOAL] case refine'_2.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hp' : SSameSide s p₃ p₂ ⊢ False [PROOFSTEP] exact hp'.2.2 hp₂ [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hsp : ∀ p : P × P × P, p ∈ sp → ∡ p.1 p.2.1 p.2.2 ≠ 0 ∧ ∡ p.1 p.2.1 p.2.2 ≠ π := by intro p hp simp_rw [Set.mem_image, Set.mem_setOf] at hp obtain ⟨p', hp', rfl⟩ := hp dsimp only rw [oangle_ne_zero_and_ne_pi_iff_affineIndependent] exact affineIndependent_of_ne_of_mem_of_not_mem_of_mem h hp₁ hp'.2.2 hp₂ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ 
s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp ⊢ ∀ (p : P × P × P), p ∈ sp → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] intro p hp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp p : P × P × P hp : p ∈ sp ⊢ ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] simp_rw [Set.mem_image, Set.mem_setOf] at hp [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp p : P × P × P hp : ∃ x, SSameSide s p₃ x ∧ (p₁, x, p₂) = p ⊢ ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π [PROOFSTEP] obtain ⟨p', hp', rfl⟩ := hp [GOAL] case intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp p' : P hp' : SSameSide s p₃ p' ⊢ ∡ (p₁, p', p₂).fst (p₁, p', p₂).snd.fst (p₁, p', p₂).snd.snd ≠ 0 ∧ ∡ (p₁, p', p₂).fst (p₁, p', p₂).snd.fst (p₁, p', p₂).snd.snd ≠ ↑π [PROOFSTEP] dsimp only [GOAL] case intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp p' : P hp' : SSameSide s p₃ p' ⊢ ∡ p₁ p' p₂ ≠ 0 ∧ ∡ p₁ p' p₂ ≠ ↑π [PROOFSTEP] rw [oangle_ne_zero_and_ne_pi_iff_affineIndependent] [GOAL] case intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp p' : P hp' : SSameSide s p₃ p' ⊢ AffineIndependent ℝ ![p₁, p', p₂] [PROOFSTEP] exact affineIndependent_of_ne_of_mem_of_not_mem_of_mem h hp₁ hp'.2.2 hp₂ [GOAL] case neg V : Type u_1 P : Type u_2 
inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp hsp : ∀ (p : P × P × P), p ∈ sp → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hp₃ : (p₁, p₃, p₂) ∈ sp := Set.mem_image_of_mem _ (sSameSide_self_iff.2 ⟨hp₃p₄.nonempty, hp₃p₄.2.1⟩) [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp hsp : ∀ (p : P × P × P), p ∈ sp → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₃ : (p₁, p₃, p₂) ∈ sp ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hp₄ : (p₁, p₄, p₂) ∈ sp := Set.mem_image_of_mem _ hp₃p₄ [GOAL] case neg V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SSameSide s p₃ p₄ h : ¬p₁ = p₂ sp : Set (P × P × P) := (fun p => (p₁, p, p₂)) '' {p | SSameSide s p₃ p} hc : IsConnected sp hf : ContinuousOn (fun p => ∡ p.fst p.snd.fst p.snd.snd) sp hsp : ∀ (p : P × P × P), p ∈ sp → ∡ p.fst p.snd.fst p.snd.snd ≠ 0 ∧ ∡ p.fst p.snd.fst p.snd.snd ≠ ↑π hp₃ : (p₁, p₃, p₂) ∈ sp hp₄ : (p₁, p₄, p₂) ∈ sp ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] convert Real.Angle.sign_eq_of_continuousOn hc hf hsp hp₃ hp₄ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SOppSide s p₃ p₄ ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = -Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] have hp₁p₃ : p₁ ≠ p₃ := by rintro rfl; exact hp₃p₄.left_not_mem hp₁ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SOppSide s p₃ p₄ ⊢ p₁ ≠ p₃ [PROOFSTEP] rintro rfl [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : AffineSubspace ℝ P p₁ p₂ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SOppSide s p₁ p₄ ⊢ False [PROOFSTEP] exact hp₃p₄.left_not_mem hp₁ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : InnerProductSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P hd2 : Fact (finrank ℝ V = 2) inst✝ : Module.Oriented ℝ V (Fin 2) s : 
AffineSubspace ℝ P p₁ p₂ p₃ p₄ : P hp₁ : p₁ ∈ s hp₂ : p₂ ∈ s hp₃p₄ : SOppSide s p₃ p₄ hp₁p₃ : p₁ ≠ p₃ ⊢ Real.Angle.sign (∡ p₁ p₄ p₂) = -Real.Angle.sign (∡ p₁ p₃ p₂) [PROOFSTEP] rw [← (hp₃p₄.symm.trans (sOppSide_pointReflection hp₁ hp₃p₄.left_not_mem)).oangle_sign_eq hp₁ hp₂, ← oangle_rotate_sign p₁, ← oangle_rotate_sign p₁, oangle_swap₁₃_sign, (sbtw_pointReflection_of_ne ℝ hp₁p₃).symm.oangle_sign_eq _]
lemma leibniz_rule_holomorphic: fixes f::"complex \<Rightarrow> 'b::euclidean_space \<Rightarrow> complex" assumes "\<And>x t. x \<in> U \<Longrightarrow> t \<in> cbox a b \<Longrightarrow> ((\<lambda>x. f x t) has_field_derivative fx x t) (at x within U)" assumes "\<And>x. x \<in> U \<Longrightarrow> (f x) integrable_on cbox a b" assumes "continuous_on (U \<times> (cbox a b)) (\<lambda>(x, t). fx x t)" assumes "convex U" shows "(\<lambda>x. integral (cbox a b) (f x)) holomorphic_on U"
#' GET a url. #' #' @section RFC2616: #' The GET method means retrieve whatever information (in the form of an #' entity) is identified by the Request-URI. If the Request-URI refers to a #' data-producing process, it is the produced data which shall be returned as #' the entity in the response and not the source text of the process, unless #' that text happens to be the output of the process. #' #' The semantics of the GET method change to a "conditional GET" if the #' request message includes an If-Modified-Since, If-Unmodified-Since, #' If-Match, If-None-Match, or If-Range header field. A conditional GET method #' requests that the entity be transferred only under the circumstances #' described by the conditional header field(s). The conditional GET method is #' intended to reduce unnecessary network usage by allowing cached entities to #' be refreshed without requiring multiple requests or transferring data #' already held by the client. #' #' The semantics of the GET method change to a "partial GET" if the request #' message includes a Range header field. A partial GET requests that only #' part of the entity be transferred, as described in <http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35> #' The partial GET method is intended to reduce unnecessary network usage by #' allowing partially-retrieved entities to be completed without transferring #' data already held by the client. #' #' @param url the url of the page to retrieve #' @param ... Further named parameters, such as `query`, `path`, etc, #' passed on to [modify_url()]. Unnamed parameters will be combined #' with [config()]. #' @param config Additional configuration settings such as http #' authentication ([authenticate()]), additional headers #' ([add_headers()]), cookies ([set_cookies()]) etc. #' See [config()] for full details and list of helpers. #' @param handle The handle to use with this request. If not #' supplied, will be retrieved and reused from the [handle_pool()] #' based on the scheme, hostname and port of the url. By default \pkg{httr} #' automatically reuses the same http connection (aka handle) for multiple #' requests to the same scheme/host/port combo. This substantially reduces #' connection time, and ensures that cookies are maintained over multiple #' requests to the same host. See [handle_pool()] for more #' details. #' #' @return A [response()] object. #' #' @family http methods #' @export #' @examples #' GET("http://google.com/") #' GET("http://google.com/", path = "search") #' GET("http://google.com/", path = "search", query = list(q = "ham")) #' #' # See what GET is doing with httpbin.org #' url <- "http://httpbin.org/get" #' GET(url) #' GET(url, add_headers(a = 1, b = 2)) #' GET(url, set_cookies(a = 1, b = 2)) #' GET(url, add_headers(a = 1, b = 2), set_cookies(a = 1, b = 2)) #' GET(url, authenticate("username", "password")) #' GET(url, verbose()) #' #' # You might want to manually specify the handle so you can have multiple #' # independent logins to the same website. #' google <- handle("http://google.com") #' GET(handle = google, path = "/") #' GET(handle = google, path = "search") GET <- function(url = NULL, config = list(), ..., handle = NULL) { hu <- handle_url(handle, url, ...) req <- request_build("GET", hu$url, as.request(config), ...) request_perform(req, hu$handle$handle) }
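To make the "conditional GET" and "partial GET" behaviour described above concrete, here is a small usage sketch. It is an editorial illustration, not part of the httr source: the httpbin.org endpoints, the byte range and the date are arbitrary choices, while GET(), add_headers(), status_code() and content() are the exported httr helpers already referenced in the documentation.

library(httr)

# Partial GET (RFC2616 sec 14.35): a plain GET carrying a Range header,
# asking the server to transfer only the first 100 bytes of the entity.
resp <- GET("http://httpbin.org/range/1024", add_headers(Range = "bytes=0-99"))
status_code(resp)             # 206 Partial Content when the server honours the Range header
length(content(resp, "raw"))  # at most 100 bytes were transferred

# Conditional GET: ask for the entity only if it changed after the given date;
# a server that supports it answers 304 Not Modified when the resource is unchanged.
resp2 <- GET("http://httpbin.org/get",
             add_headers(`If-Modified-Since` = "Sat, 29 Oct 1994 19:43:31 GMT"))
status_code(resp2)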
Require Export Lists. Inductive list (X:Type) : Type := | nil : list X | cons : X -> list X -> list X. Check nil. Check cons. Check (cons nat 2 (cons nat 1 (nil nat))). Module MUMBLEBAZ. Inductive mumble : Type := | a : mumble | b : mumble -> nat -> mumble | c : mumble. Inductive grumble (X : Type) : Type := | d : mumble -> grumble X | e : X -> grumble X. Check (d mumble (b a 5)). Check (d bool (b a 5)). Check (e bool true). Check (e mumble (b c 0)). Check c. End MUMBLEBAZ. Check (cons nat 2 (nil nat)). Check cons. Fixpoint app X l1 l2 : list X := match l1 with | nil => l2 | cons h t => cons X h (app X t l2) end. Arguments nil {X}. Arguments cons {X} _ _. Fixpoint length {X : Type} (l : list X) : nat := match l with | nil => O | cons h t => S (length t) end. Arguments app {X} l1 l2. Notation "x :: y" := (cons x y) (at level 60, right associativity). Notation "[]" := nil. Notation "[ x ; .. ; y ]" := (cons x .. (cons y []) ..). Notation "x ++ y" := (app x y) (at level 60, right associativity). Fixpoint snoc {X : Type} (a : list X) (b : X) : list X := match a with | nil => [b] | h :: t => h :: (snoc t b) end. Theorem snoc_with_append : forall (X : Type) (l1 l2 : list X) (v : X), snoc (l1 ++ l2) v = l1 ++ (snoc l2 v). Proof. induction l1. reflexivity. simpl. intros l2 v. rewrite -> IHl1. reflexivity. Qed. Inductive prod (X Y :Type) : Type := pair : X -> Y -> prod X Y. Arguments pair {X Y} _ _. Notation "( x , y )" := (pair x y). Notation "X * Y" := (prod X Y) : type_scope. Definition prod_curry {X Y Z:Type} (f : (X * Y) -> Z) (x : X) (y : Y) : Z := f (x,y). Definition prod_uncurry {X Y Z :Type} (f : X -> Y -> Z) ( p : (X * Y)) : Z := match p with | (x , y) => f x y end. Theorem uncurry_curry : forall (X Y Z : Type), forall (f : X -> Y -> Z), forall (x : X) (y : Y), (prod_curry (prod_uncurry f)) x y = f x y. Proof. intros X Y Z f x y. reflexivity. Qed. Theorem curry_uncurry : forall (X Y Z : Type) (f : (X * Y) -> Z) (p : (X * Y)), (prod_uncurry (prod_curry f)) p = f p. Proof. intros X Y Z f p. destruct p as [x y]. reflexivity. Qed. Definition doit3times {X:Type} (f : X -> X) (x : X) : X := f (f (f x)). Check (doit3times (fun n => match n with | O => O | S x => x end) 10). Definition override {Y : Type} (f : nat -> Y) (x : nat) (y : Y) :(nat -> Y):= fun (x' : nat) => if (beq_nat x' x) then y else f x'. Definition plus3 := plus 3. Theorem unfold_example : forall n m, 3 + n = m -> plus3 n + 1 = m + 1. Proof. intros n m H. unfold plus3. rewrite -> H. reflexivity. Qed. Theorem override_eq : forall (X : Type) x k (f : nat -> X), (override f k x) k = x. Proof. intros X x k f. unfold override. replace (beq_nat k k) with true. reflexivity. induction k. reflexivity. simpl. rewrite -> IHk. reflexivity. Qed. Module CHURCH. Definition nat := forall (X : Type), (X -> X) -> X -> X. Definition zero : nat := fun (X : Type) (f : X -> X) (x : X) => x. Definition succ (a : nat) : nat := fun (X : Type) (f : X -> X) (x : X) => f (a X f x). Definition one : nat := fun (X : Type) (f : X -> X) (x : X) => f x. Example succ_1 : succ zero = one. Proof. reflexivity. Qed. Definition plus (a b : nat) : nat := fun (X : Type) (f : X -> X) (x : X) => b X f (a X f x). Example plus_1 : plus zero one = one. Proof. reflexivity. Qed.
State Before: α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α ⊢ ↑↑(sum μ) s = 0 ↔ ∀ (i : ι), ↑↑(μ i) s = 0 State After: α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α h : ∀ (i : ι), ↑↑(μ i) s = 0 ⊢ ↑↑(sum μ) s ≤ 0 Tactic: refine' ⟨fun h i => nonpos_iff_eq_zero.1 <| h ▸ le_iff'.1 (le_sum μ i) _, fun h => nonpos_iff_eq_zero.1 _⟩ State Before: α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α h : ∀ (i : ι), ↑↑(μ i) s = 0 ⊢ ↑↑(sum μ) s ≤ 0 State After: case intro.intro.intro α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t✝ : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α h : ∀ (i : ι), ↑↑(μ i) s = 0 t : Set α hst : s ⊆ t htm : MeasurableSet t ht : ∀ (i : ι), ↑↑(μ i) t = ↑↑(μ i) s ⊢ ↑↑(sum μ) s ≤ 0 Tactic: rcases exists_measurable_superset_forall_eq μ s with ⟨t, hst, htm, ht⟩ State Before: case intro.intro.intro α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t✝ : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α h : ∀ (i : ι), ↑↑(μ i) s = 0 t : Set α hst : s ⊆ t htm : MeasurableSet t ht : ∀ (i : ι), ↑↑(μ i) t = ↑↑(μ i) s ⊢ ↑↑(sum μ) s ≤ 0 State After: no goals Tactic: calc sum μ s ≤ sum μ t := measure_mono hst _ = 0 := by simp [*] State Before: α : Type u_2 β : Type ?u.337219 γ : Type ?u.337222 δ : Type ?u.337225 ι : Type u_1 R : Type ?u.337231 R' : Type ?u.337234 m0 : MeasurableSpace α inst✝² : MeasurableSpace β inst✝¹ : MeasurableSpace γ μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t✝ : Set α inst✝ : Countable ι μ : ι → Measure α s : Set α h : ∀ (i : ι), ↑↑(μ i) s = 0 t : Set α hst : s ⊆ t htm : MeasurableSet t ht : ∀ (i : ι), ↑↑(μ i) t = ↑↑(μ i) s ⊢ ↑↑(sum μ) t = 0 State After: no goals Tactic: simp [*]
[STATEMENT] lemma (in Group) ZassenhausTr3_5:"\<lbrakk>G \<guillemotright> H; G \<guillemotright> H1; G \<guillemotright> K; G \<guillemotright> K1; Gp G H \<triangleright> H1; Gp G K \<triangleright> K1\<rbrakk> \<Longrightarrow> (Gp G (H \<inter> K)) \<triangleright> (H1 \<inter> K) \<diamondop>\<^bsub>G\<^esub> (H \<inter> K1)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 [PROOF STEP] apply (frule inter_sgs[of "H" "K"], assumption, frule inter_sgs[of "H1" "K"], assumption, frule inter_sgs[of "K" "H"], assumption, frule inter_sgs[of "H" "K1"], assumption+) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> K \<inter> H ; G \<guillemotright> H \<inter> K1 \<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 [PROOF STEP] apply (frule ZassenhausTr3[of "H \<inter> K" "H1 \<inter> K" "K \<inter> H" "H \<inter> K1"], assumption+, frule ZassenhausTr0[of "K" "K1" "H" "H1"], assumption+, simp add:Int_commute, frule ZassenhausTr0[of "H" "H1" "K" "K1"], assumption+, simp add:Int_commute) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> K \<inter> H ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<inter> (K \<inter> H) \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1)\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 [PROOF STEP] apply (frule ZassenhausTr2_3 [of "K" "K1"], assumption+, frule ZassenhausTr2_3 [of "H" "H1"], assumption+) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> K \<inter> H ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<inter> (K \<inter> H) \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 [PROOF STEP] apply (simp add:Int_commute[of "K" "H"]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 [PROOF STEP] apply (cut_tac Int_mono[of "H" "H" "K1" "K"]) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 2. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> H \<subseteq> H 3. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> K1 \<subseteq> K [PROOF STEP] apply (cut_tac Int_mono[of "H1" "H" "K" "K"]) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K; H1 \<inter> K \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> \<natural>H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K1 2. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> H1 \<subseteq> H 3. 
\<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> K \<subseteq> K 4. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> H \<subseteq> H 5. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> K1 \<subseteq> K [PROOF STEP] apply (simp only:Int_absorb1[of "H \<inter> K1" "H \<inter> K"], simp only:K_absorb_HK[of "H1 \<inter> K" "H \<inter> K"]) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> H1 \<subseteq> H 2. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H; H \<inter> K1 \<subseteq> H \<inter> K\<rbrakk> \<Longrightarrow> K \<subseteq> K 3. \<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> H \<subseteq> H 4. 
\<lbrakk>G \<guillemotright> H ; G \<guillemotright> H1 ; G \<guillemotright> K ; G \<guillemotright> K1 ; \<natural>H \<triangleright> H1; \<natural>K \<triangleright> K1; G \<guillemotright> H \<inter> K ; G \<guillemotright> H1 \<inter> K ; G \<guillemotright> H \<inter> K1 ; \<natural>H1 \<inter> K \<diamondop> H \<inter> K \<triangleright> H1 \<inter> K \<diamondop> H \<inter> K \<inter> (H \<inter> K1); K1 \<subseteq> K; H1 \<subseteq> H\<rbrakk> \<Longrightarrow> K1 \<subseteq> K [PROOF STEP] apply simp+ [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
lemma closure_hull: "closure S = closed hull S"
function planC = gamma_transPlanC(planC) % % Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team. % % This file is part of The Computational Environment for Radiotherapy Research (CERR). % % CERR development has been led by: Aditya Apte, Divya Khullar, James Alaly, and Joseph O. Deasy. % % CERR has been financially supported by the US National Institutes of Health under multiple grants. % % CERR is distributed under the terms of the Lesser GNU Public License. % % This version of CERR is free software: you can redistribute it and/or modify % it under the terms of the GNU General Public License as published by % the Free Software Foundation, either version 3 of the License, or % (at your option) any later version. % % CERR is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; % without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. % See the GNU General Public License for more details. % % You should have received a copy of the GNU General Public License % along with CERR. If not, see <http://www.gnu.org/licenses/>. indexS = planC{end}; oldScanSet = length(planC{indexS.scan}); for scanSet = 1:oldScanSet % the Scan Set will always be 1 as the 1st scan is deleted. planC = gamma_transScan(planC, 1); end for i = 1:length(planC{indexS.structures}) planC{indexS.structures}(i).rasterized = 0; planC{indexS.structures}.rasterSegments = []; end planC{indexS.structureArray} = []; planC = setUniformizedData(planC);
context("Labels") test_that("Setting guide labels", { expect_identical(xlab("my label")$x, "my label") expect_identical(labs(x = "my label")$x, "my label") expect_identical(ylab("my label")$y, "my label") expect_identical(labs(y = "my label")$y, "my label") # Colour expect_identical(labs(colour = "my label")$colour, "my label") # American spelling expect_identical(labs(color = "my label")$colour, "my label") })
(* Title: HOL/ex/CTL.thy Author: Gertrud Bauer *) section \<open>CTL formulae\<close> theory CTL imports Main begin text \<open> We formalize basic concepts of Computational Tree Logic (CTL) \<^cite>\<open>"McMillan-PhDThesis"\<close> within the simply-typed set theory of HOL. By using the common technique of ``shallow embedding'', a CTL formula is identified with the corresponding set of states where it holds. Consequently, CTL operations such as negation, conjunction, disjunction simply become complement, intersection, union of sets. We only require a separate operation for implication, as point-wise inclusion is usually not encountered in plain set-theory. \<close> lemmas [intro!] = Int_greatest Un_upper2 Un_upper1 Int_lower1 Int_lower2 type_synonym 'a ctl = "'a set" definition imp :: "'a ctl \<Rightarrow> 'a ctl \<Rightarrow> 'a ctl" (infixr "\<rightarrow>" 75) where "p \<rightarrow> q = - p \<union> q" lemma [intro!]: "p \<inter> p \<rightarrow> q \<subseteq> q" unfolding imp_def by auto lemma [intro!]: "p \<subseteq> (q \<rightarrow> p)" unfolding imp_def by rule text \<open> \<^smallskip> The CTL path operators are more interesting; they are based on an arbitrary, but fixed model \<open>\<M>\<close>, which is simply a transition relation over states \<^typ>\<open>'a\<close>. \<close> axiomatization \<M> :: "('a \<times> 'a) set" text \<open> The operators \<open>\<^bold>E\<^bold>X\<close>, \<open>\<^bold>E\<^bold>F\<close>, \<open>\<^bold>E\<^bold>G\<close> are taken as primitives, while \<open>\<^bold>A\<^bold>X\<close>, \<open>\<^bold>A\<^bold>F\<close>, \<open>\<^bold>A\<^bold>G\<close> are defined as derived ones. The formula \<open>\<^bold>E\<^bold>X p\<close> holds in a state \<open>s\<close>, iff there is a successor state \<open>s'\<close> (with respect to the model \<open>\<M>\<close>), such that \<open>p\<close> holds in \<open>s'\<close>. The formula \<open>\<^bold>E\<^bold>F p\<close> holds in a state \<open>s\<close>, iff there is a path in \<open>\<M>\<close>, starting from \<open>s\<close>, such that there exists a state \<open>s'\<close> on the path, such that \<open>p\<close> holds in \<open>s'\<close>. The formula \<open>\<^bold>E\<^bold>G p\<close> holds in a state \<open>s\<close>, iff there is a path, starting from \<open>s\<close>, such that for all states \<open>s'\<close> on the path, \<open>p\<close> holds in \<open>s'\<close>. It is easy to see that \<open>\<^bold>E\<^bold>F p\<close> and \<open>\<^bold>E\<^bold>G p\<close> may be expressed using least and greatest fixed points \<^cite>\<open>"McMillan-PhDThesis"\<close>. \<close> definition EX ("\<^bold>E\<^bold>X _" [80] 90) where [simp]: "\<^bold>E\<^bold>X p = {s. \<exists>s'. (s, s') \<in> \<M> \<and> s' \<in> p}" definition EF ("\<^bold>E\<^bold>F _" [80] 90) where [simp]: "\<^bold>E\<^bold>F p = lfp (\<lambda>s. p \<union> \<^bold>E\<^bold>X s)" definition EG ("\<^bold>E\<^bold>G _" [80] 90) where [simp]: "\<^bold>E\<^bold>G p = gfp (\<lambda>s. p \<inter> \<^bold>E\<^bold>X s)" text \<open> \<open>\<^bold>A\<^bold>X\<close>, \<open>\<^bold>A\<^bold>F\<close> and \<open>\<^bold>A\<^bold>G\<close> are now defined dually in terms of \<open>\<^bold>E\<^bold>X\<close>, \<open>\<^bold>E\<^bold>F\<close> and \<open>\<^bold>E\<^bold>G\<close>. 
\<close> definition AX ("\<^bold>A\<^bold>X _" [80] 90) where [simp]: "\<^bold>A\<^bold>X p = - \<^bold>E\<^bold>X - p" definition AF ("\<^bold>A\<^bold>F _" [80] 90) where [simp]: "\<^bold>A\<^bold>F p = - \<^bold>E\<^bold>G - p" definition AG ("\<^bold>A\<^bold>G _" [80] 90) where [simp]: "\<^bold>A\<^bold>G p = - \<^bold>E\<^bold>F - p" subsection \<open>Basic fixed point properties\<close> text \<open> First of all, we use the de-Morgan property of fixed points. \<close> lemma lfp_gfp: "lfp f = - gfp (\<lambda>s::'a set. - (f (- s)))" proof show "lfp f \<subseteq> - gfp (\<lambda>s. - f (- s))" proof show "x \<in> - gfp (\<lambda>s. - f (- s))" if l: "x \<in> lfp f" for x proof assume "x \<in> gfp (\<lambda>s. - f (- s))" then obtain u where "x \<in> u" and "u \<subseteq> - f (- u)" by (auto simp add: gfp_def) then have "f (- u) \<subseteq> - u" by auto then have "lfp f \<subseteq> - u" by (rule lfp_lowerbound) from l and this have "x \<notin> u" by auto with \<open>x \<in> u\<close> show False by contradiction qed qed show "- gfp (\<lambda>s. - f (- s)) \<subseteq> lfp f" proof (rule lfp_greatest) fix u assume "f u \<subseteq> u" then have "- u \<subseteq> - f u" by auto then have "- u \<subseteq> - f (- (- u))" by simp then have "- u \<subseteq> gfp (\<lambda>s. - f (- s))" by (rule gfp_upperbound) then show "- gfp (\<lambda>s. - f (- s)) \<subseteq> u" by auto qed qed lemma lfp_gfp': "- lfp f = gfp (\<lambda>s::'a set. - (f (- s)))" by (simp add: lfp_gfp) lemma gfp_lfp': "- gfp f = lfp (\<lambda>s::'a set. - (f (- s)))" by (simp add: lfp_gfp) text \<open> In order to give dual fixed point representations of \<^term>\<open>\<^bold>A\<^bold>F p\<close> and \<^term>\<open>\<^bold>A\<^bold>G p\<close>: \<close> lemma AF_lfp: "\<^bold>A\<^bold>F p = lfp (\<lambda>s. p \<union> \<^bold>A\<^bold>X s)" by (simp add: lfp_gfp) lemma AG_gfp: "\<^bold>A\<^bold>G p = gfp (\<lambda>s. p \<inter> \<^bold>A\<^bold>X s)" by (simp add: lfp_gfp) lemma EF_fp: "\<^bold>E\<^bold>F p = p \<union> \<^bold>E\<^bold>X \<^bold>E\<^bold>F p" proof - have "mono (\<lambda>s. p \<union> \<^bold>E\<^bold>X s)" by rule auto then show ?thesis by (simp only: EF_def) (rule lfp_unfold) qed lemma AF_fp: "\<^bold>A\<^bold>F p = p \<union> \<^bold>A\<^bold>X \<^bold>A\<^bold>F p" proof - have "mono (\<lambda>s. p \<union> \<^bold>A\<^bold>X s)" by rule auto then show ?thesis by (simp only: AF_lfp) (rule lfp_unfold) qed lemma EG_fp: "\<^bold>E\<^bold>G p = p \<inter> \<^bold>E\<^bold>X \<^bold>E\<^bold>G p" proof - have "mono (\<lambda>s. p \<inter> \<^bold>E\<^bold>X s)" by rule auto then show ?thesis by (simp only: EG_def) (rule gfp_unfold) qed text \<open> From the greatest fixed point definition of \<^term>\<open>\<^bold>A\<^bold>G p\<close>, we derive as a consequence of the Knaster-Tarski theorem on the one hand that \<^term>\<open>\<^bold>A\<^bold>G p\<close> is a fixed point of the monotonic function \<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close>. \<close> lemma AG_fp: "\<^bold>A\<^bold>G p = p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" proof - have "mono (\<lambda>s. p \<inter> \<^bold>A\<^bold>X s)" by rule auto then show ?thesis by (simp only: AG_gfp) (rule gfp_unfold) qed text \<open> This fact may be split up into two inequalities (merely using transitivity of \<open>\<subseteq>\<close>, which is an instance of the overloaded \<open>\<le>\<close> in Isabelle/HOL). 
\<close> lemma AG_fp_1: "\<^bold>A\<^bold>G p \<subseteq> p" proof - note AG_fp also have "p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p \<subseteq> p" by auto finally show ?thesis . qed lemma AG_fp_2: "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" proof - note AG_fp also have "p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by auto finally show ?thesis . qed text \<open> On the other hand, we have from the Knaster-Tarski fixed point theorem that any other post-fixed point of \<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close> is smaller than \<^term>\<open>\<^bold>A\<^bold>G p\<close>. A post-fixed point is a set of states \<open>q\<close> such that \<^term>\<open>q \<subseteq> p \<inter> \<^bold>A\<^bold>X q\<close>. This leads to the following co-induction principle for \<^term>\<open>\<^bold>A\<^bold>G p\<close>. \<close> lemma AG_I: "q \<subseteq> p \<inter> \<^bold>A\<^bold>X q \<Longrightarrow> q \<subseteq> \<^bold>A\<^bold>G p" by (simp only: AG_gfp) (rule gfp_upperbound) subsection \<open>The tree induction principle \label{sec:calc-ctl-tree-induct}\<close> text \<open> With the most basic facts available, we are now able to establish a few more interesting results, leading to the \<^emph>\<open>tree induction\<close> principle for \<open>\<^bold>A\<^bold>G\<close> (see below). We will use some elementary monotonicity and distributivity rules. \<close> lemma AX_int: "\<^bold>A\<^bold>X (p \<inter> q) = \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X q" by auto lemma AX_mono: "p \<subseteq> q \<Longrightarrow> \<^bold>A\<^bold>X p \<subseteq> \<^bold>A\<^bold>X q" by auto lemma AG_mono: "p \<subseteq> q \<Longrightarrow> \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G q" by (simp only: AG_gfp, rule gfp_mono) auto text \<open> The formula \<^term>\<open>AG p\<close> implies \<^term>\<open>AX p\<close> (we use substitution of \<open>\<subseteq>\<close> with monotonicity). \<close> lemma AG_AX: "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p" proof - have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by (rule AG_fp_2) also have "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1) moreover note AX_mono finally show ?thesis . qed text \<open> Furthermore we show idempotency of the \<open>\<^bold>A\<^bold>G\<close> operator. The proof is a good example of how accumulated facts may get used to feed a single rule step. \<close> lemma AG_AG: "\<^bold>A\<^bold>G \<^bold>A\<^bold>G p = \<^bold>A\<^bold>G p" proof show "\<^bold>A\<^bold>G \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p" by (rule AG_fp_1) next show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G \<^bold>A\<^bold>G p" proof (rule AG_I) have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p" .. moreover have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by (rule AG_fp_2) ultimately show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" .. 
qed qed text \<open> \<^smallskip> We now give an alternative characterization of the \<open>\<^bold>A\<^bold>G\<close> operator, which describes the \<open>\<^bold>A\<^bold>G\<close> operator in an ``operational'' way by tree induction: In a state holds \<^term>\<open>AG p\<close> iff in that state holds \<open>p\<close>, and in all reachable states \<open>s\<close> follows from the fact that \<open>p\<close> holds in \<open>s\<close>, that \<open>p\<close> also holds in all successor states of \<open>s\<close>. We use the co-induction principle @{thm [source] AG_I} to establish this in a purely algebraic manner. \<close> theorem AG_induct: "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) = \<^bold>A\<^bold>G p" proof show "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> \<^bold>A\<^bold>G p" (is "?lhs \<subseteq> _") proof (rule AG_I) show "?lhs \<subseteq> p \<inter> \<^bold>A\<^bold>X ?lhs" proof show "?lhs \<subseteq> p" .. show "?lhs \<subseteq> \<^bold>A\<^bold>X ?lhs" proof - { have "\<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> p \<rightarrow> \<^bold>A\<^bold>X p" by (rule AG_fp_1) also have "p \<inter> p \<rightarrow> \<^bold>A\<^bold>X p \<subseteq> \<^bold>A\<^bold>X p" .. finally have "?lhs \<subseteq> \<^bold>A\<^bold>X p" by auto } moreover { have "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" .. also have "\<dots> \<subseteq> \<^bold>A\<^bold>X \<dots>" by (rule AG_fp_2) finally have "?lhs \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" . } ultimately have "?lhs \<subseteq> \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" .. also have "\<dots> = \<^bold>A\<^bold>X ?lhs" by (simp only: AX_int) finally show ?thesis . qed qed qed next show "\<^bold>A\<^bold>G p \<subseteq> p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" proof show "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1) show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" proof - have "\<^bold>A\<^bold>G p = \<^bold>A\<^bold>G \<^bold>A\<^bold>G p" by (simp only: AG_AG) also have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p" by (rule AG_AX) moreover note AG_mono also have "\<^bold>A\<^bold>X p \<subseteq> (p \<rightarrow> \<^bold>A\<^bold>X p)" .. moreover note AG_mono finally show ?thesis . qed qed qed subsection \<open>An application of tree induction \label{sec:calc-ctl-commute}\<close> text \<open> Further interesting properties of CTL expressions may be demonstrated with the help of tree induction; here we show that \<open>\<^bold>A\<^bold>X\<close> and \<open>\<^bold>A\<^bold>G\<close> commute. \<close> theorem AG_AX_commute: "\<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" proof - have "\<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G \<^bold>A\<^bold>X p" by (rule AG_fp) also have "\<dots> = \<^bold>A\<^bold>X (p \<inter> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p)" by (simp only: AX_int) also have "p \<inter> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>G p" (is "?lhs = _") proof have "\<^bold>A\<^bold>X p \<subseteq> p \<rightarrow> \<^bold>A\<^bold>X p" .. 
also have "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) = \<^bold>A\<^bold>G p" by (rule AG_induct) also note Int_mono AG_mono ultimately show "?lhs \<subseteq> \<^bold>A\<^bold>G p" by fast next have "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1) moreover { have "\<^bold>A\<^bold>G p = \<^bold>A\<^bold>G \<^bold>A\<^bold>G p" by (simp only: AG_AG) also have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p" by (rule AG_AX) also note AG_mono ultimately have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p" . } ultimately show "\<^bold>A\<^bold>G p \<subseteq> ?lhs" .. qed finally show ?thesis . qed end
#include <cctbx/boost_python/flex_fwd.h> #include <boost/python/module.hpp> #include <boost/python/scope.hpp> #include <boost/python/class.hpp> #include <boost/python/def.hpp> #include <boost/python/tuple.hpp> #include <boost/python/enum.hpp> #include <iostream> #include <rstbx/simulation/sim_xfel1.h> namespace rstbx { namespace boost_python { namespace { void init_simulation_module() { using namespace boost::python; typedef return_value_policy<return_by_value> rbv; class_<xfel1>("xfel1",init<>()) .def("set_indices",&xfel1::set_indices) .def("set_intensities",&xfel1::set_intensities) .def("select_proximal_indices",&xfel1::select_proximal_indices, (arg_("half_edge"), arg_("detector_distance_m"), arg_("pixel_size_m"), arg_("orientation"), arg_("mosaicity_full_width"), arg_("bandpass_full_width"),arg_("wavelength_m"), arg_("limiting_resolution_Ang"))) .def("raw_diffraction",&xfel1::raw_diffraction, (arg_("selection"),arg_("pixels"), arg_("mosaic_domains"), arg_("detector_distance_m"), arg_("pixel_size_m"), arg_("darwin_factor") )) .add_property("indices_all",make_getter(&xfel1::indices_all, rbv())) .add_property("intensities_all",make_getter(&xfel1::intensities_all, rbv())) .add_property("spots", make_getter(&xfel1::spots, rbv())) .add_property("signals", make_getter(&xfel1::selection_raw_counts, rbv())) .add_property("partialities", make_getter(&xfel1::selection_partiality, rbv())) ; } }}} // namespace rstbx::boost_python::<anonymous> BOOST_PYTHON_MODULE(rstbx_simulation_ext) { rstbx::boost_python::init_simulation_module(); }
(** **** SNU 4190.310, 2016 Spring *)
(** Assignment 01 *)
(** Due: 2015/03/20 23:59 *)

(* Important:
   - You are NOT allowed to use the [admit] tactic.
   - You are NOT allowed to use the following tactics.
     [tauto], [intuition], [firstorder], [omega].
   - Just leave [exact GIVEUP] for those problems that you fail to prove. *)

Definition GIVEUP {T: Type} : T. Admitted.

Fixpoint beq_nat (n m : nat) : bool :=
  match n with
  | O => match m with
         | O => true
         | S m' => false
         end
  | S n' => match m with
            | O => false
            | S m' => beq_nat n' m'
            end
  end.
(* Author: Dmitry Traytel *) theory List_More imports Main "List-Index.List_Index" "HOL-Library.Multiset" begin subsection \<open>Library Functions\<close> abbreviation "bool_product_lists n \<equiv> product_lists (replicate n [True, False])" lemma in_set_bool_product_lists[simp]: "bs \<in> set (bool_product_lists n) \<longleftrightarrow> length bs = n" proof (induct n arbitrary: bs) case (Suc n) thus ?case by (cases bs) auto qed simp text \<open>More on sort and remdups\<close> lemma insort_min[simp]: "\<forall>y \<in> set xs. x < y \<Longrightarrow> insort x xs = x # xs" by (induct xs) auto lemma insort_max[simp]: "\<forall>y \<in> set xs. x > y \<Longrightarrow> insort x xs = xs @ [x]" by (induct xs) auto lemma insort_snoc[simp]: "\<forall>z \<in> set xs. y > z \<Longrightarrow> insort x (xs @ [y]) = (if x < y then insort x xs @ [y] else xs @ [y, x])" by (induct xs) auto declare set_insort_key[simp] lemma insort_remdups[simp]: "\<lbrakk>sorted xs; a \<notin> set xs\<rbrakk> \<Longrightarrow> insort a (remdups xs) = remdups (insort a xs)" proof (induct xs) case (Cons x xs) thus ?case by (cases xs) (auto) qed simp lemma remdups_insort[simp]: "a \<in> set xs \<Longrightarrow> remdups (insort a xs) = remdups xs" by (induct xs) auto lemma sort_remdups[simp]: "sort (remdups xs) = remdups (sort xs)" by (induct xs) auto lemma sort_map_insort[simp]: "sorted xs \<Longrightarrow> sort (map f (insort a xs)) = insort (f a) (sort (map f xs))" by (induct xs) (auto simp: insort_left_comm) lemma sort_map_sort[simp]: "sort (map f (sort xs)) = sort (map f xs)" by (induct xs) auto lemma remdups_append: "remdups (xs @ ys) = remdups (filter (\<lambda>x. x \<notin> set ys) xs) @ remdups ys" by (induct xs) auto lemma remdups_concat_map_remdups: "remdups (concat (map f (remdups xs))) = remdups (concat (map f xs))" by (induct xs) (auto simp: remdups_append filter_empty_conv) (*remdups'*) primrec remdups' where "remdups' f [] = []" | "remdups' f (x # xs) = (case List.find (\<lambda>y. 
f x = f y) xs of None \<Rightarrow> x # remdups' f xs | _ \<Rightarrow> remdups' f xs)" lemma map_remdups'[simp]: "map f (remdups' f xs) = remdups (map f xs)" by (induct xs) (auto split: option.splits simp add: find_Some_iff find_None_iff) lemma remdups'_map[simp]: "remdups' f (map g xs) = map g (remdups' (f o g) xs)" by (induct xs) (auto split: option.splits simp add: find_None_iff, auto simp: find_Some_iff elim: imageI[OF nth_mem]) lemma map_apfst_remdups': "map (f o fst) (remdups' snd xs) = map fst (remdups' snd (map (apfst f) xs))" by (auto simp: comp_def) lemma set_remdups'[simp]: "f ` set (remdups' f xs) = f ` set xs" by (induct xs) (auto split: option.splits simp add: find_Some_iff) lemma subset_remdups': "set (remdups' f xs) \<subseteq> set xs" by (induct xs) (auto split: option.splits) lemma find_append[simp]: "List.find P (xs @ ys) = None = (List.find P xs = None \<and> List.find P ys = None)" by (induct xs) auto lemma subset_remdups'_append: "set (remdups' f (xs @ ys)) \<subseteq> set (remdups' f xs) \<union> set (remdups' f ys)" by (induct xs arbitrary: ys) (auto split: option.splits) lemmas mp_remdups' = subsetD[OF subset_remdups'] lemmas mp_remdups'_append = subsetD[OF subset_remdups'_append] lemma inj_on_set_remdups'[simp]: "inj_on f (set (remdups' f xs))" by (induct xs) (auto split: option.splits simp add: find_None_iff dest!: mp_remdups') lemma distinct_remdups'[simp]: "distinct (map f (remdups' f xs))" by (induct xs) (auto split: option.splits simp: find_None_iff) lemma distinct_remdups'_strong: "(\<forall>x\<in>set xs. \<forall>y\<in>set xs. g x = g y \<longrightarrow> f x = f y) \<Longrightarrow> distinct (map g (remdups' f xs))" proof (induct xs) case (Cons x xs) thus ?case by (auto split: option.splits) (fastforce simp: find_None_iff dest!: mp_remdups') qed simp lemma set_remdups'_strong: "f ` set (remdups' g xs) = f ` set xs" if "\<forall>x\<in>set xs. \<forall>y\<in>set xs. g x = g y \<longrightarrow> f x = f y" using that proof (induction xs) case Nil then show ?case by simp next case (Cons x xs) then have "\<forall>x\<in>set xs. \<forall>y\<in>set xs. g x = g y \<longrightarrow> f x = f y" by (auto simp only: set_simps) then have "f ` set (remdups' g xs) = f ` set xs" by (rule Cons.IH) then show ?case by (auto simp add: find_Some_iff split: option.splits) (metis Cons.prems image_eqI list.set_intros(1) list.set_intros(2) nth_mem) qed (*multisets only needed below*) lemma multiset_concat_gen: "M + mset (concat xs) = fold (\<lambda>x M. M + mset x) xs M" by (induct xs arbitrary: M) (auto, metis union_assoc) corollary multiset_concat: "mset (concat xs) = fold (\<lambda>x M. M + mset x) xs {#}" using multiset_concat_gen[of "{#}" xs] by simp lemma fold_mset_insort[simp]: "fold (\<lambda>x M. M + mset (f x)) (insort x xs) M = fold (\<lambda>x M. M + mset (f x)) xs (mset (f x) + M)" by (induct xs arbitrary: M) (auto simp: ac_simps) lemma fold_mset_sort[simp]: "fold (\<lambda>x M. M + mset (f x)) (sort xs) M = fold (\<lambda>x M. M + mset (f x)) xs M" by (induct xs arbitrary: M) (auto simp: ac_simps) lemma multiset_concat_map_sort[simp]: "mset (concat (map f (sort xs))) = mset (concat (map f xs))" by (auto simp: multiset_concat fold_map o_def) lemma sort_concat_map_sort[simp]: "sort (concat (map f (sort xs))) = sort (concat (map f xs))" by (auto intro: properties_for_sort) end
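The remdups' combinator defined above keeps, for each key value f x, only the last occurrence in the list, preserving the relative order of the kept elements; this is exactly what the List.find test on the tail expresses, and it matches Isabelle's remdups, which likewise retains last occurrences. Below is a minimal executable model in Python, just to make the behaviour concrete; the function name remdups_by and the sample inputs are illustrative only and do not come from the theory.

def remdups_by(f, xs):
    """Keep only the last occurrence of each key f(x), preserving the order
    of the kept elements -- a direct transcription of remdups' above."""
    out = []
    for i, x in enumerate(xs):
        # corresponds to: List.find (%y. f x = f y) returning None on the remaining tail
        if all(f(x) != f(y) for y in xs[i + 1:]):
            out.append(x)
    return out

# illustrative checks
assert remdups_by(lambda v: v, [1, 2, 1, 3]) == [2, 1, 3]
assert remdups_by(lambda v: v % 2, [1, 2, 3, 4]) == [3, 4]

With f the identity (or any key function) this also illustrates the lemma map_remdups': mapping f over the result coincides with remdups applied to map f xs.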
import data.set.finite import algebra.big_operators import data.support import data.finsupp variables {α : Type*} open set lemma set.insert_inter_of_mem {α : Type*} {s₁ s₂ : set α} {a : α} (h : a ∈ s₂) : insert a s₁ ∩ s₂ = insert a (s₁ ∩ s₂) := by simp [set.insert_inter, h] lemma set.insert_inter_of_not_mem {s₁ s₂ : set α} {a : α} (h : a ∉ s₂) : insert a s₁ ∩ s₂ = s₁ ∩ s₂ := begin ext x, simp only [mem_inter_iff, mem_insert_iff, mem_inter_eq, and.congr_left_iff, or_iff_right_iff_imp], cc, end lemma set.image_inter_support_eq {α β M : Type*} {f : α → M} [has_zero M] {s : set β} {g : β → α} : (g '' s ∩ function.support f) = g '' (s ∩ function.support (f ∘ g)) := begin ext y, split; intro hy, { rcases hy with ⟨⟨x, hx₀, rfl⟩, hy⟩, exact ⟨x, ⟨hx₀, hy⟩, rfl⟩ }, { rcases hy with ⟨y, ⟨hys, hyfg⟩, rfl⟩, exact ⟨⟨y, hys, rfl⟩, hyfg⟩ } end lemma set.image_inter_support_finite_iff {α β M : Type*} {f : α → M} [has_zero M] {s : set β} {g : β → α} (hg : set.inj_on g s) : (g '' s ∩ function.support f).finite ↔ (s ∩ function.support (f ∘ g)).finite := begin rw [set.image_inter_support_eq, set.finite_image_iff], exact set.inj_on.mono (set.inter_subset_left s _) hg end
At Burgil Coffee you'll find a range of some of the best coffees from around the world, such as Peruvian, Mexican and Cuban organic fairtrade ground and bean coffee or Latin American gourmet blend. They also sell "coffee tours", which consist of a selection of their organic blends automatically delivered to you each month. Best Shop Deals currently does not have any voucher codes or discount coupons listed for Burgil Coffee. However, you can check out our Forum where you can request and exchange discount codes and promotional store vouchers with other online shoppers. Alternatively, you can visit Burgil Coffee directly and go to the sales or promotions sections for the latest store offers. We recommend that you check that the actual discount for any voucher or code is applied before you complete the checkout process at Burgil Coffee. Expiration of codes and/or vouchers can occur at any time without prior notice, and while we try to keep the site up-to-date, we cannot be held responsible or liable for the accuracy of the content, offers and discounts. If you notice any expired Burgil Coffee voucher codes then please 'Contact Us' so we can update the information.
section \<open> tock-Circus \<close> theory tockcircus imports tcircus_calc begin recall_syntax subsection \<open> Healthiness Conditions \<close> text \<open> This is the same as Circus $Skip$, except that it includes an unstable intermediate state. \<close> definition Skip :: "('s,'e) taction" where [rdes_def]: "Skip = \<^bold>R(true\<^sub>r \<turnstile> \<U>(true, []) \<diamondop> \<F>(true, [], id\<^sub>s))" definition TC1 :: "('s, 'e) taction \<Rightarrow> ('s, 'e) taction" where [rdes_def]: "TC1(P) = Skip ;; P" lemma Skip_self_unit: "Skip ;; Skip = Skip" by rdes_eq lemma TC1_idem: "TC1(TC1(P)) = TC1(P)" by (simp add: RA1 Skip_self_unit TC1_def) definition TC2 :: "('s, 'e) taction \<Rightarrow> ('s, 'e) taction" where [rdes_def]: "TC2(P) = P ;; Skip" lemma TC2_idem: "TC2(TC2(P)) = TC2(P)" by (simp add: seqr_assoc Skip_self_unit TC2_def) definition [upred_defs]: "TC = NRD \<circ> TC2 \<circ> TC1" lemma TC_implies_NRD [closure]: "P is TC \<Longrightarrow> P is NRD" by (metis (no_types, opaque_lifting) Healthy_def TC_def NRD_idem comp_apply) lemma NRD_rdes [rdes_def]: assumes "P is RC" "Q is RR" "R is RR" shows "NRD(\<^bold>R(P \<turnstile> Q \<diamondop> R)) = (\<^bold>R(P \<turnstile> Q \<diamondop> R))" by (simp add: Healthy_if NRD_rdes_intro assms) lemma TC1_rdes: assumes "P is RC" "Q is RR" "R is RR" shows "TC1(\<^bold>R(P \<turnstile> Q \<diamondop> R)) = \<^bold>R(II\<^sub>t wp\<^sub>r P \<turnstile> (\<U>(true, []) \<or> TRR(Q)) \<diamondop> TRR(R))" using assms by (rdes_simp, simp add: TRR_def TRR1_def Healthy_if) lemma TC1_TRR_rdes [rdes_def]: assumes "P is TRC" "Q is TRR" "R is TRR" shows "TC1(\<^bold>R(P \<turnstile> Q \<diamondop> R)) = \<^bold>R(P \<turnstile> (\<U>(true, []) \<or> Q) \<diamondop> R)" by (subst TC1_rdes, simp_all add: closure assms wp Healthy_if) lemma TC2_rdes [rdes_def]: assumes "P is TRC" "Q is TRR" "R is TRR" shows "TC2(\<^bold>R(P \<turnstile> Q \<diamondop> R)) = \<^bold>R(P \<turnstile>(Q \<or> R ;; \<U>(true, [])) \<diamondop> R ;; II\<^sub>t)" using assms by (rdes_simp) lemma TC_implies_TC1 [closure]: assumes "P is TC" shows "P is TC1" proof - have a:"P is NRD" by (simp add: closure assms) have "TC1(TC(P)) = TC(P)" by (rdes_eq cls: a simps: TC_def) thus ?thesis by (metis Healthy_def assms) qed lemma TC_implies_TC2 [closure]: assumes "P is TC" shows "P is TC2" proof - have a:"P is NRD" by (simp add: closure assms) have "TC2(TC(P)) = TC(P)" by (rdes_eq cls: a simps: TC_def) thus ?thesis by (metis Healthy_def assms) qed lemma TC_rdes [rdes_def]: assumes "P is TRC" "Q is TRR" "R is TRR" shows "TC(\<^bold>R(P \<turnstile> Q \<diamondop> R)) = \<^bold>R (P \<turnstile> (Q \<or> \<U>(true, []) \<or> R ;; \<U>(true, [])) \<diamondop> R ;; II\<^sub>t)" by (simp add: TC_def rdes_def closure assms rpred wp disj_comm disj_assoc) lemma TC_closed_seqr [closure]: assumes "P is TC" "Q is TC" shows "P ;; Q is TC" proof - have "P ;; Q is TC1" by (metis (no_types, opaque_lifting) Healthy_def RA1 TC1_def TC_implies_TC1 assms(1)) moreover have "P ;; Q is TC2" by (metis (no_types, opaque_lifting) Healthy_def RA1 TC2_def TC_implies_TC2 assms(2)) ultimately show ?thesis by (metis Healthy_comp NRD_seqr_closure TC_def TC_implies_NRD assms(1) assms(2)) qed lemma TC_inner_closures [closure]: assumes "P is TC" shows "pre\<^sub>R(P) is TRC" "peri\<^sub>R(P) is TRR" "post\<^sub>R(P) is TRF" "peri\<^sub>R(P) \<sqsubseteq> \<U>(true, [])" "peri\<^sub>R P \<sqsubseteq> post\<^sub>R P ;; \<U>(true, [])" proof - have a: "P is NRD" using TC_implies_NRD assms by blast have b: "P = 
TC1(\<^bold>R(pre\<^sub>R P \<turnstile> peri\<^sub>R P \<diamondop> post\<^sub>R P))" by (simp add: Healthy_if NRD_is_RD RD_reactive_tri_design TC_implies_TC1 TC_implies_TC2 a assms) hence 1: "P = \<^bold>R(II\<^sub>t wp\<^sub>r pre\<^sub>R P \<turnstile> (\<U>(true, []) \<or> TRR (peri\<^sub>R P)) \<diamondop> TRR (post\<^sub>R P))" by (simp add: TC1_rdes TC2_rdes closure assms) hence 2: "II\<^sub>t wp\<^sub>r pre\<^sub>R P = pre\<^sub>R P" by (metis TRR_implies_RR TRR_tc_skip a preR_NRD_RR preR_rdes wp_rea_RR_closed) thus [closure]: "pre\<^sub>R(P) is TRC" by (simp add: NRD_neg_pre_RC TRC_wp_intro a) have peri: "peri\<^sub>R(P) = (pre\<^sub>R(P) \<Rightarrow>\<^sub>r (\<U>(true, []) \<or> TRR (peri\<^sub>R P)))" by (subst 1, simp add: rdes closure assms 2) also have "... is TRR" by (simp add: closure assms) finally show [closure]: "peri\<^sub>R(P) is TRR" . show "peri\<^sub>R(P) \<sqsubseteq> \<U>(true, [])" by (metis peri rea_impl_disj utp_pred_laws.sup.cobounded1) have "post\<^sub>R(P) = (pre\<^sub>R(P) \<Rightarrow>\<^sub>r TRR (post\<^sub>R P))" by (metis 1 2 Healthy_Idempotent TRR_implies_RR a postR_rdes preR_NRD_RR trel_theory.HCond_Idempotent) also have "... is TRR" by (simp add: closure assms) finally have [closure]: "post\<^sub>R(P) is TRR" . have "P = TC2(\<^bold>R(pre\<^sub>R P \<turnstile> peri\<^sub>R P \<diamondop> post\<^sub>R P))" by (simp add: Healthy_if NRD_is_RD RD_reactive_tri_design TC_implies_TC2 a assms) hence 3: "P = \<^bold>R (pre\<^sub>R P \<turnstile> (peri\<^sub>R P \<or> post\<^sub>R P ;; \<U>(true, [])) \<diamondop> post\<^sub>R P ;; II\<^sub>t)" by (simp add: TC2_rdes closure assms) hence "post\<^sub>R(P) = (pre\<^sub>R(P) \<Rightarrow>\<^sub>r post\<^sub>R P ;; II\<^sub>t)" by (metis TRR_implies_RR TRR_tc_skip \<open>post\<^sub>R P is TRR\<close> a postR_rdes preR_NRD_RR rrel_theory.Healthy_Sequence) also have "... is TRF" by (rule TRF_intro, simp_all add: closure assms unrest) finally show "post\<^sub>R(P) is TRF" . 
have "peri\<^sub>R(P) = (pre\<^sub>R(P) \<Rightarrow>\<^sub>r (peri\<^sub>R P \<or> post\<^sub>R P ;; \<U>(true, [])))" by (subst 3, simp add: rdes closure) thus "peri\<^sub>R P \<sqsubseteq> post\<^sub>R P ;; \<U>(true, [])" by (metis (no_types, opaque_lifting) rea_impl_disj utp_pred_laws.sup.cobounded1 utp_pred_laws.sup_commute) qed lemma TC_elim [RD_elim]: "P is TC \<Longrightarrow> Q (\<^bold>R (pre\<^sub>R P \<turnstile> peri\<^sub>R P \<diamondop> post\<^sub>R P)) \<Longrightarrow> Q P" by (simp add: NRD_elim TC_implies_NRD) lemma TC_elim': "P is TC \<Longrightarrow> Q (\<^bold>R (pre\<^sub>R P \<turnstile> (peri\<^sub>R P \<or> \<U>(true, []) \<or> post\<^sub>R P ;; \<U>(true, [])) \<diamondop> post\<^sub>R P)) \<Longrightarrow> Q P" by (simp add: NRD_elim TC_implies_NRD TC_inner_closures(4) TC_inner_closures(5) utp_pred_laws.sup_absorb1) lemma TC_intro: assumes "P\<^sub>1 is TRC" "P\<^sub>2 is TRR" "P\<^sub>3 is TRF" "P\<^sub>2 \<sqsubseteq> \<U>(true, [])" "P\<^sub>2 \<sqsubseteq> P\<^sub>3 ;; \<U>(true, [])" shows "\<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3) is TC" proof - have "TC1(\<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3)) = \<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3)" by (simp add: TC1_rdes assms closure wp Healthy_if utp_pred_laws.sup_absorb2) moreover have "TC2(\<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3)) = \<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3)" by (simp add: TC2_rdes assms closure wp rpred Healthy_if utp_pred_laws.sup_absorb1 utp_pred_laws.sup_absorb2) ultimately show ?thesis by (simp add: TC_def Healthy_intro NRD_rdes TRC_implies_RC TRF_implies_TRR TRR_implies_RR assms) qed subsection \<open> Basic Constructs \<close> text \<open> The divergent action cannot terminate and exhibits only instability in the pericondition. \<close> definition Div :: "('s,'e) taction" where [rdes_def]: "Div = \<^bold>R(true\<^sub>r \<turnstile> \<U>(true, []) \<diamondop> false)" lemma Div_TC [closure]: "Div is TC" by (rule Healthy_intro, rdes_eq) definition AssignsT :: "'s usubst \<Rightarrow> ('s,'e) taction" ("\<langle>_\<rangle>\<^sub>T") where [rdes_def]: "AssignsT \<sigma> = \<^bold>R(true\<^sub>r \<turnstile> \<U>(true, []) \<diamondop> \<F>(true, [], \<sigma>))" lemma AssignsT_TC [closure]: "\<langle>\<sigma>\<rangle>\<^sub>T is TC" by (rule Healthy_intro, rdes_eq) text \<open> A timed deadlock does not terminate, but permits any period of time to pass, always remaining in a quiescent state where another $tock$ can occur. \<close> definition Stop :: "('s,'e) taction" where [rdes_def]: "Stop = \<^bold>R(true\<^sub>r \<turnstile> \<T>({}, {0..}) ;; \<E>(true, [], {}, true) \<diamondop> false)" lemma Stop_TC [closure]: "Stop is TC" by (rule Healthy_intro, rdes_eq) text \<open> An untimed deadlock is stable, but does not accept any events. \<close> definition Stop\<^sub>U :: "('s,'e) taction" where [rdes_def]: "Stop\<^sub>U = \<^bold>R(true\<^sub>r \<turnstile> \<E>(true, [], {}, false) \<diamondop> false)" lemma Stop\<^sub>U_TC [closure]: "Stop\<^sub>U is TC" by (rule Healthy_intro, rdes_eq) text \<open> SDF: Check the following definition against the tick-tock paper. It only allows prefixing of non-tock events for now. 
\<close> definition DoT :: "('e, 's) uexpr \<Rightarrow> ('s, 'e) taction" ("do\<^sub>T'(_')") where [rdes_def]: "DoT a = \<^bold>R(true\<^sub>r \<turnstile> \<T>({a}, {0..}) ;; (\<E>(true, [], {a}, true) \<or> \<U>(true, [Evt a])) \<diamondop> \<T>({a}, {0..}) ;; \<F>(true, [Evt a], id\<^sub>s))" lemma DoT_TC: "do\<^sub>T(e) is TC" by (rule Healthy_intro, rdes_eq) definition Wait :: "(nat, 's) uexpr \<Rightarrow> ('s,'e) taction" where [rdes_def]: "Wait n = \<^bold>R(true\<^sub>r \<turnstile> ((\<T>({}, {0..<n}) ;; \<E>(true, [], {}, true)) \<or> (\<T>({}, {n}) ;; \<U>(true, []))) \<diamondop> \<T>({}, {n}))" utp_lift_notation Wait lemma Wait_TC: "Wait n is TC" by (rule Healthy_intro, rdes_eq) subsection \<open> Algebraic Laws \<close> lemma "Skip ;; Stop = Stop" by (rdes_eq) lemma "Stop \<sqsubseteq> Div" by (rdes_refine) utp_const lift_state_pre lemma Wait_0: "Wait 0 = Skip" by (rdes_eq) lemma Wait_Wait: "Wait m ;; Wait n = Wait(m + n)" apply (rdes_eq_split) apply (rel_auto) apply (simp_all add: rpred closure seqr_assoc[THEN sym]) apply (rel_auto) done text \<open> This is a pleasing result although @{const Wait} raises instability, this is swallowed up by the sequential composition. \<close> lemma Wait_Stop: "Wait m ;; Stop = Stop" by (rdes_eq_split, simp_all add: rpred closure seqr_assoc[THEN sym], rel_auto) lemma "\<langle>[x \<mapsto>\<^sub>s &x + 1]\<rangle>\<^sub>T ;; do\<^sub>T(a) ;; \<langle>[x \<mapsto>\<^sub>s &x + 1]\<rangle>\<^sub>T = \<^bold>R (\<^U>(R1 true) \<turnstile> (\<U>(true, []) \<or> \<F>(true, [], \<^U>([x \<mapsto>\<^sub>s &x + 1])) ;; \<T>({a}, {0..}) ;; \<E>(true, [], {a}, true) \<or> \<F>(true, [], \<^U>([x \<mapsto>\<^sub>s &x + 1])) ;; \<T>({a}, {0..}) ;; \<U>(true, [Evt a])) \<diamondop> \<F>(true, [], \<^U>([x \<mapsto>\<^sub>s &x + 1])) ;; \<T>({a}, {0..}) ;; \<F>(true, [Evt a], \<^U>([x \<mapsto>\<^sub>s &x + 1])))" by (rdes_simp, simp add: rpred seqr_assoc usubst) lemma "Wait(m) ;; do\<^sub>T(a) ;; \<langle>[x \<mapsto>\<^sub>s &x + 1]\<rangle>\<^sub>T = \<^bold>R (true\<^sub>r \<turnstile> (\<T>({}, {0..<m}) ;; \<E>(true, [], {}, true) \<or> \<T>({}, {m}) ;; \<U>(true, []) \<or> \<T>({}, {m}) ;; \<T>({a}, {0..}) ;; \<E>(true, [], {a}, true) \<or> \<T>({}, {m}) ;; \<T>({a}, {0..}) ;; \<U>(true, [Evt a])) \<diamondop> \<T>({}, {m}) ;; \<T>({a}, {0..}) ;; \<F>(true, [Evt a], [x \<mapsto>\<^sub>s &x + 1]))" apply (rdes_simp) apply (simp add: rpred seqr_assoc usubst) oops definition ExtChoice :: "'i set \<Rightarrow> ('i \<Rightarrow> ('s, 'e) taction) \<Rightarrow> ('s, 'e) taction" where [upred_defs]: "ExtChoice I P = \<^bold>R(R1(\<And> i\<in>I \<bullet> pre\<^sub>R(P i)) \<comment> \<open> Require all preconditions \<close> \<turnstile> (idle(\<And> i\<in>I \<bullet> idle(peri\<^sub>R(P i))) \<comment> \<open> Allow all idle behaviours \<close> \<or> (\<Or> i\<in>I \<bullet> active(peri\<^sub>R(P i)) \<comment> \<open> Allow one active action to resolve the choice ...\<close> \<and> (\<And> j\<in>I \<bullet> time(peri\<^sub>R(P j))))) \<comment> \<open> ... whilst the others remain idle \<close> \<diamondop> ((\<Or> i\<in>I \<bullet> post\<^sub>R(P i) \<comment> \<open> The postcondition can terminate the external choice without an event ... \<close> \<and> (\<And> j\<in>I \<bullet> time(peri\<^sub>R(P j))))))" \<comment> \<open> ... 
whilst the others remain quiescent and idle \<close> (* definition extChoice :: "('s, 'e) taction \<Rightarrow> ('s, 'e) taction \<Rightarrow> ('s, 'e) taction" (infixl "\<box>" 69) where [upred_defs]: "P \<box> Q = ExtChoice {P, Q} id" *) definition extChoice :: "('s, 'e) taction \<Rightarrow> ('s, 'e) taction \<Rightarrow> ('s, 'e) taction" (infixl "\<box>" 69) where [upred_defs]: "P \<box> Q = \<^bold>R((pre\<^sub>R(P) \<and> pre\<^sub>R(Q)) \<turnstile> (idle(peri\<^sub>R(P)) \<and> idle(peri\<^sub>R(Q)) \<or> time(peri\<^sub>R(P)) \<and> active(peri\<^sub>R(Q)) \<or> time(peri\<^sub>R(Q)) \<and> active(peri\<^sub>R(P))) \<diamondop> (time(peri\<^sub>R(P)) \<and> post\<^sub>R(Q) \<or> time(peri\<^sub>R(Q)) \<and> post\<^sub>R(P)))" lemma ExtChoice_empty: "ExtChoice {} P = Stop" by (simp add: ExtChoice_def Stop_def rpred) lemma ExtChoice_single: assumes "P i is TC" "peri\<^sub>R(P i) is TIP" shows "ExtChoice {i} P = P i" proof - have 1: "time(peri\<^sub>R (P i)) \<sqsubseteq> post\<^sub>R (P i)" by (simp add: time_peri_in_post assms closure) show ?thesis by (rdes_simp cls: assms simps: ExtChoice_def 1 Healthy_if utp_pred_laws.inf_absorb1) qed lemma ExtChoice_rdes_def [rdes_def]: assumes "\<And> i. P\<^sub>1(i) is TRC" "\<And> i. P\<^sub>2(i) is TRR" "\<And> i. P\<^sub>3(i) is TRR" shows "ExtChoice I (\<lambda> i. \<^bold>R(P\<^sub>1(i) \<turnstile> P\<^sub>2(i) \<diamondop> P\<^sub>3(i))) = \<^bold>R ((\<And> i\<in>I \<bullet> P\<^sub>1(i)) \<turnstile> (idle(\<And> i\<in>I \<bullet> idle(P\<^sub>2 i)) \<or> (\<Or> i\<in>I \<bullet> active(P\<^sub>2 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j)))) \<diamondop> (\<Or> i\<in>I \<bullet> (P\<^sub>3 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j))))" proof (cases "I = {}") case True then show ?thesis by (simp add: ExtChoice_empty rpred Stop_def, rel_auto) next case False have "((\<And> i\<in>I \<bullet> RC2(P\<^sub>1(i))) \<Rightarrow>\<^sub>r (idle(\<And> i\<in>I \<bullet> idle(RC2(P\<^sub>1 i) \<Rightarrow>\<^sub>r P\<^sub>2 i)) \<or> (\<Or> i\<in>I \<bullet> active(RC2(P\<^sub>1 i) \<Rightarrow>\<^sub>r P\<^sub>2 i) \<and> (\<And> j\<in>I \<bullet> time(RC2(P\<^sub>1 j) \<Rightarrow>\<^sub>r P\<^sub>2 j))))) = ((\<And> i\<in>I \<bullet> RC2(P\<^sub>1(i))) \<Rightarrow>\<^sub>r (idle(\<And> i\<in>I \<bullet> idle(P\<^sub>2 i)) \<or> (\<Or> i\<in>I \<bullet> active(P\<^sub>2 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j)))))" apply (trr_simp cls: assms False, safe) apply meson apply meson apply blast apply blast apply (metis idleprefix_concat_Evt list_append_prefixD tocks_idleprefix_fp) apply (metis idleprefix_concat_Evt list_append_prefixD tocks_idleprefix_fp) apply (metis idleprefix_concat_Evt list_append_prefixD tocks_idleprefix_fp) apply blast+ done hence 1: "((\<And> i\<in>I \<bullet> P\<^sub>1(i)) \<Rightarrow>\<^sub>r (idle(\<And> i\<in>I \<bullet> idle(P\<^sub>1 i \<Rightarrow>\<^sub>r P\<^sub>2 i)) \<or> (\<Or> i\<in>I \<bullet> active(P\<^sub>1 i \<Rightarrow>\<^sub>r P\<^sub>2 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>1 j \<Rightarrow>\<^sub>r P\<^sub>2 j))))) = ((\<And> i\<in>I \<bullet> P\<^sub>1(i)) \<Rightarrow>\<^sub>r (idle(\<And> i\<in>I \<bullet> idle(P\<^sub>2 i)) \<or> (\<Or> i\<in>I \<bullet> active(P\<^sub>2 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j)))))" by (simp add: Healthy_if assms closure) have "((\<And> i\<in>I \<bullet> RC2(P\<^sub>1(i))) \<Rightarrow>\<^sub>r (\<Or> i\<in>I \<bullet> (RC2(P\<^sub>1 i) \<Rightarrow>\<^sub>r P\<^sub>3 i) \<and> (\<And> j\<in>I \<bullet> 
time(RC2(P\<^sub>1 j) \<Rightarrow>\<^sub>r P\<^sub>2 j)))) = ((\<And> i\<in>I \<bullet> RC2(P\<^sub>1(i))) \<Rightarrow>\<^sub>r (\<Or> i\<in>I \<bullet> (P\<^sub>3 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j))))" apply (trr_simp cls: assms False, safe) apply auto[1] apply (meson idleprefix_prefix order.trans) apply blast done hence 2: "((\<And> i\<in>I \<bullet> P\<^sub>1(i)) \<Rightarrow>\<^sub>r (\<Or> i\<in>I \<bullet> (P\<^sub>1 i \<Rightarrow>\<^sub>r P\<^sub>3 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>1 j \<Rightarrow>\<^sub>r P\<^sub>2 j)))) = ((\<And> i\<in>I \<bullet> P\<^sub>1(i)) \<Rightarrow>\<^sub>r (\<Or> i\<in>I \<bullet> (P\<^sub>3 i) \<and> (\<And> j\<in>I \<bullet> time(P\<^sub>2 j))))" by (simp add: Healthy_if assms closure) show ?thesis by (simp add: ExtChoice_def rdes assms closure False Healthy_if) (metis (no_types, lifting) "1" "2" rdes_tri_eq_intro rea_impl_mp) qed lemma ExtChoice_dual: assumes "P is TC" "Q is TC" "peri\<^sub>R P is TIP" "peri\<^sub>R Q is TIP" shows "ExtChoice {P, Q} id = P \<box> Q" apply (simp add: ExtChoice_def closure assms extChoice_def rpred usup_and uinf_or conj_disj_distr) apply (rule rdes_tri_eq_intro) apply (simp_all add: assms Healthy_if closure) apply (smt TC_inner_closures(2) TIP_time_active assms(1) assms(2) assms(3) assms(4) conj_comm utp_pred_laws.inf_left_commute utp_pred_laws.sup_commute) oops text \<open> Proving idempotence of binary external choice is complicated by the need to show that @{term "(time(peri\<^sub>R(P)) \<and> post\<^sub>R(P)) = post\<^sub>R(P)"} \<close> lemma e: "ExtChoice {\<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3), \<^bold>R(Q\<^sub>1 \<turnstile> Q\<^sub>2 \<diamondop> Q\<^sub>3)} id = ExtChoice {True, False} (\<lambda> p. \<^bold>R((if p then P\<^sub>1 else Q\<^sub>1) \<turnstile> (if p then P\<^sub>2 else Q\<^sub>2) \<diamondop> (if p then P\<^sub>3 else Q\<^sub>3)))" by (simp add: ExtChoice_def) lemma extChoice_rdes_def [rdes_def]: assumes "P\<^sub>1 is TRC" "P\<^sub>2 is TRR" "P\<^sub>3 is TRR" "Q\<^sub>1 is TRC" "Q\<^sub>2 is TRR" "Q\<^sub>3 is TRR" shows "\<^bold>R(P\<^sub>1 \<turnstile> P\<^sub>2 \<diamondop> P\<^sub>3) \<box> \<^bold>R(Q\<^sub>1 \<turnstile> Q\<^sub>2 \<diamondop> Q\<^sub>3) = \<^bold>R((P\<^sub>1 \<and> Q\<^sub>1) \<turnstile> (idle(P\<^sub>2) \<and> idle(Q\<^sub>2) \<or> time(P\<^sub>2) \<and> active(Q\<^sub>2) \<or> time(Q\<^sub>2) \<and> active(P\<^sub>2)) \<diamondop> (time(P\<^sub>2) \<and> Q\<^sub>3 \<or> time(Q\<^sub>2) \<and> P\<^sub>3))" proof - have 1: "((P\<^sub>1 \<and> Q\<^sub>1) \<and> (idle(RC2 P\<^sub>1 \<Rightarrow>\<^sub>r P\<^sub>2) \<and> idle(RC2 Q\<^sub>1 \<Rightarrow>\<^sub>r Q\<^sub>2) \<or> time(RC2 P\<^sub>1 \<Rightarrow>\<^sub>r P\<^sub>2) \<and> active(RC2 Q\<^sub>1 \<Rightarrow>\<^sub>r Q\<^sub>2) \<or> time(RC2 Q\<^sub>1 \<Rightarrow>\<^sub>r Q\<^sub>2) \<and> active(RC2 P\<^sub>1 \<Rightarrow>\<^sub>r P\<^sub>2))) = ((P\<^sub>1 \<and> Q\<^sub>1) \<and> (idle(P\<^sub>2) \<and> idle(Q\<^sub>2) \<or> time(P\<^sub>2) \<and> active(Q\<^sub>2) \<or> time(Q\<^sub>2) \<and> active(P\<^sub>2)))" using idleprefix_prefix by (trr_simp cls: assms, blast) have 2: "((P\<^sub>1 \<and> Q\<^sub>1) \<and> (time(RC2 P\<^sub>1 \<Rightarrow>\<^sub>r P\<^sub>2) \<and> (RC2 Q\<^sub>1 \<Rightarrow>\<^sub>r Q\<^sub>3) \<or> time(RC2 Q\<^sub>1 \<Rightarrow>\<^sub>r Q\<^sub>2) \<and> (RC2 P\<^sub>1 \<Rightarrow>\<^sub>r P\<^sub>3))) = ((P\<^sub>1 \<and> Q\<^sub>1) \<and> (time(P\<^sub>2) \<and> (Q\<^sub>3) \<or> time(Q\<^sub>2) \<and> 
(P\<^sub>3)))" using idleprefix_prefix by (trr_simp cls: assms, blast) from 1 2 show ?thesis by (simp add: extChoice_def rpred closure assms Healthy_if rdes, metis (no_types, lifting) rdes_tri_eq_intro) qed lemma [rpred]: "active(\<T>(X, A) ;; \<E>(s, [], E, p)) = false" by (rel_auto) lemma "Skip \<box> Stop = Skip" by (rdes_eq) lemma "Wait m \<box> Wait m = Wait m" by (rdes_eq) lemma "Wait m \<box> Wait n = Wait U(min m n)" apply (rdes_eq_split, simp_all add: rpred closure) oops lemma "Skip \<box> Stop\<^sub>U = Skip" by (rdes_eq) lemma "Skip \<box> Div = Skip" by (rdes_eq) lemma "Wait(n + 1) \<box> Div = Div" by (rdes_eq) lemma "Wait(n + 1) \<box> Stop\<^sub>U = Stop\<^sub>U" by (rdes_eq) lemma "Stop \<box> do\<^sub>T(a) = do\<^sub>T(a)" apply (rdes_eq_split) apply (simp_all add: rpred closure) apply (trr_auto) using tocks_idleprefix_fp tocks_iff_idleprefix_fp apply blast done lemma "Wait m \<box> Skip = Skip" by (rdes_eq) lemma extChoice_commute: assumes "P is TC" "Q is TC" shows "P \<box> Q = Q \<box> P" by (rdes_eq_split cls: assms, simp_all add: conj_comm conj_assoc disj_comm) lemma TRC_conj [closure]: "\<lbrakk> P is TRC; Q is TRC \<rbrakk> \<Longrightarrow> (P \<and> Q) is TRC" by (simp add: TRC_implies_RC TRC_wp_intro TRR_wp_unit conj_RC_closed wp_rea_conj) lemma TRF_conj [closure]: "\<lbrakk> P is TRF; Q is TRF \<rbrakk> \<Longrightarrow> (P \<and> Q) is TRF" by (simp add: TRF_implies_TRR TRF_intro TRF_unrests(1) TRF_unrests(2) TRR_conj unrest_conj) lemma uns_refine: "P \<sqsubseteq> \<U>(true, []) \<Longrightarrow> idle(P) \<sqsubseteq> \<U>(true, [])" by (rel_auto) lemma extChoice_closure [closure]: assumes "P is TC" "Q is TC" shows "P \<box> Q is TC" apply (rdes_simp cls: assms) apply (rule TC_intro) apply (simp_all add: closure assms) apply (simp add: TC_inner_closures(4) assms(1) assms(2) uns_refine utp_pred_laws.le_supI1) oops lemma extChoice_idem: assumes "P is TC" "peri\<^sub>R(P) is TIP" shows "P \<box> P = P" proof - have 1: "time(peri\<^sub>R P) \<sqsubseteq> post\<^sub>R P" by (rule time_peri_in_post, simp_all add: closure assms) show ?thesis apply (rdes_eq_split cls: assms) apply (simp add: assms rpred closure) apply (simp_all add: assms utp_pred_laws.inf_commute closure rpred) apply (simp add: "1" conj_comm utp_pred_laws.inf.absorb1) done qed lemma extChoice_unit: assumes "P is TC" shows "Stop \<box> P = P" by (rdes_eq_split cls: assms) lemma "Stop \<box> \<langle>\<sigma>\<rangle>\<^sub>T = \<langle>\<sigma>\<rangle>\<^sub>T" by (simp add: AssignsT_TC extChoice_unit) text \<open> Pedro Comment: Renaming should be a relation rather than a function. \<close> end
theory flash46Bra imports flash46Rev begin lemma onInv46: assumes a1:"iInv1 \<le> N" and a2:"iInv2 \<le> N" and a3:"iInv1~=iInv2 " and b1:"r \<in> rules N" and b2:"invf=inv46 iInv1 iInv2 " shows "invHoldForRule' s invf r (invariants N)" proof - have c1:"ex1P N (% iRule1 . r=NI_Local_GetX_PutX1 N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_GetX iRule1 )\<or>ex1P N (% iRule1 . r=NI_Replace iRule1 )\<or>ex0P N ( r=NI_ShWb N )\<or>ex0P N ( r=PI_Local_GetX_GetX2 )\<or>ex0P N ( r=NI_Local_PutXAcksDone )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX7 N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Nak2 iRule1 )\<or>ex0P N ( r=NI_ReplaceHomeShrVld )\<or>ex1P N (% iRule1 . r=NI_Remote_Put iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX5 N iRule1 )\<or>ex0P N ( r=NI_Wb )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Get iRule1 )\<or>ex0P N ( r=PI_Local_Replace )\<or>ex1P N (% iRule1 . r=NI_ReplaceShrVld iRule1 )\<or>ex2P N (% iRule1 iRule2 . r=NI_Local_GetX_PutX8 N iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=NI_InvAck_2 N iRule1 )\<or>ex2P N (% iRule1 iRule2 . r=NI_Remote_Get_Nak2 iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=PI_Remote_Replace iRule1 )\<or>ex0P N ( r=NI_Nak_Home )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Put2 iRule1 )\<or>ex2P N (% iRule1 iRule2 . r=NI_InvAck_1 iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX11 N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX6 N iRule1 )\<or>ex2P N (% iRule1 iRule2 . r=NI_Remote_Get_Put2 iRule1 iRule2 )\<or>ex0P N ( r=PI_Local_Get_Put )\<or>ex0P N ( r=PI_Local_GetX_PutX1 N )\<or>ex1P N (% iRule1 . r=NI_InvAck_1_Home iRule1 )\<or>ex1P N (% iRule1 . r=NI_Remote_Get_Nak1 iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Nak1 iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_Nak2 iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX10_home N iRule1 )\<or>ex1P N (% iRule1 . r=PI_Remote_Get iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_Nak3 iRule1 )\<or>ex2P N (% iRule1 iRule2 . r=NI_Local_GetX_PutX10 N iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX2 N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Remote_Get_Put1 iRule1 )\<or>ex1P N (% iRule1 . r=NI_Remote_PutX iRule1 )\<or>ex1P N (% iRule1 . r=Store iRule1 )\<or>ex0P N ( r=NI_FAck )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX3 N iRule1 )\<or>ex0P N ( r=PI_Local_GetX_PutX3 )\<or>ex2P N (% iRule1 iRule2 . r=NI_Remote_GetX_PutX iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX8_home N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Put1 N iRule1 )\<or>ex0P N ( r=PI_Local_GetX_GetX1 )\<or>ex0P N ( r=StoreHome )\<or>ex2P N (% iRule1 iRule2 . r=NI_Remote_GetX_Nak iRule1 iRule2 )\<or>ex1P N (% iRule1 . r=NI_Inv iRule1 )\<or>ex1P N (% iRule1 . r=PI_Remote_PutX iRule1 )\<or>ex0P N ( r=PI_Local_GetX_PutX4 )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX4 N iRule1 )\<or>ex1P N (% iRule1 . r=NI_Nak iRule1 )\<or>ex0P N ( r=PI_Local_GetX_PutX2 N )\<or>ex0P N ( r=NI_Local_Put )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_Nak1 iRule1 )\<or>ex0P N ( r=NI_Nak_Clear )\<or>ex0P N ( r=PI_Local_PutX )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Nak3 iRule1 )\<or>ex1P N (% iRule1 . r=NI_Remote_GetX_Nak_Home iRule1 )\<or>ex0P N ( r=PI_Local_Get_Get )\<or>ex1P N (% iRule1 . r=NI_Local_GetX_PutX9 N iRule1 )\<or>ex1P N (% iRule1 . r=PI_Remote_GetX iRule1 )\<or>ex0P N ( r=NI_ReplaceHome )\<or>ex1P N (% iRule1 . r=NI_Remote_GetX_PutX_Home iRule1 )\<or>ex1P N (% iRule1 . r=NI_Local_Get_Put3 iRule1 )" apply(cut_tac b1) apply auto done moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Local_GetX_PutX1 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX1 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX1 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_GetX iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_GetX iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_GetX iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_GetXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Replace iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Replace iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Replace iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_ReplaceVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_ShWb N ) " from c1 have c2:" r= NI_ShWb N " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_ShWb N ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_ShWbVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_GetX2 ) " from c1 have c2:" r= PI_Local_GetX_GetX2 " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_GetX2 ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_GetX2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_Local_PutXAcksDone ) " from c1 have c2:" r= NI_Local_PutXAcksDone " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_PutXAcksDone ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_Local_PutXAcksDoneVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX7 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX7 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX7 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX7VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Nak2 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Nak2 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Nak2 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Nak2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_ReplaceHomeShrVld ) " from c1 have c2:" r= NI_ReplaceHomeShrVld " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_ReplaceHomeShrVld ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_ReplaceHomeShrVldVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Remote_Put iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_Put iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_Put iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_PutVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX5 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX5 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX5 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX5VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_Wb ) " from c1 have c2:" r= NI_Wb " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Wb ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_WbVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Get iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Get iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Get iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_GetVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_Replace ) " from c1 have c2:" r= PI_Local_Replace " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_Replace ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_ReplaceVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_ReplaceShrVld iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_ReplaceShrVld iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_ReplaceShrVld iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_ReplaceShrVldVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . r= NI_Local_GetX_PutX8 N iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Local_GetX_PutX8 N iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX8 N iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX8VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_InvAck_2 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_InvAck_2 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_InvAck_2 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_InvAck_2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . 
r= NI_Remote_Get_Nak2 iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Remote_Get_Nak2 iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_Get_Nak2 iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_Get_Nak2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= PI_Remote_Replace iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= PI_Remote_Replace iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Remote_Replace iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis PI_Remote_ReplaceVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_Nak_Home ) " from c1 have c2:" r= NI_Nak_Home " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Nak_Home ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_Nak_HomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Put2 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Put2 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Put2 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Put2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . r= NI_InvAck_1 iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_InvAck_1 iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_InvAck_1 iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_InvAck_1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX11 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX11 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX11 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX11VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX6 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX6 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX6 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX6VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . 
r= NI_Remote_Get_Put2 iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Remote_Get_Put2 iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_Get_Put2 iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_Get_Put2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_Get_Put ) " from c1 have c2:" r= PI_Local_Get_Put " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_Get_Put ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_Get_PutVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_PutX1 N ) " from c1 have c2:" r= PI_Local_GetX_PutX1 N " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_PutX1 N ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_PutX1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_InvAck_1_Home iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_InvAck_1_Home iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_InvAck_1_Home iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_InvAck_1_HomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Remote_Get_Nak1 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_Get_Nak1 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_Get_Nak1 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_Get_Nak1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Nak1 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Nak1 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Nak1 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Nak1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_Nak2 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_Nak2 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_Nak2 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_Nak2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX10_home N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX10_home N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX10_home N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX10_homeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= PI_Remote_Get iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= PI_Remote_Get iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Remote_Get iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis PI_Remote_GetVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_Nak3 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_Nak3 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_Nak3 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_Nak3VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . r= NI_Local_GetX_PutX10 N iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Local_GetX_PutX10 N iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX10 N iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX10VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX2 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX2 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX2 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Remote_Get_Put1 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_Get_Put1 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_Get_Put1 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_Get_Put1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Remote_PutX iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_PutX iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_PutX iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_PutXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= Store iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= Store iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (Store iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis StoreVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_FAck ) " from c1 have c2:" r= NI_FAck " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_FAck ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_FAckVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Local_GetX_PutX3 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX3 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX3 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX3VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_PutX3 ) " from c1 have c2:" r= PI_Local_GetX_PutX3 " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_PutX3 ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_PutX3VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . r= NI_Remote_GetX_PutX iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Remote_GetX_PutX iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_GetX_PutX iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_GetX_PutXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX8_home N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX8_home N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX8_home N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX8_homeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Put1 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Put1 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Put1 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Put1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_GetX1 ) " from c1 have c2:" r= PI_Local_GetX_GetX1 " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_GetX1 ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_GetX1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= StoreHome ) " from c1 have c2:" r= StoreHome " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (StoreHome ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis StoreHomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex2P N (% iRule1 iRule2 . r= NI_Remote_GetX_Nak iRule1 iRule2 ) " from c1 obtain iRule1 iRule2 where c2:" iRule1~=iRule2 \<and> iRule1 \<le> N \<and> iRule2 \<le> N \<and> r= NI_Remote_GetX_Nak iRule1 iRule2 " by (auto simp add: ex2P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_GetX_Nak iRule1 iRule2 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_GetX_NakVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Inv iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Inv iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Inv iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_InvVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= PI_Remote_PutX iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= PI_Remote_PutX iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Remote_PutX iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis PI_Remote_PutXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_PutX4 ) " from c1 have c2:" r= PI_Local_GetX_PutX4 " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_PutX4 ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_PutX4VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX4 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX4 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX4 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX4VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Nak iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Nak iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Nak iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_NakVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_GetX_PutX2 N ) " from c1 have c2:" r= PI_Local_GetX_PutX2 N " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_GetX_PutX2 N ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_GetX_PutX2VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_Local_Put ) " from c1 have c2:" r= NI_Local_Put " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Put ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_Local_PutVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Local_GetX_Nak1 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_Nak1 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_Nak1 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_Nak1VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_Nak_Clear ) " from c1 have c2:" r= NI_Nak_Clear " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Nak_Clear ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_Nak_ClearVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_PutX ) " from c1 have c2:" r= PI_Local_PutX " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_PutX ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_PutXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Nak3 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Nak3 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Nak3 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Nak3VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Remote_GetX_Nak_Home iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_GetX_Nak_Home iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_GetX_Nak_Home iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_GetX_Nak_HomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= PI_Local_Get_Get ) " from c1 have c2:" r= PI_Local_Get_Get " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Local_Get_Get ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis PI_Local_Get_GetVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_GetX_PutX9 N iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_GetX_PutX9 N iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_GetX_PutX9 N iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_GetX_PutX9VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= PI_Remote_GetX iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= PI_Remote_GetX iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (PI_Remote_GetX iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis PI_Remote_GetXVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex0P N ( r= NI_ReplaceHome ) " from c1 have c2:" r= NI_ReplaceHome " by (auto simp add: ex0P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_ReplaceHome ) (invariants N) " apply(cut_tac a1 a2 a3 b2 c2 ) by (metis NI_ReplaceHomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . 
r= NI_Remote_GetX_PutX_Home iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Remote_GetX_PutX_Home iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Remote_GetX_PutX_Home iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Remote_GetX_PutX_HomeVsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) } moreover {assume c1: "ex1P N (% iRule1 . r= NI_Local_Get_Put3 iRule1 ) " from c1 obtain iRule1 where c2:" iRule1 \<le> N \<and> r= NI_Local_Get_Put3 iRule1 " by (auto simp add: ex1P_def) have "invHoldForRule' s (inv46 iInv1 iInv2 ) (NI_Local_Get_Put3 iRule1 ) (invariants N) " apply(cut_tac c2 a1 a2 a3 ) by (metis NI_Local_Get_Put3VsInv46 ) then have "invHoldForRule' s invf r (invariants N) " by(cut_tac c2 b2, metis) }ultimately show "invHoldForRule' s invf r (invariants N) " by blast qed end
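The Isabelle script above discharges one proof obligation per protocol rule: each case matches a rule shape (ex0P/ex1P/ex2P over node indices), instantiates the corresponding per-rule lemma such as NI_Remote_GetX_PutX_HomeVsInv46, and the cases are finally combined with "ultimately ... by blast". The following Python fragment is only a toy sketch of that proof structure, checking that an invariant is preserved by every rule instance of a made-up two-node-exclusive model; the states, rules, and inv46 here are illustrative placeholders, not the FLASH model used in the proof.

# Toy sketch of the per-rule invariant check mirrored by the proof above.
# Everything here (states, rules, inv46) is a hypothetical stand-in.
from itertools import product

N = 3  # number of nodes in the parameterized system

def rule_ni_inv(state, i):
    # placeholder transition: node i drops exclusive ownership
    s = dict(state)
    s[("excl", i)] = False
    return s

def rule_pi_local_putx(state):
    # placeholder transition on the home node only
    s = dict(state)
    s[("excl", 0)] = True
    s.update({("excl", j): False for j in range(1, N)})
    return s

def inv46(state, i, j):
    # placeholder two-index invariant: two distinct nodes are never both exclusive
    return not (i != j and state[("excl", i)] and state[("excl", j)])

def preserved_by_all_rules(states):
    # one check per rule instance, like the case split over ex0P/ex1P/ex2P above
    rules = [lambda s, i=i: rule_ni_inv(s, i) for i in range(N)]
    rules.append(rule_pi_local_putx)
    for s in states:
        if not all(inv46(s, i, j) for i, j in product(range(N), repeat=2)):
            continue  # only states already satisfying the invariant matter
        for step in rules:
            t = step(s)
            assert all(inv46(t, i, j) for i, j in product(range(N), repeat=2))
    return True

all_states = [dict((("excl", i), bits[i]) for i in range(N))
              for bits in product([False, True], repeat=N)]
print(preserved_by_all_rules(all_states))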
#include "gpudb/Avro.hpp" #include <boost/thread/thread.hpp> namespace gpudb { namespace avro { Decoder::~Decoder() { } void runTask(const boost::function0<void>* task, std::string* error) { try { (*task)(); } catch (const std::exception& ex) { *error = ex.what(); } } void Executor::run(const std::vector<boost::function0<void> >& tasks) const { std::vector<std::string> errors; errors.resize(tasks.size(), ""); boost::thread_group threads; for (size_t i = 0; i < tasks.size(); ++i) { threads.create_thread(boost::bind(&runTask, &tasks[i], &errors[i])); } threads.join_all(); for (size_t i = 0; i < tasks.size(); ++i) { if (!errors[i].empty()) { throw GPUdbException(errors[i]); } } } Executor::~Executor() { } } }
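Executor::run above spawns one boost thread per task, joins them all, and rethrows the first recorded error message as a GPUdbException. A rough Python analogue of that behaviour (a sketch of the assumed semantics, not the gpudb API; GPUdbError is a placeholder name) using the standard threading module:

# Run every task on its own thread, record per-task error messages,
# then raise the first failure after all threads have joined.
import threading

class GPUdbError(Exception):
    pass

def run_all(tasks):
    errors = [""] * len(tasks)

    def worker(index, task):
        try:
            task()
        except Exception as exc:          # mirrors catching std::exception
            errors[index] = str(exc)

    threads = [threading.Thread(target=worker, args=(i, t))
               for i, t in enumerate(tasks)]
    for t in threads:
        t.start()
    for t in threads:                      # join_all
        t.join()
    for message in errors:                 # surface the first recorded error
        if message:
            raise GPUdbError(message)

# Example: run_all([lambda: None]) completes silently; a raising task
# makes run_all raise GPUdbError with that task's message.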
------------------------------------------------------------------------ -- Definitions of functions that generate list ------------------------------------------------------------------------ {-# OPTIONS --without-K --safe --exact-split #-} module Math.Combinatorics.ListFunction where open import Data.List open import Data.List.NonEmpty as NE using (List⁺) open import Data.Nat open import Data.Product as Prod using (_×_; _,_) open import Function open import Relation.Binary.PropositionalEquality hiding ([_]) module _ {a} {A : Set a} where -- applyEach (_+ 10) (upTo 3) -- >>> (10 ∷ 1 ∷ 2 ∷ []) ∷ (0 ∷ 11 ∷ 2 ∷ []) ∷ (0 ∷ 1 ∷ 12 ∷ []) ∷ [] applyEach : (A → A) → List A → List (List A) applyEach f [] = [] applyEach f (x ∷ xs) = (f x ∷ xs) ∷ map (x ∷_) (applyEach f xs) module _ {a} {A : Set a} where -- Combinations -- combinations 2 (upTo 3) -- >>> (0 ∷ 1 ∷ []) ∷ (0 ∷ 2 ∷ []) ∷ -- >>> (0 ∷ 3 ∷ []) ∷ (1 ∷ 2 ∷ []) ∷ (1 ∷ 3 ∷ []) ∷ (2 ∷ 3 ∷ []) ∷ [] combinations : ℕ → List A → List (List A) combinations 0 xs = [ [] ] combinations (suc k) [] = [] combinations (suc k) (x ∷ xs) = map (x ∷_) (combinations k xs) ++ combinations (suc k) xs -- combinationsWithComplement 2 (upTo 2) -- >>> (0 ∷ 1 ∷ [] , 2 ∷ []) ∷ -- >>> (0 ∷ 2 ∷ [] , 1 ∷ []) ∷ (1 ∷ 2 ∷ [] , 0 ∷ []) ∷ [] combinationsWithComplement : ℕ → List A → List (List A × List A) combinationsWithComplement 0 xs = [ [] , xs ] combinationsWithComplement (suc k) [] = [] combinationsWithComplement (suc k) (x ∷ xs) = map (Prod.map₁ (x ∷_)) (combinationsWithComplement k xs) ++ map (Prod.map₂ (x ∷_)) (combinationsWithComplement (suc k) xs) -- split list into two list (include empty list and order is preserved) -- splits₂ (upTo 3) -- >>> ([] , 0 ∷ 1 ∷ 2 ∷ []) ∷ -- >>> (0 ∷ [] , 1 ∷ 2 ∷ []) ∷ -- >>> (0 ∷ 1 ∷ [] , 2 ∷ []) ∷ (0 ∷ 1 ∷ 2 ∷ [] , []) ∷ [] splits₂ : List A → List (List A × List A) splits₂ [] = ([] , []) ∷ [] splits₂ (x ∷ xs) = ([] , x ∷ xs) ∷ map (Prod.map₁ (x ∷_)) (splits₂ xs) splits : ℕ → List A → List (List (List A)) splits 0 xs = [] splits 1 xs = [ xs ∷ [] ] splits (suc (suc k)) xs = concatMap f (splits (suc k) xs) where f : List (List A) → List (List (List A)) f [] = [] f (ys ∷ yss) = map (λ { (as , bs) → as ∷ bs ∷ yss }) (splits₂ ys) splits⁺₂Acc : A → List A → List (List⁺ A × List⁺ A) splits⁺₂Acc x [] = [] splits⁺₂Acc x (y ∷ xs) = (x NE.∷ [] , y NE.∷ xs) ∷ map (Prod.map₁ (x NE.∷⁺_)) (splits⁺₂Acc y xs) -- split list into two list (exclude empty list and order is preserved) splits⁺₂ : List⁺ A → List (List⁺ A × List⁺ A) splits⁺₂ (x NE.∷ xs) = splits⁺₂Acc x xs splits⁺ : ℕ → List⁺ A → List (List (List⁺ A)) splits⁺ 0 xs = [] splits⁺ 1 xs = [ xs ∷ [] ] splits⁺ (suc (suc k)) xs = concatMap f (splits⁺ (suc k) xs) where f : List (List⁺ A) → List (List (List⁺ A)) f [] = [] f (ys ∷ yss) = map (λ { (as , bs) → as ∷ bs ∷ yss }) (splits⁺₂ ys) splits⁺AllAcc : A → List A → List⁺ (List⁺ (List⁺ A)) splits⁺AllAcc x [] = ((x NE.∷ []) NE.∷ []) NE.∷ [] splits⁺AllAcc x (y ∷ xs) = NE.map (λ zs → (x NE.∷ []) NE.∷⁺ zs) yss NE.⁺++⁺ NE.map (λ { (z NE.∷ zs) → (x NE.∷⁺ z) NE.∷ zs} ) yss where yss = splits⁺AllAcc y xs splits⁺All : List⁺ A → List⁺ (List⁺ (List⁺ A)) splits⁺All (x NE.∷ xs) = splits⁺AllAcc x xs -- Partition of set -- Generalization of `combinationsWithComplement` -- partitions 2 (upTo 3) -- >>> ((0 ∷ []) ∷ (1 ∷ 2 ∷ []) ∷ []) ∷ -- >>> ((0 ∷ 1 ∷ []) ∷ (2 ∷ []) ∷ []) ∷ -- >>> ((1 ∷ []) ∷ (0 ∷ 2 ∷ []) ∷ []) ∷ [] partitions : ℕ → List A → List (List (List A)) partitions 0 [] = [ [] ] partitions 0 (x ∷ xs) = [] partitions (suc k) [] = [] partitions (suc k) (x ∷ xs) = 
map ([ x ] ∷_) (partitions k xs) ++ concatMap (applyEach (x ∷_)) (partitions (suc k) xs) partitionsAll : List A → List (List (List A)) partitionsAll [] = [ [] ] partitionsAll (x ∷ xs) = map ([ x ] ∷_) yss ++ concatMap (applyEach (x ∷_)) yss where yss = partitionsAll xs module _ {a} {A : Set a} where -- insertEverywhere 100 (upTo 2) -- >>> (100 ∷ 0 ∷ 1 ∷ []) ∷ (0 ∷ 100 ∷ 1 ∷ []) ∷ (0 ∷ 1 ∷ 100 ∷ []) ∷ [] insertEverywhere : A → List A → List (List A) insertEverywhere x [] = [ [ x ] ] insertEverywhere x (y ∷ ys) = (x ∷ y ∷ ys) ∷ map (y ∷_) (insertEverywhere x ys) module _ {a} {A : Set a} where -- permutations (upTo 2) -- >>> (0 ∷ 1 ∷ []) ∷ (1 ∷ 0 ∷ []) ∷ [] permutations : List A → List (List A) permutations [] = [ [] ] permutations (x ∷ xs) = concatMap (insertEverywhere x) (permutations xs)
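The Agda definitions above are all structurally recursive list generators. As a quick cross-check of what they compute, here is a direct Python transcription of applyEach, combinations, combinationsWithComplement and partitions (a sketch following the recursion, not the Agda module itself); for instance combinations(2, [0, 1, 2, 3]) yields exactly the six two-element sublists shown in the comments.

# Python transcription of the structural recursions above (sketch only).
def apply_each(f, xs):
    # applyEach: apply f at each position in turn
    if not xs:
        return []
    x, rest = xs[0], xs[1:]
    return [[f(x)] + rest] + [[x] + ys for ys in apply_each(f, rest)]

def combinations(k, xs):
    # all k-element sublists, order preserved
    if k == 0:
        return [[]]
    if not xs:
        return []
    x, rest = xs[0], xs[1:]
    return [[x] + ys for ys in combinations(k - 1, rest)] + combinations(k, rest)

def combinations_with_complement(k, xs):
    # each k-element sublist paired with the leftover elements
    if k == 0:
        return [([], xs)]
    if not xs:
        return []
    x, rest = xs[0], xs[1:]
    return ([([x] + ys, zs) for ys, zs in combinations_with_complement(k - 1, rest)]
            + [(ys, [x] + zs) for ys, zs in combinations_with_complement(k, rest)])

def partitions(k, xs):
    # partitions of xs into exactly k non-empty blocks
    if k == 0:
        return [[]] if not xs else []
    if not xs:
        return []
    x, rest = xs[0], xs[1:]
    return ([[[x]] + p for p in partitions(k - 1, rest)]
            + [q for p in partitions(k, rest)
                 for q in apply_each(lambda block: [x] + block, p)])

# combinations(2, [0, 1, 2, 3])
#   -> [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
# partitions(2, [0, 1, 2])
#   -> [[[0], [1, 2]], [[0, 1], [2]], [[1], [0, 2]]]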
\documentclass[fancy]{article} \setDate{November 2018} \begin{document} \begin{Name}{1}{rulessat}{author}{LDL Tools}{title} rulessat -- SAT solver for dsl4sc \end{Name} \section{Synopsis} rulessat <option>* <infile> \section{Description} rulessat reads a set of dsl4sc declarations from <infile>, converts them into an LDL formula φ, checks whether φ is satisfiable, and then returns either "valid" (¬φ is unsatisfiable), "satisfiable", or "unsatisfiable". \section{Options} \begin{description} \item[\Opt{-v}, \Opt{--verbose}] become verbose % \item[\Opt{-h}, \Opt{--help}] show usage \end{description} \section{See Also} rulesmc, ldlsat \section{Author} LDLTools development team at IBM Research. \begin{itemize} \item URL: \URL{https://ldltools.github.io} \item Email: \Email{[email protected]} \end{itemize} \section{Copyright} (C) Copyright IBM Corp. 2018. License Apache 2.0.\\ This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. \end{document}
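Since the documented tool is a command-line solver that prints one of "valid", "satisfiable", or "unsatisfiable", a caller can wrap it with a small subprocess helper. The sketch below assumes a rulessat binary on PATH and uses a hypothetical input file name; only the -v option documented above is passed through.

# Hedged sketch: drive rulessat from Python and map its one-word verdict.
# Assumes `rulessat` is installed on PATH; "spec.rules" is a made-up file name,
# and the verdict is assumed to appear on the last line of stdout.
import subprocess

def check_spec(infile, verbose=False):
    cmd = ["rulessat"]
    if verbose:
        cmd.append("-v")                 # documented option
    cmd.append(infile)
    out = subprocess.run(cmd, capture_output=True, text=True, check=True)
    lines = out.stdout.strip().splitlines()
    verdict = lines[-1].strip() if lines else ""
    if verdict not in {"valid", "satisfiable", "unsatisfiable"}:
        raise RuntimeError("unexpected rulessat output: %r" % out.stdout)
    return verdict

# print(check_spec("spec.rules"))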
From mathcomp Require Import ssreflect ssrnat ssrbool eqtype. From deriving Require Import deriving. Require Import type. Time Definition tree_indDef := [indDef for tree_rect]. Canonical tree_indType := IndType tree tree_indDef. Module nored. (* see https://github.com/arthuraa/deriving#simplification-and-performance-issues *) Definition tree_eqMixin : Equality.mixin_of tree. Proof. Time exact [derive nored eqMixin for tree]. Time Defined. (* -> does not reduce with QED *) Canonical tree_eqType := EqType tree tree_eqMixin. Eval vm_compute in t0 == t1. Time Eval vm_compute in t0 == t0. Time Eval vm_compute in t0 == t1. End nored. Optimize Heap. Module red. Time Definition tree_eqMixin := [derive eqMixin for tree]. Canonical tree_eqType := EqType tree tree_eqMixin. Eval vm_compute in t0 == t1. Time Eval vm_compute in t0 == t0. Time Eval vm_compute in t0 == t1. End red.
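The Coq file above times two ways of obtaining an equality mixin for tree (a non-reducing definition closed with Defined versus the default derived one) and then evaluates t0 == t1 with vm_compute. The comparison being measured, an automatically derived structural equality against a hand-written recursive one, can be imitated loosely in Python; the Node type and timing harness below are made up for illustration and have nothing to do with the deriving library itself.

# Illustration only: derived structural equality vs. a hand-written recursive
# comparison on a toy tree, timed the way the vm_compute lines above are.
import timeit
from dataclasses import dataclass

@dataclass(frozen=True)            # dataclass derives a structural __eq__
class Node:
    label: int
    children: tuple = ()

def manual_eq(a, b):
    # hand-written recursive comparison, analogous to a custom eq mixin
    return (a.label == b.label
            and len(a.children) == len(b.children)
            and all(manual_eq(x, y) for x, y in zip(a.children, b.children)))

def full_tree(depth):
    if depth == 0:
        return Node(0)
    child = full_tree(depth - 1)
    return Node(depth, (child, child))

t0 = full_tree(12)
t1 = full_tree(12)

print(timeit.timeit(lambda: t0 == t1, number=10))            # derived equality
print(timeit.timeit(lambda: manual_eq(t0, t1), number=10))   # manual equality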
(* Title: HOL/Fields.thy Author: Gertrud Bauer Author: Steven Obua Author: Tobias Nipkow Author: Lawrence C Paulson Author: Markus Wenzel Author: Jeremy Avigad *) section \<open>Fields\<close> theory Fields imports Nat begin context idom begin lemma inj_mult_left [simp]: \<open>inj ((*) a) \<longleftrightarrow> a \<noteq> 0\<close> (is \<open>?P \<longleftrightarrow> ?Q\<close>) proof assume ?P show ?Q proof assume \<open>a = 0\<close> with \<open>?P\<close> have "inj ((*) 0)" by simp moreover have "0 * 0 = 0 * 1" by simp ultimately have "0 = 1" by (rule injD) then show False by simp qed next assume ?Q then show ?P by (auto intro: injI) qed end subsection \<open>Division rings\<close> text \<open> A division ring is like a field, but without the commutativity requirement. \<close> class inverse = divide + fixes inverse :: "'a \<Rightarrow> 'a" begin abbreviation inverse_divide :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "'/" 70) where "inverse_divide \<equiv> divide" end text \<open>Setup for linear arithmetic prover\<close> ML_file \<open>~~/src/Provers/Arith/fast_lin_arith.ML\<close> ML_file \<open>Tools/lin_arith.ML\<close> setup \<open>Lin_Arith.global_setup\<close> declaration \<open>K ( Lin_Arith.init_arith_data #> Lin_Arith.add_discrete_type \<^type_name>\<open>nat\<close> #> Lin_Arith.add_lessD @{thm Suc_leI} #> Lin_Arith.add_simps @{thms simp_thms ring_distribs if_True if_False minus_diff_eq add_0_left add_0_right order_less_irrefl zero_neq_one zero_less_one zero_le_one zero_neq_one [THEN not_sym] not_one_le_zero not_one_less_zero add_Suc add_Suc_right nat.inject Suc_le_mono Suc_less_eq Zero_not_Suc Suc_not_Zero le_0_eq One_nat_def} #> Lin_Arith.add_simprocs [\<^simproc>\<open>group_cancel_add\<close>, \<^simproc>\<open>group_cancel_diff\<close>, \<^simproc>\<open>group_cancel_eq\<close>, \<^simproc>\<open>group_cancel_le\<close>, \<^simproc>\<open>group_cancel_less\<close>, \<^simproc>\<open>nateq_cancel_sums\<close>,\<^simproc>\<open>natless_cancel_sums\<close>, \<^simproc>\<open>natle_cancel_sums\<close>])\<close> simproc_setup fast_arith_nat ("(m::nat) < n" | "(m::nat) \<le> n" | "(m::nat) = n") = \<open>K Lin_Arith.simproc\<close> \<comment> \<open>Because of this simproc, the arithmetic solver is really only useful to detect inconsistencies among the premises for subgoals which are \<^emph>\<open>not\<close> themselves (in)equalities, because the latter activate \<^text>\<open>fast_nat_arith_simproc\<close> anyway. 
However, it seems cheaper to activate the solver all the time rather than add the additional check.\<close> lemmas [arith_split] = nat_diff_split split_min split_max text\<open>Lemmas \<open>divide_simps\<close> move division to the outside and eliminates them on (in)equalities.\<close> named_theorems divide_simps "rewrite rules to eliminate divisions" class division_ring = ring_1 + inverse + assumes left_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1" assumes right_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> a * inverse a = 1" assumes divide_inverse: "a / b = a * inverse b" assumes inverse_zero [simp]: "inverse 0 = 0" begin subclass ring_1_no_zero_divisors proof fix a b :: 'a assume a: "a \<noteq> 0" and b: "b \<noteq> 0" show "a * b \<noteq> 0" proof assume ab: "a * b = 0" hence "0 = inverse a * (a * b) * inverse b" by simp also have "\<dots> = (inverse a * a) * (b * inverse b)" by (simp only: mult.assoc) also have "\<dots> = 1" using a b by simp finally show False by simp qed qed lemma nonzero_imp_inverse_nonzero: "a \<noteq> 0 \<Longrightarrow> inverse a \<noteq> 0" proof assume ianz: "inverse a = 0" assume "a \<noteq> 0" hence "1 = a * inverse a" by simp also have "... = 0" by (simp add: ianz) finally have "1 = 0" . thus False by (simp add: eq_commute) qed lemma inverse_zero_imp_zero: assumes "inverse a = 0" shows "a = 0" proof (rule ccontr) assume "a \<noteq> 0" then have "inverse a \<noteq> 0" by (simp add: nonzero_imp_inverse_nonzero) with assms show False by auto qed lemma inverse_unique: assumes ab: "a * b = 1" shows "inverse a = b" proof - have "a \<noteq> 0" using ab by (cases "a = 0") simp_all moreover have "inverse a * (a * b) = inverse a" by (simp add: ab) ultimately show ?thesis by (simp add: mult.assoc [symmetric]) qed lemma nonzero_inverse_minus_eq: "a \<noteq> 0 \<Longrightarrow> inverse (- a) = - inverse a" by (rule inverse_unique) simp lemma nonzero_inverse_inverse_eq: "a \<noteq> 0 \<Longrightarrow> inverse (inverse a) = a" by (rule inverse_unique) simp lemma nonzero_inverse_eq_imp_eq: assumes "inverse a = inverse b" and "a \<noteq> 0" and "b \<noteq> 0" shows "a = b" proof - from \<open>inverse a = inverse b\<close> have "inverse (inverse a) = inverse (inverse b)" by (rule arg_cong) with \<open>a \<noteq> 0\<close> and \<open>b \<noteq> 0\<close> show "a = b" by (simp add: nonzero_inverse_inverse_eq) qed lemma inverse_1 [simp]: "inverse 1 = 1" by (rule inverse_unique) simp lemma nonzero_inverse_mult_distrib: assumes "a \<noteq> 0" and "b \<noteq> 0" shows "inverse (a * b) = inverse b * inverse a" proof - have "a * (b * inverse b) * inverse a = 1" using assms by simp hence "a * b * (inverse b * inverse a) = 1" by (simp only: mult.assoc) thus ?thesis by (rule inverse_unique) qed lemma division_ring_inverse_add: "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a + inverse b = inverse a * (a + b) * inverse b" by (simp add: algebra_simps) lemma division_ring_inverse_diff: "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a - inverse b = inverse a * (b - a) * inverse b" by (simp add: algebra_simps) lemma right_inverse_eq: "b \<noteq> 0 \<Longrightarrow> a / b = 1 \<longleftrightarrow> a = b" proof assume neq: "b \<noteq> 0" { hence "a = (a / b) * b" by (simp add: divide_inverse mult.assoc) also assume "a / b = 1" finally show "a = b" by simp next assume "a = b" with neq show "a / b = 1" by (simp add: divide_inverse) } qed lemma nonzero_inverse_eq_divide: "a \<noteq> 0 \<Longrightarrow> inverse a = 1 / a" by 
(simp add: divide_inverse) lemma divide_self [simp]: "a \<noteq> 0 \<Longrightarrow> a / a = 1" by (simp add: divide_inverse) lemma inverse_eq_divide [field_simps, field_split_simps, divide_simps]: "inverse a = 1 / a" by (simp add: divide_inverse) lemma add_divide_distrib: "(a+b) / c = a/c + b/c" by (simp add: divide_inverse algebra_simps) lemma times_divide_eq_right [simp]: "a * (b / c) = (a * b) / c" by (simp add: divide_inverse mult.assoc) lemma minus_divide_left: "- (a / b) = (-a) / b" by (simp add: divide_inverse) lemma nonzero_minus_divide_right: "b \<noteq> 0 \<Longrightarrow> - (a / b) = a / (- b)" by (simp add: divide_inverse nonzero_inverse_minus_eq) lemma nonzero_minus_divide_divide: "b \<noteq> 0 \<Longrightarrow> (-a) / (-b) = a / b" by (simp add: divide_inverse nonzero_inverse_minus_eq) lemma divide_minus_left [simp]: "(-a) / b = - (a / b)" by (simp add: divide_inverse) lemma diff_divide_distrib: "(a - b) / c = a / c - b / c" using add_divide_distrib [of a "- b" c] by simp lemma nonzero_eq_divide_eq [field_simps]: "c \<noteq> 0 \<Longrightarrow> a = b / c \<longleftrightarrow> a * c = b" proof - assume [simp]: "c \<noteq> 0" have "a = b / c \<longleftrightarrow> a * c = (b / c) * c" by simp also have "... \<longleftrightarrow> a * c = b" by (simp add: divide_inverse mult.assoc) finally show ?thesis . qed lemma nonzero_divide_eq_eq [field_simps]: "c \<noteq> 0 \<Longrightarrow> b / c = a \<longleftrightarrow> b = a * c" proof - assume [simp]: "c \<noteq> 0" have "b / c = a \<longleftrightarrow> (b / c) * c = a * c" by simp also have "... \<longleftrightarrow> b = a * c" by (simp add: divide_inverse mult.assoc) finally show ?thesis . qed lemma nonzero_neg_divide_eq_eq [field_simps]: "b \<noteq> 0 \<Longrightarrow> - (a / b) = c \<longleftrightarrow> - a = c * b" using nonzero_divide_eq_eq[of b "-a" c] by simp lemma nonzero_neg_divide_eq_eq2 [field_simps]: "b \<noteq> 0 \<Longrightarrow> c = - (a / b) \<longleftrightarrow> c * b = - a" using nonzero_neg_divide_eq_eq[of b a c] by auto lemma divide_eq_imp: "c \<noteq> 0 \<Longrightarrow> b = a * c \<Longrightarrow> b / c = a" by (simp add: divide_inverse mult.assoc) lemma eq_divide_imp: "c \<noteq> 0 \<Longrightarrow> a * c = b \<Longrightarrow> a = b / c" by (drule sym) (simp add: divide_inverse mult.assoc) lemma add_divide_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> x + y / z = (x * z + y) / z" by (simp add: add_divide_distrib nonzero_eq_divide_eq) lemma divide_add_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> x / z + y = (x + y * z) / z" by (simp add: add_divide_distrib nonzero_eq_divide_eq) lemma diff_divide_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> x - y / z = (x * z - y) / z" by (simp add: diff_divide_distrib nonzero_eq_divide_eq eq_diff_eq) lemma minus_divide_add_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> - (x / z) + y = (- x + y * z) / z" by (simp add: add_divide_distrib diff_divide_eq_iff) lemma divide_diff_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> x / z - y = (x - y * z) / z" by (simp add: field_simps) lemma minus_divide_diff_eq_iff [field_simps]: "z \<noteq> 0 \<Longrightarrow> - (x / z) - y = (- x - y * z) / z" by (simp add: divide_diff_eq_iff[symmetric]) lemma division_ring_divide_zero [simp]: "a / 0 = 0" by (simp add: divide_inverse) lemma divide_self_if [simp]: "a / a = (if a = 0 then 0 else 1)" by simp lemma inverse_nonzero_iff_nonzero [simp]: "inverse a = 0 \<longleftrightarrow> a = 0" by rule (fact inverse_zero_imp_zero, simp) lemma inverse_minus_eq [simp]: 
"inverse (- a) = - inverse a" proof cases assume "a=0" thus ?thesis by simp next assume "a\<noteq>0" thus ?thesis by (simp add: nonzero_inverse_minus_eq) qed lemma inverse_inverse_eq [simp]: "inverse (inverse a) = a" proof cases assume "a=0" thus ?thesis by simp next assume "a\<noteq>0" thus ?thesis by (simp add: nonzero_inverse_inverse_eq) qed lemma inverse_eq_imp_eq: "inverse a = inverse b \<Longrightarrow> a = b" by (drule arg_cong [where f="inverse"], simp) lemma inverse_eq_iff_eq [simp]: "inverse a = inverse b \<longleftrightarrow> a = b" by (force dest!: inverse_eq_imp_eq) lemma mult_commute_imp_mult_inverse_commute: assumes "y * x = x * y" shows "inverse y * x = x * inverse y" proof (cases "y=0") case False hence "x * inverse y = inverse y * y * x * inverse y" by simp also have "\<dots> = inverse y * (x * y * inverse y)" by (simp add: mult.assoc assms) finally show ?thesis by (simp add: mult.assoc False) qed simp lemmas mult_inverse_of_nat_commute = mult_commute_imp_mult_inverse_commute[OF mult_of_nat_commute] lemma divide_divide_eq_left': "(a / b) / c = a / (c * b)" by (cases "b = 0 \<or> c = 0") (auto simp: divide_inverse mult.assoc nonzero_inverse_mult_distrib) lemma add_divide_eq_if_simps [field_split_simps, divide_simps]: "a + b / z = (if z = 0 then a else (a * z + b) / z)" "a / z + b = (if z = 0 then b else (a + b * z) / z)" "- (a / z) + b = (if z = 0 then b else (-a + b * z) / z)" "a - b / z = (if z = 0 then a else (a * z - b) / z)" "a / z - b = (if z = 0 then -b else (a - b * z) / z)" "- (a / z) - b = (if z = 0 then -b else (- a - b * z) / z)" by (simp_all add: add_divide_eq_iff divide_add_eq_iff diff_divide_eq_iff divide_diff_eq_iff minus_divide_diff_eq_iff) lemma [field_split_simps, divide_simps]: shows divide_eq_eq: "b / c = a \<longleftrightarrow> (if c \<noteq> 0 then b = a * c else a = 0)" and eq_divide_eq: "a = b / c \<longleftrightarrow> (if c \<noteq> 0 then a * c = b else a = 0)" and minus_divide_eq_eq: "- (b / c) = a \<longleftrightarrow> (if c \<noteq> 0 then - b = a * c else a = 0)" and eq_minus_divide_eq: "a = - (b / c) \<longleftrightarrow> (if c \<noteq> 0 then a * c = - b else a = 0)" by (auto simp add: field_simps) end subsection \<open>Fields\<close> class field = comm_ring_1 + inverse + assumes field_inverse: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1" assumes field_divide_inverse: "a / b = a * inverse b" assumes field_inverse_zero: "inverse 0 = 0" begin subclass division_ring proof fix a :: 'a assume "a \<noteq> 0" thus "inverse a * a = 1" by (rule field_inverse) thus "a * inverse a = 1" by (simp only: mult.commute) next fix a b :: 'a show "a / b = a * inverse b" by (rule field_divide_inverse) next show "inverse 0 = 0" by (fact field_inverse_zero) qed subclass idom_divide proof fix b a assume "b \<noteq> 0" then show "a * b / b = a" by (simp add: divide_inverse ac_simps) next fix a show "a / 0 = 0" by (simp add: divide_inverse) qed text\<open>There is no slick version using division by zero.\<close> lemma inverse_add: "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a + inverse b = (a + b) * inverse a * inverse b" by (simp add: division_ring_inverse_add ac_simps) lemma nonzero_mult_divide_mult_cancel_left [simp]: assumes [simp]: "c \<noteq> 0" shows "(c * a) / (c * b) = a / b" proof (cases "b = 0") case True then show ?thesis by simp next case False then have "(c*a)/(c*b) = c * a * (inverse b * inverse c)" by (simp add: divide_inverse nonzero_inverse_mult_distrib) also have "... 
= a * inverse b * (inverse c * c)" by (simp only: ac_simps) also have "... = a * inverse b" by simp finally show ?thesis by (simp add: divide_inverse) qed lemma nonzero_mult_divide_mult_cancel_right [simp]: "c \<noteq> 0 \<Longrightarrow> (a * c) / (b * c) = a / b" using nonzero_mult_divide_mult_cancel_left [of c a b] by (simp add: ac_simps) lemma times_divide_eq_left [simp]: "(b / c) * a = (b * a) / c" by (simp add: divide_inverse ac_simps) lemma divide_inverse_commute: "a / b = inverse b * a" by (simp add: divide_inverse mult.commute) lemma add_frac_eq: assumes "y \<noteq> 0" and "z \<noteq> 0" shows "x / y + w / z = (x * z + w * y) / (y * z)" proof - have "x / y + w / z = (x * z) / (y * z) + (y * w) / (y * z)" using assms by simp also have "\<dots> = (x * z + y * w) / (y * z)" by (simp only: add_divide_distrib) finally show ?thesis by (simp only: mult.commute) qed text\<open>Special Cancellation Simprules for Division\<close> lemma nonzero_divide_mult_cancel_right [simp]: "b \<noteq> 0 \<Longrightarrow> b / (a * b) = 1 / a" using nonzero_mult_divide_mult_cancel_right [of b 1 a] by simp lemma nonzero_divide_mult_cancel_left [simp]: "a \<noteq> 0 \<Longrightarrow> a / (a * b) = 1 / b" using nonzero_mult_divide_mult_cancel_left [of a 1 b] by simp lemma nonzero_mult_divide_mult_cancel_left2 [simp]: "c \<noteq> 0 \<Longrightarrow> (c * a) / (b * c) = a / b" using nonzero_mult_divide_mult_cancel_left [of c a b] by (simp add: ac_simps) lemma nonzero_mult_divide_mult_cancel_right2 [simp]: "c \<noteq> 0 \<Longrightarrow> (a * c) / (c * b) = a / b" using nonzero_mult_divide_mult_cancel_right [of b c a] by (simp add: ac_simps) lemma diff_frac_eq: "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> x / y - w / z = (x * z - w * y) / (y * z)" by (simp add: field_simps) lemma frac_eq_eq: "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> (x / y = w / z) = (x * z = w * y)" by (simp add: field_simps) lemma divide_minus1 [simp]: "x / - 1 = - x" using nonzero_minus_divide_right [of "1" x] by simp text\<open>This version builds in division by zero while also re-orienting the right-hand side.\<close> lemma inverse_mult_distrib [simp]: "inverse (a * b) = inverse a * inverse b" proof cases assume "a \<noteq> 0 \<and> b \<noteq> 0" thus ?thesis by (simp add: nonzero_inverse_mult_distrib ac_simps) next assume "\<not> (a \<noteq> 0 \<and> b \<noteq> 0)" thus ?thesis by force qed lemma inverse_divide [simp]: "inverse (a / b) = b / a" by (simp add: divide_inverse mult.commute) text \<open>Calculations with fractions\<close> text\<open>There is a whole bunch of simp-rules just for class \<open>field\<close> but none for class \<open>field\<close> and \<open>nonzero_divides\<close> because the latter are covered by a simproc.\<close> lemmas mult_divide_mult_cancel_left = nonzero_mult_divide_mult_cancel_left lemmas mult_divide_mult_cancel_right = nonzero_mult_divide_mult_cancel_right lemma divide_divide_eq_right [simp]: "a / (b / c) = (a * c) / b" by (simp add: divide_inverse ac_simps) lemma divide_divide_eq_left [simp]: "(a / b) / c = a / (b * c)" by (simp add: divide_inverse mult.assoc) lemma divide_divide_times_eq: "(x / y) / (z / w) = (x * w) / (y * z)" by simp text \<open>Special Cancellation Simprules for Division\<close> lemma mult_divide_mult_cancel_left_if [simp]: shows "(c * a) / (c * b) = (if c = 0 then 0 else a / b)" by simp text \<open>Division and Unary Minus\<close> lemma minus_divide_right: "- (a / b) = a / - b" by (simp add: divide_inverse) lemma divide_minus_right [simp]: "a / 
- b = - (a / b)" by (simp add: divide_inverse) lemma minus_divide_divide: "(- a) / (- b) = a / b" by (cases "b=0") (simp_all add: nonzero_minus_divide_divide) lemma inverse_eq_1_iff [simp]: "inverse x = 1 \<longleftrightarrow> x = 1" by (insert inverse_eq_iff_eq [of x 1], simp) lemma divide_eq_0_iff [simp]: "a / b = 0 \<longleftrightarrow> a = 0 \<or> b = 0" by (simp add: divide_inverse) lemma divide_cancel_right [simp]: "a / c = b / c \<longleftrightarrow> c = 0 \<or> a = b" by (cases "c=0") (simp_all add: divide_inverse) lemma divide_cancel_left [simp]: "c / a = c / b \<longleftrightarrow> c = 0 \<or> a = b" by (cases "c=0") (simp_all add: divide_inverse) lemma divide_eq_1_iff [simp]: "a / b = 1 \<longleftrightarrow> b \<noteq> 0 \<and> a = b" by (cases "b=0") (simp_all add: right_inverse_eq) lemma one_eq_divide_iff [simp]: "1 = a / b \<longleftrightarrow> b \<noteq> 0 \<and> a = b" by (simp add: eq_commute [of 1]) lemma divide_eq_minus_1_iff: "(a / b = - 1) \<longleftrightarrow> b \<noteq> 0 \<and> a = - b" using divide_eq_1_iff by fastforce lemma times_divide_times_eq: "(x / y) * (z / w) = (x * z) / (y * w)" by simp lemma add_frac_num: "y \<noteq> 0 \<Longrightarrow> x / y + z = (x + z * y) / y" by (simp add: add_divide_distrib) lemma add_num_frac: "y \<noteq> 0 \<Longrightarrow> z + x / y = (x + z * y) / y" by (simp add: add_divide_distrib add.commute) lemma dvd_field_iff: "a dvd b \<longleftrightarrow> (a = 0 \<longrightarrow> b = 0)" proof (cases "a = 0") case False then have "b = a * (b / a)" by (simp add: field_simps) then have "a dvd b" .. with False show ?thesis by simp qed simp lemma inj_divide_right [simp]: "inj (\<lambda>b. b / a) \<longleftrightarrow> a \<noteq> 0" proof - have "(\<lambda>b. b / a) = (*) (inverse a)" by (simp add: field_simps fun_eq_iff) then have "inj (\<lambda>y. 
y / a) \<longleftrightarrow> inj ((*) (inverse a))" by simp also have "\<dots> \<longleftrightarrow> inverse a \<noteq> 0" by simp also have "\<dots> \<longleftrightarrow> a \<noteq> 0" by simp finally show ?thesis by simp qed end class field_char_0 = field + ring_char_0 subsection \<open>Ordered fields\<close> class field_abs_sgn = field + idom_abs_sgn begin lemma sgn_inverse [simp]: "sgn (inverse a) = inverse (sgn a)" proof (cases "a = 0") case True then show ?thesis by simp next case False then have "a * inverse a = 1" by simp then have "sgn (a * inverse a) = sgn 1" by simp then have "sgn a * sgn (inverse a) = 1" by (simp add: sgn_mult) then have "inverse (sgn a) * (sgn a * sgn (inverse a)) = inverse (sgn a) * 1" by simp then have "(inverse (sgn a) * sgn a) * sgn (inverse a) = inverse (sgn a)" by (simp add: ac_simps) with False show ?thesis by (simp add: sgn_eq_0_iff) qed lemma abs_inverse [simp]: "\<bar>inverse a\<bar> = inverse \<bar>a\<bar>" proof - from sgn_mult_abs [of "inverse a"] sgn_mult_abs [of a] have "inverse (sgn a) * \<bar>inverse a\<bar> = inverse (sgn a * \<bar>a\<bar>)" by simp then show ?thesis by (auto simp add: sgn_eq_0_iff) qed lemma sgn_divide [simp]: "sgn (a / b) = sgn a / sgn b" unfolding divide_inverse sgn_mult by simp lemma abs_divide [simp]: "\<bar>a / b\<bar> = \<bar>a\<bar> / \<bar>b\<bar>" unfolding divide_inverse abs_mult by simp end class linordered_field = field + linordered_idom begin lemma positive_imp_inverse_positive: assumes a_gt_0: "0 < a" shows "0 < inverse a" proof - have "0 < a * inverse a" by (simp add: a_gt_0 [THEN less_imp_not_eq2]) thus "0 < inverse a" by (simp add: a_gt_0 [THEN less_not_sym] zero_less_mult_iff) qed lemma negative_imp_inverse_negative: "a < 0 \<Longrightarrow> inverse a < 0" by (insert positive_imp_inverse_positive [of "-a"], simp add: nonzero_inverse_minus_eq less_imp_not_eq) lemma inverse_le_imp_le: assumes invle: "inverse a \<le> inverse b" and apos: "0 < a" shows "b \<le> a" proof (rule classical) assume "\<not> b \<le> a" hence "a < b" by (simp add: linorder_not_le) hence bpos: "0 < b" by (blast intro: apos less_trans) hence "a * inverse a \<le> a * inverse b" by (simp add: apos invle less_imp_le mult_left_mono) hence "(a * inverse a) * b \<le> (a * inverse b) * b" by (simp add: bpos less_imp_le mult_right_mono) thus "b \<le> a" by (simp add: mult.assoc apos bpos less_imp_not_eq2) qed lemma inverse_positive_imp_positive: assumes inv_gt_0: "0 < inverse a" and nz: "a \<noteq> 0" shows "0 < a" proof - have "0 < inverse (inverse a)" using inv_gt_0 by (rule positive_imp_inverse_positive) thus "0 < a" using nz by (simp add: nonzero_inverse_inverse_eq) qed lemma inverse_negative_imp_negative: assumes inv_less_0: "inverse a < 0" and nz: "a \<noteq> 0" shows "a < 0" proof - have "inverse (inverse a) < 0" using inv_less_0 by (rule negative_imp_inverse_negative) thus "a < 0" using nz by (simp add: nonzero_inverse_inverse_eq) qed lemma linordered_field_no_lb: "\<forall>x. \<exists>y. y < x" proof fix x::'a have m1: "- (1::'a) < 0" by simp from add_strict_right_mono[OF m1, where c=x] have "(- 1) + x < x" by simp thus "\<exists>y. y < x" by blast qed lemma linordered_field_no_ub: "\<forall> x. \<exists>y. y > x" proof fix x::'a have m1: " (1::'a) > 0" by simp from add_strict_right_mono[OF m1, where c=x] have "1 + x > x" by simp thus "\<exists>y. 
y > x" by blast qed lemma less_imp_inverse_less: assumes less: "a < b" and apos: "0 < a" shows "inverse b < inverse a" proof (rule ccontr) assume "\<not> inverse b < inverse a" hence "inverse a \<le> inverse b" by simp hence "\<not> (a < b)" by (simp add: not_less inverse_le_imp_le [OF _ apos]) thus False by (rule notE [OF _ less]) qed lemma inverse_less_imp_less: assumes "inverse a < inverse b" "0 < a" shows "b < a" proof - have "a \<noteq> b" using assms by (simp add: less_le) moreover have "b \<le> a" using assms by (force simp: less_le dest: inverse_le_imp_le) ultimately show ?thesis by (simp add: less_le) qed text\<open>Both premises are essential. Consider -1 and 1.\<close> lemma inverse_less_iff_less [simp]: "0 < a \<Longrightarrow> 0 < b \<Longrightarrow> inverse a < inverse b \<longleftrightarrow> b < a" by (blast intro: less_imp_inverse_less dest: inverse_less_imp_less) lemma le_imp_inverse_le: "a \<le> b \<Longrightarrow> 0 < a \<Longrightarrow> inverse b \<le> inverse a" by (force simp add: le_less less_imp_inverse_less) lemma inverse_le_iff_le [simp]: "0 < a \<Longrightarrow> 0 < b \<Longrightarrow> inverse a \<le> inverse b \<longleftrightarrow> b \<le> a" by (blast intro: le_imp_inverse_le dest: inverse_le_imp_le) text\<open>These results refer to both operands being negative. The opposite-sign case is trivial, since inverse preserves signs.\<close> lemma inverse_le_imp_le_neg: assumes "inverse a \<le> inverse b" "b < 0" shows "b \<le> a" proof (rule classical) assume "\<not> b \<le> a" with \<open>b < 0\<close> have "a < 0" by force with assms show "b \<le> a" using inverse_le_imp_le [of "-b" "-a"] by (simp add: nonzero_inverse_minus_eq) qed lemma less_imp_inverse_less_neg: assumes "a < b" "b < 0" shows "inverse b < inverse a" proof - have "a < 0" using assms by (blast intro: less_trans) with less_imp_inverse_less [of "-b" "-a"] show ?thesis by (simp add: nonzero_inverse_minus_eq assms) qed lemma inverse_less_imp_less_neg: assumes "inverse a < inverse b" "b < 0" shows "b < a" proof (rule classical) assume "\<not> b < a" with \<open>b < 0\<close> have "a < 0" by force with inverse_less_imp_less [of "-b" "-a"] show ?thesis by (simp add: nonzero_inverse_minus_eq assms) qed lemma inverse_less_iff_less_neg [simp]: "a < 0 \<Longrightarrow> b < 0 \<Longrightarrow> inverse a < inverse b \<longleftrightarrow> b < a" using inverse_less_iff_less [of "-b" "-a"] by (simp del: inverse_less_iff_less add: nonzero_inverse_minus_eq) lemma le_imp_inverse_le_neg: "a \<le> b \<Longrightarrow> b < 0 \<Longrightarrow> inverse b \<le> inverse a" by (force simp add: le_less less_imp_inverse_less_neg) lemma inverse_le_iff_le_neg [simp]: "a < 0 \<Longrightarrow> b < 0 \<Longrightarrow> inverse a \<le> inverse b \<longleftrightarrow> b \<le> a" by (blast intro: le_imp_inverse_le_neg dest: inverse_le_imp_le_neg) lemma one_less_inverse: "0 < a \<Longrightarrow> a < 1 \<Longrightarrow> 1 < inverse a" using less_imp_inverse_less [of a 1, unfolded inverse_1] . lemma one_le_inverse: "0 < a \<Longrightarrow> a \<le> 1 \<Longrightarrow> 1 \<le> inverse a" using le_imp_inverse_le [of a 1, unfolded inverse_1] . lemma pos_le_divide_eq [field_simps]: assumes "0 < c" shows "a \<le> b / c \<longleftrightarrow> a * c \<le> b" proof - from assms have "a \<le> b / c \<longleftrightarrow> a * c \<le> (b / c) * c" using mult_le_cancel_right [of a c "b * inverse c"] by (auto simp add: field_simps) also have "... 
\<longleftrightarrow> a * c \<le> b" by (simp add: less_imp_not_eq2 [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma pos_less_divide_eq [field_simps]: assumes "0 < c" shows "a < b / c \<longleftrightarrow> a * c < b" proof - from assms have "a < b / c \<longleftrightarrow> a * c < (b / c) * c" using mult_less_cancel_right [of a c "b / c"] by auto also have "... = (a*c < b)" by (simp add: less_imp_not_eq2 [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma neg_less_divide_eq [field_simps]: assumes "c < 0" shows "a < b / c \<longleftrightarrow> b < a * c" proof - from assms have "a < b / c \<longleftrightarrow> (b / c) * c < a * c" using mult_less_cancel_right [of "b / c" c a] by auto also have "... \<longleftrightarrow> b < a * c" by (simp add: less_imp_not_eq [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma neg_le_divide_eq [field_simps]: assumes "c < 0" shows "a \<le> b / c \<longleftrightarrow> b \<le> a * c" proof - from assms have "a \<le> b / c \<longleftrightarrow> (b / c) * c \<le> a * c" using mult_le_cancel_right [of "b * inverse c" c a] by (auto simp add: field_simps) also have "... \<longleftrightarrow> b \<le> a * c" by (simp add: less_imp_not_eq [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma pos_divide_le_eq [field_simps]: assumes "0 < c" shows "b / c \<le> a \<longleftrightarrow> b \<le> a * c" proof - from assms have "b / c \<le> a \<longleftrightarrow> (b / c) * c \<le> a * c" using mult_le_cancel_right [of "b / c" c a] by auto also have "... \<longleftrightarrow> b \<le> a * c" by (simp add: less_imp_not_eq2 [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma pos_divide_less_eq [field_simps]: assumes "0 < c" shows "b / c < a \<longleftrightarrow> b < a * c" proof - from assms have "b / c < a \<longleftrightarrow> (b / c) * c < a * c" using mult_less_cancel_right [of "b / c" c a] by auto also have "... \<longleftrightarrow> b < a * c" by (simp add: less_imp_not_eq2 [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma neg_divide_le_eq [field_simps]: assumes "c < 0" shows "b / c \<le> a \<longleftrightarrow> a * c \<le> b" proof - from assms have "b / c \<le> a \<longleftrightarrow> a * c \<le> (b / c) * c" using mult_le_cancel_right [of a c "b / c"] by auto also have "... \<longleftrightarrow> a * c \<le> b" by (simp add: less_imp_not_eq [OF assms] divide_inverse mult.assoc) finally show ?thesis . qed lemma neg_divide_less_eq [field_simps]: assumes "c < 0" shows "b / c < a \<longleftrightarrow> a * c < b" proof - from assms have "b / c < a \<longleftrightarrow> a * c < b / c * c" using mult_less_cancel_right [of a c "b / c"] by auto also have "... \<longleftrightarrow> a * c < b" by (simp add: less_imp_not_eq [OF assms] divide_inverse mult.assoc) finally show ?thesis . 
qed text\<open>The following \<open>field_simps\<close> rules are necessary, as minus is always moved atop of division but we want to get rid of division.\<close> lemma pos_le_minus_divide_eq [field_simps]: "0 < c \<Longrightarrow> a \<le> - (b / c) \<longleftrightarrow> a * c \<le> - b" unfolding minus_divide_left by (rule pos_le_divide_eq) lemma neg_le_minus_divide_eq [field_simps]: "c < 0 \<Longrightarrow> a \<le> - (b / c) \<longleftrightarrow> - b \<le> a * c" unfolding minus_divide_left by (rule neg_le_divide_eq) lemma pos_less_minus_divide_eq [field_simps]: "0 < c \<Longrightarrow> a < - (b / c) \<longleftrightarrow> a * c < - b" unfolding minus_divide_left by (rule pos_less_divide_eq) lemma neg_less_minus_divide_eq [field_simps]: "c < 0 \<Longrightarrow> a < - (b / c) \<longleftrightarrow> - b < a * c" unfolding minus_divide_left by (rule neg_less_divide_eq) lemma pos_minus_divide_less_eq [field_simps]: "0 < c \<Longrightarrow> - (b / c) < a \<longleftrightarrow> - b < a * c" unfolding minus_divide_left by (rule pos_divide_less_eq) lemma neg_minus_divide_less_eq [field_simps]: "c < 0 \<Longrightarrow> - (b / c) < a \<longleftrightarrow> a * c < - b" unfolding minus_divide_left by (rule neg_divide_less_eq) lemma pos_minus_divide_le_eq [field_simps]: "0 < c \<Longrightarrow> - (b / c) \<le> a \<longleftrightarrow> - b \<le> a * c" unfolding minus_divide_left by (rule pos_divide_le_eq) lemma neg_minus_divide_le_eq [field_simps]: "c < 0 \<Longrightarrow> - (b / c) \<le> a \<longleftrightarrow> a * c \<le> - b" unfolding minus_divide_left by (rule neg_divide_le_eq) lemma frac_less_eq: "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> x / y < w / z \<longleftrightarrow> (x * z - w * y) / (y * z) < 0" by (subst less_iff_diff_less_0) (simp add: diff_frac_eq ) lemma frac_le_eq: "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> x / y \<le> w / z \<longleftrightarrow> (x * z - w * y) / (y * z) \<le> 0" by (subst le_iff_diff_le_0) (simp add: diff_frac_eq ) lemma divide_pos_pos[simp]: "0 < x \<Longrightarrow> 0 < y \<Longrightarrow> 0 < x / y" by(simp add:field_simps) lemma divide_nonneg_pos: "0 \<le> x \<Longrightarrow> 0 < y \<Longrightarrow> 0 \<le> x / y" by(simp add:field_simps) lemma divide_neg_pos: "x < 0 \<Longrightarrow> 0 < y \<Longrightarrow> x / y < 0" by(simp add:field_simps) lemma divide_nonpos_pos: "x \<le> 0 \<Longrightarrow> 0 < y \<Longrightarrow> x / y \<le> 0" by(simp add:field_simps) lemma divide_pos_neg: "0 < x \<Longrightarrow> y < 0 \<Longrightarrow> x / y < 0" by(simp add:field_simps) lemma divide_nonneg_neg: "0 \<le> x \<Longrightarrow> y < 0 \<Longrightarrow> x / y \<le> 0" by(simp add:field_simps) lemma divide_neg_neg: "x < 0 \<Longrightarrow> y < 0 \<Longrightarrow> 0 < x / y" by(simp add:field_simps) lemma divide_nonpos_neg: "x \<le> 0 \<Longrightarrow> y < 0 \<Longrightarrow> 0 \<le> x / y" by(simp add:field_simps) lemma divide_strict_right_mono: "\<lbrakk>a < b; 0 < c\<rbrakk> \<Longrightarrow> a / c < b / c" by (simp add: less_imp_not_eq2 divide_inverse mult_strict_right_mono positive_imp_inverse_positive) lemma divide_strict_right_mono_neg: assumes "b < a" "c < 0" shows "a / c < b / c" proof - have "b / - c < a / - c" by (rule divide_strict_right_mono) (use assms in auto) then show ?thesis by (simp add: less_imp_not_eq) qed text\<open>The last premise ensures that \<^term>\<open>a\<close> and \<^term>\<open>b\<close> have the same sign\<close> lemma divide_strict_left_mono: "\<lbrakk>b < a; 0 < c; 0 < a*b\<rbrakk> \<Longrightarrow> c / 
a < c / b" by (auto simp: field_simps zero_less_mult_iff mult_strict_right_mono) lemma divide_left_mono: "\<lbrakk>b \<le> a; 0 \<le> c; 0 < a*b\<rbrakk> \<Longrightarrow> c / a \<le> c / b" by (auto simp: field_simps zero_less_mult_iff mult_right_mono) lemma divide_strict_left_mono_neg: "\<lbrakk>a < b; c < 0; 0 < a*b\<rbrakk> \<Longrightarrow> c / a < c / b" by (auto simp: field_simps zero_less_mult_iff mult_strict_right_mono_neg) lemma mult_imp_div_pos_le: "0 < y \<Longrightarrow> x \<le> z * y \<Longrightarrow> x / y \<le> z" by (subst pos_divide_le_eq, assumption+) lemma mult_imp_le_div_pos: "0 < y \<Longrightarrow> z * y \<le> x \<Longrightarrow> z \<le> x / y" by(simp add:field_simps) lemma mult_imp_div_pos_less: "0 < y \<Longrightarrow> x < z * y \<Longrightarrow> x / y < z" by(simp add:field_simps) lemma mult_imp_less_div_pos: "0 < y \<Longrightarrow> z * y < x \<Longrightarrow> z < x / y" by(simp add:field_simps) lemma frac_le: assumes "0 \<le> y" "x \<le> y" "0 < w" "w \<le> z" shows "x / z \<le> y / w" proof (rule mult_imp_div_pos_le) show "z > 0" using assms by simp have "x \<le> y * z / w" proof (rule mult_imp_le_div_pos [OF \<open>0 < w\<close>]) show "x * w \<le> y * z" using assms by (auto intro: mult_mono) qed also have "... = y / w * z" by simp finally show "x \<le> y / w * z" . qed lemma frac_less: assumes "0 \<le> x" "x < y" "0 < w" "w \<le> z" shows "x / z < y / w" proof (rule mult_imp_div_pos_less) show "z > 0" using assms by simp have "x < y * z / w" proof (rule mult_imp_less_div_pos [OF \<open>0 < w\<close>]) show "x * w < y * z" using assms by (auto intro: mult_less_le_imp_less) qed also have "... = y / w * z" by simp finally show "x < y / w * z" . qed lemma frac_less2: assumes "0 < x" "x \<le> y" "0 < w" "w < z" shows "x / z < y / w" proof (rule mult_imp_div_pos_less) show "z > 0" using assms by simp show "x < y / w * z" using assms by (force intro: mult_imp_less_div_pos mult_le_less_imp_less) qed lemma less_half_sum: "a < b \<Longrightarrow> a < (a+b) / (1+1)" by (simp add: field_simps zero_less_two) lemma gt_half_sum: "a < b \<Longrightarrow> (a+b)/(1+1) < b" by (simp add: field_simps zero_less_two) subclass unbounded_dense_linorder proof fix x y :: 'a from less_add_one show "\<exists>y. x < y" .. from less_add_one have "x + (- 1) < (x + 1) + (- 1)" by (rule add_strict_right_mono) then have "x - 1 < x + 1 - 1" by simp then have "x - 1 < x" by (simp add: algebra_simps) then show "\<exists>y. y < x" .. show "x < y \<Longrightarrow> \<exists>z>x. z < y" by (blast intro!: less_half_sum gt_half_sum) qed subclass field_abs_sgn .. lemma inverse_sgn [simp]: "inverse (sgn a) = sgn a" by (cases a 0 rule: linorder_cases) simp_all lemma divide_sgn [simp]: "a / sgn b = a * sgn b" by (cases b 0 rule: linorder_cases) simp_all lemma nonzero_abs_inverse: "a \<noteq> 0 \<Longrightarrow> \<bar>inverse a\<bar> = inverse \<bar>a\<bar>" by (rule abs_inverse) lemma nonzero_abs_divide: "b \<noteq> 0 \<Longrightarrow> \<bar>a / b\<bar> = \<bar>a\<bar> / \<bar>b\<bar>" by (rule abs_divide) lemma field_le_epsilon: assumes e: "\<And>e. 
0 < e \<Longrightarrow> x \<le> y + e" shows "x \<le> y" proof (rule dense_le) fix t assume "t < x" hence "0 < x - t" by (simp add: less_diff_eq) from e [OF this] have "x + 0 \<le> x + (y - t)" by (simp add: algebra_simps) then have "0 \<le> y - t" by (simp only: add_le_cancel_left) then show "t \<le> y" by (simp add: algebra_simps) qed lemma inverse_positive_iff_positive [simp]: "(0 < inverse a) = (0 < a)" proof (cases "a = 0") case False then show ?thesis by (blast intro: inverse_positive_imp_positive positive_imp_inverse_positive) qed auto lemma inverse_negative_iff_negative [simp]: "(inverse a < 0) = (a < 0)" proof (cases "a = 0") case False then show ?thesis by (blast intro: inverse_negative_imp_negative negative_imp_inverse_negative) qed auto lemma inverse_nonnegative_iff_nonnegative [simp]: "0 \<le> inverse a \<longleftrightarrow> 0 \<le> a" by (simp add: not_less [symmetric]) lemma inverse_nonpositive_iff_nonpositive [simp]: "inverse a \<le> 0 \<longleftrightarrow> a \<le> 0" by (simp add: not_less [symmetric]) lemma one_less_inverse_iff: "1 < inverse x \<longleftrightarrow> 0 < x \<and> x < 1" using less_trans[of 1 x 0 for x] by (cases x 0 rule: linorder_cases) (auto simp add: field_simps) lemma one_le_inverse_iff: "1 \<le> inverse x \<longleftrightarrow> 0 < x \<and> x \<le> 1" proof (cases "x = 1") case True then show ?thesis by simp next case False then have "inverse x \<noteq> 1" by simp then have "1 \<noteq> inverse x" by blast then have "1 \<le> inverse x \<longleftrightarrow> 1 < inverse x" by (simp add: le_less) with False show ?thesis by (auto simp add: one_less_inverse_iff) qed lemma inverse_less_1_iff: "inverse x < 1 \<longleftrightarrow> x \<le> 0 \<or> 1 < x" by (simp add: not_le [symmetric] one_le_inverse_iff) lemma inverse_le_1_iff: "inverse x \<le> 1 \<longleftrightarrow> x \<le> 0 \<or> 1 \<le> x" by (simp add: not_less [symmetric] one_less_inverse_iff) lemma [field_split_simps, divide_simps]: shows le_divide_eq: "a \<le> b / c \<longleftrightarrow> (if 0 < c then a * c \<le> b else if c < 0 then b \<le> a * c else a \<le> 0)" and divide_le_eq: "b / c \<le> a \<longleftrightarrow> (if 0 < c then b \<le> a * c else if c < 0 then a * c \<le> b else 0 \<le> a)" and less_divide_eq: "a < b / c \<longleftrightarrow> (if 0 < c then a * c < b else if c < 0 then b < a * c else a < 0)" and divide_less_eq: "b / c < a \<longleftrightarrow> (if 0 < c then b < a * c else if c < 0 then a * c < b else 0 < a)" and le_minus_divide_eq: "a \<le> - (b / c) \<longleftrightarrow> (if 0 < c then a * c \<le> - b else if c < 0 then - b \<le> a * c else a \<le> 0)" and minus_divide_le_eq: "- (b / c) \<le> a \<longleftrightarrow> (if 0 < c then - b \<le> a * c else if c < 0 then a * c \<le> - b else 0 \<le> a)" and less_minus_divide_eq: "a < - (b / c) \<longleftrightarrow> (if 0 < c then a * c < - b else if c < 0 then - b < a * c else a < 0)" and minus_divide_less_eq: "- (b / c) < a \<longleftrightarrow> (if 0 < c then - b < a * c else if c < 0 then a * c < - b else 0 < a)" by (auto simp: field_simps not_less dest: antisym) text \<open>Division and Signs\<close> lemma shows zero_less_divide_iff: "0 < a / b \<longleftrightarrow> 0 < a \<and> 0 < b \<or> a < 0 \<and> b < 0" and divide_less_0_iff: "a / b < 0 \<longleftrightarrow> 0 < a \<and> b < 0 \<or> a < 0 \<and> 0 < b" and zero_le_divide_iff: "0 \<le> a / b \<longleftrightarrow> 0 \<le> a \<and> 0 \<le> b \<or> a \<le> 0 \<and> b \<le> 0" and divide_le_0_iff: "a / b \<le> 0 \<longleftrightarrow> 0 \<le> a \<and> b \<le> 0 \<or> a \<le> 0 
\<and> 0 \<le> b" by (auto simp add: field_split_simps) text \<open>Division and the Number One\<close> text\<open>Simplify expressions equated with 1\<close> lemma zero_eq_1_divide_iff [simp]: "0 = 1 / a \<longleftrightarrow> a = 0" by (cases "a = 0") (auto simp: field_simps) lemma one_divide_eq_0_iff [simp]: "1 / a = 0 \<longleftrightarrow> a = 0" using zero_eq_1_divide_iff[of a] by simp text\<open>Simplify expressions such as \<open>0 < 1/x\<close> to \<open>0 < x\<close>\<close> lemma zero_le_divide_1_iff [simp]: "0 \<le> 1 / a \<longleftrightarrow> 0 \<le> a" by (simp add: zero_le_divide_iff) lemma zero_less_divide_1_iff [simp]: "0 < 1 / a \<longleftrightarrow> 0 < a" by (simp add: zero_less_divide_iff) lemma divide_le_0_1_iff [simp]: "1 / a \<le> 0 \<longleftrightarrow> a \<le> 0" by (simp add: divide_le_0_iff) lemma divide_less_0_1_iff [simp]: "1 / a < 0 \<longleftrightarrow> a < 0" by (simp add: divide_less_0_iff) lemma divide_right_mono: "\<lbrakk>a \<le> b; 0 \<le> c\<rbrakk> \<Longrightarrow> a/c \<le> b/c" by (force simp add: divide_strict_right_mono le_less) lemma divide_right_mono_neg: "a \<le> b \<Longrightarrow> c \<le> 0 \<Longrightarrow> b / c \<le> a / c" by (auto dest: divide_right_mono [of _ _ "- c"]) lemma divide_left_mono_neg: "a \<le> b \<Longrightarrow> c \<le> 0 \<Longrightarrow> 0 < a * b \<Longrightarrow> c / a \<le> c / b" by (auto simp add: mult.commute dest: divide_left_mono [of _ _ "- c"]) lemma inverse_le_iff: "inverse a \<le> inverse b \<longleftrightarrow> (0 < a * b \<longrightarrow> b \<le> a) \<and> (a * b \<le> 0 \<longrightarrow> a \<le> b)" by (cases a 0 b 0 rule: linorder_cases[case_product linorder_cases]) (auto simp add: field_simps zero_less_mult_iff mult_le_0_iff) lemma inverse_less_iff: "inverse a < inverse b \<longleftrightarrow> (0 < a * b \<longrightarrow> b < a) \<and> (a * b \<le> 0 \<longrightarrow> a < b)" by (subst less_le) (auto simp: inverse_le_iff) lemma divide_le_cancel: "a / c \<le> b / c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> a)" by (simp add: divide_inverse mult_le_cancel_right) lemma divide_less_cancel: "a / c < b / c \<longleftrightarrow> (0 < c \<longrightarrow> a < b) \<and> (c < 0 \<longrightarrow> b < a) \<and> c \<noteq> 0" by (auto simp add: divide_inverse mult_less_cancel_right) text\<open>Simplify quotients that are compared with the value 1.\<close> lemma le_divide_eq_1: "(1 \<le> b / a) = ((0 < a \<and> a \<le> b) \<or> (a < 0 \<and> b \<le> a))" by (auto simp add: le_divide_eq) lemma divide_le_eq_1: "(b / a \<le> 1) = ((0 < a \<and> b \<le> a) \<or> (a < 0 \<and> a \<le> b) \<or> a=0)" by (auto simp add: divide_le_eq) lemma less_divide_eq_1: "(1 < b / a) = ((0 < a \<and> a < b) \<or> (a < 0 \<and> b < a))" by (auto simp add: less_divide_eq) lemma divide_less_eq_1: "(b / a < 1) = ((0 < a \<and> b < a) \<or> (a < 0 \<and> a < b) \<or> a=0)" by (auto simp add: divide_less_eq) lemma divide_nonneg_nonneg [simp]: "0 \<le> x \<Longrightarrow> 0 \<le> y \<Longrightarrow> 0 \<le> x / y" by (auto simp add: field_split_simps) lemma divide_nonpos_nonpos: "x \<le> 0 \<Longrightarrow> y \<le> 0 \<Longrightarrow> 0 \<le> x / y" by (auto simp add: field_split_simps) lemma divide_nonneg_nonpos: "0 \<le> x \<Longrightarrow> y \<le> 0 \<Longrightarrow> x / y \<le> 0" by (auto simp add: field_split_simps) lemma divide_nonpos_nonneg: "x \<le> 0 \<Longrightarrow> 0 \<le> y \<Longrightarrow> x / y \<le> 0" by (auto simp add: field_split_simps) text \<open>Conditional 
Simplification Rules: No Case Splits\<close> lemma le_divide_eq_1_pos [simp]: "0 < a \<Longrightarrow> (1 \<le> b/a) = (a \<le> b)" by (auto simp add: le_divide_eq) lemma le_divide_eq_1_neg [simp]: "a < 0 \<Longrightarrow> (1 \<le> b/a) = (b \<le> a)" by (auto simp add: le_divide_eq) lemma divide_le_eq_1_pos [simp]: "0 < a \<Longrightarrow> (b/a \<le> 1) = (b \<le> a)" by (auto simp add: divide_le_eq) lemma divide_le_eq_1_neg [simp]: "a < 0 \<Longrightarrow> (b/a \<le> 1) = (a \<le> b)" by (auto simp add: divide_le_eq) lemma less_divide_eq_1_pos [simp]: "0 < a \<Longrightarrow> (1 < b/a) = (a < b)" by (auto simp add: less_divide_eq) lemma less_divide_eq_1_neg [simp]: "a < 0 \<Longrightarrow> (1 < b/a) = (b < a)" by (auto simp add: less_divide_eq) lemma divide_less_eq_1_pos [simp]: "0 < a \<Longrightarrow> (b/a < 1) = (b < a)" by (auto simp add: divide_less_eq) lemma divide_less_eq_1_neg [simp]: "a < 0 \<Longrightarrow> b/a < 1 \<longleftrightarrow> a < b" by (auto simp add: divide_less_eq) lemma eq_divide_eq_1 [simp]: "(1 = b/a) = ((a \<noteq> 0 \<and> a = b))" by (auto simp add: eq_divide_eq) lemma divide_eq_eq_1 [simp]: "(b/a = 1) = ((a \<noteq> 0 \<and> a = b))" by (auto simp add: divide_eq_eq) lemma abs_div_pos: "0 < y \<Longrightarrow> \<bar>x\<bar> / y = \<bar>x / y\<bar>" by (simp add: order_less_imp_le) lemma zero_le_divide_abs_iff [simp]: "(0 \<le> a / \<bar>b\<bar>) = (0 \<le> a \<or> b = 0)" by (auto simp: zero_le_divide_iff) lemma divide_le_0_abs_iff [simp]: "(a / \<bar>b\<bar> \<le> 0) = (a \<le> 0 \<or> b = 0)" by (auto simp: divide_le_0_iff) lemma field_le_mult_one_interval: assumes *: "\<And>z. \<lbrakk> 0 < z ; z < 1 \<rbrakk> \<Longrightarrow> z * x \<le> y" shows "x \<le> y" proof (cases "0 < x") assume "0 < x" thus ?thesis using dense_le_bounded[of 0 1 "y/x"] * unfolding le_divide_eq if_P[OF \<open>0 < x\<close>] by simp next assume "\<not>0 < x" hence "x \<le> 0" by simp obtain s::'a where s: "0 < s" "s < 1" using dense[of 0 "1::'a"] by auto hence "x \<le> s * x" using mult_le_cancel_right[of 1 x s] \<open>x \<le> 0\<close> by auto also note *[OF s] finally show ?thesis . 
qed text\<open>For creating values between \<^term>\<open>u\<close> and \<^term>\<open>v\<close>.\<close> lemma scaling_mono: assumes "u \<le> v" "0 \<le> r" "r \<le> s" shows "u + r * (v - u) / s \<le> v" proof - have "r/s \<le> 1" using assms using divide_le_eq_1 by fastforce moreover have "0 \<le> v - u" using assms by simp ultimately have "(r/s) * (v - u) \<le> 1 * (v - u)" by (rule mult_right_mono) then show ?thesis by (simp add: field_simps) qed end text \<open>Min/max Simplification Rules\<close> lemma min_mult_distrib_left: fixes x::"'a::linordered_idom" shows "p * min x y = (if 0 \<le> p then min (p*x) (p*y) else max (p*x) (p*y))" by (auto simp add: min_def max_def mult_le_cancel_left) lemma min_mult_distrib_right: fixes x::"'a::linordered_idom" shows "min x y * p = (if 0 \<le> p then min (x*p) (y*p) else max (x*p) (y*p))" by (auto simp add: min_def max_def mult_le_cancel_right) lemma min_divide_distrib_right: fixes x::"'a::linordered_field" shows "min x y / p = (if 0 \<le> p then min (x/p) (y/p) else max (x/p) (y/p))" by (simp add: min_mult_distrib_right divide_inverse) lemma max_mult_distrib_left: fixes x::"'a::linordered_idom" shows "p * max x y = (if 0 \<le> p then max (p*x) (p*y) else min (p*x) (p*y))" by (auto simp add: min_def max_def mult_le_cancel_left) lemma max_mult_distrib_right: fixes x::"'a::linordered_idom" shows "max x y * p = (if 0 \<le> p then max (x*p) (y*p) else min (x*p) (y*p))" by (auto simp add: min_def max_def mult_le_cancel_right) lemma max_divide_distrib_right: fixes x::"'a::linordered_field" shows "max x y / p = (if 0 \<le> p then max (x/p) (y/p) else min (x/p) (y/p))" by (simp add: max_mult_distrib_right divide_inverse) hide_fact (open) field_inverse field_divide_inverse field_inverse_zero code_identifier code_module Fields \<rightharpoonup> (SML) Arith and (OCaml) Arith and (Haskell) Arith end
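Many of the lemmas in the theory above (for example nonzero_inverse_mult_distrib, add_frac_eq, and the sign-split divide_simps rule le_divide_eq) are equations or iff-statements about division in a field, so they can be spot-checked over the rationals. The following property-test sketch uses Python's fractions.Fraction; it is a sanity check of a few sampled instances, not a substitute for the proofs.

# Spot-check a few of the field lemmas above over the rationals (sketch only).
from fractions import Fraction as Q
from itertools import product

samples = [Q(n, d) for n in range(-2, 3) for d in (1, 2, 3)]

for a, b in product(samples, repeat=2):
    if a != 0 and b != 0:
        # nonzero_inverse_mult_distrib: inverse (a*b) = inverse b * inverse a
        assert 1 / (a * b) == (1 / b) * (1 / a)

for x, y, w, z in product(samples, repeat=4):
    if y != 0 and z != 0:
        # add_frac_eq: x/y + w/z = (x*z + w*y) / (y*z)
        assert x / y + w / z == (x * z + w * y) / (y * z)

for a, b, c in product(samples, repeat=3):
    # le_divide_eq (a divide_simps rule): unfold a <= b/c by the sign of c
    if c > 0:
        assert (a <= b / c) == (a * c <= b)
    elif c < 0:
        assert (a <= b / c) == (b <= a * c)

print("all sampled instances hold")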
module storage_construction_api
  use :: storage_builder_factory_module, only : &
      get_storage_builder, &
      create_storage_builder
  use :: storage_builder_module, only : storage_builder
  use :: storage_builder_interface, only : &
      allocate_and_create_storage, &
      create_storage, &
      allocate_and_create_storage_in_scratch, &
      create_storage_in_scratch, &
      allocate_and_copy_storage, &
      copy_storage, &
      allocate_and_copy_storage_to_scratch, &
      copy_storage_to_scratch, &
      allocate_and_point_to_storage, &
      point_to_storage, &
      copy_storage_from_c_ptr, &
      copy_storage_to_scratch_from_c_ptr
  implicit none
  public
end module storage_construction_api
C Copyright (C) 2002, Carnegie Mellon University and others. C All Rights Reserved. C This code is published under the Common Public License. C******************************************************************************* C subroutine LINESEARCH(ITER, N, NIND, M, X, IVAR, NLB, ILB, NUB, 1 IUB, LSLACKS, BNDS_L, BNDS_U, DX, DV_L, DV_U, S_L, S_U, 2 V_L, V_U, SIGMA_L, SIGMA_U, NORIG, XORIG, CSCALE, MU, ERR, 3 YPY, THETA1, THETA2, LAM, LAMOLD, REGU, 2 PZ, ZPZ, G, WCORR, F, C, CNRM0, NEWBAS, 3 ALPHA, ALPHA_DUAL, LS_COUNT, C_WATCH, NU_OUT, 4 SKIP_UPDATE, SOC_FLAG, KCONSTR, 5 LRS, LRS_END, RS, LIS, LIS_END, IS, 6 LRW, RW, LIW, IW, IERR) C C******************************************************************************* C C $Id: linesearch.f,v 1.3 2002/11/24 21:49:57 andreasw Exp $ C C------------------------------------------------------------------------------- C Title C------------------------------------------------------------------------------- C CT Do the linesearch /w watchdog (primal-dual l_2 penalty function) C C------------------------------------------------------------------------------- C Programm description C------------------------------------------------------------------------------- C CB C C------------------------------------------------------------------------------- C Author, date C------------------------------------------------------------------------------- C CA Andreas Waechter 05/01/02 Release as version IPOPT 2.0 C C------------------------------------------------------------------------------- C Documentation C------------------------------------------------------------------------------- C CD C C------------------------------------------------------------------------------- C Parameter list C------------------------------------------------------------------------------- C C Name I/O Type Meaning CP ITER I INT iteration counter CP (if -1: initialize pointers for storage space) CP N I INT number of variables (without fixed) CP NIND I INT number of independent variables CP M I INT number of constraints CP X I/O DP actual iterate (reordered without fixed vars: CP first M entries belong to dependent CP variables, remaining to independent variables) CP I: old point CP O: point after line search CP IVAR I INT information about partitioning CP i = 1..M XORIG(IVAR(i)) dependent CP i = (M+1)..N XORIG(IVAR(i)) independent CP Note: fixed variables do not occur in IVAR CP X(i) corresponds to XORIG(IVAR(i)) CP NLB I INT number of lower bounds (excluding fixed vars) CP ILB I INT indices of lower bounds CP (e.g. S_L(i) is slack for X(ILB(i)) ) CP NUB I INT number of upper bounds (excluding fixed vars) CP IUB I INT indices of upper bounds CP (e.g. 
S_U(i) is slack for X(IUB(i)) ) CP LSLACKS I/O LOG I: =.true.: There are slacks that don't satisfy CP "slack equation" CP O: set to .false., if full step was taken CP BNDS_L I DP values of lower bounds (ordered as S_L) CP BNDS_U I DP values of upper bounds (ordered as S_U) CP DX I DP step for X (primal) CP DV_L I DP step for V_L (dual variables for lower bounds) CP DV_U I DP step for V_U (dual variables for upper bounds) CP S_L I/O DP slacks to lower bounds CP I: for start of line search CP O: after line search CP S_U I/O DP slacks to upper bounds CP I: for start of line search CP O: after line search CP V_L I/O DP dual variables for lower bounds CP I: for start of line search CP O: after line search CP V_U I/O DP dual variables for upper bounds CP I: for start of line search CP O: after line search CP SIGMA_L I DP primal-dual Hessian of lower bound barrier term CP (NLB diagonal elements only) CP SIGMA_U I DP primal-dual Hessian of upper bound barrier term CP (NUB diagonal elements only) CP NORIG I INT number of all variables including fixed vars CP XORIG I/O DP actual iterate CP XORIG is ordered in ORIGINAL order (i.e. not CP partitioned into independent and dependent CP variables) (on output: as X) CP CSCALE I DP scaling factors for constraints CP MU I DP barrier parameter CP ERR I DP actual KKT-error (needed for switching on watchdog) CP YPY I DP range space step (all variables; ordered like X) CP THETA1 I DP dogleg parameter (=1 <-> full Newton step) CP THETA2 I DP dogleg parameter (= -c^T*A^T*d_CS ) CP LAM I/O DP multipliers for equality constraints CP LAMOLD I/O DP multipliers for equality constraints from last iter CP REGU I DP regularization factor (added regu*I to diagonal) CP (from get_step_full) CP PZ I DP null space step (only independent variables) CP ZPZ I DP null space step (only dependent variables) CP (only needed if NU based on LAM, i.e. QLAMBDA<>2) CP G I DP gradient of objective function CP WCORR I DP correction term for PZ CP F I/O DP value of objective function at X CP I: for start of line search CP O: after line search CP C I/O DP values of constraints at X CP I: for start of line search CP O: after line search CP CNRM0 O DP 2-norm of constraints at old point CP NEWBAS I L only .true. if variables have been repartitioned CP ALPHA I/O DP step size: I: where to start line search CP O: step size from X to X_NEW CP ALPHA_DUAL I/O DP step size for dual variables CP LS_COUNT O INT number of trial steps CP C_WATCH O C*1 information about watchdog CP ' ' : WFLAG = 0 CP 'a' : WFLAG = 1 CP 'b' : WFLAG = 2 CP 'c' : WFLAG = 3 CP NU_OUT O DP actual value of penalty parameter CP (only output; value is stored internally!) CP SKIP_UPDATE I/O LOG if set to .true. if next Quasi-Newton update has CP to be skipped CP SOC_FLAG I/O INT flag for second order correction: CP =0: entered first time in this iteration CP =1: OUT: compute SOC direction CP IN : directions have SOC direction part in it CP KCONSTR I INT KCONSTR(1): LRS for CONSTR CP KCONSTR(2): P_LRS for CONSTR CP KCONSTR(3): LIS for CONSTR CP KCONSTR(4): P_LIS for CONSTR CP KCONSTR(5): LRW for CONSTR CP KCONSTR(6): LIW for CONSTR CP LRS I INT total length of RS CP LRS_END I/O INT last used reserved entry in RS CP RS I/O DP DP storage space (all!) CP LIS I INT total length of IS CP LRS_END I/O INT last used reserved entry in IS CP IS I/O INT INT storage space (all!) 
CP LRW I INT length of RW CP RW I/O DP can be used as DP work space but content will be CP changed between calls CP LIW I INT length of IW CP IW I/O INT can be used as INT work space but content will be CP changed between calls CP IERR O INT =0: everything OK CP >0: Error occured; abort optimization CP <0: Warning; message to user C C------------------------------------------------------------------------------- C local variables C------------------------------------------------------------------------------- C CL C C------------------------------------------------------------------------------- C used subroutines C------------------------------------------------------------------------------- C CS DDOT CS DAXPY CS DCOPY CS DSCAL CS CALC_BAR CS CALC_NRM CS UPDATE_NU CS ARMIJO CS C_OUT C C******************************************************************************* C C Declarations C C******************************************************************************* C IMPLICIT NONE C C******************************************************************************* C C Include files C C******************************************************************************* C include 'IPOPT.INC' C C------------------------------------------------------------------------------- C Parameter list C------------------------------------------------------------------------------- C integer ITER integer N integer NIND integer M double precision X(N) integer IVAR(N) integer NLB integer ILB(NLB) integer NUB integer IUB(NUB) logical LSLACKS double precision BNDS_L(NLB) double precision BNDS_U(NUB) double precision DX(N) double precision DV_L(NLB) double precision DV_U(NUB) double precision S_L(NLB) double precision S_U(NUB) double precision V_L(NLB) double precision V_U(NUB) double precision SIGMA_L(NLB) double precision SIGMA_U(NUB) integer NORIG double precision XORIG(NORIG) double precision CSCALE(*) double precision MU double precision ERR double precision YPY(N) double precision THETA1 double precision THETA2 double precision LAM(M) double precision LAMOLD(M) double precision REGU double precision PZ(NIND) double precision ZPZ(M) double precision G(N) double precision WCORR(NIND) double precision F double precision C(M) double precision CNRM0 logical NEWBAS double precision ALPHA double precision ALPHA_DUAL integer LS_COUNT character*1 C_WATCH double precision NU_OUT logical SKIP_UPDATE integer SOC_FLAG integer KCONSTR(6) integer LRS integer LRS_END double precision RS(*) integer LIS integer LIS_END integer IS(LIS) integer LRW double precision RW(LRW) integer LIW integer IW(LIW) integer IERR C C------------------------------------------------------------------------------- C Local variables C------------------------------------------------------------------------------- C double precision DDOT, CALC_BAR, CALC_NRM, D1MACH integer IDAMAX double precision PHI0_STORE, DPHI_STORE, ALPHA_STORE, NU_STORE, NU save PHI0_STORE, DPHI_STORE, ALPHA_STORE, NU_STORE, NU double precision PHI0_STORE_B, CNRM0_STORE, ALPHA_DUAL_STORE save PHI0_STORE_B, CNRM0_STORE, ALPHA_DUAL_STORE double precision DPHI_STORE_B, DPHI_STORE_C, phi0, dphi save DPHI_STORE_B, DPHI_STORE_C, phi0, dphi integer WFLAG, SOC_ITER save WFLAG, SOC_ITER logical LSLACKS_STORE save LSLACKS_STORE integer P_XSTORE, P_VLSTORE, P_VUSTORE, P_SLSTORE, P_NUS save P_XSTORE, P_VLSTORE, P_VUSTORE, P_SLSTORE, P_NUS integer P_SUSTORE, P_DXSTORE, P_DVLSTORE, P_DVUSTORE save P_SUSTORE, P_DXSTORE, P_DVLSTORE, P_DVUSTORE integer P_NUSSTORE, P_CSTORE, P_LAMSTORE, P_PZSTORE 
save P_NUSSTORE, P_CSTORE, P_LAMSTORE, P_PZSTORE double precision gd, tmp1, tmp2 double precision rhs, phi_new, f_new, alpha_cut double precision phi0_b, cnrm_new, phi_new_b, dphi_b, dphi_c double precision phi0_old_newnu double precision dphi_old_newnu, alpha_min, machtiny integer p_xnew, p_slnew, p_sunew, p_vlnew, p_vunew, p_cnew integer i, k, p_rwend, p_iwend, p_gb, p_tmp, p_cs logical takefull character*100 line(4) c$$$CTRY2 c$$$ double precision BL_SOC(10000), BU_SOC(10000) c$$$ common /SOC/ BL_SOC, BU_SOC C C******************************************************************************* C C Executable Statements C C******************************************************************************* C C C if ITER = -1 do initializations C if( ITER.eq.-1 ) then if( QSOC.ne.0 ) then C C Reserve storage space for second order correction option C P_DXSTORE = LRS_END P_DVLSTORE = P_DXSTORE + N P_DVUSTORE = P_DVLSTORE + NLB P_LAMSTORE = P_DVUSTORE + NUB LRS_END = P_LAMSTORE + M if( QQUASI.ne.0 .and. abs(QQUASI).lt.6 ) then P_PZSTORE = LRS_END LRS_END = P_PZSTORE + NIND endif C C If Watchdog is chosen, reserve storage space C elseif( QWATCHTOL.ne.0.d0 ) then P_XSTORE = LRS_END P_VLSTORE = P_XSTORE + N P_VUSTORE = P_VLSTORE + NLB P_SLSTORE = P_VUSTORE + NUB P_SUSTORE = P_SLSTORE + NLB P_DXSTORE = P_SUSTORE + NUB P_DVLSTORE = P_DXSTORE + N P_DVUSTORE = P_DVLSTORE + NLB LRS_END = P_DVUSTORE + NUB if( QMERIT.lt.0 ) then P_CSTORE = LRS_END P_NUSSTORE = P_CSTORE + M LRS_END = P_NUSSTORE + M if( QMOVEINIT.eq.0 ) then LRS_END = P_NUSSTORE + M+N endif endif endif C C If indidual penalty parameters for each constraint get storage for those C if( QMERIT.lt.0 ) then P_NUS = LRS_END LRS_END = P_NUS + M if( QMOVEINIT.eq.0 ) then LRS_END = P_NUS + M+N endif else P_NUS = 0 endif goto 9999 endif p_rwend = 0 p_iwend = 0 C C The following is not necessary if SOC direction has just been computer C if( SOC_FLAG.eq.0 ) then SOC_ITER = 0 C C Do some initializations in first iteration C if( ITER.eq.0 ) then WFLAG = 0 if( QMERIT.lt.0 ) then call DCOPY(M, QNUMIN, 0, RS(P_NUS+1), 1) if( LSLACKS ) then call DCOPY(N, QNUMIN, 0, RS(P_NUS+M+1), 1) endif else NU = QNUMIN endif endif C C Compute current value of constraint violation C CNRM0 = CALC_NRM(M, C) C if( LSLACKS ) then C C compute additional slack constraints C if( QCNRM.ne.2 ) then IERR = 4 goto 9999 endif p_cs = p_rwend p_rwend = p_cs + N if( p_rwend.gt.LRW ) then IERR = 98 goto 9999 endif call DCOPY(N, 0d0, 0, RW(p_cs+1), 1) do i = 1, NLB k = ILB(i) RW(p_cs+k) = X(k) - S_L(i) - BNDS_L(i) enddo do i = 1, NUB k = IUB(i) RW(p_cs+k) = X(k) + S_U(i) - BNDS_U(i) enddo CNRM0 = dsqrt(CNRM0**2 + 1 DDOT(N, RW(p_cs+1), 1, RW(p_cs+1), 1)) endif C C Check, if watchdog need to be waken up or put to sleep again C if( NEWBAS ) then if( WFLAG.ne.0 ) then write(line,*) 1 'WARNING: Set WFLAG = 0 after Change of Basis!' call C_OUT(2,0,1,line) WFLAG = 0 endif endif if( M.eq.0 .or. QMERIT.eq.0 .or. QSOC.ne.0 ) then WFLAG = 0 elseif( QWATCHTOL.lt.0.d0 ) then if( WFLAG.eq.0 .and. CNRM0.lt.-QWATCHTOL ) then WFLAG = 1 elseif( WFLAG.eq.1 .and. CNRM0.ge.-QWATCHTOL ) then WFLAG = 0 endif else if( WFLAG.eq.0 .and. ERR.lt.QWATCHTOL ) then WFLAG = 1 elseif( WFLAG.eq.1 .and. 
ERR.ge.QWATCHTOL ) then WFLAG = 0 endif endif C C Compute gradient of barrier function C p_gb = p_rwend p_rwend = p_gb + N if( p_rwend.gt.LRW ) then IERR = 98 goto 9999 endif call DCOPY(N, G, 1, RW(p_gb+1), 1) do i = 1, NLB k = ILB(i) RW(p_gb+k) = RW(p_gb+k) - MU/S_L(i) enddo do i = 1, NUB k = IUB(i) RW(p_gb+k) = RW(p_gb+k) + MU/S_U(i) enddo C C Update penalty parameter C call UPDATE_NU(ITER, N, NIND, M, X, NLB, ILB, NUB, IUB, 1 LSLACKS, BNDS_L, BNDS_U, S_L, S_U, SIGMA_L, SIGMA_U, MU, 1 ERR, YPY, THETA1, THETA2, 3 LAM, REGU, ZPZ, PZ, DX, RW(p_gb+1), WCORR, F, C, 2 CNRM0, WFLAG, NU, RS(P_NUS+1), LRW-p_rwend, RW(p_rwend+1), 4 IERR) if( IERR.gt.0 ) then write(line,*) 'linesearch: UPDATE_NU returns IERR = ', IERR call C_OUT(2,0,1,line) goto 9999 elseif( IERR.lt.0 ) then write(line,*) 'linesearch: Warning: ', 1 'UPDATE_NU returns IERR = ', IERR call C_OUT(2,0,1,line) IERR = 0 endif C C Compute directional derivative C C objective function gd = DDOT(N, DX, 1, RW(p_gb+1), 1) p_rwend = p_gb if( LSLACKS ) then do i = 1, NLB k = ILB(i) gd = gd - MU/S_L(i)*(X(k) - S_L(i) - BNDS_L(i)) enddo do i = 1, NUB k = IUB(i) gd = gd + MU/S_U(i)*(X(k) + S_U(i) - BNDS_U(i)) enddo endif C constraints 554 continue if( CNRM0.gt.0 ) then if( QMERIT.gt.0 ) then dphi_c = THETA1*CNRM0 + (1.d0-THETA1)*THETA2/CNRM0 elseif( QMERIT.ne.0 ) then if( QLAMBDA.eq.0 ) then call C_OUT(2,0,1, 1 'linesearch: Need multipliers for this option') IERR = 4 goto 9999 endif dphi_c = 0d0 do i = 1, M dphi_c = dphi_c - dabs(RS(P_NUS+i)*C(i)) enddo if( LSLACKS ) then do i = 1, N dphi_c = dphi_c - dabs(RS(P_NUS+M+i)*RW(p_cs+i)) enddo endif endif else dphi_c = 0.d0 endif C dphi_b = gd if( abs(QMERIT).eq.1) then if( QPRIMAL.eq.1 ) then call C_OUT(2,0,1, 1 'Can''t do primal version and p-d merit function.') IERR = 4 goto 9999 endif do i = 1, NLB tmp1 = S_L(i)*V_L(i) tmp2 = MU - tmp1 dphi_b = dphi_b - (tmp2**2)/tmp1 enddo do i = 1, NUB tmp1 = S_U(i)*V_U(i) tmp2 = MU - tmp1 dphi_b = dphi_b - (tmp2**2)/tmp1 enddo endif C if( QMERIT.gt.0 ) then 555 continue dphi = dphi_b - NU*dphi_c if( QFULL.eq.1 ) then if( dphi.gt.-QRHO*NU*dphi_c ) then if( dphi_c.eq.0.d0 ) then dphi = 0.d0 else if( NU.gt.1d300 ) then IERR = 736 goto 9999 endif NU = 10*NU goto 555 endif endif endif elseif( QMERIT.lt.0 ) then dphi = dphi_b + dphi_c if( QFULL.eq.1 ) then if( dphi.gt.QRHO*dphi_c ) then if( dphi_c.eq.0.d0 ) then dphi = 0.d0 else call DSCAL(M, 1.d1, RS(P_NUS+1), 1) if( LSLACKS ) then call DSCAL(N, 1.d1, RS(P_NUS+M+1), 1) endif if( .not.LSLACKS ) then i = IDAMAX(M, RS(P_NUS+1), 1) else i = IDAMAX(M+N, RS(P_NUS+1), 1) endif if( RS(P_NUS+i).gt.1d300 ) then IERR = 736 goto 9999 endif goto 554 endif endif endif endif if( LSLACKS ) then p_rwend = p_cs endif C if( dphi.gt.0.d0 .and. QMERIT.ne.0 ) then write(line,*) 'Problem in linesearch: dphi = ',dphi call C_OUT(2,0,1,line) IERR = 589 goto 9999 endif C C Compute merit function at current point C phi0_b = F + CALC_BAR(NLB, NUB, S_L, S_U, V_L, V_U, MU) C if( QMERIT.gt.0 ) then phi0 = phi0_b + NU*CNRM0 else phi0 = phi0_b - dphi_c endif endif C if( QCNR.gt.0 .and. QPRINT.ge.3 ) then write(line,711) NU, dphi, phi0 711 format('NU = ', d20.12,' dphi = ', d20.12,' phi0 = ', d20.12) call C_OUT(1,3,1,line) write(line,712) phi0_b, cnrm0 712 format('phi0_bv = ', d20.12,' cnrm0 = ', d20.12) call C_OUT(1,3,1,line) endif C C reserve work space for trial points C p_xnew = p_rwend p_slnew = p_xnew + N p_sunew = p_slnew + NLB p_vlnew = p_sunew + NUB ! don't need that for QMERIT <> 1 p_vunew = p_vlnew + NLB ! 
don't need that for QMERIT <> 1 p_cnew = p_vunew + NUB p_rwend = p_cnew + M if( p_rwend.gt.LRW ) then IERR = 98 goto 9999 endif C C if in watchdog, might want to take full step C if( WFLAG.eq.1 .or. QMERIT.eq.0 .or. QSOC.ne.0 ) then takefull = .true. else takefull = .false. endif C C If in watchdog trial, have earier ls failure C if( WFLAG.eq.2 ) then alpha_min = 1.d-5 else alpha_min = 1.d-14 endif C Do the armijo line search and obtain new values for everything C 50 alpha_cut = ALPHA call ARMIJO(ITER, N, M, NIND, X, IVAR, NLB, ILB, NUB, IUB, 1 LSLACKS, BNDS_L, BNDS_U, DX, DV_L, DV_U, 1 S_L, S_U, V_L, V_U, NORIG, XORIG, CSCALE, takefull, 2 alpha_min, ALPHA, phi0, dphi, NU, RS(P_NUS+1), MU, 4 RW(p_xnew+1), RW(p_slnew+1), RW(p_sunew+1), 4 RW(p_vlnew+1), RW(p_vunew+1), 4 f_new, RW(p_cnew+1), 4 phi_new, phi_new_b, cnrm_new, LS_COUNT, 1 KCONSTR, LRS, RS, LIS, IS, 2 LRW-p_rwend, RW(p_rwend+1), 4 LIW-p_iwend, IW(p_iwend+1), IERR) if( IERR.gt.0 ) then write(line,*) 'linesearch: ARMIJO returns IERR = ', IERR call C_OUT(2,0,1,line) if( IERR.eq.2 ) then call C_OUT(2,0,1,'line search failure.') endif goto 9999 elseif( IERR.lt.0 ) then write(line,*) 'linesearch: Warning: ', 1 'ARMIJO returns IERR = ', IERR call C_OUT(2,0,1,line) IERR = 0 endif C C For Second Order Correction: Check, if SOC needed C if( QSOC.ne.0 .and. SOC_FLAG.eq.0 .and. .not.takefull ) then C C Try SOC C write(line,*) 'Try second order correction with CNRM0 = ',CNRM0 call C_OUT(1,2,1,line) write(line,*) ' and cnrm_new = ', cnrm_new call C_OUT(1,2,1,line) write(line,*) ' and ALPHA = ', ALPHA call C_OUT(1,2,1,line) C C Store current step etc C call DCOPY(N, DX, 1, RS(P_DXSTORE +1), 1) call DCOPY(NLB, DV_L, 1, RS(P_DVLSTORE+1), 1) call DCOPY(NUB, DV_U, 1, RS(P_DVUSTORE+1), 1) if( QLAMBDA.eq.2 ) then call DCOPY(M, LAM, 1, RS(P_LAMSTORE+1), 1) call DCOPY(M, LAMOLD, 1, LAM, 1) endif if( QQUASI.ne.0 .and. abs(QQUASI).lt.6 ) then call DCOPY(NIND, PZ, 1, RS(P_PZSTORE+1), 1) endif ALPHA_STORE = alpha_cut ALPHA_DUAL_STORE = ALPHA_DUAL CNRM0_STORE = cnrm_new C C Compute new right hand side for constraints C call DSCAL(M, ALPHA, C, 1) call DAXPY(M, 1.d0, RW(p_cnew+1), 1, C, 1) c$$$CTRY c$$$ do i = 1, NLB c$$$ BL_SOC(i) = MU/S_L(i) c$$$ enddo c$$$ do i = 1, NUB c$$$ BU_SOC(i) = - MU/S_U(i) c$$$ enddo c$$$ call DSCAL(NLB, ALPHA, BL_SOC, 1) c$$$ do i = 1, NLB c$$$ BL_SOC(i) = BL_SOC(i) + MU/RW(p_slnew+i) c$$$ enddo c$$$ call DSCAL(NUB, ALPHA, BU_SOC, 1) c$$$ do i = 1, NUB c$$$ BU_SOC(i) = BU_SOC(i) - MU/RW(p_sunew+i) c$$$ enddo c$$$CTRY END C SOC_FLAG = 1 SOC_ITER = 1 goto 9999 elseif( QSOC.ne.0 .and. SOC_FLAG.eq.1 .and. .not.takefull ) then CTODO Put some constant in here or so... if( cnrm_new.ge.0.99d0*CNRM0_STORE .or. SOC_ITER.gt.10 ) then C C SOC didn't help C if( cnrm_new.ge.0.99d0*CNRM0_STORE ) then write(line,*) 1 'Second order correction rejected with cnrm_new = ', 2 cnrm_new call C_OUT(1,2,1,line) elseif( SOC_ITER.gt.10 ) then write(line,*) 'Tried ',SOC_ITER, 1 ' second order corrections. Quit SOC.' call C_OUT(1,2,1,line) endif C C Restore old search direction C call DCOPY(N, RS(P_DXSTORE +1), 1, DX, 1) call DCOPY(NLB, RS(P_DVLSTORE+1), 1, DV_L, 1) call DCOPY(NUB, RS(P_DVUSTORE+1), 1, DV_U, 1) if( QLAMBDA.eq.2 ) then call DCOPY(M, RS(P_LAMSTORE+1), 1, LAM, 1) endif if( QQUASI.ne.0 .and. 
abs(QQUASI).lt.6 ) then call DCOPY(NIND, RS(P_PZSTORE+1), 1, PZ, 1) endif ALPHA = ALPHA_STORE ALPHA_DUAL = ALPHA_DUAL_STORE C C Do Armijo line search with these restored directions C C SOC_FLAG = 2 goto 50 else C C Try additional second order corrections C write(line,*) 1 'Try additional second order correction with cnrm_new = ', 2 cnrm_new call C_OUT(1,2,1,line) write(line,*) ' and ALPHA = ', ALPHA call C_OUT(1,2,1,line) CNRM0_STORE = cnrm_new if( QLAMBDA.eq.2 ) then call DCOPY(M, LAMOLD, 1, LAM, 1) endif C C Compute new right hand side for constraints C call DSCAL(M, ALPHA, C, 1) call DAXPY(M, 1.d0, RW(p_cnew+1), 1, C, 1) c$$$CTRY c$$$ call DSCAL(NLB, ALPHA, BL_SOC, 1) c$$$ do i = 1, NLB c$$$ BL_SOC(i) = BL_SOC(i) + MU/RW(p_slnew+i) c$$$ enddo c$$$ call DSCAL(NUB, ALPHA, BU_SOC, 1) c$$$ do i = 1, NUB c$$$ BU_SOC(i) = BU_SOC(i) - MU/RW(p_sunew+i) c$$$ enddo c$$$CTRY END C SOC_ITER = SOC_ITER + 1 goto 9999 endif endif C C Do the watchdog handling C goto (100, 200, 300) WFLAG C_WATCH = ' ' if( SOC_FLAG.eq.1 ) then C_WATCH = 'S' SOC_FLAG = 2 endif goto 400 100 continue CCHECK rhs = phi0 + 1.d-1*ALPHA*dphi rhs = phi0 + 1.d-4*ALPHA*dphi if( phi_new.gt.rhs ) then C C Full step doesn't satisfy armijo condition, store old values C WFLAG = 2 C_WATCH = 'b' call DCOPY(N, X, 1, RS(P_XSTORE +1), 1) call DCOPY(NLB, V_L, 1, RS(P_VLSTORE +1), 1) call DCOPY(NUB, V_U, 1, RS(P_VUSTORE +1), 1) call DCOPY(NLB, S_L, 1, RS(P_SLSTORE +1), 1) call DCOPY(NUB, S_U, 1, RS(P_SUSTORE +1), 1) call DCOPY(N, DX, 1, RS(P_DXSTORE +1), 1) call DCOPY(NLB, DV_L, 1, RS(P_DVLSTORE+1), 1) call DCOPY(NUB, DV_U, 1, RS(P_DVUSTORE+1), 1) PHI0_STORE = phi0 DPHI_STORE = dphi NU_STORE = NU ALPHA_STORE = alpha_cut ALPHA_DUAL_STORE = ALPHA_DUAL PHI0_STORE_B = phi0_b CNRM0_STORE = cnrm0 DPHI_STORE_B = dphi_b LSLACKS_STORE = LSLACKS if( QMERIT.gt.0 ) then DPHI_STORE_C = dphi_c else call DCOPY(M, C , 1, RS(P_CSTORE +1), 1) call DCOPY(M, RS(P_NUS+1), 1, RS(P_NUSSTORE+1), 1) if( LSLACKS ) then call DCOPY(N, RS(P_NUS+M+1), 1, RS(P_NUSSTORE+M+1), 1) endif endif else C_WATCH = 'a' endif goto 400 200 continue C This changes new values according to old NU (probably wrong!) C rhs = PHI0_STORE + 1.d-1*DPHI_STORE C phi0_oldnu = phi0_b + NU_STORE*cnrm0 C phi_new_oldnu = phi_new_b + NU_STORE*cnrm_new C if( phi0_oldnu.le.PHI0_STORE .or. phi_new_oldnu.le.rhs ) then C WFLAG = 1 C C_WATCH = 'a' C else C if( phi_new_oldnu.gt.PHI0_STORE ) then C This changes old values according to NEW NU phi0_old_newnu = PHI0_STORE_B + NU*CNRM0_STORE if( QMERIT.gt.0 ) then dphi_old_newnu = DPHI_STORE_B - NU*DPHI_STORE_C else dphi_old_newnu = DPHI_STORE_B do i = 1, M dphi_old_newnu = dphi_old_newnu - 1 RS(P_NUS+i)*dabs(RS(P_CSTORE+i)) enddo if( LSLACKS ) then p_tmp = p_rwend p_rwend = p_tmp + N if( p_rwend.gt.LRW ) then IERR = 98 goto 9999 endif call DCOPY(N, 0d0, 0, RW(p_tmp+1), 1) do i = 1, NLB k = ILB(i) RW(p_tmp+k) = RS(P_XSTORE+k) - RS(P_SLSTORE+i) - 1 BNDS_L(i) enddo do i = 1, NUB k = IUB(i) RW(p_tmp+k) = RS(P_XSTORE+k) + RS(P_SUSTORE+i) - 1 BNDS_U(i) enddo do i = 1, N dphi_old_newnu = dphi_old_newnu - 1 RS(P_NUS+M+i)*dabs(RW(p_tmp+i)) enddo p_rwend = p_tmp endif endif rhs = phi0_old_newnu + 1.d-1*dphi_old_newnu if( phi0.le.phi0_old_newnu .or. phi_new.le.rhs ) then WFLAG = 1 C_WATCH = 'a' else if( phi_new.gt.phi0_old_newnu ) then C C Do armijo ls starting from old point C takefull = .false. 
ALPHA = ALPHA_STORE ALPHA_DUAL = ALPHA_DUAL_STORE alpha_min = 1.d-14 LSLACKS = LSLACKS_STORE call ARMIJO(ITER, N, M, NIND, RS(P_XSTORE+1), 1 IVAR, NLB, ILB, NUB, IUB, 1 LSLACKS, BNDS_L, BNDS_U, 2 RS(P_DXSTORE+1), RS(P_DVLSTORE+1), 3 RS(P_DVUSTORE+1), RS(P_SLSTORE+1), 4 RS(P_SUSTORE+1), RS(P_VLSTORE+1), 5 RS(P_VUSTORE+1), NORIG, XORIG, CSCALE, 5 takefull, alpha_min, 6 ALPHA, PHI0_STORE, DPHI_STORE, NU_STORE, 7 RS(P_NUSSTORE+1), MU, 4 RW(p_xnew+1), RW(p_slnew+1), RW(p_sunew+1), 4 RW(p_vlnew+1), RW(p_vunew+1), 4 f_new, RW(p_cnew+1), 4 phi_new, phi_new_b, cnrm_new, LS_COUNT, 1 KCONSTR, LRS, RS, LIS, IS, 2 LRW-p_rwend, RW(p_rwend+1), 4 LIW-p_iwend, IW(p_iwend+1), IERR) if( IERR.gt.0 ) then write(line,*) 'linesearch: ARMIJO returns IERR = ', 1 IERR call C_OUT(2,0,1,line) if( IERR.eq.2 ) then call C_OUT(2,0,1,'ls failure in WFLAG = 2.') IERR = 3 endif goto 9999 elseif( IERR.lt.0 ) then write(2,*) 'linesearch: Warning: ', 1 'ARMIJO returns IERR = ', IERR call C_OUT(2,0,1,line) IERR = 0 endif WFLAG = 1 C_WATCH = 'r' C Take old value of NU! NU = NU_STORE call DCOPY(M, RS(P_NUSSTORE+1), 1, RS(P_NUS+1), 1) if( LSLACKS ) then call DCOPY(N, RS(P_NUSSTORE+M+1), 1, RS(P_NUS+M+1), 1) endif CTODO skipping BFGS in watchdog? C C ANDREAS: ist das der einzige Fall in dem BFGS nicht stattfinden soll??? C CCCRAUS? SKIP_UPDATE = .true. else WFLAG = 3 C_WATCH = 'c' endif endif goto 400 300 continue WFLAG = 1 C_WATCH = 'a' 400 continue C C Copy trial point to real point C F = f_new call DCOPY(N, RW(p_xnew +1), 1, X , 1) call DCOPY(M, RW(p_cnew +1), 1, C , 1) call DCOPY(NLB, RW(p_slnew+1), 1, S_L, 1) call DCOPY(NUB, RW(p_sunew+1), 1, S_U, 1) if( abs(QMERIT).eq.1 ) then call DCOPY(NLB, RW(p_vlnew+1), 1, V_L, 1) call DCOPY(NUB, RW(p_vunew+1), 1, V_U, 1) else C C Do step in dual variables C if( QALPHA.eq.0 ) then ALPHA_DUAL = ALPHA elseif( QALPHA.eq.1 ) then ALPHA_DUAL = min( ALPHA_DUAL, ALPHA ) endif call DAXPY(NLB, ALPHA_DUAL, DV_L, 1, V_L, 1) call DAXPY(NUB, ALPHA_DUAL, DV_U, 1, V_U, 1) C C Make sure that each V_NEW is at least machtiny C machtiny = D1MACH(1) do i = 1, NLB V_L(i) = dmax1(machtiny, V_L(i)) enddo do i = 1, NUB V_U(i) = dmax1(machtiny, V_U(i)) enddo endif C C Free work space C p_rwend = p_xnew C C Take step in LAM C CTODO this also for QLAMBDA = 1 ??? CWEG if( QLAMBDA.eq.2 ) then CTODO decide which option CORIG if( ITER.gt.0 ) then if( .true. ) then call DSCAL(M, ALPHA, LAM, 1) call DAXPY(M, 1d0-ALPHA, LAMOLD, 1, LAM, 1) endif call DCOPY(M, LAM, 1, LAMOLD, 1) endif C C Copy value of NU into NU_OUT for the output C NU_OUT = NU C C Set LSLACKS C if( ALPHA.eq.1.d0 ) then LSLACKS = .false. endif C C That's it C 9999 continue return end
Formal statement is:
lemma measure_Un_le:
  assumes "A \<in> sets M" "B \<in> sets M"
  shows "measure M (A \<union> B) \<le> measure M A + measure M B"
Informal statement is: If $A$ and $B$ are measurable sets, then the measure of their union is less than or equal to the sum of their measures.
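A standard textbook argument for this inequality, given here as a sketch for finite measures rather than as the Isabelle proof text: write the union as a disjoint union $A \cup B = A \cup (B \setminus A)$, so by additivity and monotonicity of the measure

$$\mu(A \cup B) = \mu(A) + \mu(B \setminus A) \le \mu(A) + \mu(B).$$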
theory RestrictOpProps
imports MainHC
begin

theorem restrict1 :
  "restrictOp (restrictOp a (defOp b)) c = restrictOp (restrictOp a c) (defOp (restrictOp b c))"
  apply (case_tac c)
  apply (simp add: restrictOp_def)
  apply (simp add: restrictOp_def)
  done

theorem restrict_trivial_rule[simp]: "b ==> restrictOp u b = u"
  apply (simp add: restrictOp_def)
  done

theorem restrict_trivial [simp]: "restrictOp u True = u"
  apply (simp)
  done

theorem restrict_assoc[simp] :
  "restrictOp a (defOp (restrictOp b c)) = restrictOp (restrictOp a (defOp b)) c"
  apply (case_tac c)
  apply (simp add: restrictOp_def)
  apply (simp add: restrictOp_def noneOp_def defOp.simps)
  sorry

theorem restrict_out[simp] : "restrictOp (u b) b = restrictOp (u True) b"
  apply (case_tac "b")
  apply (simp add: restrictOp_def)
  apply (simp add: restrictOp_def)
  done

theorem mkpartial_cancel [simp]: "makeTotal(makePartial x) = x"
  apply (simp add: makeTotal_def makePartial_def)
  done

theorem mkpartial_cancel2 [simp]: "defOp(x) ==> makePartial(makeTotal x) = x"
  apply (simp add: makeTotal_def makePartial_def)
  apply (case_tac x)
  apply (simp)
  apply (simp)
  done

theorem mkpartial_cancel3 [simp] : "((makePartial x) = (makePartial y)) = (x = y)"
  apply (simp add: makePartial_def)
  done

theorem defOp_trivial [simp]: "defOp(makePartial x) = True"
  apply (simp add: makePartial_def makeTotal_def)
  done

(* Some more stuff about removing extraneous restrictions *)

theorem total_restrict2 [simp]:
  "(c ==> b) ==> restrictOp (u (makeTotal (restrictOp a b))) c = restrictOp (u (makeTotal a)) c"
  apply (simp add: makeTotal_def restrictOp_def defOp.simps undefinedOp_def)
  done

theorem def_restrict [simp]: "defOp (restrictOp a b) = (defOp a & b)"
  apply (simp add: restrictOp_def defOp.simps undefinedOp_def split: split_if)
  done

theorem total_restrict [simp]:
  "restrictOp (u (makeTotal (restrictOp a b))) (defOp (restrictOp a b)) = restrictOp (u (makeTotal a)) (defOp a & b)"
  apply simp
  done

lemma restrictOp_cong [cong]:
  "b = b' ==> (b' ==> a = a') ==> restrictOp a b = restrictOp a' b'"
  apply (simp add: restrictOp_def defOp.simps undefinedOp_def)
  done

lemma test: "((restrictOp a c) = (restrictOp b c)) = (c --> (a = b))"
  apply (simp add: restrictOp_def)
  done

lemma res_overlap[simp]: "restrictOp (restrictOp a b) c = restrictOp a (b & c)"
  apply (simp add: restrictOp_def)
  done

(*theorem normaleq_1[simp]: "[|def a & b = def c & d; b & d --> a = c|]==> (restrictOp a b = restrictOp c d)"*)

lemma normalizeeq_1: "b=c ==> ((restrictOp a b) = (restrictOp a c))"
  apply (simp)
  done

(* ============================================================================= *)

lemma makePartialproj: "makeTotal(makePartial x)=x"
  apply (subst makeTotal_def)
  apply (subst makePartial_def)
  apply (auto)
  done

end
# HW 2 ## Imports ```python import nbtools nbtools.setup_nb() ``` ```python import itertools from astropy import units import sympy import pandas from scipy import integrate import numpy import plotly.express as px from plotly import graph_objects as go from sympy.diffgeom import Manifold, Patch from pystein import coords, metric, curvature, geodesic from pystein.utilities import tensor_pow as tpow, full_simplify, boundary_filter ``` ```python show_plots = True ``` ## Utilities ```python import plotly.colors def get_continuous_color(colorscale, intermed): """ Plotly continuous colorscales assign colors to the range [0, 1]. This function computes the intermediate color for any value in that range. Plotly doesn't make the colorscales directly accessible in a common format. Some are ready to use: colorscale = plotly.colors.PLOTLY_SCALES["Greens"] Others are just swatches that need to be constructed into a colorscale: viridis_colors, scale = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.Viridis) colorscale = plotly.colors.make_colorscale(viridis_colors, scale=scale) :param colorscale: A plotly continuous colorscale defined with RGB string colors. :param intermed: value in the range [0, 1] :return: color in rgb string format :rtype: str """ if len(colorscale) < 1: raise ValueError("colorscale must have at least one color") if intermed <= 0 or len(colorscale) == 1: return colorscale[0][1] if intermed >= 1: return colorscale[-1][1] for cutoff, color in colorscale: if intermed > cutoff: low_cutoff, low_color = cutoff, color else: high_cutoff, high_color = cutoff, color break # noinspection PyUnboundLocalVariable return plotly.colors.find_intermediate_color( lowcolor=low_color, highcolor=high_color, intermed=((intermed - low_cutoff) / (high_cutoff - low_cutoff)), colortype="rgb") ``` ## Exercises ### B2 - Rindler Coordinates #### Setup Metric ```python M = Manifold('M', dim=2) P = Patch('origin', M) ``` ```python rho, eta = sympy.symbols('rho eta', nonnegative=False) cs = coords.CoordSystem('cartesian', P, [eta, rho]) deta, drho = cs.base_oneforms() ds2 = - rho ** 2 * tpow(deta, 2) + tpow(drho, 2) g_rind = metric.Metric(twoform=ds2) g_rind ``` #### Compute curvature components ```python crs, rmns, rcs = curvature.compute_components(g_rind) ``` ```python curvature.display_components(crs) ``` ```python curvature.display_components(rmns) ``` ```python curvature.display_components(rcs) ``` ### B3 ```python rhos = numpy.arange(1/numpy.e**2, 10, 0.01) consts = numpy.arange(-2, 12, 1.0) c_norm = lambda c: (c + 2) / 14 pos_etas = [+numpy.log(rhos) + c for c in consts] neg_etas = [-numpy.log(rhos) + c for c in consts] ``` ```python viridis_colors, _ = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.Plasma) colorscale = plotly.colors.make_colorscale(viridis_colors) ``` ```python fig = go.Figure() for c, n, p in zip(consts, neg_etas, pos_etas): color = get_continuous_color(colorscale, intermed=c_norm(c)) fig.add_trace(go.Scatter(x=rhos, y=p, line=dict(color=color, dash='solid'))) fig.add_trace(go.Scatter(x=rhos, y=n, line=dict(color='rgba'+color[3:-1]+', 0.6)', dash='dot'))) fig.update_layout(yaxis_range=[0,10], xaxis_range=[0,10], width=700, height=700, showlegend=False, title_text=r'$\text{Null Geodesics in 2D Minkowski }(\eta, \rho)$', title_x=0.5, xaxis_title=r'$\rho$', yaxis_title=r'$\eta$') fig.show() ``` ```python # fig.write_image('/Users/jim/repos/tex/homework/courses/PHYS510 - GR I/figures/fig-b3.pdf') ``` ### B4 ```python xs = numpy.arange(0.0, 1.0, 0.01) 
ts_p = numpy.arange(0.0, 1.0, 0.01) ts_m = numpy.arange(-1.0, 0.0, 0.01) # Const Eta etas = numpy.arange(-3, 3, .25) eta_norm = lambda e: abs(e) / 3 ts_eta_const = [xs * numpy.tanh(e) for e in etas] # Const rho rhos = numpy.arange(0.0, 1.0, 0.1) rho_norm = lambda r: 1 - r xs_rho_const_p = [numpy.sqrt(ts_p ** 2 + r ** 2) for r in rhos] xs_rho_const_m = [numpy.sqrt(ts_m ** 2 + r ** 2) for r in rhos] ``` ```python _tmp_const_eta, _ = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.Blues) _tmp_const_rho, _ = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.OrRd) colorscale_eta = plotly.colors.make_colorscale(_tmp_const_eta) colorscale_rho = plotly.colors.make_colorscale(_tmp_const_rho) ``` ```python fig = go.Figure() # Plot const rho for r, n, p in zip(rhos, xs_rho_const_m, xs_rho_const_p): color_r = get_continuous_color(colorscale_rho, intermed=0.5*rho_norm(r) + 0.5) # print(r, rho_norm(r), 0.5*rho_norm(r) + 0.5) # print(color_r) fig.add_trace(go.Scatter(x=n, y=ts_m, line=dict(color=color_r, dash='solid'))) fig.add_trace(go.Scatter(x=-n, y=ts_m, line=dict(color=color_r, dash='solid'))) fig.add_trace(go.Scatter(x=p, y=ts_p, line=dict(color=color_r, dash='solid'))) fig.add_trace(go.Scatter(x=-p, y=ts_p, line=dict(color=color_r, dash='solid'))) for e, t in zip(etas, ts_eta_const): color_e = get_continuous_color(colorscale_eta, intermed=0.5*eta_norm(e) + 0.5) # print(e, eta_norm(e), color) fig.add_trace(go.Scatter(x=xs, y=t, line=dict(color=color_e, dash='solid'))) fig.add_trace(go.Scatter(x=-xs, y=-t, line=dict(color=color_e, dash='solid'))) fig.update_layout(yaxis_range=[-1,1], xaxis_range=[-1,1], width=550, height=550, showlegend=False, title_text=r'$\text{Rindler Wedge in 2D Minkowski Metric }(t, x)$', title_x=0.5, xaxis_title=r'$x$', yaxis_title=r'$t$') fig.show() ``` ```python # fig.write_image('/Users/jim/repos/tex/homework/courses/PHYS510 - GR I/figures/fig-b4.pdf') ``` ### B5 Simplify Expression for $\lambda_2$ ```python d, g, z = sympy.symbols('d g z') ``` ```python x_arg_alt = sympy.Rational(1, 2) * (z + 1) + 1 / (z + 1) sympy.simplify(x_arg_alt) ``` ```python x_arg_z = sympy.simplify((z**2 * (z + 2)**2 + 2 * (z + 1)**2) / (2 * (z + 1)**2)) x_arg_z ``` ```python x_arg_ln_z = x_arg_z + sympy.sqrt(x_arg_z ** 2 - 1) x_arg_ln_z ``` ```python lam_2_z_alt = sympy.ln(x_arg_ln_z) lam_2_z_alt ``` ```python (sympy.series(lam_2_z_alt, x=z, n=3).subs([(z, d * g)]) / g).doit() ``` ### B8 Make visual of $\rho_{\mathrm{obs}}(\tau)$ ```python # gs = 1 + numpy.arange(-1 + 0.1, 50, 0.1) gs = 10.0 ** numpy.arange(-1.2, 1.2, 0.1) # gs = numpy.logspace(-2, 2, int(4/0.1)) rho_f = 1 P_f = 1 g_norm = lambda g: (numpy.log(g) + 1.2) / (1.2 - -1.2) - 0.1 taus_ = [] rhos_ = [] for g in gs: gtau_bound = numpy.log(1 + numpy.sqrt(2)) tau_bound = gtau_bound / g taus = numpy.arange(0, 1.0, 0.01) vs = numpy.sinh(g * taus) gammas_sq = 1 / (1 - vs ** 2) rho_obs = gammas_sq * (rho_f + vs ** 2 * P_f) rho_obs[numpy.where(taus > tau_bound)] = numpy.nan taus_.append(taus) rhos_.append(rho_obs) ``` ```python _tmp_const_g, _ = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.Agsunset) colorscale_g = plotly.colors.make_colorscale(_tmp_const_g) ``` ```python fig = go.Figure() for g, ts, rs in zip(gs, taus_, rhos_): color_g = get_continuous_color(colorscale_g, g_norm(g)) fig.add_trace(go.Scatter(x=ts, y=rs, line=dict(color=color_g, dash='solid'), )) fig.add_trace(go.Scatter(x=[-1], y=[-1], marker=dict( size=16, cmax=39, cmin=0, color=color_g, colorbar=dict( title="Field g" ), 
colorscale="Agsunset" ), mode='markers' )) fig.update_layout( yaxis_range=[0.95,2.05], xaxis_range=[-.05,1], width=700, height=550, showlegend=False, title_text=r'$\text{Measured energy density vs proper time}$', title_x=0.5, xaxis_title=r'$\tau$', yaxis_title=r'$\rho_{\mathrm{obs}}/\rho_{\mathrm{f}}$', ) fig.show() ``` ```python fig.write_image('/Users/jim/repos/tex/homework/courses/PHYS510 - GR I/figures/fig-b8.pdf') ``` ## C - Problem ### C1 ```python M = Manifold('M', dim=2) P = Patch('origin', M) t, h, g, c = sympy.symbols('t h g c', nonnegative=False) cs = coords.CoordSystem('cartesian', P, [t, h]) dt, dh = cs.base_oneforms() ds2 = - (1 + g * h / c ** 2) ** 2 * tpow(dt, 2) + tpow(dh, 2) g_c1 = metric.Metric(twoform=ds2) g_c1 ``` ```python crs, rmns, rcs = curvature.compute_components(g_c1) ``` ```python curvature.display_components(crs) ``` ```python sympy.Derivative(c**2/(c**2 + g * h), h).doit() ```
Formal statement is:
lemma (in first_countable_topology) countable_basis:
  obtains A :: "nat \<Rightarrow> 'a set" where
    "\<And>i. open (A i)" "\<And>i. x \<in> A i"
    "\<And>F. (\<forall>n. F n \<in> A n) \<Longrightarrow> F \<longlonglongrightarrow> x"
Informal statement is: In a first-countable topological space, every point $x$ has a countable neighbourhood basis: there is a sequence of open sets $A_i$ containing $x$ such that any sequence $F$ with $F(n) \in A_n$ for all $n$ converges to $x$.
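A sketch of the usual construction behind this lemma (the standard argument, not the Isabelle proof): first countability gives a countable neighbourhood basis $B_1, B_2, \dots$ at $x$; setting

$$A_i = B_1 \cap B_2 \cap \dots \cap B_i$$

makes each $A_i$ open and a neighbourhood of $x$, and any sequence $F$ with $F(n) \in A_n$ is, for every $i$, eventually contained in $B_i$, hence converges to $x$.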
######################################################################
# prime_simplex_boundary(A) is the set of maps x : A -> [0,1] with
# min = 0 and max = 1.

`is_element/prime_simplex_boundary` := (A::set) -> proc(x)
 local a,u,v,w;
 global reason;

 if not type(x,table) then
  reason := [convert(procname,string),"x is not a table",x];
  return false;
 fi;

 if map(op,{indices(x)}) <> A then
  reason := [convert(procname,string),"x is not indexed by A",x,A];
  return false;
 fi;

 v := 0;
 w := 1;
 for a in A do
  u := x[a];
  if not (`is_element/RR`(u) and u >= 0 and u <= 1) then
   reason := [convert(procname,string),"x[a] is not in the unit interval",a,u];
   return false;
  fi;
  v := max(u,v);
  w := min(u,w);
 od;

 if simplify(v - 1) <> 0 then
  reason := [convert(procname,string),"max(x[a] : a in A) <> 1",v];
  return false;
 fi;

 if simplify(w) <> 0 then
  reason := [convert(procname,string),"min(x[a] : a in A) <> 0",w];
  return false;
 fi;

 return true;
end;

######################################################################

`is_equal/prime_simplex_boundary` := (A::set) -> proc(x,y)
 local a;
 global reason;

 for a in A do
  if simplify(x[a] - y[a]) <> 0 then
   reason := [convert(procname,string),"x[a] <> y[a]",a,x[a],y[a]];
   return false;
  fi;
 od;

 return true;
end;

######################################################################

`is_leq/prime_simplex_boundary` := NULL;

######################################################################

`random_element/prime_simplex_boundary` := (A::set) -> proc(d::posint := 5)
 local x,a,r1,r2,u,v;

 if nops(A) < 2 then return FAIL; fi;

 r1 := rand(0..d);
 r2 := rand(1..d);
 x := table();
 u := 0;
 v := 0;
 while u = v do
  u := 0;
  v := infinity;
  for a in A do
   x[a] := r1()/r2();
   u := max(u,x[a]);
   v := min(v,x[a]);
  od;
 od;

 for a in A do
  x[a] := (x[a]-v)/(u-v);
 od;

 return eval(x);
end;

######################################################################

`list_elements/prime_simplex_boundary` := NULL;
`count_elements/prime_simplex_boundary` := NULL;

######################################################################

`phi/proper_nonempty_subsets/prime_simplex_boundary` := (A::set) -> proc(U)
 local x,a;

 x := table();
 for a in A do x[a] := 0; od;
 for a in U do x[a] := 1; od;

 return eval(x);
end;
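The Maple membership test above is easy to mirror in other languages; here is a hypothetical Python sketch of the same check (the function name and the use of a dict for the table are my own choices, not part of the Maple library):

```python
def is_element_prime_simplex_boundary(A, x):
    """Check that x maps the index set A into [0, 1] with minimum 0 and maximum 1."""
    if set(x.keys()) != set(A):
        return False                               # x must be indexed exactly by A
    vals = list(x.values())
    if any(not (0 <= v <= 1) for v in vals):
        return False                               # every value must lie in the unit interval
    return min(vals) == 0 and max(vals) == 1       # min and max must be attained

# Example: is_element_prime_simplex_boundary({'a', 'b', 'c'}, {'a': 0, 'b': 1, 'c': 0.5}) -> True
```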
module Ch03.Exercise_3_5_17 import Ch03.Arith %default total -------------------------------------------------------------------------------- -- Exercise 3.5.17 -------------------------------------------------------------------------------- ||| Big step evaluation rules expressed in terms of reflexive-transitive closure ||| of small-step evaluation rules data BInE : Term -> Term -> Type where BInEValue : {pf : IsValue v} -> BInE v v BInEIfTrue : {pf : IsValue v2} -> {t1, t2, t3 : Term} -> EvalsToStar t1 True -> EvalsToStar t2 v2 -> BInE (IfThenElse t1 t2 t3) v2 BInEIfFalse : {pf : IsValue v3} -> {t1, t2, t3 : Term} -> EvalsToStar t1 False -> EvalsToStar t3 v3 -> BInE (IfThenElse t1 t2 t3) v3 BInESucc : {pf : IsNumValue nv1} -> {t1 : Term} -> EvalsToStar t1 nv1 -> BInE (Succ t1) (Succ nv1) BInEPredZero : {t1 : Term} -> EvalsToStar t1 Zero -> BInE (Pred t1) Zero BInEPredSucc : {t1 : Term} -> {pf : IsNumValue nv1} -> EvalsToStar t1 (Succ nv1) -> BInE (Pred t1) nv1 BInEIsZeroZero : {t1 : Term} -> EvalsToStar t1 Zero -> BInE (IsZero t1) True BInEIsZeroSucc : {t1 : Term} -> {pf : IsNumValue nv1} -> EvalsToStar t1 (Succ nv1) -> BInE (IsZero t1) False ||| Given a (one-step) derivation in the `BInE`-calculus, computes its corresponding derivation ||| in the `E`-calculus. from_BInE_to_E : BInE t v -> EvalsToStar t v from_BInE_to_E BInEValue = Refl from_BInE_to_E (BInEIfTrue {t2} {t3} x y) = map {func=(\t => IfThenElse t t2 t3)} EIf x ++ Cons EIfTrue y from_BInE_to_E (BInEIfFalse {t2} {t3} x y) = map {func=(\t => IfThenElse t t2 t3)} EIf x ++ Cons EIfFalse y from_BInE_to_E (BInESucc x) = map ESucc x from_BInE_to_E (BInEPredZero x) = map EPred x ++ weaken EPredZero from_BInE_to_E (BInEPredSucc {pf} x) = map EPred x ++ weaken (EPredSucc {pf=pf}) from_BInE_to_E (BInEIsZeroZero x) = map EIsZero x ++ weaken EIsZeroZero from_BInE_to_E (BInEIsZeroSucc {pf} x) = map EIsZero x ++ weaken (EIsZeroSucc {pf=pf}) -------------------------------------------------------------------------------- -- Sublemmas of `from_E_to_BInE` -------------------------------------------------------------------------------- lemma_EIfTrue : {t2, t3 : Term} -> {pf : IsValue v} -> (d' : EvalsToStar t2 v) -> (r : BInE (IfThenElse True t2 t3) v ** Cons EIfTrue d' = from_BInE_to_E r) lemma_EIfTrue {pf} d' = (BInEIfTrue {pf=pf} Refl d' ** Refl) lemma_EIfFalse : {t2, t3 : Term} -> {pf : IsValue v} -> (d' : EvalsToStar t3 v) -> (r : BInE (IfThenElse False t2 t3) v ** Cons EIfFalse d' = from_BInE_to_E r) lemma_EIfFalse {pf} d' = (BInEIfFalse {pf=pf} Refl d' ** Refl) lemma_EIf : {t1, t2, t3 : Term} -> {pf : IsValue v} -> {x : EvalsTo t1 t1'} -> (d' : EvalsToStar (IfThenElse t1' t2 t3) v) -> (r' : BInE (IfThenElse t1' t2 t3) v ** d' = from_BInE_to_E r') -> (r : BInE (IfThenElse t1 t2 t3) v ** Cons (EIf x) d' = from_BInE_to_E r) lemma_EIf {pf} {x} d' (r' ** pf') = case r' of BInEValue {pf} => absurd (ifThenElseNotNormal pf) BInEIfTrue d1 d2 => (BInEIfTrue {pf=pf} (Cons x d1) d2 ** cong pf') BInEIfFalse d1 d2 => (BInEIfFalse {pf=pf} (Cons x d1) d2 ** cong pf') lemma_ESucc : {t1 : Term} -> {pf : IsValue v} -> (x : EvalsTo t1 t1') -> (r' : BInE (Succ t1') v ** d' = from_BInE_to_E r') -> (r : BInE (Succ t1) v ** Cons (ESucc x) d' = from_BInE_to_E r) lemma_ESucc {pf} x (r' ** pf') = case r' of BInEValue => case succIsValueIf pf of nv_pf@(ConvertedFrom nv) => (BInESucc {pf=nv_pf} (weaken x) ** cong pf') BInESucc {pf} d'' => (BInESucc {pf=pf} (Cons x d'') ** cong pf') lemma_EPred : {t1, t1' : Term} -> {pf : IsValue v} -> (x : EvalsTo t1 t1') -> (r' : 
BInE (Pred t1') v ** d' = from_BInE_to_E r') -> (r : BInE (Pred t1) v ** Cons (EPred x) d' = from_BInE_to_E r) lemma_EPred {pf} x (r' ** pf') = case r' of BInEValue {pf=pf_val} => absurd (predNotValue pf_val) BInEPredZero y => (BInEPredZero (Cons x y) ** cong pf') BInEPredSucc {pf=pf_v} y => (BInEPredSucc {pf=pf_v} (Cons x y) ** cong pf') lemma_EIsZero : {t1, t1', v : Term} -> {pf : IsValue v} -> (x : EvalsTo t1 t1') -> (r' : BInE (IsZero t1') v ** d' = from_BInE_to_E r') -> (r : BInE (IsZero t1) v ** Cons (EIsZero x) d' = from_BInE_to_E r) lemma_EIsZero {pf} x (r' ** pf') = case r' of BInEValue {pf=pf_val} => absurd (isZeroNotValue pf_val) BInEIsZeroZero y => (BInEIsZeroZero (Cons x y) ** cong pf') BInEIsZeroSucc {pf=pf_v} y => (BInEIsZeroSucc {pf=pf_v} (Cons x y) ** cong pf') ||| Deconstructs a derivation of a term `t` to a value `v` in the `E`-calculus into a (one-step) derivation ||| in the `BInE`-calculus. from_E_to_BInE : {pf : IsValue v} -> (d : EvalsToStar t v) -> (r : BInE t v ** d = from_BInE_to_E r) from_E_to_BInE {pf} {t = True} Refl = (BInEValue {pf=ConvertedFrom (Left True)} {v=True} ** Refl) from_E_to_BInE {pf} {t = True} (Cons x y) = absurd (valuesDontEvaluate {pf=ConvertedFrom (Left True)} x) from_E_to_BInE {pf} {t = False} Refl = (BInEValue {pf=ConvertedFrom (Left False)} {v=False} ** Refl) from_E_to_BInE {pf} {t = False} (Cons x y) = absurd (valuesDontEvaluate {pf=ConvertedFrom (Left False)} x) from_E_to_BInE {pf} {t = (IfThenElse x y z)} Refl = absurd (ifThenElseNotNormal pf) from_E_to_BInE {pf} {t = (IfThenElse x y z)} (Cons w s) = case w of EIfTrue => lemma_EIfTrue {pf=pf} s EIfFalse => lemma_EIfFalse {pf=pf} s (EIf r) => lemma_EIf {pf=pf} s (from_E_to_BInE {pf=pf} s) from_E_to_BInE {pf} {t = Zero} d = case valuesAreNormal' {pf=ConvertedFrom (Right Zero)} d of Refl => case d of Refl => (BInEValue {pf=ConvertedFrom (Right Zero)} ** Refl) (Cons x y) => absurd (valuesDontEvaluate {pf=ConvertedFrom (Right Zero)} x) from_E_to_BInE {pf} {t = (Succ x)} Refl = case succIsValueIf pf of ConvertedFrom nv => (BInEValue {pf=ConvertedFrom (Right (Succ nv))} ** Refl) from_E_to_BInE {pf} {t = (Succ x)} (Cons y z) = case y of ESucc y' => lemma_ESucc {pf=pf} y' (from_E_to_BInE {pf=pf} z) from_E_to_BInE {pf} {t = (Pred x)} Refl = absurd (predNotValue pf) from_E_to_BInE {pf} {t = (Pred x)} (Cons y z) = case y of EPredZero => case valuesAreNormal {pf=ConvertedFrom (Right Zero)} z of Refl => (BInEPredZero Refl ** Refl) EPredSucc {nv1} {pf=pf_nv} => case valuesAreNormal {pf=numValueIsValue pf_nv} z of Refl => (BInEPredSucc {pf=pf_nv} Refl ** Refl) EPred y' => lemma_EPred {pf=pf} y' (from_E_to_BInE {pf=pf} z) from_E_to_BInE {pf} {t = (IsZero x)} Refl = absurd (isZeroNotValue pf) from_E_to_BInE {pf} {t = (IsZero x)} (Cons y z) = case y of EIsZeroZero => case valuesAreNormal {pf=ConvertedFrom (Left True)} z of Refl => (BInEIsZeroZero Refl ** Refl) EIsZeroSucc => case valuesAreNormal {pf=ConvertedFrom (Left False)} z of Refl => (BInEIsZeroSucc Refl ** Refl) EIsZero y' => lemma_EIsZero {pf=pf} y' (from_E_to_BInE {pf=pf} z) ||| Converts a derivation in `BInE`-calculus to a derivation in the `B`-calculus. 
from_BInE_to_B : {pf : IsValue v} -> BInE t v -> BigEvalsTo t v from_BInE_to_B {pf} BInEValue = BValue {pf=pf} from_BInE_to_B {pf} (BInEIfTrue y z) = let pf_true = ConvertedFrom (Left True) y' = fst (from_E_to_BInE {pf=pf_true} y) z' = fst (from_E_to_BInE {pf=pf} z) in BIfTrue {pf=pf} (from_BInE_to_B {pf=pf_true} y') (from_BInE_to_B {pf=pf} z') from_BInE_to_B {pf} (BInEIfFalse y z) = let pf_false = ConvertedFrom (Left False) y' = fst (from_E_to_BInE {pf=pf_false} y) z' = fst (from_E_to_BInE {pf=pf} z) in BIfFalse {pf=pf} (from_BInE_to_B {pf=pf_false} y') (from_BInE_to_B {pf=pf} z') from_BInE_to_B {pf} (BInESucc {pf=pf_nv} y) = let y' = fst (from_E_to_BInE {pf=numValueIsValue pf_nv} y) in BSucc {pf=pf_nv} (from_BInE_to_B {pf=numValueIsValue pf_nv} y') from_BInE_to_B {pf} (BInEPredZero x) = let pf_zero = ConvertedFrom (Right Zero) x' = fst (from_E_to_BInE {pf=pf_zero} x) in BPredZero (from_BInE_to_B {pf=pf_zero} x') from_BInE_to_B {pf} (BInEPredSucc {pf=pf_v} y) = let pf_succ = numValueIsValue (succNumValueIsNumValue pf_v) y' = fst (from_E_to_BInE {pf=pf_succ} y) in BPredSucc {pf=pf_v} (from_BInE_to_B {pf=pf_succ} y') from_BInE_to_B {pf} (BInEIsZeroZero x) = let pf_zero = ConvertedFrom (Right Zero) x' = fst (from_E_to_BInE {pf=pf_zero} x) in BIsZeroZero (from_BInE_to_B {pf=pf_zero} x') from_BInE_to_B {pf} (BInEIsZeroSucc {pf=pf_v} y) = let pf_succ = numValueIsValue (succNumValueIsNumValue pf_v) y' = fst (from_E_to_BInE {pf=pf_succ} y) in BIsZeroSucc {pf=pf_v} (from_BInE_to_B {pf=pf_succ} y') ||| Proof that if a term `t` evaluates to a value `v` under the reflexive transitive ||| closure of the small-step evaluation rules, then it also evaluates to it under the ||| big-step evaluation rules. starImpliesBig : {pf : IsValue v} -> EvalsToStar t v -> BigEvalsTo t v starImpliesBig {pf} d = from_BInE_to_B {pf=pf} (fst (from_E_to_BInE {pf=pf} d)) ||| Proof that if a term `t` evaluates to a value `v` under the big-step evaluation rules, ||| then it also evaluates to it under the reflexive transitive closure of the small-step ||| rules. bigImpliesStar : {pf : IsValue v} -> BigEvalsTo t v -> EvalsToStar t v bigImpliesStar {pf} BValue = Refl bigImpliesStar {pf} (BIfTrue y z) = let y' = bigImpliesStar {pf=ConvertedFrom (Left True)} y z' = bigImpliesStar {pf=pf} z in from_BInE_to_E (BInEIfTrue {pf=pf} y' z') bigImpliesStar {pf} (BIfFalse y z) = let y' = bigImpliesStar {pf=ConvertedFrom (Left False)} y z' = bigImpliesStar {pf=pf} z in from_BInE_to_E (BInEIfFalse {pf=pf} y' z') bigImpliesStar {pf} (BSucc {pf=pf_nv} y) = let y' = bigImpliesStar {pf=numValueIsValue pf_nv} y in from_BInE_to_E (BInESucc {pf=pf_nv} y') bigImpliesStar {pf} (BPredZero x) = let x' = bigImpliesStar {pf=pf} x in from_BInE_to_E (BInEPredZero x') bigImpliesStar {pf} (BPredSucc {pf=pf_nv} y) = let y' = bigImpliesStar {pf=numValueIsValue (succNumValueIsNumValue pf_nv)} y in from_BInE_to_E (BInEPredSucc {pf=pf_nv} y') bigImpliesStar {pf} (BIsZeroZero x) = let x' = bigImpliesStar {pf=ConvertedFrom (Right Zero)} x in from_BInE_to_E (BInEIsZeroZero x') bigImpliesStar {pf} (BIsZeroSucc {pf=pf_nv} y) = let y' = bigImpliesStar {pf=numValueIsValue (succNumValueIsNumValue pf_nv)} y in from_BInE_to_E (BInEIsZeroSucc {pf=pf_nv} y')
! =============================================================================
! Test netCDF reading 2D datasets
!
! This unit test checks reading of 2D datasets.
! =============================================================================
program test_netcdf_read_dataset_2d
    use unit_test
    use netcdf_writer
    use netcdf_reader
    implicit none

    integer, parameter :: nx = 5, ny = 10, nt = 3
    integer :: ix, iy, ncid, dimids(3), t
    integer :: var_id = -1, cnt(3), start(3)
    double precision :: wdset(ny, nx), rdset(ny, nx), error
    logical :: passed = .true.

    do ix = 1, nx
        do iy = 1, ny
            wdset(iy, ix) = iy + (ix-1) * ny
        enddo
    enddo

    call create_netcdf_file(ncfname='nctest.nc', &
                            overwrite=.true., &
                            ncid=ncid)
    passed = (passed .and. (ncerr == 0))

    call define_netcdf_dimension(ncid, "x", nx, dimids(1))
    passed = (passed .and. (ncerr == 0))

    call define_netcdf_dimension(ncid, "y", ny, dimids(2))
    passed = (passed .and. (ncerr == 0))

    call define_netcdf_dimension(ncid, "t", NF90_UNLIMITED, dimids(3))
    passed = (passed .and. (ncerr == 0))

    call define_netcdf_dataset(ncid, 'x_velocity', '', '', 'm/s', NF90_DOUBLE, dimids, var_id)
    passed = (passed .and. (ncerr == 0))

    call close_definition(ncid)
    passed = (passed .and. (ncerr == 0))

    ! write data
    do t = 1, nt
        cnt = (/ nx, ny, 1 /)
        start = (/ 1, 1, t /)

        call open_netcdf_file(ncfname='nctest.nc', &
                              access_flag=NF90_WRITE, &
                              ncid=ncid)
        passed = (passed .and. (ncerr == 0))

        call write_netcdf_dataset(ncid, var_id, wdset, start, cnt)
        passed = (passed .and. (ncerr == 0))

        call close_netcdf_file(ncid)
        passed = (passed .and. (ncerr == 0))

        wdset = 1.0d0 + wdset
    enddo

    ! we need to subtract the last increase again
    ! since it was not written
    wdset = wdset - 1.0d0

    ! read last data
    call open_netcdf_file(ncfname='nctest.nc', &
                          access_flag=NF90_NOWRITE, &
                          ncid=ncid)
    passed = (passed .and. (ncerr == 0))

    cnt = (/nx, ny, 1/)
    start = (/1, 1, nt/)

    call read_netcdf_dataset(ncid, 'x_velocity', rdset, start, cnt)
    passed = (passed .and. (ncerr == 0))

    call close_netcdf_file(ncid)
    passed = (passed .and. (ncerr == 0))

    error = sum(abs(wdset - rdset))

    passed = (passed .and. (ncerr == 0) .and. (error == 0.0d0))

    call delete_netcdf_file(ncfname='nctest.nc')
    passed = (passed .and. (ncerr == 0))

    call print_result_logical('Test netCDF read 2D dataset', passed)

end program test_netcdf_read_dataset_2d
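For readers more familiar with Python, a hypothetical netCDF4-python analogue of the write/read-back pattern exercised by this test (a sketch, not part of the Fortran test suite; the field values and C-order dimension layout are my own choices):

```python
from netCDF4 import Dataset
import numpy as np

nx, ny, nt = 5, 10, 3
field = np.arange(1, nx * ny + 1, dtype=float).reshape(ny, nx)

with Dataset("nctest.nc", "w") as nc:
    nc.createDimension("x", nx)
    nc.createDimension("y", ny)
    nc.createDimension("t", None)                  # unlimited, like NF90_UNLIMITED
    var = nc.createVariable("x_velocity", "f8", ("t", "y", "x"))
    var.units = "m/s"
    for t in range(nt):
        var[t, :, :] = field + t                   # write one time slice per step

with Dataset("nctest.nc", "r") as nc:
    last = nc.variables["x_velocity"][-1, :, :]    # read back the last time slice
    assert np.allclose(last, field + nt - 1)
```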
#include "execution_model/execution_model.h" #include "generate_data/baryocentric_coords.h" #include "generate_data/baryocentric_to_ray.h" #include "generate_data/single_triangle/amend_config.h" #include "generate_data/single_triangle/generate_scene.h" #include "generate_data/single_triangle/generate_scene_triangles.h" #include "generate_data/single_triangle/normalize_scene_triangles.h" #include "integrate/sample_triangle.h" #include "intersect/triangle.h" #include "lib/assert.h" #include "lib/cuda/utils.h" #include "render/config_io.h" #include "render/renderer.h" #include "rng/uniform/uniform.h" #include "scene/camera.h" #include <Eigen/Dense> #include <QImage> #include <boost/lexical_cast.hpp> #include <docopt.h> #include <iostream> #include <random> #include <string> // In retrospect, I don't really like docopt... constexpr char USAGE[] = R"(Path Usage: generate_data_visualizer [--seed=<seed>] [--config=<file_name>] [-g | --gpu] [--print-config] generate_data_visualizer (-h | --help) Options: -h --help Show this screen. --seed=<seed> Random seed [default: 0] --config=<file_name> Config file name. If no file is specified, default settings will be used. -g --gpu Use gpu --print-config Print config )"; int main(int argc, char *argv[]) { using namespace generate_data; using namespace generate_data::single_triangle; const std::map<std::string, docopt::value> args = docopt::docopt(USAGE, {argv + 1, argv + argc}); auto get_unpack_arg = [&](const std::string &s) { auto it = args.find(s); if (it == args.end()) { std::cerr << "internal command line parse error" << std::endl; std::cerr << s << std::endl; unreachable(); } return it->second; }; bool using_gpu = get_unpack_arg("--gpu").asBool(); const std::string output_file_name = "out.png"; const std::string baryocentric_output_file_name = "baryo.png"; const bool print_config = get_unpack_arg("--print-config").asBool(); const unsigned seed = get_unpack_arg("--seed").asLong(); if (using_gpu) { int n_devices; CUDA_ERROR_CHK(cudaGetDeviceCount(&n_devices)); for (int i = 0; i < n_devices; i++) { cudaDeviceProp prop; CUDA_ERROR_CHK(cudaGetDeviceProperties(&prop, i)); std::cout << "found gpu: " << prop.name << std::endl; } if (n_devices == 0) { std::cout << "no gpu found, using cpu" << std::endl; using_gpu = false; } } ExecutionModel execution_model = using_gpu ? 
ExecutionModel::GPU : ExecutionModel::CPU; UniformState rng{seed}; // auto tris = generate_scene_triangles(rng); auto tris = normalize_scene_triangles(generate_scene_triangles(rng)); auto scene = generate_scene(tris); auto dir_towards = -tris.triangle_onto.template cast<float>().normal(); Eigen::Vector3f onto_centroid = tris.triangle_onto.centroid().template cast<float>(); auto film_to_world = scene::get_camera_transform( dir_towards, UnitVector::new_normalize({0.f, 1.f, 0.f}), onto_centroid - 8 * (*dir_towards), 45.f, 1.f); render::Renderer renderer; unsigned width = 256; unsigned height = width; unsigned num_samples = 1024; QImage image(width, height, QImage::Format_RGB32); Span<BGRA32> pixels(reinterpret_cast<BGRA32 *>(image.bits()), width * height); render::Settings settings; auto config_file_name = get_unpack_arg("--config"); if (config_file_name) { settings = render::load_config(config_file_name.asString()); } amend_config(settings); if (print_config) { render::print_config(settings); } renderer.render(execution_model, {tag_v<render::SampleSpecType::SquareImage>, { .x_dim = width, .y_dim = height, .film_to_world = film_to_world, }}, {tag_v<render::OutputType::BGRA>, pixels}, scene, num_samples, settings, true); image.save(output_file_name.c_str()); unsigned baryocentric_width = 128; unsigned baryocentric_height = baryocentric_width; unsigned baryocentric_num_samples = 4096; auto [baryocentric_indexes, baryocentric_grid_values] = baryocentric_coords(baryocentric_width, baryocentric_height); VectorT<render::InitialIdxAndDirSpec> baryocentric_grid( baryocentric_grid_values.size()); for (unsigned i = 0; i < baryocentric_grid_values.size(); ++i) { auto [x_v, y_v] = baryocentric_grid_values[i]; baryocentric_grid[i] = { .idx = 0, .ray = baryocentric_to_ray( x_v, y_v, tris.triangle_onto.template cast<float>(), dir_towards), }; } std::vector<BGRA32> baryocentric_pixels(baryocentric_grid.size(), BGRA32::Zero()); renderer.render( execution_model, {tag_v<render::SampleSpecType::InitialIdxAndDir>, baryocentric_grid}, {tag_v<render::OutputType::BGRA>, baryocentric_pixels}, scene, baryocentric_num_samples, settings, true); QImage baryocentric_image(baryocentric_width, baryocentric_height, QImage::Format_RGB32); SpanSized<BGRA32> baryocentric_image_pixels( reinterpret_cast<BGRA32 *>(baryocentric_image.bits()), baryocentric_width * baryocentric_height); std::fill(baryocentric_image_pixels.begin(), baryocentric_image_pixels.end(), BGRA32::Zero()); for (unsigned i = 0; i < baryocentric_grid.size(); ++i) { auto [x, y] = baryocentric_indexes[i]; baryocentric_image_pixels[x + y * baryocentric_width] = baryocentric_pixels[i]; } baryocentric_image.save(baryocentric_output_file_name.c_str()); return 0; }
module HVX.DcpTests.OkXAffine where

import Numeric.LinearAlgebra
import HVX

main :: IO ()
main = do
  let zero = EConst $ konst 0.0 (2, 1)
      x = EVar "x"
      e = hexp $ x
      _ = zero >=~ e
      _ = e <=~ zero
  return ()
------------------------------------------------------------------------------
-- Generic well-founded induction on trees
------------------------------------------------------------------------------

{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}

-- Adapted from FOTC.Data.Nat.Induction.Acc.WellFounded.
module FOT.FOTC.Program.Mirror.Induction.Acc.WellFounded where

open import FOTC.Base
open import FOTC.Program.Mirror.Type

------------------------------------------------------------------------------
-- The accessibility predicate: x is accessible if everything which is
-- smaller than x is also accessible (inductively).
data Acc (_<_ : D → D → Set)(t : D) : Set where
  acc : (∀ {t'} → Tree t' → t' < t → Acc _<_ t') → Acc _<_ t

accFold : {P : D → Set}(_<_ : D → D → Set) →
          (∀ {t} → Tree t → (∀ {t'} → Tree t' → t' < t → P t') → P t) →
          ∀ {t} → Tree t → Acc _<_ t → P t
accFold _<_ f Tt (acc h) = f Tt (λ Tt' t'<t → accFold _<_ f Tt' (h Tt' t'<t))

-- The accessibility predicate encodes what it means to be
-- well-founded; if all elements are accessible, then _<_ is
-- well-founded.
WellFounded : (D → D → Set) → Set
WellFounded _<_ = ∀ {t} → Tree t → Acc _<_ t

WellFoundedInduction : {P : D → Set}{_<_ : D → D → Set} →
                       WellFounded _<_ →
                       (∀ {t} → Tree t → (∀ {t'} → Tree t' → t' < t → P t') → P t) →
                       ∀ {t} → Tree t → P t
WellFoundedInduction {_<_ = _<_} wf f Tt = accFold _<_ f Tt (wf Tt)
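In conventional notation, the recursion implemented by accFold and WellFoundedInduction above is the usual well-founded induction principle (a restatement for orientation, not additional Agda code): whenever every element is accessible, i.e. $<$ is well-founded,

$$\bigl(\forall t.\ (\forall t' < t.\ P\,t') \rightarrow P\,t\bigr) \;\longrightarrow\; \forall t.\ P\,t,$$

with the difference that here all quantifiers additionally range only over terms satisfying the Tree predicate.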
[STATEMENT]
lemma new_tv_Fun[simp]:
  "new_tv n (t1 -> t2) = (new_tv n t1 \<and> new_tv n t2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. new_tv n (t1 -> t2) = (new_tv n t1 \<and> new_tv n t2)
[PROOF STEP]
by (auto simp: new_tv_def)