theory ETCS_Parity
imports ETCS_Add ETCS_Mult ETCS_Exp ETCS_Pred ETCS_Quantifier
begin
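text \<open>This theory develops parity for the natural number object \<nat>\<^sub>c: the maps
  nth_even and nth_odd that enumerate the even and odd numbers, the predicates
  is_even and is_odd valued in \<Omega>, and the map halve_with_parity that splits
  \<nat>\<^sub>c into its even and odd halves. Each is defined by primitive recursion,
  that is, via the universal property of the natural number object.\<close>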
definition nth_even :: "cfunc" where
  "nth_even = (THE u. u: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<and>
    u \<circ>\<^sub>c zero = zero \<and>
    (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c u = u \<circ>\<^sub>c successor)"
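text \<open>nth_even is the unique map \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c sending zero to zero and commuting
  successor on the source with double successor on the target; informally it
  sends n to (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> n, i.e. 2\<cdot>n, which
  nth_even_is_times_twoB below makes precise.\<close>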
lemma nth_even_def2:
"nth_even: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c zero = zero \<and> (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even = nth_even \<circ>\<^sub>c successor"
by (unfold nth_even_def, rule theI', typecheck_cfuncs, rule natural_number_object_property2, auto)
lemma nth_even_type[type_rule]:
"nth_even: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by (simp add: nth_even_def2)
lemma nth_even_zero:
"nth_even \<circ>\<^sub>c zero = zero"
by (simp add: nth_even_def2)
lemma nth_even_successor:
"nth_even \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (simp add: nth_even_def2)
lemma nth_even_successor2:
"nth_even \<circ>\<^sub>c successor = successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even"
using comp_associative2 nth_even_def2 by (typecheck_cfuncs, auto)
lemma nth_even_is_times_two:
"nth_even = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle>"
proof (rule natural_number_object_func_unique[where f="successor \<circ>\<^sub>c successor", where X="\<nat>\<^sub>c"])
show "nth_even : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "nth_even \<circ>\<^sub>c zero = (mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero"
proof -
have "(mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero
= mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = mult2 \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero, zero\<rangle>"
by (typecheck_cfuncs, simp add: cart_prod_extract_right)
also have "... = zero"
using mult_def mult_respects_zero_right succ_n_type zero_type by auto
also have "... = nth_even \<circ>\<^sub>c zero"
by (simp add: nth_even_def2)
then show ?thesis
using calculation by auto
qed
show "nth_even \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (simp add: nth_even_successor)
show "(mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor =
(successor \<circ>\<^sub>c successor) \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
proof -
have "(mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor
= mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, successor\<rangle>"
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2 id_left_unit2 terminal_func_comp)
also have "... = add2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
using mult2_respects_succ_right by (typecheck_cfuncs, blast)
also have "... = add2 \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = successor \<circ>\<^sub>c successor \<circ>\<^sub>c add2 \<circ>\<^sub>c \<langle>zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
by (typecheck_cfuncs, simp add: add2_commutes_succ add2_respects_succ_right)
also have "... = successor \<circ>\<^sub>c successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
by (typecheck_cfuncs, simp add: add2_respects_zero_on_left)
also have "... = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
by (typecheck_cfuncs, smt comp_associative2)
then show ?thesis
using calculation by auto
qed
qed
lemma nth_even_is_times_twoB:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "nth_even \<circ>\<^sub>c n = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> n"
proof -
have "nth_even \<circ>\<^sub>c n = (mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c n"
using nth_even_is_times_two by auto
also have "... = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c n"
using assms by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c n, id \<nat>\<^sub>c \<circ>\<^sub>c n\<rangle>"
using assms by (typecheck_cfuncs, simp add: cfunc_prod_comp comp_associative2)
also have "... = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c id(one), id \<nat>\<^sub>c \<circ>\<^sub>c n\<rangle>"
using assms by (typecheck_cfuncs, metis terminal_func_unique)
also have "... = mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero), n\<rangle>"
using assms by (typecheck_cfuncs, simp add: id_left_unit2 id_right_unit2)
also have "... = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> n"
by (simp add: mult_def)
then show ?thesis using calculation by auto
qed
definition nth_odd :: "cfunc" where
  "nth_odd = (THE u. u: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<and>
    u \<circ>\<^sub>c zero = successor \<circ>\<^sub>c zero \<and>
    (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c u = u \<circ>\<^sub>c successor)"
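text \<open>nth_odd is defined analogously, starting at successor \<circ>\<^sub>c zero instead of
  zero; informally it sends n to 2\<cdot>n + 1, which nth_odd_is_succ_times_twoB
  below makes precise.\<close>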
lemma nth_odd_def2:
"nth_odd: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<and> nth_odd \<circ>\<^sub>c zero = successor \<circ>\<^sub>c zero \<and> (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd = nth_odd \<circ>\<^sub>c successor"
by (unfold nth_odd_def, rule theI', typecheck_cfuncs, rule natural_number_object_property2, auto)
lemma nth_odd_type[type_rule]:
"nth_odd: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by (simp add: nth_odd_def2)
lemma nth_odd_zero:
"nth_odd \<circ>\<^sub>c zero = successor \<circ>\<^sub>c zero"
by (simp add: nth_odd_def2)
lemma nth_odd_successor:
"nth_odd \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (simp add: nth_odd_def2)
lemma nth_odd_successor2:
"nth_odd \<circ>\<^sub>c successor = successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_odd"
using comp_associative2 nth_odd_def2 by (typecheck_cfuncs, auto)
lemma nth_odd_is_succ_times_two:
"nth_odd = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle>"
proof (rule natural_number_object_func_unique[where f="successor \<circ>\<^sub>c successor", where X="\<nat>\<^sub>c"])
show "nth_odd : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle> : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "nth_odd \<circ>\<^sub>c zero =
(successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero"
proof -
have "(successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero
= successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero, zero\<rangle>"
by (typecheck_cfuncs, simp add: cart_prod_extract_right)
also have "... = successor \<circ>\<^sub>c zero"
using mult_def mult_respects_zero_right succ_n_type zero_type by auto
also have "... = nth_odd \<circ>\<^sub>c zero"
by (simp add: nth_odd_def2)
then show ?thesis
using calculation by auto
qed
show "nth_odd \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (simp add: nth_odd_successor)
show "(successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor =
(successor \<circ>\<^sub>c successor) \<circ>\<^sub>c successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
proof -
have "(successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor
= successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, successor\<rangle>"
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2 id_left_unit2 terminal_func_comp)
also have "... = successor \<circ>\<^sub>c add2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
using mult2_respects_succ_right by (typecheck_cfuncs, auto)
also have "... = successor \<circ>\<^sub>c add2 \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c add2 \<circ>\<^sub>c \<langle>zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>\<rangle>"
by (typecheck_cfuncs, simp add: add2_commutes_succ add2_respects_succ_right)
also have "... = successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
by (typecheck_cfuncs, simp add: add2_respects_zero_on_left)
also have "... = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
by (typecheck_cfuncs, smt comp_associative2)
then show ?thesis
using calculation by auto
qed
qed
lemma nth_odd_is_succ_times_twoB:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "nth_odd \<circ>\<^sub>c n = successor \<circ>\<^sub>c((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> n)"
proof -
have "nth_odd \<circ>\<^sub>c n = (successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c n"
by (simp add: nth_odd_is_succ_times_two)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>, id \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c n"
using assms by (typecheck_cfuncs, simp add: cfunc_type_def comp_associative)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c n, id \<nat>\<^sub>c \<circ>\<^sub>c n \<rangle>"
using assms by (typecheck_cfuncs, simp add: cfunc_prod_comp comp_associative2)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c id(one), id \<nat>\<^sub>c \<circ>\<^sub>c n \<rangle>"
using assms by (typecheck_cfuncs, metis terminal_func_unique)
also have "... = successor \<circ>\<^sub>c mult2 \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero), n \<rangle>"
using assms by (typecheck_cfuncs, simp add: id_left_unit2 id_right_unit2)
also have "... = successor \<circ>\<^sub>c((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> n)"
by (simp add: mult_def)
then show ?thesis using calculation by auto
qed
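text \<open>The two enumerations interleave: the n-th odd number is the successor of
  the n-th even number, and the successor of the n-th odd number is the
  (n+1)-st even number.\<close>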
lemma nth_odd_is_succ_nth_even:
"nth_odd = successor \<circ>\<^sub>c nth_even"
proof (rule natural_number_object_func_unique[where X="\<nat>\<^sub>c", where f="successor \<circ>\<^sub>c successor"])
show "nth_odd : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c nth_even : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "nth_odd \<circ>\<^sub>c zero = (successor \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero"
proof -
have "nth_odd \<circ>\<^sub>c zero = successor \<circ>\<^sub>c zero"
by (simp add: nth_odd_zero)
also have "... = (successor \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero"
using nth_even_is_times_two nth_odd_def2 nth_odd_is_succ_times_two by (typecheck_cfuncs, auto)
then show ?thesis
using calculation by auto
qed
show "nth_odd \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (simp add: nth_odd_successor)
show "(successor \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even"
proof -
have "(successor \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor = successor \<circ>\<^sub>c nth_even \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even"
by (simp add: nth_even_successor2)
also have "... = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, simp add: comp_associative2)
then show ?thesis
using calculation by auto
qed
qed
lemma succ_nth_odd_is_nth_even_succ:
"successor \<circ>\<^sub>c nth_odd = nth_even \<circ>\<^sub>c successor"
proof (rule natural_number_object_func_unique[where X="\<nat>\<^sub>c", where f="successor \<circ>\<^sub>c successor"])
show "successor \<circ>\<^sub>c nth_odd : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "nth_even \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(successor \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = (nth_even \<circ>\<^sub>c successor) \<circ>\<^sub>c zero"
proof -
have "(successor \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero"
using comp_associative2 nth_odd_def2 successor_type zero_type by fastforce
also have "... = (nth_even \<circ>\<^sub>c successor) \<circ>\<^sub>c zero"
using calculation nth_even_successor2 nth_odd_is_succ_nth_even by auto
then show ?thesis
using calculation by auto
qed
show "(successor \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c successor \<circ>\<^sub>c nth_odd"
by (metis cfunc_type_def codomain_comp comp_associative nth_odd_def2 successor_type)
then show "(nth_even \<circ>\<^sub>c successor) \<circ>\<^sub>c successor = (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even \<circ>\<^sub>c successor"
using nth_even_successor2 nth_odd_is_succ_nth_even by auto
qed
definition is_even :: "cfunc" where
  "is_even = (THE u. u: \<nat>\<^sub>c \<rightarrow> \<Omega> \<and> u \<circ>\<^sub>c zero = \<t> \<and> NOT \<circ>\<^sub>c u = u \<circ>\<^sub>c successor)"
lemma is_even_def2:
"is_even : \<nat>\<^sub>c \<rightarrow> \<Omega> \<and> is_even \<circ>\<^sub>c zero = \<t> \<and> NOT \<circ>\<^sub>c is_even = is_even \<circ>\<^sub>c successor"
by (unfold is_even_def, rule theI', typecheck_cfuncs, rule natural_number_object_property2, auto)
lemma is_even_type[type_rule]:
"is_even : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by (simp add: is_even_def2)
lemma is_even_zero:
"is_even \<circ>\<^sub>c zero = \<t>"
by (simp add: is_even_def2)
lemma is_even_successor:
"is_even \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_even"
by (simp add: is_even_def2)
definition is_odd :: "cfunc" where
  "is_odd = (THE u. u: \<nat>\<^sub>c \<rightarrow> \<Omega> \<and> u \<circ>\<^sub>c zero = \<f> \<and> NOT \<circ>\<^sub>c u = u \<circ>\<^sub>c successor)"
lemma is_odd_def2:
"is_odd : \<nat>\<^sub>c \<rightarrow> \<Omega> \<and> is_odd \<circ>\<^sub>c zero = \<f> \<and> NOT \<circ>\<^sub>c is_odd = is_odd \<circ>\<^sub>c successor"
by (unfold is_odd_def, rule theI', typecheck_cfuncs, rule natural_number_object_property2, auto)
lemma is_odd_type[type_rule]:
"is_odd : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by (simp add: is_odd_def2)
lemma is_odd_zero:
"is_odd \<circ>\<^sub>c zero = \<f>"
by (simp add: is_odd_def2)
lemma is_odd_successor:
"is_odd \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_odd"
by (simp add: is_odd_def2)
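text \<open>The two predicates are pointwise complementary: each equals NOT composed
  with the other, so no natural number is both even and odd, while every
  natural number is at least one of the two.\<close>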
lemma is_even_not_is_odd:
"is_even = NOT \<circ>\<^sub>c is_odd"
proof (typecheck_cfuncs, rule natural_number_object_func_unique[where f="NOT", where X="\<Omega>"], auto)
show "is_even \<circ>\<^sub>c zero = (NOT \<circ>\<^sub>c is_odd) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, metis NOT_false_is_true cfunc_type_def comp_associative is_even_def2 is_odd_def2)
show "is_even \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_even"
by (simp add: is_even_successor)
show "(NOT \<circ>\<^sub>c is_odd) \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c NOT \<circ>\<^sub>c is_odd"
by (typecheck_cfuncs, simp add: cfunc_type_def comp_associative is_odd_def2)
qed
lemma is_odd_not_is_even:
"is_odd = NOT \<circ>\<^sub>c is_even"
proof (typecheck_cfuncs, rule natural_number_object_func_unique[where f="NOT", where X="\<Omega>"], auto)
show "is_odd \<circ>\<^sub>c zero = (NOT \<circ>\<^sub>c is_even) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, metis NOT_true_is_false cfunc_type_def comp_associative is_even_def2 is_odd_def2)
show "is_odd \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_odd"
by (simp add: is_odd_successor)
show "(NOT \<circ>\<^sub>c is_even) \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c NOT \<circ>\<^sub>c is_even"
by (typecheck_cfuncs, simp add: cfunc_type_def comp_associative is_even_def2)
qed
lemma not_even_and_odd:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c"
shows "\<not>(is_even \<circ>\<^sub>c m = \<t> \<and> is_odd \<circ>\<^sub>c m = \<t>)"
proof(auto)
assume "is_even \<circ>\<^sub>c m = \<t>"
assume "is_odd \<circ>\<^sub>c m = \<t>"
have "NOT \<circ>\<^sub>c is_odd \<circ>\<^sub>c m = \<t>"
using \<open>is_even \<circ>\<^sub>c m = \<t>\<close> assms comp_associative2 is_even_not_is_odd by (typecheck_cfuncs, auto)
then have "is_odd \<circ>\<^sub>c m \<noteq> \<t>"
using NOT_true_is_false true_false_distinct by fastforce
then show False
by (simp add: \<open>is_odd \<circ>\<^sub>c m = \<t>\<close>)
qed
lemma even_or_odd:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "(is_even \<circ>\<^sub>c n = \<t>) \<or> (is_odd \<circ>\<^sub>c n = \<t>)"
proof(auto)
assume not_odd: "is_odd \<circ>\<^sub>c n \<noteq> \<t>"
show "is_even \<circ>\<^sub>c n = \<t>"
using assms by (typecheck_cfuncs, metis NOT_false_is_true NOT_type cfunc_type_def comp_associative is_odd_not_is_even not_odd true_false_only_truth_values)
qed
lemma is_even_nth_even_true:
"is_even \<circ>\<^sub>c nth_even = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
proof (rule natural_number_object_func_unique[where f="id \<Omega>", where X=\<Omega>])
show "is_even \<circ>\<^sub>c nth_even : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "id\<^sub>c \<Omega> : \<Omega> \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "(is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
proof -
have "(is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero = is_even \<circ>\<^sub>c nth_even \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = \<t>"
by (simp add: is_even_zero nth_even_zero)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, smt beta_N_succ_mEqs_Id1 comp_associative2 id_right_unit2 successor_type terminal_func_comp)
then show ?thesis
using calculation by auto
qed
show "(is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c is_even \<circ>\<^sub>c nth_even"
proof -
have "(is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor = is_even \<circ>\<^sub>c nth_even \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = is_even \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even"
by (simp add: nth_even_successor2)
also have "... = ((is_even \<circ>\<^sub>c successor) \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, smt comp_associative2)
also have "... = is_even \<circ>\<^sub>c nth_even"
using is_even_def2 is_even_not_is_odd is_odd_def2 is_odd_not_is_even by (typecheck_cfuncs, auto)
also have "... = id \<Omega> \<circ>\<^sub>c is_even \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, simp add: id_left_unit2)
then show ?thesis
using calculation by auto
qed
show "(\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
by (typecheck_cfuncs, smt comp_associative2 id_left_unit2 terminal_func_comp)
qed
lemma is_odd_nth_odd_true:
"is_odd \<circ>\<^sub>c nth_odd = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
proof (rule natural_number_object_func_unique[where f="id \<Omega>", where X=\<Omega>])
show "is_odd \<circ>\<^sub>c nth_odd : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "id\<^sub>c \<Omega> : \<Omega> \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "(is_odd \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
proof -
have "(is_odd \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = is_odd \<circ>\<^sub>c nth_odd \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = \<t>"
using comp_associative2 is_even_not_is_odd is_even_zero is_odd_def2 nth_odd_def2 successor_type zero_type by auto
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, smt beta_N_succ_mEqs_Id1 comp_associative2 id_right_unit2 successor_type terminal_func_comp)
then show ?thesis
using calculation by auto
qed
show "(is_odd \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c nth_odd"
proof -
have "(is_odd \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c successor = is_odd \<circ>\<^sub>c nth_odd \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = is_odd \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_odd"
by (simp add: nth_odd_successor2)
also have "... = ((is_odd \<circ>\<^sub>c successor) \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, smt comp_associative2)
also have "... = is_odd \<circ>\<^sub>c nth_odd"
using is_even_def2 is_even_not_is_odd is_odd_def2 is_odd_not_is_even by (typecheck_cfuncs, auto)
also have "... = id \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, simp add: id_left_unit2)
then show ?thesis
using calculation by auto
qed
show "(\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
by (typecheck_cfuncs, smt comp_associative2 id_left_unit2 terminal_func_comp)
qed
lemma is_odd_nth_even_false:
"is_odd \<circ>\<^sub>c nth_even = \<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
by (smt NOT_true_is_false NOT_type comp_associative2 is_even_def2 is_even_nth_even_true
is_odd_not_is_even nth_even_def2 terminal_func_type true_func_type)
lemma is_even_nth_odd_false:
"is_even \<circ>\<^sub>c nth_odd = \<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
by (smt NOT_true_is_false NOT_type comp_associative2 is_odd_def2 is_odd_nth_odd_true
is_even_not_is_odd nth_odd_def2 terminal_func_type true_func_type)
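text \<open>The next lemmas record the closure of parity under arithmetic, with
  explicit witnesses: even plus even and odd plus odd are even, even plus odd
  is odd, even times anything is even, and odd times odd is odd. Throughout,
  successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero plays the role of 2 and successor \<circ>\<^sub>c zero
  that of 1.\<close>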
lemma add_evens_is_even:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "j \<in>\<^sub>c \<nat>\<^sub>c \<and> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j = m"
assumes "k \<in>\<^sub>c \<nat>\<^sub>c \<and> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k = n"
shows "(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k) = m +\<^sub>\<nat> n"
proof -
have m_pls_n: "m +\<^sub>\<nat> n = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k)"
using assms(3) assms(4) by blast
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero)) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k)"
by (typecheck_cfuncs, simp add: assms(3) assms(4) mult_right_distributivity)
then show ?thesis
by (simp add: m_pls_n)
qed
lemma add_odds_is_even:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "j \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m"
assumes "k \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = n"
shows "(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> ((j +\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) = (m +\<^sub>\<nat> n)"
proof -
have m_pls_n: "m +\<^sub>\<nat> n = (((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) +\<^sub>\<nat> (((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
using assms(3) assms(4) by blast
also have "... = (((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k)) +\<^sub>\<nat> ((successor \<circ>\<^sub>c zero) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: add_associates_mix_commutes assms(3) assms(4))
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k)) +\<^sub>\<nat> ((successor \<circ>\<^sub>c zero) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: assms(3) assms(4) mult_right_distributivity)
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k)) +\<^sub>\<nat> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero)"
by (simp add: one_plus_one_is_two)
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k)) +\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: s0_is_right_id)
also have "... = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> ((j +\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: assms(3) assms(4) mult_right_distributivity)
then show ?thesis
by (simp add: calculation)
qed
lemma add_mixed_is_odd:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "j \<in>\<^sub>c \<nat>\<^sub>c \<and> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j = m"
assumes "k \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = n"
shows "((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j +\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) = m +\<^sub>\<nat> n"
apply typecheck_cfuncs
proof -
assume a1: "successor \<circ>\<^sub>c zero \<in>\<^sub>c \<nat>\<^sub>c"
assume a2: "successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero \<in>\<^sub>c \<nat>\<^sub>c"
have "\<forall>c ca. \<not> c \<in>\<^sub>c \<nat>\<^sub>c \<or> \<not> ca \<in>\<^sub>c \<nat>\<^sub>c \<or> c \<cdot>\<^sub>\<nat> ca \<in>\<^sub>c \<nat>\<^sub>c"
using mult_closure by blast
then show ?thesis
using a2 a1 add_associates assms(3) assms(4) mult_right_distributivity by force
qed
lemma mult_even_is_even:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "j \<in>\<^sub>c \<nat>\<^sub>c \<and> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j = m"
shows "(successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j \<cdot>\<^sub>\<nat> n) = m \<cdot>\<^sub>\<nat> n"
proof -
have " m \<cdot>\<^sub>\<nat> n = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) \<cdot>\<^sub>\<nat> n"
using assms(3) by presburger
also have "... = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j \<cdot>\<^sub>\<nat> n)"
by (typecheck_cfuncs, simp add: assms(2) assms(3) mult_associative)
then show ?thesis
by (simp add: calculation)
qed
lemma mult_odds_is_odd:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "\<exists>j. j \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m"
assumes "\<exists>k. k \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = n"
shows "\<exists>l. l \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> l) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m \<cdot>\<^sub>\<nat> n"
proof -
obtain j where j_def: "j \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m"
using assms(3) by blast
obtain k where k_def: "k \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = n"
using assms(4) by blast
have "m \<cdot>\<^sub>\<nat> n = (((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) \<cdot>\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k)) +\<^sub>\<nat>
(((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) +\<^sub>\<nat>
((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero) +\<^sub>\<nat>
(successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
using FOIL_2 j_def k_def mult_closure succ_n_type zero_type by auto
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j \<cdot>\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k))) +\<^sub>\<nat>
((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (j \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero))) +\<^sub>\<nat>
((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (k \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero))) +\<^sub>\<nat>
(successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
by (simp add: j_def k_def mult_associative mult_closure succ_n_type zero_type)
also have "... = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat>
( (j \<cdot>\<^sub>\<nat> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k)) +\<^sub>\<nat> (j \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) +\<^sub>\<nat> (k \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c zero))))
+\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
by (smt assms(2) j_def k_def mult_closure mult_right_distributivity s0_is_right_id succ_n_type zero_type)
then show ?thesis
by (typecheck_cfuncs, metis add_type calculation j_def k_def mult_closure)
qed
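text \<open>Phrased internally with the existential quantifier EXISTS, zero lies in
  the image of nth_even but not in the image of nth_odd.\<close>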
lemma EXISTS_zero_nth_even:
"(EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_even \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp>) \<circ>\<^sub>c zero = \<t>"
proof -
have "(EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_even \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp>) \<circ>\<^sub>c zero
= EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_even \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp> \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c (nth_even \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c (id\<^sub>c \<nat>\<^sub>c \<times>\<^sub>f zero))\<^sup>\<sharp>"
by (typecheck_cfuncs, simp add: comp_associative2 sharp_comp)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c (nth_even \<times>\<^sub>f zero))\<^sup>\<sharp>"
by (typecheck_cfuncs, simp add: cfunc_cross_prod_comp_cfunc_cross_prod id_left_unit2 id_right_unit2)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<times>\<^sub>cone\<^esub>\<rangle> )\<^sup>\<sharp>"
by (typecheck_cfuncs, metis cfunc_cross_prod_def cfunc_type_def right_cart_proj_type terminal_func_unique)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one, (zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one\<rangle> )\<^sup>\<sharp>"
by (typecheck_cfuncs, smt comp_associative2 terminal_func_comp)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c ((eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one)\<^sup>\<sharp>"
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2)
also have "... = \<t>"
proof (rule exists_true_implies_EXISTS_true)
show "eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle> : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "\<exists>x. x \<in>\<^sub>c \<nat>\<^sub>c \<and> (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c x = \<t>"
proof (typecheck_cfuncs, rule_tac x="zero" in exI, auto)
have "(eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c zero
= eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle> \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even \<circ>\<^sub>c zero, zero\<rangle>"
by (typecheck_cfuncs, smt beta_N_succ_mEqs_Id1 cfunc_prod_comp comp_associative2 id_right_unit2 successor_type terminal_func_comp)
also have "... = \<t>"
using eq_pred_iff_eq nth_even_zero by (typecheck_cfuncs, blast)
then show "(eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_even,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c zero = \<t>"
using calculation by auto
qed
qed
then show ?thesis
using calculation by auto
qed
lemma not_EXISTS_zero_nth_odd:
"(EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_odd \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp>) \<circ>\<^sub>c zero = \<f>"
proof -
have "(EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_odd \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp>) \<circ>\<^sub>c zero
= EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c nth_odd \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c)\<^sup>\<sharp> \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c (nth_odd \<times>\<^sub>f id\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c (id\<^sub>c \<nat>\<^sub>c \<times>\<^sub>f zero))\<^sup>\<sharp>"
by (typecheck_cfuncs, simp add: comp_associative2 sharp_comp)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c (nth_odd \<times>\<^sub>f zero))\<^sup>\<sharp>"
by (typecheck_cfuncs, simp add: cfunc_cross_prod_comp_cfunc_cross_prod id_left_unit2 id_right_unit2)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<times>\<^sub>cone\<^esub>\<rangle> )\<^sup>\<sharp>"
by (typecheck_cfuncs, metis cfunc_cross_prod_def cfunc_type_def right_cart_proj_type terminal_func_unique)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one, (zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one\<rangle> )\<^sup>\<sharp>"
by (typecheck_cfuncs, smt comp_associative2 terminal_func_comp)
also have "... = EXISTS \<nat>\<^sub>c \<circ>\<^sub>c ((eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one)\<^sup>\<sharp>"
by (typecheck_cfuncs, smt cfunc_prod_comp comp_associative2)
also have "... = \<f>"
proof -
have "\<nexists> x. x \<in>\<^sub>c \<nat>\<^sub>c \<and> (eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c x = \<t>"
proof auto
fix x
assume x_type[type_rule]: "x \<in>\<^sub>c \<nat>\<^sub>c"
assume "(eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c x = \<t>"
then have "eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle> \<circ>\<^sub>c x = \<t>"
by (typecheck_cfuncs, simp add: comp_associative2)
then have "eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd \<circ>\<^sub>c x, zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c x\<rangle> = \<t>"
by (typecheck_cfuncs_prems, auto simp add: cfunc_prod_comp comp_associative2)
then have "eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd \<circ>\<^sub>c x, zero\<rangle> = \<t>"
by (typecheck_cfuncs_prems, metis cfunc_type_def id_right_unit id_type one_unique_element)
then have "nth_odd \<circ>\<^sub>c x = zero"
using eq_pred_iff_eq by (typecheck_cfuncs_prems, blast)
then have "successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> x) = zero"
using nth_odd_is_succ_times_twoB by (typecheck_cfuncs, auto)
then show "False"
by (metis mult_closure succ_n_type x_type zero_is_not_successor zero_type)
qed
then have "EXISTS \<nat>\<^sub>c \<circ>\<^sub>c ((eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one)\<^sup>\<sharp> \<noteq> \<t>"
using EXISTS_true_implies_exists_true by (typecheck_cfuncs, blast)
then show "EXISTS \<nat>\<^sub>c \<circ>\<^sub>c ((eq_pred \<nat>\<^sub>c \<circ>\<^sub>c \<langle>nth_odd,zero \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>\<rangle>) \<circ>\<^sub>c left_cart_proj \<nat>\<^sub>c one)\<^sup>\<sharp> = \<f>"
using true_false_only_truth_values by (typecheck_cfuncs, blast)
qed
then show ?thesis
using calculation by auto
qed
definition halve_with_parity :: "cfunc" where
  "halve_with_parity = (THE u. u: \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c \<and>
    u \<circ>\<^sub>c zero = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero \<and>
    (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c u = u \<circ>\<^sub>c successor)"
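text \<open>halve_with_parity sends a natural number to its half, tagged with its
  parity: even numbers land in the left component of \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c and odd
  numbers in the right one, as the composites with nth_even and nth_odd below
  confirm.\<close>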
lemma halve_with_parity_def2:
"halve_with_parity : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c \<and>
halve_with_parity \<circ>\<^sub>c zero = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero \<and>
(right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity = halve_with_parity \<circ>\<^sub>c successor"
by (unfold halve_with_parity_def, rule theI', typecheck_cfuncs, rule natural_number_object_property2, auto)
lemma halve_with_parity_type[type_rule]:
"halve_with_parity : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by (simp add: halve_with_parity_def2)
lemma halve_with_parity_zero:
"halve_with_parity \<circ>\<^sub>c zero = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (simp add: halve_with_parity_def2)
lemma halve_with_parity_successor:
"(right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity = halve_with_parity \<circ>\<^sub>c successor"
by (simp add: halve_with_parity_def2)
lemma halve_with_parity_nth_even:
"halve_with_parity \<circ>\<^sub>c nth_even = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c"
proof (rule natural_number_object_func_unique[where X="\<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c", where f="(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)"])
show "halve_with_parity \<circ>\<^sub>c nth_even : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "left_coproj \<nat>\<^sub>c \<nat>\<^sub>c : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) : \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(halve_with_parity \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
proof -
have "(halve_with_parity \<circ>\<^sub>c nth_even) \<circ>\<^sub>c zero = halve_with_parity \<circ>\<^sub>c nth_even \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = halve_with_parity \<circ>\<^sub>c zero"
by (simp add: nth_even_zero)
also have "... = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (simp add: halve_with_parity_zero)
then show ?thesis
using calculation by auto
qed
show "(halve_with_parity \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor =
((left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_even"
proof -
have "(halve_with_parity \<circ>\<^sub>c nth_even) \<circ>\<^sub>c successor = halve_with_parity \<circ>\<^sub>c nth_even \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = halve_with_parity \<circ>\<^sub>c (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (simp add: nth_even_successor)
also have "... = ((halve_with_parity \<circ>\<^sub>c successor) \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (((right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (simp add: halve_with_parity_def2)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c (halve_with_parity \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c ((right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c nth_even"
by (simp add: halve_with_parity_def2)
also have "... = ((right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)))
\<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = ((left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_even"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2 left_coproj_cfunc_coprod right_coproj_cfunc_coprod)
then show ?thesis
using calculation by auto
qed
show "left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor =
(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c"
by (typecheck_cfuncs, simp add: left_coproj_cfunc_coprod)
qed
lemma halve_with_parity_nth_odd:
"halve_with_parity \<circ>\<^sub>c nth_odd = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c"
proof (rule natural_number_object_func_unique[where X="\<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c", where f="(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)"])
show "halve_with_parity \<circ>\<^sub>c nth_odd : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "right_coproj \<nat>\<^sub>c \<nat>\<^sub>c : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) : \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(halve_with_parity \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
proof -
have "(halve_with_parity \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c zero = halve_with_parity \<circ>\<^sub>c nth_odd \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = halve_with_parity \<circ>\<^sub>c successor \<circ>\<^sub>c zero"
by (simp add: nth_odd_def2)
also have "... = (halve_with_parity \<circ>\<^sub>c successor) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero"
by (simp add: halve_with_parity_def2)
also have "... = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (simp add: halve_with_parity_def2)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: left_coproj_cfunc_coprod)
then show ?thesis
using calculation by auto
qed
show "(halve_with_parity \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c successor =
(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_odd"
proof -
have "(halve_with_parity \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c successor = halve_with_parity \<circ>\<^sub>c nth_odd \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = halve_with_parity \<circ>\<^sub>c (successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (simp add: nth_odd_successor)
also have "... = ((halve_with_parity \<circ>\<^sub>c successor) \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = ((right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity)
\<circ>\<^sub>c successor) \<circ>\<^sub>c nth_odd"
by (simp add: halve_with_parity_successor)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)
\<circ>\<^sub>c (halve_with_parity \<circ>\<^sub>c successor)) \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)
\<circ>\<^sub>c (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity)) \<circ>\<^sub>c nth_odd"
by (simp add: halve_with_parity_successor)
also have "... = (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)
\<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = ((left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c nth_odd"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2 left_coproj_cfunc_coprod right_coproj_cfunc_coprod)
then show ?thesis
using calculation by auto
qed
show "right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor =
(left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<amalg> (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c"
by (typecheck_cfuncs, simp add: right_coproj_cfunc_coprod)
qed
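(* Interleaving the even and odd enumerations undoes halve_with_parity: splitting a
   number by parity and then mapping each half back through nth_even or nth_odd is
   the identity on \<nat>\<^sub>c. *)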
lemma nth_even_nth_odd_halve_with_parity:
"(nth_even \<amalg> nth_odd) \<circ>\<^sub>c halve_with_parity = id \<nat>\<^sub>c"
proof (rule natural_number_object_func_unique[where X="\<nat>\<^sub>c", where f="successor"])
show "nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "id\<^sub>c \<nat>\<^sub>c : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "successor : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
by typecheck_cfuncs
show "(nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero = id\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
proof -
have "(nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero = nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = nth_even \<amalg> nth_odd \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (simp add: halve_with_parity_zero)
also have "... = (nth_even \<amalg> nth_odd \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = nth_even \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: left_coproj_cfunc_coprod)
also have "... = id\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
using id_left_unit2 nth_even_def2 zero_type by auto
then show ?thesis
using calculation by auto
qed
show "(nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor =
successor \<circ>\<^sub>c nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity"
proof -
have "(nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor = nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity \<circ>\<^sub>c successor"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = nth_even \<amalg> nth_odd \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity"
by (simp add: halve_with_parity_successor)
also have "... = (nth_even \<amalg> nth_odd \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = nth_odd \<amalg> (nth_even \<circ>\<^sub>c successor) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2 left_coproj_cfunc_coprod right_coproj_cfunc_coprod)
also have "... = (successor \<circ>\<^sub>c nth_even) \<amalg> ((successor \<circ>\<^sub>c successor) \<circ>\<^sub>c nth_even) \<circ>\<^sub>c halve_with_parity"
by (simp add: nth_even_successor nth_odd_is_succ_nth_even)
also have "... = (successor \<circ>\<^sub>c nth_even) \<amalg> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c nth_even) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (successor \<circ>\<^sub>c nth_even) \<amalg> (successor \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c halve_with_parity"
by (simp add: nth_odd_is_succ_nth_even)
also have "... = successor \<circ>\<^sub>c nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: cfunc_coprod_comp comp_associative2)
then show ?thesis
using calculation by auto
qed
show "id\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor = successor \<circ>\<^sub>c id\<^sub>c \<nat>\<^sub>c"
using id_left_unit2 id_right_unit2 successor_type by auto
qed
lemma halve_with_parity_nth_even_nth_odd:
"halve_with_parity \<circ>\<^sub>c (nth_even \<amalg> nth_odd) = id (\<nat>\<^sub>c \<Coprod> \<nat>\<^sub>c)"
by (typecheck_cfuncs, smt cfunc_coprod_comp halve_with_parity_nth_even halve_with_parity_nth_odd id_coprod)
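(* The two composition laws above exhibit nth_even \<amalg> nth_odd and halve_with_parity
   as mutually inverse, so \<nat>\<^sub>c is isomorphic to two disjoint copies of itself. *)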
lemma even_odd_iso:
"isomorphism (nth_even \<amalg> nth_odd)"
proof (unfold isomorphism_def, rule_tac x=halve_with_parity in exI, auto)
show "domain halve_with_parity = codomain (nth_even \<amalg> nth_odd)"
by (typecheck_cfuncs, unfold cfunc_type_def, auto)
show "codomain halve_with_parity = domain (nth_even \<amalg> nth_odd)"
by (typecheck_cfuncs, unfold cfunc_type_def, auto)
show "halve_with_parity \<circ>\<^sub>c nth_even \<amalg> nth_odd = id\<^sub>c (domain (nth_even \<amalg> nth_odd))"
by (typecheck_cfuncs, unfold cfunc_type_def, auto simp add: halve_with_parity_nth_even_nth_odd)
show "nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity = id\<^sub>c (domain halve_with_parity)"
by (typecheck_cfuncs, unfold cfunc_type_def, auto simp add: nth_even_nth_odd_halve_with_parity)
qed
lemma halve_with_parity_iso:
"isomorphism halve_with_parity"
proof (unfold isomorphism_def, rule_tac x="nth_even \<amalg> nth_odd" in exI, auto)
show "domain (nth_even \<amalg> nth_odd) = codomain halve_with_parity"
by (typecheck_cfuncs, unfold cfunc_type_def, auto)
show "codomain (nth_even \<amalg> nth_odd) = domain halve_with_parity"
by (typecheck_cfuncs, unfold cfunc_type_def, auto)
show "nth_even \<amalg> nth_odd \<circ>\<^sub>c halve_with_parity = id\<^sub>c (domain halve_with_parity)"
by (typecheck_cfuncs, unfold cfunc_type_def, auto simp add: nth_even_nth_odd_halve_with_parity)
show "halve_with_parity \<circ>\<^sub>c nth_even \<amalg> nth_odd = id\<^sub>c (domain (nth_even \<amalg> nth_odd))"
by (typecheck_cfuncs, unfold cfunc_type_def, auto simp add: halve_with_parity_nth_even_nth_odd)
qed
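(* halve discards the parity tag produced by halve_with_parity, sending both the
   n-th even and the n-th odd number to n. *)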
definition halve :: "cfunc" where
"halve = (id \<nat>\<^sub>c \<amalg> id \<nat>\<^sub>c) \<circ>\<^sub>c halve_with_parity"
lemma halve_type[type_rule]:
"halve : \<nat>\<^sub>c \<rightarrow> \<nat>\<^sub>c"
unfolding halve_def by typecheck_cfuncs
lemma halve_nth_even:
"halve \<circ>\<^sub>c nth_even = id \<nat>\<^sub>c"
unfolding halve_def by (typecheck_cfuncs, smt comp_associative2 halve_with_parity_nth_even left_coproj_cfunc_coprod)
lemma halve_nth_odd:
"halve \<circ>\<^sub>c nth_odd = id \<nat>\<^sub>c"
unfolding halve_def by (typecheck_cfuncs, smt comp_associative2 halve_with_parity_nth_odd right_coproj_cfunc_coprod)
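(* Alternative characterisations: is_even and is_odd factor through halve_with_parity,
   answering \<t> on one summand of the coproduct and \<f> on the other. *)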
lemma is_even_def3:
"is_even = ((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>)) \<circ>\<^sub>c halve_with_parity"
proof (rule natural_number_object_func_unique[where X=\<Omega>, where f=NOT])
show "is_even : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "(\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "NOT : \<Omega> \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "is_even \<circ>\<^sub>c zero = ((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero"
proof -
have "((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero
= (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (typecheck_cfuncs, metis cfunc_type_def comp_associative halve_with_parity_zero)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2 left_coproj_cfunc_coprod)
also have "... = \<t>"
using comp_associative2 is_even_def2 is_even_nth_even_true nth_even_def2 by (typecheck_cfuncs, force)
also have "... = is_even \<circ>\<^sub>c zero"
by (simp add: is_even_zero)
then show ?thesis
using calculation by auto
qed
show "is_even \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_even"
by (simp add: is_even_successor)
show "((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor =
NOT \<circ>\<^sub>c (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity"
proof -
have "((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor
= (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2 halve_with_parity_successor)
also have "... =
(((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c)
\<amalg>
((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2)
also have "... = ((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2 left_coproj_cfunc_coprod right_coproj_cfunc_coprod)
also have "... = ((NOT \<circ>\<^sub>c \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (NOT \<circ>\<^sub>c \<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: NOT_false_is_true NOT_true_is_false comp_associative2)
also have "... = NOT \<circ>\<^sub>c (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2 terminal_func_unique)
then show ?thesis
using calculation by auto
qed
qed
lemma is_odd_def3:
"is_odd = ((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>)) \<circ>\<^sub>c halve_with_parity"
proof (rule natural_number_object_func_unique[where X=\<Omega>, where f=NOT])
show "is_odd : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "(\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "NOT : \<Omega> \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "is_odd \<circ>\<^sub>c zero = ((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero"
proof -
have "((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c zero
= (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero"
by (typecheck_cfuncs, metis cfunc_type_def comp_associative halve_with_parity_zero)
also have "... = (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
by (typecheck_cfuncs, simp add: comp_associative2 left_coproj_cfunc_coprod)
also have "... = \<f>"
using comp_associative2 is_odd_nth_even_false is_odd_type is_odd_zero nth_even_def2 by (typecheck_cfuncs, force)
also have "... = is_odd \<circ>\<^sub>c zero"
by (simp add: is_odd_def2)
then show ?thesis
using calculation by auto
qed
show "is_odd \<circ>\<^sub>c successor = NOT \<circ>\<^sub>c is_odd"
by (simp add: is_odd_successor)
show "((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor =
NOT \<circ>\<^sub>c (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity"
proof -
have "((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c successor
= (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c (right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<amalg> (left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2 halve_with_parity_successor)
also have "... =
(((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c)
\<amalg>
((\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c successor))
\<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2)
also have "... = ((\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: comp_associative2 left_coproj_cfunc_coprod right_coproj_cfunc_coprod)
also have "... = ((NOT \<circ>\<^sub>c \<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (NOT \<circ>\<^sub>c \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c successor)) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, simp add: NOT_false_is_true NOT_true_is_false comp_associative2)
also have "... = NOT \<circ>\<^sub>c (\<f> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<amalg> (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c halve_with_parity"
by (typecheck_cfuncs, smt cfunc_coprod_comp comp_associative2 terminal_func_unique)
then show ?thesis
using calculation by auto
qed
qed
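(* Every natural number lies in the image of nth_even or of nth_odd, since the two
   coprojections into the coproduct are jointly surjective. *)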
lemma nth_even_or_nth_odd:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "(\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c m = n) \<or> (\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_odd \<circ>\<^sub>c m = n)"
proof -
have "(\<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> halve_with_parity \<circ>\<^sub>c n = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c m)
\<or> (\<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> halve_with_parity \<circ>\<^sub>c n = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c m)"
by (rule coprojs_jointly_surj, insert assms, typecheck_cfuncs)
then show ?thesis
proof auto
fix m
assume m_type[type_rule]: "m \<in>\<^sub>c \<nat>\<^sub>c"
assume "halve_with_parity \<circ>\<^sub>c n = left_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c m"
then have "((nth_even \<amalg> nth_odd) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c n = ((nth_even \<amalg> nth_odd) \<circ>\<^sub>c left_coproj \<nat>\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c m"
by (typecheck_cfuncs, smt assms comp_associative2)
then have "n = nth_even \<circ>\<^sub>c m"
using assms by (typecheck_cfuncs_prems, smt comp_associative2 halve_with_parity_nth_even id_left_unit2 nth_even_nth_odd_halve_with_parity)
then show "\<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c m = n"
using m_type by auto
next
fix m
assume m_type[type_rule]: "m \<in>\<^sub>c \<nat>\<^sub>c"
assume "halve_with_parity \<circ>\<^sub>c n = right_coproj \<nat>\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c m"
then have "((nth_even \<amalg> nth_odd) \<circ>\<^sub>c halve_with_parity) \<circ>\<^sub>c n = ((nth_even \<amalg> nth_odd) \<circ>\<^sub>c right_coproj \<nat>\<^sub>c \<nat>\<^sub>c) \<circ>\<^sub>c m"
by (typecheck_cfuncs, smt assms comp_associative2)
then have "n = nth_odd \<circ>\<^sub>c m"
using assms by (typecheck_cfuncs_prems, smt comp_associative2 halve_with_parity_nth_odd id_left_unit2 nth_even_nth_odd_halve_with_parity)
then show "\<forall>m. m \<in>\<^sub>c \<nat>\<^sub>c \<longrightarrow> nth_odd \<circ>\<^sub>c m \<noteq> n \<Longrightarrow> \<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c m = n"
using m_type by auto
qed
qed
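(* Restated in arithmetic form: every natural number is either twice some m or the
   successor of twice some m. *)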
lemma even_or_odd2:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "(\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = successor \<circ>\<^sub>c((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m)) \<or>
(\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m)"
proof -
have "(\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c m = n) \<or> (\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_odd \<circ>\<^sub>c m = n)"
by (simp add: assms nth_even_or_nth_odd)
then show ?thesis
proof auto
fix m
assume m_type: "m \<in>\<^sub>c \<nat>\<^sub>c"
have "nth_even \<circ>\<^sub>c m = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m"
by (simp add: m_type nth_even_is_times_twoB)
then show "\<forall>ma. ma \<in>\<^sub>c \<nat>\<^sub>c \<longrightarrow> nth_even \<circ>\<^sub>c m \<noteq> (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> ma \<Longrightarrow>
\<exists>k. k \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_even \<circ>\<^sub>c m = successor \<circ>\<^sub>c (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k"
using m_type by auto
next
fix m
assume m_type: "m \<in>\<^sub>c \<nat>\<^sub>c"
have "nth_odd \<circ>\<^sub>c m = successor \<circ>\<^sub>c (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m"
by (simp add: m_type nth_odd_is_succ_times_twoB)
then show "\<exists>k. k \<in>\<^sub>c \<nat>\<^sub>c \<and> nth_odd \<circ>\<^sub>c m = successor \<circ>\<^sub>c (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k"
using m_type by auto
qed
qed
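(* And the two cases are exclusive: no number is both twice some m and the successor
   of twice some m. *)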
lemma not_even_and_odd2:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "\<not>((\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = successor \<circ>\<^sub>c((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m)) \<and>
(\<exists> m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> m))"
by (smt (z3) assms comp_associative2 halve_nth_even halve_nth_odd halve_type id_left_unit2 n_neq_succ_n nth_even_is_times_twoB nth_even_type nth_odd_def2 nth_odd_is_succ_times_twoB)
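(* Conversely, any number satisfying is_even is an nth_even value, and dually for
   is_odd; both proofs proceed by contradiction using nth_even_or_nth_odd. *)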
lemma is_even_exists_nth_even:
assumes "is_even \<circ>\<^sub>c n = \<t>" and n_type[type_rule]: "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "\<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_even \<circ>\<^sub>c m"
proof (rule ccontr)
assume "\<nexists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_even \<circ>\<^sub>c m"
then obtain m where m_type[type_rule]: "m \<in>\<^sub>c \<nat>\<^sub>c" and n_def: "n = nth_odd \<circ>\<^sub>c m"
using n_type nth_even_or_nth_odd by blast
then have "is_even \<circ>\<^sub>c nth_odd \<circ>\<^sub>c m = \<t>"
using assms(1) by blast
then have "is_odd \<circ>\<^sub>c nth_odd \<circ>\<^sub>c m = \<f>"
using NOT_true_is_false NOT_type comp_associative2 is_even_def2 is_odd_not_is_even n_def n_type by fastforce
then have "\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c m = \<f>"
by (typecheck_cfuncs_prems, smt comp_associative2 is_odd_nth_odd_true terminal_func_type true_func_type)
then have "\<t> = \<f>"
by (typecheck_cfuncs_prems, metis id_right_unit2 id_type one_unique_element)
then show False
using true_false_distinct by auto
qed
lemma is_odd_exists_nth_odd:
assumes "is_odd \<circ>\<^sub>c n = \<t>" and n_type[type_rule]: "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "\<exists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_odd \<circ>\<^sub>c m"
proof (rule ccontr)
assume "\<nexists>m. m \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_odd \<circ>\<^sub>c m"
then obtain m where m_type[type_rule]: "m \<in>\<^sub>c \<nat>\<^sub>c" and n_def: "n = nth_even \<circ>\<^sub>c m"
using n_type nth_even_or_nth_odd by blast
then have "is_odd \<circ>\<^sub>c nth_even \<circ>\<^sub>c m = \<t>"
using assms(1) by blast
then have "is_even \<circ>\<^sub>c nth_even \<circ>\<^sub>c m = \<f>"
using NOT_true_is_false NOT_type comp_associative2 is_even_not_is_odd is_odd_def2 n_def n_type by fastforce
then have "\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c m = \<f>"
by (typecheck_cfuncs_prems, smt comp_associative2 is_even_nth_even_true terminal_func_type true_func_type)
then have "\<t> = \<f>"
by (typecheck_cfuncs_prems, metis id_right_unit2 id_type one_unique_element)
then show False
using true_false_distinct by auto
qed
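(* Parity arithmetic: a sum of two evens or of two odds is even, a mixed sum is odd,
   and a product is even whenever one factor is, odd whenever both factors are. *)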
lemma add_evens_is_even2:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "is_even \<circ>\<^sub>c m = \<t>" "is_even \<circ>\<^sub>c n = \<t>"
shows "is_even \<circ>\<^sub>c (m +\<^sub>\<nat> n) = \<t>"
proof-
obtain p where m_def: "p \<in>\<^sub>c \<nat>\<^sub>c \<and> m = nth_even \<circ>\<^sub>c p"
using assms(1) assms(3) is_even_exists_nth_even by blast
obtain q where n_def: "q \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_even \<circ>\<^sub>c q"
using assms(2) assms(4) is_even_exists_nth_even by blast
have m_def2: "m = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p)"
by (simp add: m_def nth_even_is_times_twoB)
have n_def2: "n = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> q)"
by (simp add: n_def nth_even_is_times_twoB)
have "m +\<^sub>\<nat> n = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (p +\<^sub>\<nat> q))"
using m_def m_def2 mult_right_distributivity n_def n_def2 by (typecheck_cfuncs, auto)
then have "m +\<^sub>\<nat> n = nth_even \<circ>\<^sub>c (p +\<^sub>\<nat> q)"
by (simp add: add_type m_def n_def nth_even_is_times_twoB)
then have "is_even \<circ>\<^sub>c (m +\<^sub>\<nat> n) = (is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c (p +\<^sub>\<nat> q)"
by (typecheck_cfuncs, metis comp_associative2 m_def n_def)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c (p +\<^sub>\<nat> q)"
by (simp add: is_even_nth_even_true)
also have "... = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c (p +\<^sub>\<nat> q)"
using comp_associative2 m_def n_def by (typecheck_cfuncs, fastforce)
also have "... = \<t> \<circ>\<^sub>c id(one)"
by (typecheck_cfuncs, metis m_def n_def terminal_func_unique)
also have "... = \<t>"
by (typecheck_cfuncs, simp add: id_right_unit2)
then show ?thesis
by (simp add: calculation)
qed
lemma add_odds_is_even2:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "is_odd \<circ>\<^sub>c m = \<t>" "is_odd \<circ>\<^sub>c n = \<t>"
shows "is_even \<circ>\<^sub>c (m +\<^sub>\<nat> n) = \<t>"
proof-
obtain p where m_def: "p \<in>\<^sub>c \<nat>\<^sub>c \<and> m = nth_odd \<circ>\<^sub>c p"
using assms(1) assms(3) is_odd_exists_nth_odd by blast
obtain q where n_def: "q \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_odd \<circ>\<^sub>c q"
using assms(2) assms(4) is_odd_exists_nth_odd by blast
have m_def2: "m = successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p) "
using m_def nth_odd_is_succ_times_twoB by blast
then have m_def3: "m = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
using add_respects_succ1 add_respects_zero_on_right m_def m_def2 by (typecheck_cfuncs, auto)
have n_def2: "n = successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> q)"
using n_def nth_odd_is_succ_times_twoB by blast
have n_def3: "n= ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
using add_respects_succ1 add_respects_zero_on_right n_def n_def2 by (typecheck_cfuncs, auto)
have "(m +\<^sub>\<nat> n) = (successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> ((p +\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
using add_odds_is_even assms(1) assms(2) m_def m_def3 n_def n_def3 by auto
then have "(m +\<^sub>\<nat> n) = nth_even \<circ>\<^sub>c ((p +\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: m_def n_def nth_even_is_times_twoB)
then have "is_even \<circ>\<^sub>c (m +\<^sub>\<nat> n) = (is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c ((p +\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
using comp_associative2 m_def n_def by (typecheck_cfuncs, auto)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c ((p +\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, simp add: is_even_nth_even_true)
also have "... = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c ((p +\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
using comp_associative2 m_def n_def by (typecheck_cfuncs, auto)
also have "... = \<t> \<circ>\<^sub>c id(one)"
by (typecheck_cfuncs, metis m_def n_def terminal_func_unique)
also have "... = \<t>"
by (typecheck_cfuncs, simp add: id_right_unit2)
then show ?thesis
by (simp add: calculation)
qed
lemma add_mixed_is_odd2:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "is_odd \<circ>\<^sub>c m = \<t>" "is_even \<circ>\<^sub>c n = \<t>"
shows "is_odd \<circ>\<^sub>c (m +\<^sub>\<nat> n) = \<t>"
by (typecheck_cfuncs, smt add_evens_is_even2 add_respects_succ3 assms cfunc_type_def comp_associative comp_type is_even_def2 is_odd_not_is_even successor_type)
lemma mult_evens_is_even2:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "is_even \<circ>\<^sub>c m = \<t>"
shows "is_even \<circ>\<^sub>c (m \<cdot>\<^sub>\<nat> n) = \<t>"
proof -
obtain p where m_def: "p \<in>\<^sub>c \<nat>\<^sub>c \<and> m = nth_even \<circ>\<^sub>c p"
using assms(1) assms(3) is_even_exists_nth_even by blast
have m_def2: "m = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p)"
by (simp add: m_def nth_even_is_times_twoB)
then have mn_def: "m \<cdot>\<^sub>\<nat> n = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> (p \<cdot>\<^sub>\<nat> n))"
by (simp add: assms(2) m_def mult_associative succ_n_type zero_type)
then have "(m \<cdot>\<^sub>\<nat> n) = nth_even \<circ>\<^sub>c (p \<cdot>\<^sub>\<nat> n)"
by (simp add: assms(2) m_def mult_closure nth_even_is_times_twoB)
then have "is_even \<circ>\<^sub>c (m \<cdot>\<^sub>\<nat> n) = (is_even \<circ>\<^sub>c nth_even) \<circ>\<^sub>c (p \<cdot>\<^sub>\<nat> n)"
by (typecheck_cfuncs, metis assms(2) comp_associative2 m_def)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c (p \<cdot>\<^sub>\<nat> n)"
by (simp add: is_even_nth_even_true)
also have "... = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c (p \<cdot>\<^sub>\<nat> n)"
using assms(2) comp_associative2 m_def by (typecheck_cfuncs, fastforce)
also have "... = \<t> \<circ>\<^sub>c id(one)"
by (typecheck_cfuncs, metis assms(2) m_def terminal_func_unique)
also have "... = \<t>"
by (typecheck_cfuncs, simp add: id_right_unit2)
then show ?thesis
by (simp add: calculation)
qed
lemma mult_odds_is_odd2:
assumes "m \<in>\<^sub>c \<nat>\<^sub>c" "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "is_odd \<circ>\<^sub>c m = \<t>" "is_odd \<circ>\<^sub>c n = \<t>"
shows "is_odd \<circ>\<^sub>c (m \<cdot>\<^sub>\<nat> n) = \<t>"
proof -
obtain p where m_def: "p \<in>\<^sub>c \<nat>\<^sub>c \<and> m = nth_odd \<circ>\<^sub>c p"
using assms(1) assms(3) is_odd_exists_nth_odd by blast
obtain q where n_def: "q \<in>\<^sub>c \<nat>\<^sub>c \<and> n = nth_odd \<circ>\<^sub>c q"
using assms(2) assms(4) is_odd_exists_nth_odd by blast
have m_def2: "m = successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p) "
using m_def nth_odd_is_succ_times_twoB by blast
then have m_def3: "m = ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> p) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
using add_respects_succ1 add_respects_zero_on_right m_def m_def2 by (typecheck_cfuncs, auto)
then have m_def4: "\<exists>j. j \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> j) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m"
using m_def by blast
have n_def2: "n = successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> q)"
using n_def nth_odd_is_succ_times_twoB by blast
have n_def3: "n= ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> q) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero)"
using add_respects_succ1 add_respects_zero_on_right n_def n_def2 by (typecheck_cfuncs, auto)
then have n_def4: "\<exists>k. k \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> k) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = n"
using n_def by blast
have "\<exists>l. l \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> l) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m \<cdot>\<^sub>\<nat> n"
by (rule mult_odds_is_odd, simp_all add: assms m_def4 n_def4)
then obtain l where mn_def: "l \<in>\<^sub>c \<nat>\<^sub>c \<and> ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> l) +\<^sub>\<nat> (successor \<circ>\<^sub>c zero) = m \<cdot>\<^sub>\<nat> n"
by blast
then have "m \<cdot>\<^sub>\<nat> n = successor \<circ>\<^sub>c ((successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> l)"
using add_respects_succ1 add_respects_zero_on_right mn_def by (typecheck_cfuncs, auto)
then have "m \<cdot>\<^sub>\<nat> n = nth_odd \<circ>\<^sub>c l"
by (simp add: mn_def nth_odd_is_succ_times_twoB)
then have "is_odd \<circ>\<^sub>c (m \<cdot>\<^sub>\<nat> n) = (is_odd \<circ>\<^sub>c nth_odd) \<circ>\<^sub>c l"
using comp_associative2 mn_def by (typecheck_cfuncs, auto)
also have "... = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c l"
by (simp add: is_odd_nth_odd_true)
also have "... = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c l"
using comp_associative2 mn_def by (typecheck_cfuncs, auto)
also have "... = \<t> \<circ>\<^sub>c id(one)"
using id_type mn_def one_unique_element terminal_func_comp terminal_func_type by fastforce
also have "... = \<t>"
by (typecheck_cfuncs, simp add: id_right_unit2)
then show ?thesis
by (simp add: calculation)
qed
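(* Of two consecutive naturals one is even and one is odd, so their product is even. *)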
lemma prod_of_consecutive_nats_is_even:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "is_even \<circ>\<^sub>c (n \<cdot>\<^sub>\<nat> (successor \<circ>\<^sub>c n)) = \<t>"
by (metis add_odds_is_even2 assms even_or_odd mult_evens_is_even2 mult_odds_is_odd2 mult_respects_succ_right mult_type succ_n_type)
lemma powers_of_two_are_even:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
assumes "n \<noteq> zero"
shows "is_even \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero,n\<rangle>) = \<t>"
proof -
obtain j where j_type[type_rule]: "j \<in>\<^sub>c \<nat>\<^sub>c" and j_def: "n = successor \<circ>\<^sub>c j"
using assms(1) assms(2) nonzero_is_succ by blast
have "exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero,n\<rangle> = exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero, (successor \<circ>\<^sub>c zero) +\<^sub>\<nat> j\<rangle>"
by (typecheck_cfuncs, metis add_commutes add_respects_succ3 add_respects_zero_on_right j_def)
also have "... = ((successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero) ^\<^sub>\<nat> (successor \<circ>\<^sub>c zero)) \<cdot>\<^sub>\<nat> ((successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero) ^\<^sub>\<nat> j)"
using exp_def exp_right_dist by (typecheck_cfuncs, force)
also have "... = (successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero) \<cdot>\<^sub>\<nat> ((successor \<circ>\<^sub>csuccessor \<circ>\<^sub>c zero) ^\<^sub>\<nat> j)"
by (typecheck_cfuncs, simp add: exp_respects_one_right)
then show ?thesis
by (metis calculation exp_closure j_type mult_evens_is_even2 prod_of_consecutive_nats_is_even s0_is_left_id succ_n_type zero_type)
qed
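(* Three is odd: 1 \<cdot>\<^sub>\<nat> 2 is even by the previous lemma, and the successor of an
   even number is odd. *)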
lemma three_is_odd:
"is_odd \<circ>\<^sub>c (successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) = \<t>"
by (typecheck_cfuncs, metis (full_types) comp_associative2 is_even_not_is_odd is_odd_def2 prod_of_consecutive_nats_is_even s0_is_left_id)
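(* Powers of three are odd. The main step uses the universal property of \<nat>\<^sub>c:
   both candidate maps into \<Omega> agree at zero and satisfy g \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c g,
   hence are equal by natural_number_object_func_unique. *)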
lemma powers_of_three_are_odd:
assumes "n \<in>\<^sub>c \<nat>\<^sub>c"
shows "is_odd \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero,n\<rangle>) = \<t>"
proof -
have main_result: "is_odd \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id \<nat>\<^sub>c\<rangle>) = \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
proof(rule natural_number_object_func_unique[where X = "\<Omega>", where f = "id \<Omega>" ])
show func_type[type_rule]: "is_odd \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id \<nat>\<^sub>c\<rangle>) : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "id\<^sub>c \<Omega> : \<Omega> \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero = (\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c zero"
proof -
have "(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c zero =
is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c zero"
using comp_associative2 by (typecheck_cfuncs, force)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c zero ,id\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c zero\<rangle> "
using cfunc_prod_comp comp_associative2 by (typecheck_cfuncs, force)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero, zero\<rangle>"
by (typecheck_cfuncs, metis beta_N_succ_mEqs_Id1 id_left_unit2 id_right_unit2 terminal_func_comp)
also have "... = is_odd \<circ>\<^sub>c (successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero)"
by (typecheck_cfuncs, metis even_or_odd exp_def exp_respects_Zero_Left mult_evens_is_even2 not_even_and_odd s0_is_left_id three_is_odd)
also have "... = \<t>"
by (simp add: three_is_odd)
then show ?thesis
by (metis calculation cfunc_type_def comp_associative is_even_def2 is_even_nth_even_true nth_even_def2 zero_type)
qed
show "(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor =
id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>"
proof(rule one_separator[where X = "\<nat>\<^sub>c", where Y = "\<Omega>"])
show "(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> : \<nat>\<^sub>c \<rightarrow> \<Omega>"
by typecheck_cfuncs
show "\<And>x. x \<in>\<^sub>c \<nat>\<^sub>c \<Longrightarrow>
((is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor) \<circ>\<^sub>c x =
(id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c x"
proof -
fix m
assume m_type[type_rule]: "m \<in>\<^sub>c \<nat>\<^sub>c"
have " (id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c m =
(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c m"
using id_left_unit2 by (typecheck_cfuncs, presburger)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c m"
by (typecheck_cfuncs, metis cfunc_type_def comp_associative)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub> \<circ>\<^sub>c m ,id\<^sub>c \<nat>\<^sub>c \<circ>\<^sub>c m\<rangle>"
using cfunc_prod_comp comp_associative2 by (typecheck_cfuncs, auto)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) ,m\<rangle>"
by (typecheck_cfuncs, metis id_left_unit2 id_right_unit2 id_type one_unique_element)
also have "... = ((is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor) \<circ>\<^sub>c m"
proof(cases "is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) ,m\<rangle> = \<t>")
assume real_case: "is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) ,m\<rangle> = \<t>" (*The only real case*)
have "((is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor) \<circ>\<^sub>c m =
(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c (successor \<circ>\<^sub>c m)"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c (m +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, metis add_commutes add_respects_succ3 add_respects_zero_on_left)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c (m +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, metis cfunc_type_def comp_associative)
also have "... = is_odd \<circ>\<^sub>c ((exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c m) \<cdot>\<^sub>\<nat>
(exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c (successor \<circ>\<^sub>c zero)))"
by (typecheck_cfuncs, metis exp_apply1 exp_right_dist)
also have "... = \<t>"
by (typecheck_cfuncs, metis real_case exp_apply1 exp_def exp_respects_one_right mult_odds_is_odd2 three_is_odd)
then show ?thesis
using calculation real_case by presburger
next
assume "is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero,m\<rangle> \<noteq> \<t>"
then have fake_case: "is_even \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero,m\<rangle> = \<t>" (* the vacuous case: the argument is identical, only the final line differs *)
by (metis even_or_odd exp_closure exp_def m_type succ_n_type zero_type)
have "((is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor) \<circ>\<^sub>c m =
(is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c (successor \<circ>\<^sub>c m)"
by (typecheck_cfuncs, simp add: comp_associative2)
also have "... = (is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c (m +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, metis add_commutes add_respects_succ3 add_respects_zero_on_left)
also have "... = is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c (m +\<^sub>\<nat> (successor \<circ>\<^sub>c zero))"
by (typecheck_cfuncs, metis cfunc_type_def comp_associative)
also have "... = is_odd \<circ>\<^sub>c ((exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c m) \<cdot>\<^sub>\<nat>
(exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle> \<circ>\<^sub>c (successor \<circ>\<^sub>c zero)))"
by (typecheck_cfuncs, metis exp_apply1 exp_right_dist)
also have "... = \<f>"
by (typecheck_cfuncs, metis cart_prod_extract_right fake_case mult_evens_is_even2 not_even_and_odd true_false_only_truth_values)
then show ?thesis
by (metis NOT_true_is_false NOT_type calculation cfunc_type_def comp_associative exp_closure exp_def fake_case is_even_type is_odd_not_is_even m_type succ_n_type zero_type)
qed
then show "((is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c successor) \<circ>\<^sub>c m =
(id\<^sub>c \<Omega> \<circ>\<^sub>c is_odd \<circ>\<^sub>c exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id\<^sub>c \<nat>\<^sub>c\<rangle>) \<circ>\<^sub>c m"
by (simp add: calculation)
qed
qed
show "(\<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>) \<circ>\<^sub>c successor = id\<^sub>c \<Omega> \<circ>\<^sub>c \<t> \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>"
by (typecheck_cfuncs, smt (z3) comp_associative2 id_left_unit2 terminal_func_comp)
qed
have "is_odd \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero,n\<rangle>) =
(is_odd \<circ>\<^sub>c (exp_uncurried \<circ>\<^sub>c \<langle>(successor \<circ>\<^sub>c successor \<circ>\<^sub>c successor \<circ>\<^sub>c zero) \<circ>\<^sub>c \<beta>\<^bsub>\<nat>\<^sub>c\<^esub>,id \<nat>\<^sub>c\<rangle>)) \<circ>\<^sub>c n"
using assms cfunc_type_def comp_associative exp_apply1 exp_def by (typecheck_cfuncs, fastforce)
then show ?thesis
by (typecheck_cfuncs, smt (z3) main_result assms beta_N_succ_mEqs_Id1 comp_associative2 id_right_unit2 terminal_func_comp terminal_func_type)
qed
end
-----------------------------------------------------------------------------
-- |
-- Module : Numeric.IO
-- Copyright : (c) Alberto Ruiz 2010
-- License : GPL
--
-- Maintainer : Alberto Ruiz <[email protected]>
-- Stability : provisional
-- Portability : portable
--
-- Display, formatting and IO functions for numeric 'Vector' and 'Matrix'
--
-----------------------------------------------------------------------------
module Numeric.IO (
dispf, disps, dispcf, vecdisp, latexFormat, format,
loadMatrix, saveMatrix, fromFile, fileDimensions,
readMatrix, fromArray2D,
fscanfVector, fprintfVector, freadVector, fwriteVector
) where
import Data.Packed
import Data.Packed.Internal
import System.Process(readProcess)
import Text.Printf(printf)
import Data.List(intersperse)
import Data.Complex
{- | Creates a string from a matrix given a separator and a function to show each entry. Using
this function the user can easily define any desired display function:
@import Text.Printf(printf)@
@disp = putStr . format \" \" (printf \"%.2f\")@
-}
format :: (Element t) => String -> (t -> String) -> Matrix t -> String
format sep f m = table sep . map (map f) . toLists $ m
{- | Show a matrix with \"autoscaling\" and a given number of decimal places.
@disp = putStr . disps 2
\> disp $ 120 * (3><4) [1..]
3x4 E3
0.12 0.24 0.36 0.48
0.60 0.72 0.84 0.96
1.08 1.20 1.32 1.44
@
-}
disps :: Int -> Matrix Double -> String
disps d x = sdims x ++ " " ++ formatScaled d x
{- | Show a matrix with a given number of decimal places.
@disp = putStr . dispf 3
\> disp (1/3 + ident 4)
4x4
1.333 0.333 0.333 0.333
0.333 1.333 0.333 0.333
0.333 0.333 1.333 0.333
0.333 0.333 0.333 1.333
@
-}
dispf :: Int -> Matrix Double -> String
dispf d x = sdims x ++ "\n" ++ formatFixed (if isInt x then 0 else d) x
sdims x = show (rows x) ++ "x" ++ show (cols x)
formatFixed d x = format " " (printf ("%."++show d++"f")) $ x
isInt = all lookslikeInt . toList . flatten
formatScaled dec t = "E"++show o++"\n" ++ ss
where ss = format " " (printf fmt. g) t
g x | o >= 0 = x/10^(o::Int)
| otherwise = x*10^(-o)
o = floor $ maximum $ map (logBase 10 . abs) $ toList $ flatten t
fmt = '%':show (dec+3) ++ '.':show dec ++"f"
{- | Show a vector using a function for showing matrices.
@disp = putStr . vecdisp ('dispf' 2)
\> disp ('linspace' 10 (0,1))
10 |> 0.00 0.11 0.22 0.33 0.44 0.56 0.67 0.78 0.89 1.00
@
-}
vecdisp :: (Element t) => (Matrix t -> String) -> Vector t -> String
vecdisp f v
= ((show (dim v) ++ " |> ") ++) . (++"\n")
. unwords . lines . tail . dropWhile (not . (`elem` " \n"))
. f . trans . reshape 1
$ v
-- | Tool to display matrices with latex syntax.
latexFormat :: String -- ^ type of braces: \"matrix\", \"bmatrix\", \"pmatrix\", etc.
-> String -- ^ Formatted matrix, with elements separated by spaces and newlines
-> String
latexFormat del tab = "\\begin{"++del++"}\n" ++ f tab ++ "\\end{"++del++"}"
where f = unlines . intersperse "\\\\" . map unwords . map (intersperse " & " . words) . tail . lines
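-- A usage sketch (assumes a matrix @m :: Matrix Double@ in scope): render it as a
-- LaTeX bmatrix with two decimal places. 'latexFormat' drops the dimension header
-- line that 'dispf' prepends, so the two compose directly:
--
-- > putStrLn . latexFormat "bmatrix" . dispf 2 $ m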
-- | Pretty print a complex number with at most n decimal digits.
showComplex :: Int -> Complex Double -> String
showComplex d (a:+b)
| isZero a && isZero b = "0"
| isZero b = sa
| isZero a && isOne b = s2++"i"
| isZero a = sb++"i"
| isOne b = sa++s3++"i"
| otherwise = sa++s1++sb++"i"
where
sa = shcr d a
sb = shcr d b
s1 = if b<0 then "" else "+"
s2 = if b<0 then "-" else ""
s3 = if b<0 then "-" else "+"
shcr d a | lookslikeInt a = printf "%.0f" a
| otherwise = printf ("%."++show d++"f") a
lookslikeInt x = show (round x :: Int) ++".0" == shx || "-0.0" == shx
where shx = show x
isZero x = show x `elem` ["0.0","-0.0"]
isOne x = show x `elem` ["1.0","-1.0"]
-- | Pretty print a complex matrix with at most n decimal digits.
dispcf :: Int -> Matrix (Complex Double) -> String
dispcf d m = sdims m ++ "\n" ++ format " " (showComplex d) m
--------------------------------------------------------------------
-- | Reads a matrix from a string containing a table of numbers.
readMatrix :: String -> Matrix Double
readMatrix = fromLists . map (map read) . map words . filter (not . null) . lines
{- | Obtains the number of rows and columns in an ASCII data file
(provisionally implemented using Unix's wc).
-}
fileDimensions :: FilePath -> IO (Int,Int)
fileDimensions fname = do
wcres <- readProcess "wc" ["-w",fname] ""
contents <- readFile fname
let tot = read . head . words $ wcres
c = length . head . dropWhile null . map words . lines $ contents
if tot > 0
then return (tot `div` c, c)
else return (0,0)
-- | Loads a matrix from an ASCII file formatted as a 2D table.
loadMatrix :: FilePath -> IO (Matrix Double)
loadMatrix file = fromFile file =<< fileDimensions file
-- | Loads a matrix from an ASCII file (the number of rows and columns must be known in advance).
fromFile :: FilePath -> (Int,Int) -> IO (Matrix Double)
fromFile filename (r,c) = reshape c `fmap` fscanfVector filename (r*c)
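-- A usage sketch: load a whitespace-separated numeric table and pretty-print it
-- with three decimals. The file name "data.txt" is a placeholder; its rows must
-- all contain the same number of entries:
--
-- > main = loadMatrix "data.txt" >>= putStr . dispf 3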
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
! This file was ported from Lean 3 source module logic.basic
! leanprover-community/mathlib commit d2d8742b0c21426362a9dacebc6005db895ca963
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Init.Logic
import Mathlib.Init.Function
import Mathlib.Init.Algebra.Classes
import Mathlib.Tactic.Basic
import Mathlib.Tactic.LeftRight
import Std.Util.LibraryNote
import Std.Tactic.Lint.Basic
/-!
# Basic logic properties
This file is one of the earliest imports in mathlib.
## Implementation notes
Theorems that require decidability hypotheses are in the namespace `Decidable`.
Classical versions are in the namespace `Classical`.
-/
open Function
attribute [local instance 10] Classical.propDecidable
section Miscellany
-- Porting note: the following `inline` attributes have been omitted,
-- on the assumption that this issue has been dealt with properly in Lean 4.
-- /- We add the `inline` attribute to optimize VM computation using these declarations.
-- For example, `if p ∧ q then ... else ...` will not evaluate the decidability
-- of `q` if `p` is false. -/
-- attribute [inline]
-- And.decidable Or.decidable Decidable.false Xor.decidable Iff.decidable Decidable.true
-- Implies.decidable Not.decidable Ne.decidable Bool.decidableEq Decidable.toBool
attribute [simp] cast_eq cast_heq
/-- An identity function with its main argument implicit. This will be printed as `hidden` even
if it is applied to a large term, so it can be used for elision,
as done in the `elide` and `unelide` tactics. -/
@[reducible] def hidden {α : Sort _} {a : α} := a
#align hidden hidden
instance (priority := 10) decidableEq_of_subsingleton [Subsingleton α] : DecidableEq α :=
fun a b ↦ isTrue (Subsingleton.elim a b)
#align decidable_eq_of_subsingleton decidableEq_of_subsingleton
instance (α : Sort _) [Subsingleton α] (p : α → Prop) : Subsingleton (Subtype p) :=
⟨fun ⟨x, _⟩ ⟨y, _⟩ ↦ by cases Subsingleton.elim x y; rfl⟩
#align pempty PEmpty
theorem congr_heq {α β γ : Sort _} {f : α → γ} {g : β → γ} {x : α} {y : β}
(h₁ : HEq f g) (h₂ : HEq x y) : f x = g y := by
cases h₂; cases h₁; rfl
#align congr_heq congr_heq
theorem congr_arg_heq {α} {β : α → Sort _} (f : ∀ a, β a) :
∀ {a₁ a₂ : α}, a₁ = a₂ → HEq (f a₁) (f a₂)
| _, _, rfl => HEq.rfl
#align congr_arg_heq congr_arg_heq
theorem ULift.down_injective {α : Sort _} : Function.Injective (@ULift.down α)
| ⟨a⟩, ⟨b⟩, _ => by congr
#align ulift.down_injective ULift.down_injective
@[simp] theorem ULift.down_inj {α : Sort _} {a b : ULift α} : a.down = b.down ↔ a = b :=
⟨fun h ↦ ULift.down_injective h, fun h ↦ by rw [h]⟩
#align ulift.down_inj ULift.down_inj
theorem PLift.down_injective {α : Sort _} : Function.Injective (@PLift.down α)
| ⟨a⟩, ⟨b⟩, _ => by congr
#align plift.down_injective PLift.down_injective
@[simp] theorem PLift.down_inj {α : Sort _} {a b : PLift α} : a.down = b.down ↔ a = b :=
⟨fun h ↦ PLift.down_injective h, fun h ↦ by rw [h]⟩
#align plift.down_inj PLift.down_inj
@[simp] theorem eq_iff_eq_cancel_left {b c : α} : (∀ {a}, a = b ↔ a = c) ↔ b = c :=
⟨fun h ↦ by rw [← h], fun h a ↦ by rw [h]⟩
#align eq_iff_eq_cancel_left eq_iff_eq_cancel_left
@[simp] theorem eq_iff_eq_cancel_right {a b : α} : (∀ {c}, a = c ↔ b = c) ↔ a = b :=
⟨fun h ↦ by rw [h], fun h a ↦ by rw [h]⟩
#align eq_iff_eq_cancel_right eq_iff_eq_cancel_right
lemma ne_and_eq_iff_right {α : Sort _} {a b c : α} (h : b ≠ c) : a ≠ b ∧ a = c ↔ a = c :=
and_iff_right_of_imp (fun h2 => h2.symm ▸ h.symm)
#align ne_and_eq_iff_right ne_and_eq_iff_right
/-- Wrapper for adding elementary propositions to the type class systems.
Warning: this can easily be abused. See the rest of this docstring for details.
Certain propositions should not be treated as a class globally,
but sometimes it is very convenient to be able to use the type class system
in specific circumstances.
For example, `ZMod p` is a field if and only if `p` is a prime number.
In order to be able to find this field instance automatically by type class search,
we have to turn `p.prime` into an instance implicit assumption.
On the other hand, making `Nat.prime` a class would require a major refactoring of the library,
and it is questionable whether making `Nat.prime` a class is desirable at all.
The compromise is to add the assumption `[Fact p.prime]` to `ZMod.field`.
In particular, this class is not intended for turning the type class system
into an automated theorem prover for first order logic. -/
class Fact (p : Prop) : Prop where
/-- `Fact.out` contains the unwrapped witness for the fact represented by the instance of
`Fact p`. -/
out : p
#align fact Fact
library_note "fact non-instances"/--
In most cases, we should not have global instances of `Fact`; typeclass search only reads the head
symbol and then tries any instances, which means that adding any such instance will cause slowdowns
everywhere. We instead state them as lemmas and make them local instances as required.
-/
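-- As an illustration of the note above (hypothetical declarations, not part of this
-- file): instead of a global `instance : Fact (Nat.Prime 3)`, one states
-- `theorem fact_prime_three : Fact (Nat.Prime 3) := ⟨by norm_num⟩` and enables it
-- locally with `attribute [local instance] fact_prime_three` where it is needed.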
theorem Fact.elim {p : Prop} (h : Fact p) : p := h.1
theorem fact_iff {p : Prop} : Fact p ↔ p := ⟨fun h ↦ h.1, fun h ↦ ⟨h⟩⟩
#align fact_iff fact_iff
#align fact.elim Fact.elim
/-- Swaps two pairs of arguments to a function. -/
@[reducible] def Function.swap₂ {κ₁ : ι₁ → Sort _} {κ₂ : ι₂ → Sort _}
{φ : ∀ i₁, κ₁ i₁ → ∀ i₂, κ₂ i₂ → Sort _} (f : ∀ i₁ j₁ i₂ j₂, φ i₁ j₁ i₂ j₂)
(i₂ j₂ i₁ j₁) : φ i₁ j₁ i₂ j₂ := f i₁ j₁ i₂ j₂
#align function.swap₂ Function.swap₂
-- Porting note: these don't work as intended any more
-- /-- If `x : α . tac_name` then `x.out : α`. These are definitionally equal, but this can
-- nevertheless be useful for various reasons, e.g. to apply further projection notation or in an
-- argument to `simp`. -/
-- def autoParam'.out {α : Sort _} {n : Name} (x : autoParam' α n) : α := x
-- /-- If `x : α := d` then `x.out : α`. These are definitionally equal, but this can
-- nevertheless be useful for various reasons, e.g. to apply further projection notation or in an
-- argument to `simp`. -/
-- def optParam.out {α : Sort _} {d : α} (x : α := d) : α := x
end Miscellany
open Function
/-!
### Declarations about propositional connectives
-/
section Propositional
/-! ### Declarations about `implies` -/
instance : IsRefl Prop Iff := ⟨Iff.refl⟩
instance : IsTrans Prop Iff := ⟨fun _ _ _ ↦ Iff.trans⟩
alias imp_congr ← Iff.imp
#align iff.imp Iff.imp
@[simp] theorem eq_true_eq_id : Eq True = id := by
funext _; simp only [true_iff, id.def, eq_iff_iff]
#align eq_true_eq_id eq_true_eq_id
#align imp_and_distrib imp_and
#align imp_iff_right imp_iff_rightₓ -- reorder implicits
#align imp_iff_not imp_iff_notₓ -- reorder implicits
@[simp] theorem imp_iff_right_iff : (a → b ↔ b) ↔ a ∨ b := Decidable.imp_iff_right_iff
#align imp_iff_right_iff imp_iff_right_iff
@[simp] theorem and_or_imp : a ∧ b ∨ (a → c) ↔ a → b ∨ c := Decidable.and_or_imp
#align and_or_imp and_or_imp
/-- Provide modus tollens (`mt`) as dot notation for implications. -/
protected theorem Function.mt : (a → b) → ¬b → ¬a := mt
#align function.mt Function.mt
/-! ### Declarations about `not` -/
alias Decidable.em ← dec_em
#align dec_em dec_em
theorem dec_em' (p : Prop) [Decidable p] : ¬p ∨ p := (dec_em p).symm
#align dec_em' dec_em'
alias Classical.em ← em
#align em em
theorem em' (p : Prop) : ¬p ∨ p := (em p).symm
#align em' em'
theorem or_not {p : Prop} : p ∨ ¬p := em _
#align or_not or_not
theorem Decidable.eq_or_ne (x y : α) [Decidable (x = y)] : x = y ∨ x ≠ y := dec_em <| x = y
#align decidable.eq_or_ne Decidable.eq_or_ne
theorem Decidable.ne_or_eq (x y : α) [Decidable (x = y)] : x ≠ y ∨ x = y := dec_em' <| x = y
#align decidable.ne_or_eq Decidable.ne_or_eq
theorem eq_or_ne (x y : α) : x = y ∨ x ≠ y := em <| x = y
#align eq_or_ne eq_or_ne
theorem ne_or_eq (x y : α) : x ≠ y ∨ x = y := em' <| x = y
#align ne_or_eq ne_or_eq
theorem by_contradiction : (¬p → False) → p := Decidable.by_contradiction
#align classical.by_contradiction by_contradiction
#align by_contradiction by_contradiction
theorem by_cases {q : Prop} (hpq : p → q) (hnpq : ¬p → q) : q :=
if hp : p then hpq hp else hnpq hp
#align classical.by_cases by_cases
alias by_contradiction ← by_contra
#align by_contra by_contra
library_note "decidable namespace"/--
In most of mathlib, we use the law of excluded middle (LEM) and the axiom of choice (AC) freely.
The `Decidable` namespace contains versions of lemmas from the root namespace that explicitly
attempt to avoid the axiom of choice, usually by adding decidability assumptions on the inputs.
You can check if a lemma uses the axiom of choice by using `#print axioms foo` and seeing if
`Classical.choice` appears in the list.
-/
library_note "decidable arguments"/--
As mathlib is primarily classical,
if the type signature of a `def` or `lemma` does not require any `Decidable` instances to state,
it is preferable not to introduce any `Decidable` instances that are needed in the proof
as arguments, but rather to use the `classical` tactic as needed.
In the other direction, when `Decidable` instances do appear in the type signature,
it is better to use explicitly introduced ones rather than allowing Lean to automatically infer
classical ones, as these may cause instance mismatch errors later.
-/
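-- For example (hypothetical): a lemma whose statement mentions `if p then a else b`
-- must take `[Decidable p]` as an argument and should use that instance in its proof,
-- whereas a lemma with an instance-free statement can simply begin its proof with the
-- `classical` tactic.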
export Classical (not_not)
attribute [simp] not_not
#align not_not Classical.not_not
theorem of_not_not : ¬¬a → a := by_contra
#align of_not_not of_not_not
theorem not_ne_iff : ¬a ≠ b ↔ a = b := not_not
#align not_ne_iff not_ne_iff
theorem of_not_imp {a b : Prop} : ¬(a → b) → a := Decidable.of_not_imp
#align of_not_imp of_not_imp
alias Decidable.not_imp_symm ← Not.decidable_imp_symm
#align not.decidable_imp_symm Not.decidable_imp_symm
theorem Not.imp_symm : (¬a → b) → ¬b → a := Not.decidable_imp_symm
#align not.imp_symm Not.imp_symm
theorem not_imp_comm : ¬a → b ↔ ¬b → a := Decidable.not_imp_comm
#align not_imp_comm not_imp_comm
@[simp] theorem not_imp_self : ¬a → a ↔ a := Decidable.not_imp_self
#align not_imp_self not_imp_self
theorem Imp.swap : a → b → c ↔ b → a → c := ⟨Function.swap, Function.swap⟩
#align imp.swap Imp.swap
alias not_congr ← Iff.not
theorem Iff.not_left (h : a ↔ ¬b) : ¬a ↔ b := h.not.trans not_not
theorem Iff.not_right (h : ¬a ↔ b) : a ↔ ¬b := not_not.symm.trans h.not
#align iff.not_right Iff.not_right
#align iff.not_left Iff.not_left
#align iff.not Iff.not
/-! ### Declarations about `xor` -/
@[simp] theorem xor_true : Xor' True = Not := by simp [Xor']
#align xor_true xor_true
@[simp] theorem xor_false : Xor' False = id := by ext; simp [Xor']
#align xor_false xor_false
theorem xor_comm (a b) : Xor' a b = Xor' b a := by simp [Xor', and_comm, or_comm]
#align xor_comm xor_comm
instance : IsCommutative Prop Xor' := ⟨xor_comm⟩
@[simp] theorem xor_self (a : Prop) : Xor' a a = False := by simp [Xor']
@[simp] theorem xor_not_left : Xor' (¬a) b ↔ (a ↔ b) := by by_cases a <;> simp [*]
@[simp] theorem xor_not_right : Xor' a (¬b) ↔ (a ↔ b) := by by_cases a <;> simp [*]
theorem xor_not_not : Xor' (¬a) (¬b) ↔ Xor' a b := by simp [Xor', or_comm, and_comm]
protected theorem Xor'.or (h : Xor' a b) : a ∨ b := h.imp And.left And.left
#align xor.or Xor'.or
#align xor_not_not xor_not_not
#align xor_not_right xor_not_right
#align xor_not_left xor_not_left
#align xor_self xor_self
/-! ### Declarations about `and` -/
alias and_congr ← Iff.and
#align and_congr_left and_congr_leftₓ -- reorder implicits
#align and_congr_right' and_congr_right'ₓ -- reorder implicits
#align and.right_comm and_right_comm
#align and_and_distrib_left and_and_left
#align and_and_distrib_right and_and_right
alias and_rotate ↔ And.rotate _
#align and.congr_right_iff and_congr_right_iff
#align and.congr_left_iff and_congr_left_iffₓ -- reorder implicits
#align and.rotate And.rotate
#align iff.and Iff.and
theorem and_symm_right (a b : α) (p : Prop) : p ∧ a = b ↔ p ∧ b = a := by simp [eq_comm]
theorem and_symm_left (a b : α) (p : Prop) : a = b ∧ p ↔ b = a ∧ p := by simp [eq_comm]
/-! ### Declarations about `or` -/
alias or_congr ← Iff.or
#align or_congr_left' or_congr_left
#align or_congr_right' or_congr_rightₓ -- reorder implicits
#align or.right_comm or_right_comm
alias or_rotate ↔ Or.rotate _
#align or.rotate Or.rotate
#align iff.or Iff.or
@[deprecated Or.imp]
theorem or_of_or_of_imp_of_imp (h₁ : a ∨ b) (h₂ : a → c) (h₃ : b → d) : c ∨ d := Or.imp h₂ h₃ h₁
#align or_of_or_of_imp_of_imp or_of_or_of_imp_of_imp
@[deprecated Or.imp_left]
theorem or_of_or_of_imp_left (h₁ : a ∨ c) (h : a → b) : b ∨ c := Or.imp_left h h₁
#align or_of_or_of_imp_left or_of_or_of_imp_left
@[deprecated Or.imp_right]
theorem or_of_or_of_imp_right (h₁ : c ∨ a) (h : a → b) : c ∨ b := Or.imp_right h h₁
#align or_of_or_of_imp_right or_of_or_of_imp_right
theorem Or.elim3 {d : Prop} (h : a ∨ b ∨ c) (ha : a → d) (hb : b → d) (hc : c → d) : d :=
Or.elim h ha fun h₂ ↦ Or.elim h₂ hb hc
#align or.elim3 Or.elim3
theorem Or.imp3 (had : a → d) (hbe : b → e) (hcf : c → f) : a ∨ b ∨ c → d ∨ e ∨ f :=
Or.imp had <| Or.imp hbe hcf
#align or.imp3 Or.imp3
#align or_imp_distrib or_imp
theorem or_iff_not_imp_left : a ∨ b ↔ ¬a → b := Decidable.or_iff_not_imp_left
#align or_iff_not_imp_left or_iff_not_imp_left
theorem or_iff_not_imp_right : a ∨ b ↔ ¬b → a := Decidable.or_iff_not_imp_right
#align or_iff_not_imp_right or_iff_not_imp_right
theorem not_or_of_imp : (a → b) → ¬a ∨ b := Decidable.not_or_of_imp
#align not_or_of_imp not_or_of_imp
-- See Note [decidable namespace]
protected theorem Decidable.or_not_of_imp [Decidable a] (h : a → b) : b ∨ ¬a :=
dite _ (Or.inl ∘ h) Or.inr
#align decidable.or_not_of_imp Decidable.or_not_of_imp
theorem or_not_of_imp : (a → b) → b ∨ ¬a := Decidable.or_not_of_imp
#align or_not_of_imp or_not_of_imp
theorem imp_iff_not_or : a → b ↔ ¬a ∨ b := Decidable.imp_iff_not_or
#align imp_iff_not_or imp_iff_not_or
theorem imp_iff_or_not : b → a ↔ a ∨ ¬b := Decidable.imp_iff_or_not
#align imp_iff_or_not imp_iff_or_not
theorem not_imp_not : ¬a → ¬b ↔ b → a := Decidable.not_imp_not
#align not_imp_not not_imp_not
/-- Provide the reverse of modus tollens (`mt`) as dot notation for implications. -/
protected theorem Function.mtr : (¬a → ¬b) → b → a := not_imp_not.mp
#align function.mtr Function.mtr
#align decidable.or_congr_left Decidable.or_congr_left'
#align decidable.or_congr_right Decidable.or_congr_right'
#align decidable.or_iff_not_imp_right Decidable.or_iff_not_imp_rightₓ -- reorder implicits
#align decidable.imp_iff_or_not Decidable.imp_iff_or_notₓ -- reorder implicits
theorem or_congr_left' (h : ¬c → (a ↔ b)) : a ∨ c ↔ b ∨ c := Decidable.or_congr_left' h
#align or_congr_left or_congr_left'
theorem or_congr_right' (h : ¬a → (b ↔ c)) : a ∨ b ↔ a ∨ c := Decidable.or_congr_right' h
#align or_congr_right or_congr_right'ₓ -- reorder implicits
#align or_iff_left or_iff_leftₓ -- reorder implicits
/-! ### Declarations about distributivity -/
#align and_or_distrib_left and_or_left
#align or_and_distrib_right or_and_right
#align or_and_distrib_left or_and_left
#align and_or_distrib_right and_or_right
/-! ### Declarations about `iff` -/
alias iff_congr ← Iff.iff
#align iff.iff Iff.iff
-- @[simp] -- FIXME simp ignores proof rewrites
theorem iff_mpr_iff_true_intro (h : P) : Iff.mpr (iff_true_intro h) True.intro = h := rfl
#align iff_mpr_iff_true_intro iff_mpr_iff_true_intro
#align decidable.imp_or_distrib Decidable.imp_or
theorem imp_or {a b c : Prop} : a → b ∨ c ↔ (a → b) ∨ (a → c) := Decidable.imp_or
#align imp_or_distrib imp_or
#align decidable.imp_or_distrib' Decidable.imp_or'
theorem imp_or' : a → b ∨ c ↔ (a → b) ∨ (a → c) := Decidable.imp_or'
#align imp_or_distrib' imp_or'ₓ -- universes
theorem not_imp : ¬(a → b) ↔ a ∧ ¬b := Decidable.not_imp
#align not_imp not_imp
theorem peirce (a b : Prop) : ((a → b) → a) → a := Decidable.peirce _ _
#align peirce peirce
theorem not_iff_not : (¬a ↔ ¬b) ↔ (a ↔ b) := Decidable.not_iff_not
#align not_iff_not not_iff_not
theorem not_iff_comm : (¬a ↔ b) ↔ (¬b ↔ a) := Decidable.not_iff_comm
#align not_iff_comm not_iff_comm
theorem not_iff : ¬(a ↔ b) ↔ (¬a ↔ b) := Decidable.not_iff
#align not_iff not_iff
theorem iff_not_comm : (a ↔ ¬b) ↔ (b ↔ ¬a) := Decidable.iff_not_comm
#align iff_not_comm iff_not_comm
theorem iff_iff_and_or_not_and_not : (a ↔ b) ↔ a ∧ b ∨ ¬a ∧ ¬b :=
Decidable.iff_iff_and_or_not_and_not
#align iff_iff_and_or_not_and_not iff_iff_and_or_not_and_not
theorem iff_iff_not_or_and_or_not : (a ↔ b) ↔ (¬a ∨ b) ∧ (a ∨ ¬b) :=
Decidable.iff_iff_not_or_and_or_not
#align iff_iff_not_or_and_or_not iff_iff_not_or_and_or_not
theorem not_and_not_right : ¬(a ∧ ¬b) ↔ a → b := Decidable.not_and_not_right
#align not_and_not_right not_and_not_right
#align decidable_of_iff decidable_of_iff
#align decidable_of_iff' decidable_of_iff'
#align decidable_of_bool decidable_of_bool
/-! ### De Morgan's laws -/
#align decidable.not_and_distrib Decidable.not_and
#align decidable.not_and_distrib' Decidable.not_and'
/-- One of de Morgan's laws: the negation of a conjunction is logically equivalent to the
disjunction of the negations. -/
theorem not_and_or : ¬(a ∧ b) ↔ ¬a ∨ ¬b := Decidable.not_and
#align not_and_distrib not_and_or
#align not_or_distrib not_or
theorem or_iff_not_and_not : a ∨ b ↔ ¬(¬a ∧ ¬b) := Decidable.or_iff_not_and_not
#align or_iff_not_and_not or_iff_not_and_not
theorem and_iff_not_or_not : a ∧ b ↔ ¬(¬a ∨ ¬b) := Decidable.and_iff_not_or_not
#align and_iff_not_or_not and_iff_not_or_not
@[simp] theorem not_xor (P Q : Prop) : ¬Xor' P Q ↔ (P ↔ Q) := by
simp only [not_and, Xor', not_or, not_not, ← iff_iff_implies_and_implies]
#align not_xor not_xor
theorem xor_iff_not_iff (P Q : Prop) : Xor' P Q ↔ ¬ (P ↔ Q) := (not_xor P Q).not_right
theorem xor_iff_iff_not : Xor' a b ↔ (a ↔ ¬b) := by simp only [← @xor_not_right a, not_not]
theorem xor_iff_not_iff' : Xor' a b ↔ (¬a ↔ b) := by simp only [← @xor_not_left _ b, not_not]
#align xor_iff_not_iff' xor_iff_not_iff'
#align xor_iff_iff_not xor_iff_iff_not
#align xor_iff_not_iff xor_iff_not_iff
end Propositional
/-! ### Declarations about equality -/
alias ne_of_mem_of_not_mem ← Membership.mem.ne_of_not_mem
alias ne_of_mem_of_not_mem' ← Membership.mem.ne_of_not_mem'
#align has_mem.mem.ne_of_not_mem Membership.mem.ne_of_not_mem
#align has_mem.mem.ne_of_not_mem' Membership.mem.ne_of_not_mem'
section Equality
-- todo: change name
theorem ball_cond_comm {α} {s : α → Prop} {p : α → α → Prop} :
(∀ a, s a → ∀ b, s b → p a b) ↔ ∀ a b, s a → s b → p a b :=
⟨fun h a b ha hb ↦ h a ha b hb, fun h a ha b hb ↦ h a b ha hb⟩
#align ball_cond_comm ball_cond_comm
theorem ball_mem_comm {α β} [Membership α β] {s : β} {p : α → α → Prop} :
(∀ a (_ : a ∈ s) b (_ : b ∈ s), p a b) ↔ ∀ a b, a ∈ s → b ∈ s → p a b :=
ball_cond_comm
#align ball_mem_comm ball_mem_comm
theorem ne_of_apply_ne {α β : Sort _} (f : α → β) {x y : α} (h : f x ≠ f y) : x ≠ y :=
fun w : x = y ↦ h (congr_arg f w)
#align ne_of_apply_ne ne_of_apply_ne
theorem eq_equivalence : Equivalence (@Eq α) :=
⟨Eq.refl, @Eq.symm _, @Eq.trans _⟩
#align eq_equivalence eq_equivalence
@[simp] theorem eq_mp_eq_cast (h : α = β) : Eq.mp h = cast h := rfl
#align eq_mp_eq_cast eq_mp_eq_cast
@[simp] theorem eq_mpr_eq_cast (h : α = β) : Eq.mpr h = cast h.symm := rfl
#align eq_mpr_eq_cast eq_mpr_eq_cast
@[simp] theorem cast_cast : ∀ (ha : α = β) (hb : β = γ) (a : α),
cast hb (cast ha a) = cast (ha.trans hb) a
| rfl, rfl, _ => rfl
#align cast_cast cast_cast
-- @[simp] -- FIXME simp ignores proof rewrites
theorem congr_refl_left (f : α → β) {a b : α} (h : a = b) :
congr (Eq.refl f) h = congr_arg f h := rfl
#align congr_refl_left congr_refl_left
-- @[simp] -- FIXME simp ignores proof rewrites
theorem congr_refl_right {f g : α → β} (h : f = g) (a : α) :
congr h (Eq.refl a) = congr_fun h a := rfl
#align congr_refl_right congr_refl_right
-- @[simp] -- FIXME simp ignores proof rewrites
theorem congr_arg_refl (f : α → β) (a : α) : congr_arg f (Eq.refl a) = Eq.refl (f a) := rfl
#align congr_arg_refl congr_arg_refl
-- @[simp] -- FIXME simp ignores proof rewrites
theorem congr_fun_rfl (f : α → β) (a : α) : congr_fun (Eq.refl f) a = Eq.refl (f a) := rfl
#align congr_fun_rfl congr_fun_rfl
-- @[simp] -- FIXME simp ignores proof rewrites
theorem congr_fun_congr_arg (f : α → β → γ) {a a' : α} (p : a = a') (b : β) :
congr_fun (congr_arg f p) b = congr_arg (fun a ↦ f a b) p := rfl
#align congr_fun_congr_arg congr_fun_congr_arg
theorem heq_of_cast_eq : ∀ (e : α = β) (_ : cast e a = a'), HEq a a'
| rfl, h => Eq.recOn h (HEq.refl _)
#align heq_of_cast_eq heq_of_cast_eq
theorem cast_eq_iff_heq : cast e a = a' ↔ HEq a a' :=
⟨heq_of_cast_eq _, fun h ↦ by cases h; rfl⟩
#align cast_eq_iff_heq cast_eq_iff_heq
--Porting note: new theorem. More general version of `eqRec_heq`
theorem eqRec_heq' {α : Sort u_1} {a' : α} {motive : (a : α) → a' = a → Sort u}
(p : motive a' (rfl : a' = a')) {a : α} (t : a' = a) :
HEq (@Eq.rec α a' motive p a t) p :=
by subst t; rfl
theorem rec_heq_of_heq {C : α → Sort _} {x : C a} {y : β} (e : a = b) (h : HEq x y) :
HEq (e ▸ x) y := by subst e; exact h
#align rec_heq_of_heq rec_heq_of_heq
theorem rec_heq_iff_heq {C : α → Sort _} {x : C a} {y : β} {e : a = b} :
HEq (e ▸ x) y ↔ HEq x y := by subst e; rfl
#align rec_heq_iff_heq rec_heq_iff_heq
theorem heq_rec_iff_heq {C : α → Sort _} {x : β} {y : C a} {e : a = b} :
HEq x (e ▸ y) ↔ HEq x y := by subst e; rfl
#align heq_rec_iff_heq heq_rec_iff_heq
protected theorem Eq.congr (h₁ : x₁ = y₁) (h₂ : x₂ = y₂) : x₁ = x₂ ↔ y₁ = y₂ := by
subst h₁; subst h₂; rfl
#align eq.congr Eq.congr
theorem Eq.congr_left {x y z : α} (h : x = y) : x = z ↔ y = z := by rw [h]
#align eq.congr_left Eq.congr_left
theorem Eq.congr_right {x y z : α} (h : x = y) : z = x ↔ z = y := by rw [h]
#align eq.congr_right Eq.congr_right
alias congrArg₂ ← congr_arg₂
#align congr_arg2 congr_arg₂
variable {β : α → Sort _} {γ : ∀ a, β a → Sort _} {δ : ∀ a b, γ a b → Sort _}
theorem congr_fun₂ {f g : ∀ a b, γ a b} (h : f = g) (a : α) (b : β a) : f a b = g a b :=
congr_fun (congr_fun h _) _
#align congr_fun₂ congr_fun₂
theorem congr_fun₃ {f g : ∀ a b c, δ a b c} (h : f = g) (a : α) (b : β a) (c : γ a b) :
f a b c = g a b c :=
congr_fun₂ (congr_fun h _) _ _
#align congr_fun₃ congr_fun₃
theorem funext₂ {f g : ∀ a b, γ a b} (h : ∀ a b, f a b = g a b) : f = g :=
funext fun _ ↦ funext <| h _
#align funext₂ funext₂
theorem funext₃ {f g : ∀ a b c, δ a b c} (h : ∀ a b c, f a b c = g a b c) : f = g :=
funext fun _ ↦ funext₂ <| h _
#align funext₃ funext₃
end Equality
/-! ### Declarations about quantifiers -/
section Quantifiers
section Dependent
variable {β : α → Sort _} {γ : ∀ a, β a → Sort _} {δ : ∀ a b, γ a b → Sort _}
{ε : ∀ a b c, δ a b c → Sort _}
theorem pi_congr {β' : α → Sort _} (h : ∀ a, β a = β' a) : (∀ a, β a) = ∀ a, β' a :=
(funext h : β = β') ▸ rfl
#align pi_congr pi_congr
-- Porting note: some higher order lemmas such as `forall₂_congr` and `exists₂_congr`
-- were moved to `Std4`
theorem forall₂_imp {p q : ∀ a, β a → Prop} (h : ∀ a b, p a b → q a b) :
(∀ a b, p a b) → ∀ a b, q a b :=
forall_imp fun i ↦ forall_imp <| h i
#align forall₂_imp forall₂_imp
theorem forall₃_imp {p q : ∀ a b, γ a b → Prop} (h : ∀ a b c, p a b c → q a b c) :
(∀ a b c, p a b c) → ∀ a b c, q a b c :=
forall_imp fun a ↦ forall₂_imp <| h a
#align forall₃_imp forall₃_imp
theorem Exists₂.imp {p q : ∀ a, β a → Prop} (h : ∀ a b, p a b → q a b) :
(∃ a b, p a b) → ∃ a b, q a b :=
Exists.imp fun a ↦ Exists.imp <| h a
#align Exists₂.imp Exists₂.imp
theorem Exists₃.imp {p q : ∀ a b, γ a b → Prop} (h : ∀ a b c, p a b c → q a b c) :
(∃ a b c, p a b c) → ∃ a b c, q a b c :=
Exists.imp fun a ↦ Exists₂.imp <| h a
#align Exists₃.imp Exists₃.imp
end Dependent
variable {κ : ι → Sort _} {p q : α → Prop}
#align exists_imp_exists' Exists.imp'
theorem forall_swap {p : α → β → Prop} : (∀ x y, p x y) ↔ ∀ y x, p x y := ⟨swap, swap⟩
#align forall_swap forall_swap
theorem forall₂_swap {κ₁ : ι₁ → Sort _} {κ₂ : ι₂ → Sort _} {p : ∀ i₁, κ₁ i₁ → ∀ i₂, κ₂ i₂ → Prop} :
(∀ i₁ j₁ i₂ j₂, p i₁ j₁ i₂ j₂) ↔ ∀ i₂ j₂ i₁ j₁, p i₁ j₁ i₂ j₂ := ⟨swap₂, swap₂⟩
#align forall₂_swap forall₂_swap
/-- We intentionally restrict the type of `α` in this lemma so that this is safer to use
in `simp` than `forall_swap`. -/
theorem imp_forall_iff {α : Type _} {p : Prop} {q : α → Prop} : (p → ∀ x, q x) ↔ ∀ x, p → q x :=
forall_swap
#align imp_forall_iff imp_forall_iff
theorem exists_swap {p : α → β → Prop} : (∃ x y, p x y) ↔ ∃ y x, p x y :=
⟨fun ⟨x, y, h⟩ ↦ ⟨y, x, h⟩, fun ⟨y, x, h⟩ ↦ ⟨x, y, h⟩⟩
#align exists_swap exists_swap
#align forall_exists_index forall_exists_index
#align exists_imp_distrib exists_imp
alias exists_imp ↔ _ not_exists_of_forall_not
#align not_exists_of_forall_not not_exists_of_forall_not
#align Exists.some Exists.choose
#align Exists.some_spec Exists.choose_spec
-- See Note [decidable namespace]
protected theorem Decidable.not_forall {p : α → Prop} [Decidable (∃ x, ¬p x)]
[∀ x, Decidable (p x)] : (¬∀ x, p x) ↔ ∃ x, ¬p x :=
⟨Not.decidable_imp_symm fun nx x ↦ nx.decidable_imp_symm fun h ↦ ⟨x, h⟩,
not_forall_of_exists_not⟩
#align decidable.not_forall Decidable.not_forall
@[simp]
theorem not_forall {p : α → Prop} : (¬∀ x, p x) ↔ ∃ x, ¬p x :=
Decidable.not_forall
#align not_forall not_forall
-- See Note [decidable namespace]
protected theorem Decidable.not_forall_not [Decidable (∃ x, p x)] : (¬∀ x, ¬p x) ↔ ∃ x, p x :=
(@Decidable.not_iff_comm _ _ _ (decidable_of_iff (¬∃ x, p x) not_exists)).1 not_exists
#align decidable.not_forall_not Decidable.not_forall_not
theorem not_forall_not : (¬∀ x, ¬p x) ↔ ∃ x, p x := Decidable.not_forall_not
#align not_forall_not not_forall_not
-- See Note [decidable namespace]
protected theorem Decidable.not_exists_not [∀ x, Decidable (p x)] : (¬∃ x, ¬p x) ↔ ∀ x, p x := by
simp only [not_exists, Decidable.not_not]
#align decidable.not_exists_not Decidable.not_exists_not
theorem not_exists_not : (¬∃ x, ¬p x) ↔ ∀ x, p x := Decidable.not_exists_not
#align not_exists_not not_exists_not
theorem forall_imp_iff_exists_imp [ha : Nonempty α] : (∀ x, p x) → b ↔ ∃ x, p x → b := by
let ⟨a⟩ := ha
refine ⟨fun h ↦ not_forall_not.1 fun h' ↦ ?_, fun ⟨x, hx⟩ h ↦ hx (h x)⟩
exact if hb : b then h' a fun _ ↦ hb else hb <| h fun x ↦ (not_imp.1 (h' x)).1
#align forall_imp_iff_exists_imp forall_imp_iff_exists_imp
theorem forall_true_iff : (α → True) ↔ True := imp_true_iff _
#align forall_true_iff forall_true_iff
-- Unfortunately this causes simp to loop sometimes, so we
-- add the 2 and 3 cases as simp lemmas instead
theorem forall_true_iff' (h : ∀ a, p a ↔ True) : (∀ a, p a) ↔ True :=
iff_true_intro fun _ ↦ of_iff_true (h _)
#align forall_true_iff' forall_true_iff'
-- This is not marked `@[simp]` because `implies_true : (α → True) = True` works
theorem forall₂_true_iff {β : α → Sort _} : (∀ a, β a → True) ↔ True := by simp
#align forall_2_true_iff forall₂_true_iff
-- This is not marked `@[simp]` because `implies_true : (α → True) = True` works
theorem forall₃_true_iff {β : α → Sort _} {γ : ∀ a, β a → Sort _} :
(∀ (a) (b : β a), γ a b → True) ↔ True := by simp
#align forall_3_true_iff forall₃_true_iff
@[simp] theorem exists_unique_iff_exists [Subsingleton α] {p : α → Prop} :
(∃! x, p x) ↔ ∃ x, p x :=
⟨fun h ↦ h.exists, Exists.imp fun x hx ↦ ⟨hx, fun y _ ↦ Subsingleton.elim y x⟩⟩
#align exists_unique_iff_exists exists_unique_iff_exists
-- forall_forall_const is no longer needed
@[simp] theorem exists_const (α) [i : Nonempty α] : (∃ _ : α, b) ↔ b :=
⟨fun ⟨_, h⟩ ↦ h, i.elim Exists.intro⟩
#align exists_const exists_const
theorem exists_unique_const (α) [i : Nonempty α] [Subsingleton α] :
(∃! _ : α, b) ↔ b := by simp
#align exists_unique_const exists_unique_const
#align forall_and_distrib forall_and
#align exists_or_distrib exists_or
#align exists_and_distrib_left exists_and_left
#align exists_and_distrib_right exists_and_right
theorem Decidable.and_forall_ne [DecidableEq α] (a : α) {p : α → Prop} :
(p a ∧ ∀ b, b ≠ a → p b) ↔ ∀ b, p b := by
simp only [← @forall_eq _ p a, ← forall_and, ← or_imp, Decidable.em, forall_const]
#align decidable.and_forall_ne Decidable.and_forall_ne
theorem and_forall_ne (a : α) : (p a ∧ ∀ b, b ≠ a → p b) ↔ ∀ b, p b :=
Decidable.and_forall_ne a
#align and_forall_ne and_forall_ne
theorem Ne.ne_or_ne {x y : α} (z : α) (h : x ≠ y) : x ≠ z ∨ y ≠ z :=
not_and_or.1 <| mt (and_imp.2 (· ▸ ·)) h.symm
#align ne.ne_or_ne Ne.ne_or_ne
@[simp] theorem exists_unique_eq {a' : α} : ∃! a, a = a' := by
simp only [eq_comm, ExistsUnique, and_self, forall_eq', exists_eq']
#align exists_unique_eq exists_unique_eq
@[simp] theorem exists_unique_eq' {a' : α} : ∃! a, a' = a := by
simp only [ExistsUnique, and_self, forall_eq', exists_eq']
#align exists_unique_eq' exists_unique_eq'
-- @[simp] -- FIXME simp does not apply this lemma for some reason
theorem exists_apply_eq_apply' (f : α → β) (a' : α) : ∃ a, f a' = f a := ⟨a', rfl⟩
#align exists_apply_eq_apply' exists_apply_eq_apply'
-- porting note: an alternative workaround theorem:
theorem exists_apply_eq (a : α) (b : β) : ∃ f : α → β, f a = b := ⟨fun _ ↦ b, rfl⟩
@[simp] theorem exists_exists_and_eq_and {f : α → β} {p : α → Prop} {q : β → Prop} :
(∃ b, (∃ a, p a ∧ f a = b) ∧ q b) ↔ ∃ a, p a ∧ q (f a) :=
⟨fun ⟨_, ⟨a, ha, hab⟩, hb⟩ ↦ ⟨a, ha, hab.symm ▸ hb⟩, fun ⟨a, hp, hq⟩ ↦ ⟨f a, ⟨a, hp, rfl⟩, hq⟩⟩
#align exists_exists_and_eq_and exists_exists_and_eq_and
@[simp] theorem exists_exists_eq_and {f : α → β} {p : β → Prop} :
(∃ b, (∃ a, f a = b) ∧ p b) ↔ ∃ a, p (f a) :=
⟨fun ⟨_, ⟨a, ha⟩, hb⟩ ↦ ⟨a, ha.symm ▸ hb⟩, fun ⟨a, ha⟩ ↦ ⟨f a, ⟨a, rfl⟩, ha⟩⟩
#align exists_exists_eq_and exists_exists_eq_and
@[simp] theorem exists_or_eq_left (y : α) (p : α → Prop) : ∃ x : α, x = y ∨ p x := ⟨y, .inl rfl⟩
#align exists_or_eq_left exists_or_eq_left
@[simp] theorem exists_or_eq_right (y : α) (p : α → Prop) : ∃ x : α, p x ∨ x = y := ⟨y, .inr rfl⟩
#align exists_or_eq_right exists_or_eq_right
@[simp] theorem exists_or_eq_left' (y : α) (p : α → Prop) : ∃ x : α, y = x ∨ p x := ⟨y, .inl rfl⟩
#align exists_or_eq_left' exists_or_eq_left'
@[simp] theorem exists_or_eq_right' (y : α) (p : α → Prop) : ∃ x : α, p x ∨ y = x := ⟨y, .inr rfl⟩
#align exists_or_eq_right' exists_or_eq_right'
theorem forall_apply_eq_imp_iff {f : α → β} {p : β → Prop} :
(∀ a b, f a = b → p b) ↔ ∀ a, p (f a) := by simp
#align forall_apply_eq_imp_iff forall_apply_eq_imp_iff
@[simp] theorem forall_apply_eq_imp_iff' {f : α → β} {p : β → Prop} :
(∀ b a, f a = b → p b) ↔ ∀ a, p (f a) := by simp [forall_swap]
#align forall_apply_eq_imp_iff' forall_apply_eq_imp_iff'
theorem forall_eq_apply_imp_iff {f : α → β} {p : β → Prop} :
(∀ a b, b = f a → p b) ↔ ∀ a, p (f a) := by simp
#align forall_eq_apply_imp_iff forall_eq_apply_imp_iff
@[simp] theorem forall_eq_apply_imp_iff' {f : α → β} {p : β → Prop} :
(∀ b a, b = f a → p b) ↔ ∀ a, p (f a) := by simp [forall_swap]
#align forall_eq_apply_imp_iff' forall_eq_apply_imp_iff'
@[simp] theorem forall_apply_eq_imp_iff₂ {f : α → β} {p : α → Prop} {q : β → Prop} :
(∀ b a, p a → f a = b → q b) ↔ ∀ a, p a → q (f a) :=
⟨fun h a ha ↦ h (f a) a ha rfl, fun h _ a ha hb ↦ hb ▸ h a ha⟩
#align forall_apply_eq_imp_iff₂ forall_apply_eq_imp_iff₂
@[simp] theorem exists_eq_right' {a' : α} : (∃ a, p a ∧ a' = a) ↔ p a' := by simp [@eq_comm _ a']
#align exists_eq_right' exists_eq_right'
theorem exists_comm {p : α → β → Prop} : (∃ a b, p a b) ↔ ∃ b a, p a b :=
⟨fun ⟨a, b, h⟩ ↦ ⟨b, a, h⟩, fun ⟨b, a, h⟩ ↦ ⟨a, b, h⟩⟩
#align exists_comm exists_comm
theorem exists₂_comm {κ₁ : ι₁ → Sort _} {κ₂ : ι₂ → Sort _} {p : ∀ i₁, κ₁ i₁ → ∀ i₂, κ₂ i₂ → Prop} :
(∃ i₁ j₁ i₂ j₂, p i₁ j₁ i₂ j₂) ↔ ∃ i₂ j₂ i₁ j₁, p i₁ j₁ i₂ j₂ := by
simp only [@exists_comm (κ₁ _), @exists_comm ι₁]
#align exists₂_comm exists₂_comm
theorem And.exists {p q : Prop} {f : p ∧ q → Prop} : (∃ h, f h) ↔ ∃ hp hq, f ⟨hp, hq⟩ :=
⟨fun ⟨h, H⟩ ↦ ⟨h.1, h.2, H⟩, fun ⟨hp, hq, H⟩ ↦ ⟨⟨hp, hq⟩, H⟩⟩
#align and.exists And.exists
theorem forall_or_of_or_forall (h : b ∨ ∀ x, p x) (x) : b ∨ p x := h.imp_right fun h₂ ↦ h₂ x
#align forall_or_of_or_forall forall_or_of_or_forall
-- See Note [decidable namespace]
protected theorem Decidable.forall_or_left {q : Prop} {p : α → Prop} [Decidable q] :
(∀ x, q ∨ p x) ↔ q ∨ ∀ x, p x :=
⟨fun h ↦ if hq : q then Or.inl hq else
Or.inr fun x ↦ (h x).resolve_left hq, forall_or_of_or_forall⟩
#align decidable.forall_or_distrib_left Decidable.forall_or_left
theorem forall_or_left {q} {p : α → Prop} : (∀ x, q ∨ p x) ↔ q ∨ ∀ x, p x :=
Decidable.forall_or_left
#align forall_or_distrib_left forall_or_left
-- See Note [decidable namespace]
protected theorem Decidable.forall_or_right {q} {p : α → Prop} [Decidable q] :
(∀ x, p x ∨ q) ↔ (∀ x, p x) ∨ q := by simp [or_comm, Decidable.forall_or_left]
#align decidable.forall_or_distrib_right Decidable.forall_or_right
theorem forall_or_right {q} {p : α → Prop} : (∀ x, p x ∨ q) ↔ (∀ x, p x) ∨ q :=
Decidable.forall_or_right
#align forall_or_distrib_right forall_or_right
theorem exists_unique_prop {p q : Prop} : (∃! _ : p, q) ↔ p ∧ q := by simp
#align exists_unique_prop exists_unique_prop
@[simp] theorem exists_unique_false : ¬∃! _ : α, False := fun ⟨_, h, _⟩ ↦ h
#align exists_unique_false exists_unique_false
theorem Exists.fst {b : Prop} {p : b → Prop} : Exists p → b
| ⟨h, _⟩ => h
#align Exists.fst Exists.fst
theorem Exists.snd {b : Prop} {p : b → Prop} : ∀ h : Exists p, p h.fst
| ⟨_, h⟩ => h
#align Exists.snd Exists.snd
theorem exists_prop_of_true {p : Prop} {q : p → Prop} (h : p) : (∃ h' : p, q h') ↔ q h :=
@exists_const (q h) p ⟨h⟩
#align exists_prop_of_true exists_prop_of_true
theorem exists_iff_of_forall {p : Prop} {q : p → Prop} (h : ∀ h, q h) : (∃ h, q h) ↔ p :=
⟨Exists.fst, fun H ↦ ⟨H, h H⟩⟩
#align exists_iff_of_forall exists_iff_of_forall
theorem exists_unique_prop_of_true {p : Prop} {q : p → Prop} (h : p) : (∃! h' : p, q h') ↔ q h :=
@exists_unique_const (q h) p ⟨h⟩ _
#align exists_unique_prop_of_true exists_unique_prop_of_true
theorem forall_prop_of_false {p : Prop} {q : p → Prop} (hn : ¬p) : (∀ h' : p, q h') ↔ True :=
iff_true_intro fun h ↦ hn.elim h
#align forall_prop_of_false forall_prop_of_false
theorem exists_prop_of_false {p : Prop} {q : p → Prop} : ¬p → ¬∃ h' : p, q h' :=
mt Exists.fst
#align exists_prop_of_false exists_prop_of_false
@[congr]
theorem exists_prop_congr {p p' : Prop} {q q' : p → Prop} (hq : ∀ h, q h ↔ q' h) (hp : p ↔ p') :
Exists q ↔ ∃ h : p', q' (hp.2 h) :=
⟨fun ⟨_, _⟩ ↦ ⟨hp.1 ‹_›, (hq _).1 ‹_›⟩, fun ⟨_, _⟩ ↦ ⟨_, (hq _).2 ‹_›⟩⟩
#align exists_prop_congr exists_prop_congr
@[congr]
theorem exists_prop_congr' {p p' : Prop} {q q' : p → Prop} (hq : ∀ h, q h ↔ q' h) (hp : p ↔ p') :
Exists q = ∃ h : p', q' (hp.2 h) :=
propext (exists_prop_congr hq hp)
#align exists_prop_congr' exists_prop_congr'
/-- See `IsEmpty.exists_iff` for the `false` version. -/
@[simp] theorem exists_true_left (p : True → Prop) : (∃ x, p x) ↔ p True.intro :=
exists_prop_of_true _
#align exists_true_left exists_true_left
-- Porting note: `@[congr]` commented out for now.
-- @[congr]
theorem forall_prop_congr {p p' : Prop} {q q' : p → Prop} (hq : ∀ h, q h ↔ q' h) (hp : p ↔ p') :
(∀ h, q h) ↔ ∀ h : p', q' (hp.2 h) :=
⟨fun h1 h2 ↦ (hq _).1 (h1 (hp.2 h2)), fun h1 h2 ↦ (hq _).2 (h1 (hp.1 h2))⟩
#align forall_prop_congr forall_prop_congr
-- Porting note: `@[congr]` commented out for now.
-- @[congr]
theorem forall_prop_congr' {p p' : Prop} {q q' : p → Prop} (hq : ∀ h, q h ↔ q' h) (hp : p ↔ p') :
(∀ h, q h) = ∀ h : p', q' (hp.2 h) :=
propext (forall_prop_congr hq hp)
#align forall_prop_congr' forall_prop_congr'
/-- See `IsEmpty.forall_iff` for the `false` version. -/
@[simp] theorem forall_true_left (p : True → Prop) : (∀ x, p x) ↔ p True.intro :=
forall_prop_of_true _
#align forall_true_left forall_true_left
theorem ExistsUnique.elim₂ {α : Sort _} {p : α → Sort _} [∀ x, Subsingleton (p x)]
{q : ∀ (x) (_ : p x), Prop} {b : Prop} (h₂ : ∃! (x : _) (h : p x), q x h)
(h₁ : ∀ (x) (h : p x), q x h → (∀ (y) (hy : p y), q y hy → y = x) → b) : b := by
simp only [exists_unique_iff_exists] at h₂
apply h₂.elim
exact fun x ⟨hxp, hxq⟩ H ↦ h₁ x hxp hxq fun y hyp hyq ↦ H y ⟨hyp, hyq⟩
#align exists_unique.elim2 ExistsUnique.elim₂
theorem ExistsUnique.intro₂ {α : Sort _} {p : α → Sort _} [∀ x, Subsingleton (p x)]
{q : ∀ (x : α) (_ : p x), Prop} (w : α) (hp : p w) (hq : q w hp)
(H : ∀ (y) (hy : p y), q y hy → y = w) : ∃! (x : _) (hx : p x), q x hx := by
simp only [exists_unique_iff_exists]
exact ExistsUnique.intro w ⟨hp, hq⟩ fun y ⟨hyp, hyq⟩ ↦ H y hyp hyq
#align exists_unique.intro2 ExistsUnique.intro₂
theorem ExistsUnique.exists₂ {α : Sort _} {p : α → Sort _} {q : ∀ (x : α) (_ : p x), Prop}
(h : ∃! (x : _) (hx : p x), q x hx) : ∃ (x : _) (hx : p x), q x hx :=
h.exists.imp fun _ hx ↦ hx.exists
#align exists_unique.exists2 ExistsUnique.exists₂
theorem ExistsUnique.unique₂ {α : Sort _} {p : α → Sort _} [∀ x, Subsingleton (p x)]
{q : ∀ (x : α) (_ : p x), Prop} (h : ∃! (x : _) (hx : p x), q x hx) {y₁ y₂ : α}
(hpy₁ : p y₁) (hqy₁ : q y₁ hpy₁) (hpy₂ : p y₂) (hqy₂ : q y₂ hpy₂) : y₁ = y₂ := by
simp only [exists_unique_iff_exists] at h
exact h.unique ⟨hpy₁, hqy₁⟩ ⟨hpy₂, hqy₂⟩
#align exists_unique.unique2 ExistsUnique.unique₂
end Quantifiers
/-! ### Classical lemmas -/
namespace Classical
variable {p : α → Prop}
-- Use shortened names to avoid conflicts when the `Classical` namespace is open.
/-- Any prop `p` is decidable classically. A shorthand for `Classical.propDecidable`. -/
noncomputable def dec (p : Prop) : Decidable p := by infer_instance
#align classical.dec Classical.dec
/-- Any predicate `p` is decidable classically. -/
noncomputable def decPred (p : α → Prop) : DecidablePred p := by infer_instance
#align classical.dec_pred Classical.decPred
/-- Any relation `p` is decidable classically. -/
noncomputable def decRel (p : α → α → Prop) : DecidableRel p := by infer_instance
#align classical.dec_rel Classical.decRel
/-- Any type `α` has decidable equality classically. -/
noncomputable def decEq (α : Sort u) : DecidableEq α := by infer_instance
#align classical.dec_eq Classical.decEq
/-- Construct a function from a default value `H0`, and a function to use if there exists a value
satisfying the predicate. -/
-- @[elab_as_elim] -- FIXME
noncomputable def existsCases (H0 : C) (H : ∀ a, p a → C) : C :=
if h : ∃ a, p a then H (Classical.choose h) (Classical.choose_spec h) else H0
#align classical.exists_cases Classical.existsCases
theorem some_spec₂ {α : Sort _} {p : α → Prop} {h : ∃ a, p a} (q : α → Prop)
(hpq : ∀ a, p a → q a) : q (choose h) := hpq _ <| choose_spec _
#align classical.some_spec2 Classical.some_spec₂
/-- A version of `Classical.indefiniteDescription` which is definitionally equal to a pair. -/
noncomputable def subtype_of_exists {α : Type _} {P : α → Prop} (h : ∃ x, P x) : { x // P x } :=
⟨Classical.choose h, Classical.choose_spec h⟩
#align classical.subtype_of_exists Classical.subtype_of_exists
/-- A version of `byContradiction` that uses types instead of propositions. -/
protected noncomputable def byContradiction' {α : Sort _} (H : ¬(α → False)) : α :=
Classical.choice <| (peirce _ False) fun h ↦ (H fun a ↦ h ⟨a⟩).elim
#align classical.by_contradiction' Classical.byContradiction'
/-- `Classical.byContradiction'` is equivalent to Lean's axiom `Classical.choice`. -/
def choice_of_byContradiction' {α : Sort _} (contra : ¬(α → False) → α) : Nonempty α → α :=
fun H ↦ contra H.elim
#align classical.choice_of_by_contradiction' Classical.choice_of_byContradiction'
end Classical
/-- This function has the same type as `Exists.recOn`, and can be used to case on an existential,
but `Exists.recOn` can only eliminate into Prop, while this version eliminates into any universe
using the axiom of choice. -/
-- @[elab_as_elim] -- FIXME
noncomputable def Exists.classicalRecOn {p : α → Prop} (h : ∃ a, p a) {C} (H : ∀ a, p a → C) : C :=
H (Classical.choose h) (Classical.choose_spec h)
#align exists.classical_rec_on Exists.classicalRecOn
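-- For instance (illustrative only): given `h : ∃ n : Nat, 0 < n`, the term
-- `h.classicalRecOn fun n _ ↦ n` noncomputably extracts a witness `n : Nat`,
-- which `Exists.recOn` cannot produce since `Nat` is not a proposition.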
/-! ### Declarations about bounded quantifiers -/
section BoundedQuantifiers
variable {r p q : α → Prop} {P Q : ∀ x, p x → Prop} {b : Prop}
theorem bex_def : (∃ (x : _) (_ : p x), q x) ↔ ∃ x, p x ∧ q x :=
⟨fun ⟨x, px, qx⟩ ↦ ⟨x, px, qx⟩, fun ⟨x, px, qx⟩ ↦ ⟨x, px, qx⟩⟩
#align bex_def bex_def
theorem BEx.elim {b : Prop} : (∃ x h, P x h) → (∀ a h, P a h → b) → b
| ⟨a, h₁, h₂⟩, h' => h' a h₁ h₂
#align bex.elim BEx.elim
theorem BEx.intro (a : α) (h₁ : p a) (h₂ : P a h₁) : ∃ (x : _) (h : p x), P x h :=
⟨a, h₁, h₂⟩
#align bex.intro BEx.intro
theorem ball_congr (H : ∀ x h, P x h ↔ Q x h) : (∀ x h, P x h) ↔ ∀ x h, Q x h :=
forall_congr' fun x ↦ forall_congr' (H x)
#align ball_congr ball_congr
theorem bex_congr (H : ∀ x h, P x h ↔ Q x h) : (∃ x h, P x h) ↔ ∃ x h, Q x h :=
exists_congr fun x ↦ exists_congr (H x)
#align bex_congr bex_congr
theorem bex_eq_left {a : α} : (∃ (x : _) (_ : x = a), p x) ↔ p a := by
simp only [exists_prop, exists_eq_left]
#align bex_eq_left bex_eq_left
theorem BAll.imp_right (H : ∀ x h, P x h → Q x h) (h₁ : ∀ x h, P x h) (x h) : Q x h :=
H _ _ <| h₁ _ _
#align ball.imp_right BAll.imp_right
theorem BEx.imp_right (H : ∀ x h, P x h → Q x h) : (∃ x h, P x h) → ∃ x h, Q x h
| ⟨_, _, h'⟩ => ⟨_, _, H _ _ h'⟩
#align bex.imp_right BEx.imp_right
theorem BAll.imp_left (H : ∀ x, p x → q x) (h₁ : ∀ x, q x → r x) (x) (h : p x) : r x :=
h₁ _ <| H _ h
#align ball.imp_left BAll.imp_left
theorem BEx.imp_left (H : ∀ x, p x → q x) : (∃ (x : _) (_ : p x), r x) → ∃ (x : _) (_ : q x), r x
| ⟨x, hp, hr⟩ => ⟨x, H _ hp, hr⟩
#align bex.imp_left BEx.imp_left
theorem ball_of_forall (h : ∀ x, p x) (x) : p x := h x
#align ball_of_forall ball_of_forall
theorem forall_of_ball (H : ∀ x, p x) (h : ∀ x, p x → q x) (x) : q x := h x <| H x
#align forall_of_ball forall_of_ball
theorem bex_of_exists (H : ∀ x, p x) : (∃ x, q x) → ∃ (x : _) (_ : p x), q x
| ⟨x, hq⟩ => ⟨x, H x, hq⟩
#align bex_of_exists bex_of_exists
theorem exists_of_bex : (∃ (x : _) (_ : p x), q x) → ∃ x, q x
| ⟨x, _, hq⟩ => ⟨x, hq⟩
#align exists_of_bex exists_of_bex
theorem bex_imp : (∃ x h, P x h) → b ↔ ∀ x h, P x h → b := by simp
#align bex_imp_distrib bex_imp
theorem not_bex : (¬∃ x h, P x h) ↔ ∀ x h, ¬P x h := bex_imp
#align not_bex not_bex
theorem not_ball_of_bex_not : (∃ x h, ¬P x h) → ¬∀ x h, P x h
| ⟨x, h, hp⟩, al => hp <| al x h
#align not_ball_of_bex_not not_ball_of_bex_not
-- See Note [decidable namespace]
protected theorem Decidable.not_ball [Decidable (∃ x h, ¬P x h)] [∀ x h, Decidable (P x h)] :
(¬∀ x h, P x h) ↔ ∃ x h, ¬P x h :=
⟨Not.decidable_imp_symm fun nx x h ↦ nx.decidable_imp_symm
fun h' ↦ ⟨x, h, h'⟩, not_ball_of_bex_not⟩
#align decidable.not_ball Decidable.not_ball
theorem not_ball : (¬∀ x h, P x h) ↔ ∃ x h, ¬P x h := Decidable.not_ball
#align not_ball not_ball
theorem ball_true_iff (p : α → Prop) : (∀ x, p x → True) ↔ True :=
iff_true_intro fun _ _ ↦ trivial
#align ball_true_iff ball_true_iff
theorem ball_and : (∀ x h, P x h ∧ Q x h) ↔ (∀ x h, P x h) ∧ ∀ x h, Q x h :=
Iff.trans (forall_congr' fun _ ↦ forall_and) forall_and
#align ball_and_distrib ball_and
theorem bex_or : (∃ x h, P x h ∨ Q x h) ↔ (∃ x h, P x h) ∨ ∃ x h, Q x h :=
Iff.trans (exists_congr fun _ ↦ exists_or) exists_or
#align bex_or_distrib bex_or
theorem ball_or_left : (∀ x, p x ∨ q x → r x) ↔ (∀ x, p x → r x) ∧ ∀ x, q x → r x :=
Iff.trans (forall_congr' fun _ ↦ or_imp) forall_and
#align ball_or_left_distrib ball_or_left
theorem bex_or_left :
(∃ (x : _) (_ : p x ∨ q x), r x) ↔ (∃ (x : _) (_ : p x), r x) ∨ ∃ (x : _) (_ : q x), r x := by
simp only [exists_prop]
exact Iff.trans (exists_congr fun x ↦ or_and_right) exists_or
#align bex_or_left_distrib bex_or_left
end BoundedQuantifiers
#align classical.not_ball not_ball
section ite
variable {σ : α → Sort _} (f : α → β) {P Q : Prop} [Decidable P] [Decidable Q]
{a b c : α} {A : P → α} {B : ¬P → α}
theorem dite_eq_iff : dite P A B = c ↔ (∃ h, A h = c) ∨ ∃ h, B h = c := by
by_cases P <;> simp [*, exists_prop_of_true, exists_prop_of_false]
#align dite_eq_iff dite_eq_iff
theorem ite_eq_iff : ite P a b = c ↔ P ∧ a = c ∨ ¬P ∧ b = c :=
dite_eq_iff.trans <| by simp only; rw [exists_prop, exists_prop]
#align ite_eq_iff ite_eq_iff
theorem eq_ite_iff : a = ite P b c ↔ P ∧ a = b ∨ ¬P ∧ a = c :=
eq_comm.trans <| ite_eq_iff.trans <| (Iff.rfl.and eq_comm).or (Iff.rfl.and eq_comm)
theorem dite_eq_iff' : dite P A B = c ↔ (∀ h, A h = c) ∧ ∀ h, B h = c :=
⟨fun he ↦ ⟨fun h ↦ (dif_pos h).symm.trans he, fun h ↦ (dif_neg h).symm.trans he⟩, fun he ↦
(em P).elim (fun h ↦ (dif_pos h).trans <| he.1 h) fun h ↦ (dif_neg h).trans <| he.2 h⟩
#align dite_eq_iff' dite_eq_iff'
theorem ite_eq_iff' : ite P a b = c ↔ (P → a = c) ∧ (¬P → b = c) := dite_eq_iff'
#align ite_eq_iff' ite_eq_iff'
@[simp] theorem dite_eq_left_iff : dite P (fun _ ↦ a) B = a ↔ ∀ h, B h = a := by
by_cases P <;> simp [*, forall_prop_of_true, forall_prop_of_false]
#align dite_eq_left_iff dite_eq_left_iff
@[simp] theorem dite_eq_right_iff : (dite P A fun _ ↦ b) = b ↔ ∀ h, A h = b := by
by_cases P <;> simp [*, forall_prop_of_true, forall_prop_of_false]
#align dite_eq_right_iff dite_eq_right_iff
@[simp] theorem ite_eq_left_iff : ite P a b = a ↔ ¬P → b = a := dite_eq_left_iff
@[simp] theorem ite_eq_right_iff : ite P a b = b ↔ P → a = b := dite_eq_right_iff
#align ite_eq_right_iff ite_eq_right_iff
#align ite_eq_left_iff ite_eq_left_iff
theorem dite_ne_left_iff : dite P (fun _ ↦ a) B ≠ a ↔ ∃ h, a ≠ B h := by
rw [Ne.def, dite_eq_left_iff, not_forall]
exact exists_congr fun h ↦ by rw [ne_comm]
#align dite_ne_left_iff dite_ne_left_iff
theorem dite_ne_right_iff : (dite P A fun _ ↦ b) ≠ b ↔ ∃ h, A h ≠ b := by
simp only [Ne.def, dite_eq_right_iff, not_forall]
#align dite_ne_right_iff dite_ne_right_iff
theorem ite_ne_left_iff : ite P a b ≠ a ↔ ¬P ∧ a ≠ b :=
dite_ne_left_iff.trans <| by simp only; rw [exists_prop]
#align ite_ne_left_iff ite_ne_left_iff
theorem ite_ne_right_iff : ite P a b ≠ b ↔ P ∧ a ≠ b :=
dite_ne_right_iff.trans <| by simp only; rw [exists_prop]
#align ite_ne_right_iff ite_ne_right_iff
protected theorem Ne.dite_eq_left_iff (h : ∀ h, a ≠ B h) : dite P (fun _ ↦ a) B = a ↔ P :=
dite_eq_left_iff.trans ⟨fun H ↦ of_not_not fun h' ↦ h h' (H h').symm, fun h H ↦ (H h).elim⟩
#align ne.dite_eq_left_iff Ne.dite_eq_left_iff
protected theorem Ne.dite_eq_right_iff (h : ∀ h, A h ≠ b) : (dite P A fun _ ↦ b) = b ↔ ¬P :=
dite_eq_right_iff.trans ⟨fun H h' ↦ h h' (H h'), fun h' H ↦ (h' H).elim⟩
#align ne.dite_eq_right_iff Ne.dite_eq_right_iff
protected theorem Ne.ite_eq_left_iff (h : a ≠ b) : ite P a b = a ↔ P :=
Ne.dite_eq_left_iff fun _ ↦ h
#align ne.ite_eq_left_iff Ne.ite_eq_left_iff
protected theorem Ne.ite_eq_right_iff (h : a ≠ b) : ite P a b = b ↔ ¬P :=
Ne.dite_eq_right_iff fun _ ↦ h
#align ne.ite_eq_right_iff Ne.ite_eq_right_iff
protected theorem Ne.dite_ne_left_iff (h : ∀ h, a ≠ B h) : dite P (fun _ ↦ a) B ≠ a ↔ ¬P :=
dite_ne_left_iff.trans <| exists_iff_of_forall h
#align ne.dite_ne_left_iff Ne.dite_ne_left_iff
protected theorem Ne.dite_ne_right_iff (h : ∀ h, A h ≠ b) : (dite P A fun _ ↦ b) ≠ b ↔ P :=
dite_ne_right_iff.trans <| exists_iff_of_forall h
#align ne.dite_ne_right_iff Ne.dite_ne_right_iff
protected theorem Ne.ite_ne_left_iff (h : a ≠ b) : ite P a b ≠ a ↔ ¬P :=
Ne.dite_ne_left_iff fun _ ↦ h
#align ne.ite_ne_left_iff Ne.ite_ne_left_iff
protected theorem Ne.ite_ne_right_iff (h : a ≠ b) : ite P a b ≠ b ↔ P :=
Ne.dite_ne_right_iff fun _ ↦ h
#align ne.ite_ne_right_iff Ne.ite_ne_right_iff
variable (P Q a b)
/-- A `dite` whose results do not actually depend on the condition may be reduced to an `ite`. -/
@[simp] theorem dite_eq_ite : (dite P (fun _ ↦ a) fun _ ↦ b) = ite P a b := rfl
#align dite_eq_ite dite_eq_ite
theorem dite_eq_or_eq : (∃ h, dite P A B = A h) ∨ ∃ h, dite P A B = B h :=
if h : _ then .inl ⟨h, dif_pos h⟩ else .inr ⟨h, dif_neg h⟩
#align dite_eq_or_eq dite_eq_or_eq
theorem ite_eq_or_eq : ite P a b = a ∨ ite P a b = b :=
if h : _ then .inl (if_pos h) else .inr (if_neg h)
#align ite_eq_or_eq ite_eq_or_eq
/-- A two-argument function applied to two `dite`s is a `dite` of that two-argument function
applied to each of the branches. -/
theorem apply_dite₂ (f : α → β → γ) (P : Prop) [Decidable P] (a : P → α) (b : ¬P → α)
(c : P → β) (d : ¬P → β) :
f (dite P a b) (dite P c d) = dite P (fun h ↦ f (a h) (c h)) fun h ↦ f (b h) (d h) := by
by_cases h : P <;> simp [h]
#align apply_dite2 apply_dite₂
/-- A two-argument function applied to two `ite`s is an `ite` of that two-argument function
applied to each of the branches. -/
theorem apply_ite₂ (f : α → β → γ) (P : Prop) [Decidable P] (a b : α) (c d : β) :
f (ite P a b) (ite P c d) = ite P (f a c) (f b d) :=
apply_dite₂ f P (fun _ ↦ a) (fun _ ↦ b) (fun _ ↦ c) fun _ ↦ d
#align apply_ite2 apply_ite₂
/-- A `dite` producing a `Pi` type `Π a, σ a`, applied to a value `a : α`, is a `dite` that applies
either branch to `a`. -/
theorem dite_apply (f : P → ∀ a, σ a) (g : ¬P → ∀ a, σ a) (a : α) :
(dite P f g) a = dite P (fun h ↦ f h a) fun h ↦ g h a := by by_cases h:P <;> simp [h]
#align dite_apply dite_apply
/-- An `ite` producing a `Pi` type `Π a, σ a`, applied to a value `a : α`, is an `ite` that applies
either branch to `a`. -/
theorem ite_apply (f g : ∀ a, σ a) (a : α) : (ite P f g) a = ite P (f a) (g a) :=
dite_apply P (fun _ ↦ f) (fun _ ↦ g) a
#align ite_apply ite_apply
theorem ite_and : ite (P ∧ Q) a b = ite P (ite Q a b) b := by
by_cases hp : P <;> by_cases hq : Q <;> simp [hp, hq]
#align ite_and ite_and
theorem dite_dite_comm {B : Q → α} {C : ¬P → ¬Q → α} (h : P → ¬Q) :
(if p : P then A p else if q : Q then B q else C p q) =
if q : Q then B q else if p : P then A p else C p q :=
dite_eq_iff'.2 ⟨
fun p ↦ by rw [dif_neg (h p), dif_pos p],
fun np ↦ by congr; funext _; rw [dif_neg np]⟩
#align dite_dite_comm dite_dite_comm
theorem ite_ite_comm (h : P → ¬Q) :
(if P then a else if Q then b else c) =
if Q then b else if P then a else c :=
dite_dite_comm P Q h
#align ite_ite_comm ite_ite_comm
end ite
|
#include <boost/process/async.hpp>
|
function meshfaces_test02 ( )
%*****************************************************************************80
%
%% MESHFACES_TEST02: A rectangle is subdivided into three.
%
% Modified:
%
% 23 August 2014
%
% Author:
%
% Darren Engwirda
%
fprintf ( 1, '\n' );
fprintf ( 1, 'MESHFACES_TEST02\n' );
fprintf ( 1, ' A rectangle is subdivided into three.\n' );
fprintf ( 1, ' The middle rectangle is thin.\n' );
node = [0.0,0.0; 1.0,0.0; 1.0,1.0; 0.0,1.0; 1.01,0.0; 1.01,1.0; 3.0,0.0; 3.0,1.0];
edge = [1,2; 2,3; 3,4; 4,1; 2,5; 5,6; 6,3; 5,7; 7,8; 8,6];
face{1} = [1,2,3,4];
face{2} = [5,6,7,2];
face{3} = [8,9,10,6];
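%
%  Each face is a list of indices into the EDGE array, not the NODE array:
%  for example, face 2 (the thin middle rectangle with corners 2, 5, 6, 3)
%  is bounded by edges 5, 6, 7 and 2.
%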
%
% Since we don't save the output, we will just see an image of the mesh.
%
meshfaces ( node, edge, face );
filename = 'test02.png';
print ( '-dpng', filename );
fprintf ( 1, '\n' );
fprintf ( 1, ' An image of the mesh was saved as "%s"\n', filename );
return
end
|
# A BNF for BNF
# This wikipedia article has a BNF for BNF:
# https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form
BootstrapBNFGrammar = BNFGrammar(:BootstrapBNFGrammar)
bnf"""
<syntax> ::= <rule> | <rule> <syntax>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<syntax>",
@Alternatives(BNFRef(BootstrapBNFGrammar, "<rule>"),
Sequence(BNFRef(BootstrapBNFGrammar, "<rule>"))))
bnf"""
<rule> ::= <opt-whitespace> "<" <rule-name> ">" <opt-whitespace> "::=" <opt-whitespace> <expression> <line-end>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<rule>",
Sequence(BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
CharacterLiteral('<'),
BNFRef(BootstrapBNFGrammar, "<rule-name>"),
CharacterLiteral('>'),
BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
Sequence(
CharacterLiteral(':'),
CharacterLiteral(':'),
CharacterLiteral('=')),
BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
BNFRef(BootstrapBNFGrammar, "<expression>"),
BNFRef(BootstrapBNFGrammar, "<line-end>")))
bnf"""
<opt-whitespace> ::= " " <opt-whitespace> | ""
"""BNF
DerivationRule(BootstrapBNFGrammar, "<opt-whitespace>",
Alternatives(
Sequence(CharacterLiteral(' '),
BNFRef(BootstrapBNFGrammar, "<opt-whitespace>")),
Empty()))
bnf"""
<expression> ::= <list> | <list> <opt-whitespace> "|" <opt-whitespace> <expression>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<expression>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<list>"),
Sequence(BNFRef(BootstrapBNFGrammar, "<list>"),
BNFRef(BootstrapBNFGrammar, "<opt-whitespace>")),
Sequence(BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
BNFRef(BootstrapBNFGrammar, "<expression>"))
))
bnf"""
<line-end> ::= <opt-whitespace> <EOL> | <line-end> <line-end>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<line-end>",
Alternatives(
Sequence(BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
BNFRef(BootstrapBNFGrammar, "<EOL>")),
Sequence(BNFRef(BootstrapBNFGrammar, "<line-end>"),
BNFRef(BootstrapBNFGrammar, "<line-end>"),)))
bnf"""
<list> ::= <term> | <term> <opt-whitespace> <list>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<list>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<term>"),
Sequence(BNFRef(BootstrapBNFGrammar, "<term>"),
BNFRef(BootstrapBNFGrammar, "<opt-whitespace>"),
BNFRef(BootstrapBNFGrammar, "<list>"),)))
bnf"""
<term> ::= <literal> | "<" <rule-name> ">"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<term>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<literal"),
Sequence(CharacterLiteral('<'),
BNFRef(BootstrapBNFGrammar, "<rule-name>"),
CharacterLiteral('>'))))
bnf"""
<literal> ::= '"' <text1> '"' | "'" <text2> "'"
"""BNF
# A double quoted string can contain single quotes.
# A single quoted string can contain double quotes.
DerivationRule(BootstrapBNFGrammar, "<literal>",
Constructor(
Alternatives(
Sequence(
CharacterLiteral('"'),
StringCollector(BNFRef(BootstrapBNFGrammar, "<text1>")),
CharacterLiteral('"')),
Sequence(
CharacterLiteral('\''),
StringCollector(BNFRef(BootstrapBNFGrammar, "<text2>")),
CharacterLiteral('\''))),
x -> StringLiteral(x[2])))
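# For example (illustrative): parsing the input "'abc'" with this rule matches the
# second alternative; x[2] is the text gathered by the StringCollector between the
# quote characters, so the Constructor above yields StringLiteral("abc").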
bnf"""
<text1> ::= "" | <character1> <text1>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<text1>",
Alternatives(
Empty(),
Sequence(
BNFRef(BootstrapBNFGrammar, "<character1>"),
BNFRef(BootstrapBNFGrammar, "<text1>"))))
bnf"""
<text2> ::= '' | <character2> <text2>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<text2>",
Alternatives(
Empty(),
Sequence(
BNFRef(BootstrapBNFGrammar, "<character2>"),
BNFRef(BootstrapBNFGrammar, "<text2>"))))
bnf"""
<character> ::= <letter> | <digit> | <symbol>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<character>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<letter>"),
BNFRef(BootstrapBNFGrammar, "<digit>"),
BNFRef(BootstrapBNFGrammar, "<symbol>")))
bnf"""
<letter> ::= "A" | "B" | "C" | "D" | "E" | "F" | "G" | "H" | "I" | "J" | "K" | "L" | "M" | "N" | "O" | "P" | "Q" | "R" | "S" | "T" | "U" | "V" | "W" | "X" | "Y" | "Z" | "a" | "b" | "c" | "d" | "e" | "f" | "g" | "h" | "i" | "j" | "k" | "l" | "m" | "n" | "o" | "p" | "q" | "r" | "s" | "t" | "u" | "v" | "w" | "x" | "y" | "z"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<letter>",
Alternatives(
[CharacterLiteral(c) for c in 'A':'Z']...,
[CharacterLiteral(c) for c in 'a':'z']...))
bnf"""
<digit> ::= "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<digit>",
Alternatives(
[CharacterLiteral(c) for c in '0':'9']...))
bnf"""
<symbol> ::= "|" | " " | "!" | "#" | "$" | "%" | "&" | "(" | ")" | "*" | "+" | "," | "-" | "." | "/" | ":" | ";" | ">" | "=" | "<" | "?" | "@" | "[" | "\" | "]" | "^" | "_" | "`" | "{" | "}" | "~"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<symbol>",
Alternatives(
CharacterLiteral('|'),
CharacterLiteral(' '),
CharacterLiteral('!'),
CharacterLiteral('#'),
CharacterLiteral('$'),
CharacterLiteral('%'),
CharacterLiteral('&'),
CharacterLiteral('('),
CharacterLiteral(')'),
CharacterLiteral('*'),
CharacterLiteral('"'),
CharacterLiteral(','),
CharacterLiteral('-'),
CharacterLiteral('.'),
CharacterLiteral('/'),
CharacterLiteral(':'),
CharacterLiteral(';'),
CharacterLiteral('>'),
CharacterLiteral('='),
CharacterLiteral('<'),
CharacterLiteral('?'),
CharacterLiteral('@'),
CharacterLiteral('['),
CharacterLiteral('\\'),
CharacterLiteral(']'),
CharacterLiteral('^'),
CharacterLiteral('_'),
CharacterLiteral('`'),
CharacterLiteral('{'),
CharacterLiteral('}'),
CharacterLiteral('~')))
bnf"""
<character1> ::= <character> | "'"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<character1>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<character>"),
CharacterLiteral('\'')))
bnf"""
<character2> ::= <character> | '"'
"""BNF
DerivationRule(BootstrapBNFGrammar, "<character2>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<character>"),
CharacterLiteral('"')))
bnf"""
<rule-name> ::= <letter> | <rule-name> <rule-char>
"""BNF
DerivationRule(BootstrapBNFGrammar, "<rule-name>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<letter>"),
Sequence(
BNFRef(BootstrapBNFGrammar, "<rule-name>"),
BNFRef(BootstrapBNFGrammar, "<rule-char>"))))
bnf"""
<rule-char> ::= <letter> | <digit> | "-"
"""BNF
DerivationRule(BootstrapBNFGrammar, "<rule-char>",
Alternatives(
BNFRef(BootstrapBNFGrammar, "<letter>"),
BNFRef(BootstrapBNFGrammar, "<digit>"),
CharacterLiteral('-')))
which_BNF_grammar = :BootstrapBNFGrammar
function bootstrap_bnf()
for e in deferred_bnf_strs
do_bnf_str(e...)
end
# which_BNF_grammar = :BNF
end
|
/*
* Copyright 2009-2020 The VOTCA Development Team (http://www.votca.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Standard includes
#include <memory>
#include <string>
#include <vector>
// Third party includes
#include <hdf5.h>
#include <boost/regex.hpp>
// Local private VOTCA includes
#include "h5mdtrajectoryreader.h"
namespace votca {
namespace csg {
using namespace std;
H5MDTrajectoryReader::H5MDTrajectoryReader() {
has_velocity_ = H5MDTrajectoryReader::NONE;
has_force_ = H5MDTrajectoryReader::NONE;
has_id_group_ = H5MDTrajectoryReader::NONE;
has_box_ = H5MDTrajectoryReader::NONE;
}
H5MDTrajectoryReader::~H5MDTrajectoryReader() {
if (file_opened_) {
H5Fclose(file_id_);
file_opened_ = false;
}
}
bool H5MDTrajectoryReader::Open(const std::string &file) {
// Checks whether we are dealing with an HDF5 file.
if (!H5Fis_hdf5(file.c_str())) {
cout << file << " is not recognise as HDF5 file format" << endl;
return false;
}
file_id_ = H5Fopen(file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
file_opened_ = true;
// Check the version of the file.
hid_t g_h5md = H5Gopen(file_id_, "h5md", H5P_DEFAULT);
CheckError(g_h5md, "Unable to open /h5md group.");
hid_t at_version = H5Aopen(g_h5md, "version", H5P_DEFAULT);
CheckError(at_version, "Unable to read version attribute.");
int version[2] = {0, 0};
H5Aread(at_version, H5Aget_type(at_version), &version);
if (version[0] != 1 || version[1] > 1) {
cout << "Found H5MD version: " << version[0] << "." << version[1] << endl;
throw ios_base::failure("Wrong version of H5MD file.");
}
// Checks that the particles group exists and counts its members.
particle_group_ = H5Gopen(file_id_, "particles", H5P_DEFAULT);
CheckError(particle_group_, "Unable to open /particles group.");
hsize_t num_obj = 0;
H5Gget_num_objs(particle_group_, &num_obj);
if (num_obj == 0) {
throw ios_base::failure("The particles group is empty.");
}
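// At this point the file is known to provide the H5MD layout this reader relies on,
// e.g. (illustrative):
// /h5md (group carrying the "version" attribute, [1, x])
// /particles/<group>/position/value (read later in Initialize())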
// Check if the unit module is enabled.
hid_t modules = H5Gopen(g_h5md, "modules", H5P_DEFAULT);
std::cout << "open modules group" << modules << std::endl;
if (modules > 0) {
hid_t module_units = H5Gopen(modules, "units", H5P_DEFAULT);
if (module_units > 0) {
std::cout << "found units module - position and forces will be scaled "
<< std::endl;
unit_module_enabled_ = true;
H5Gclose(module_units);
}
H5Gclose(modules);
}
first_frame_ = true;
// Handle errors by internal checks.
H5Eset_auto2(H5E_DEFAULT, nullptr, nullptr);
// Clean up.
H5Aclose(at_version);
H5Gclose(g_h5md);
return true;
}
void H5MDTrajectoryReader::Close() {
if (file_opened_) {
H5Fclose(file_id_);
file_opened_ = false;
}
}
void H5MDTrajectoryReader::Initialize(Topology &top) {
std::string particle_group_name_ = top.getParticleGroup();
if (particle_group_name_.compare("unassigned") == 0) {
throw ios_base::failure(
"Missing particle group in topology. Please set `h5md_particle_group` "
"tag with `name` attribute set to the particle group.");
}
std::string position_group_name = particle_group_name_ + "/position";
atom_position_group_ =
H5Gopen(particle_group_, position_group_name.c_str(), H5P_DEFAULT);
CheckError(atom_position_group_,
"Unable to open " + position_group_name + " group");
idx_frame_ = -1;
ds_atom_position_ = H5Dopen(atom_position_group_, "value", H5P_DEFAULT);
CheckError(ds_atom_position_,
"Unable to open " + position_group_name + "/value dataset");
// Reads the box information.
std::string box_gr_name = particle_group_name_ + "/box";
hid_t g_box = H5Gopen(particle_group_, box_gr_name.c_str(), H5P_DEFAULT);
CheckError(g_box, "Unable to open " + box_gr_name + " group");
hid_t at_box_dimension = H5Aopen(g_box, "dimension", H5P_DEFAULT);
CheckError(at_box_dimension, "Unable to open dimension attribute.");
int dimension;
H5Aread(at_box_dimension, H5Aget_type(at_box_dimension), &dimension);
if (dimension != 3) {
throw ios_base::failure("Wrong dimension " +
boost::lexical_cast<std::string>(dimension));
}
// TODO: check if boundary is periodic.
std::string box_edges_name = particle_group_name_ + "/box/edges";
if (GroupExists(particle_group_, box_edges_name)) {
g_box = H5Gopen(particle_group_, box_gr_name.c_str(), H5P_DEFAULT);
edges_group_ = H5Gopen(g_box, "edges", H5P_DEFAULT);
ds_edges_group_ = H5Dopen(edges_group_, "value", H5P_DEFAULT);
cout << "H5MD: has /box/edges" << endl;
cout << "H5MD: time dependent box size" << endl;
has_box_ = H5MDTrajectoryReader::TIMEDEPENDENT;
} else {
cout << "H5MD: static box" << endl;
hid_t ds_edges = H5Dopen(g_box, "edges", H5P_DEFAULT);
CheckError(ds_edges, "Unable to open /box/edges");
std::unique_ptr<double[]> box = std::unique_ptr<double[]>{new double[3]};
ReadStaticData<double[]>(ds_edges, H5T_NATIVE_DOUBLE, box);
cout << "H5MD: Found box " << box[0] << " x " << box[1] << " x " << box[2]
<< endl;
// Sets box size.
m = Eigen::Matrix3d::Zero();
m(0, 0) = box[0] * length_scaling_;
m(1, 1) = box[1] * length_scaling_;
m(2, 2) = box[2] * length_scaling_;
top.setBox(m);
has_box_ = H5MDTrajectoryReader::STATIC;
}
H5Gclose(g_box);
// Gets the force group.
std::string force_group_name = particle_group_name_ + "/force";
if (GroupExists(particle_group_, force_group_name)) {
atom_force_group_ =
H5Gopen(particle_group_, force_group_name.c_str(), H5P_DEFAULT);
ds_atom_force_ = H5Dopen(atom_force_group_, "value", H5P_DEFAULT);
has_force_ = H5MDTrajectoryReader::TIMEDEPENDENT;
cout << "H5MD: has /force" << endl;
} else {
has_force_ = H5MDTrajectoryReader::NONE;
}
// Gets the velocity group.
std::string velocity_group_name = particle_group_name_ + "/velocity";
if (GroupExists(particle_group_, velocity_group_name)) {
atom_velocity_group_ =
H5Gopen(particle_group_, velocity_group_name.c_str(), H5P_DEFAULT);
ds_atom_velocity_ = H5Dopen(atom_velocity_group_, "value", H5P_DEFAULT);
has_velocity_ = H5MDTrajectoryReader::TIMEDEPENDENT;
cout << "H5MD: has /velocity" << endl;
} else {
has_velocity_ = H5MDTrajectoryReader::NONE;
}
// Gets the id group so that the atom id is taken from this group.
std::string id_group_name = particle_group_name_ + "/id";
if (GroupExists(particle_group_, id_group_name)) {
atom_id_group_ =
H5Gopen(particle_group_, id_group_name.c_str(), H5P_DEFAULT);
ds_atom_id_ = H5Dopen(atom_id_group_, "value", H5P_DEFAULT);
has_id_group_ = H5MDTrajectoryReader::TIMEDEPENDENT;
cout << "H5MD: has /id group" << endl;
} else {
has_id_group_ = H5MDTrajectoryReader::NONE;
}
// Reads unit system - if enabled.
if (unit_module_enabled_) {
length_scaling_ = ReadScaleFactor(ds_atom_position_, "position");
force_scaling_ = ReadScaleFactor(ds_atom_force_, "force");
velocity_scaling_ = ReadScaleFactor(ds_atom_velocity_, "velocity");
}
// Gets number of particles and dimensions.
hid_t fs_atom_position_ = H5Dget_space(ds_atom_position_);
CheckError(fs_atom_position_, "Unable to open atom position space.");
hsize_t dims[3];
rank_ = H5Sget_simple_extent_dims(fs_atom_position_, dims, nullptr);
N_particles_ = dims[1];
vec_components_ = (int)dims[2];
max_idx_frame_ = dims[0] - 1;
// TODO: reads mass, charge and particle type.
if (has_id_group_ == H5MDTrajectoryReader::NONE && top.BeadCount() > 0 &&
N_particles_ != top.BeadCount()) {
cout << "Warning: The number of beads (" << N_particles_ << ")";
cout << " in the trajectory is different than defined in the topology ("
<< top.BeadCount() << ")" << endl;
cout << "The number of beads from topology will be used!" << endl;
N_particles_ = top.BeadCount();
}
}
bool H5MDTrajectoryReader::FirstFrame(Topology &top) { // NOLINT const
// reference
if (first_frame_) {
first_frame_ = false;
Initialize(top);
}
NextFrame(top);
return true;
}
/// Reading the data.
bool H5MDTrajectoryReader::NextFrame(Topology &top) { // NOLINT const reference
// Reads the position row.
idx_frame_++;
if (idx_frame_ > max_idx_frame_) {
return false;
}
cout << '\r' << "Reading frame: " << idx_frame_ << "\n";
cout.flush();
// Set the box volume because the topology on workers somehow does not
// carry this information.
if (has_box_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
std::unique_ptr<double[]> box = std::unique_ptr<double[]>{new double[3]};
ReadBox(ds_edges_group_, H5T_NATIVE_DOUBLE, idx_frame_, box);
m = Eigen::Matrix3d::Zero();
m(0, 0) = box.get()[0] * length_scaling_;
m(1, 1) = box.get()[1] * length_scaling_;
m(2, 2) = box.get()[2] * length_scaling_;
cout << "Time dependent box:" << endl;
cout << m << endl;
}
top.setBox(m);
double *positions;
double *forces = nullptr;
double *velocities = nullptr;
int *ids = nullptr;
try {
positions = ReadVectorData<double>(ds_atom_position_, H5T_NATIVE_DOUBLE,
idx_frame_);
} catch (const std::runtime_error &e) {
return false;
}
if (has_velocity_ != H5MDTrajectoryReader::NONE) {
velocities = ReadVectorData<double>(ds_atom_velocity_, H5T_NATIVE_DOUBLE,
idx_frame_);
}
if (has_force_ != H5MDTrajectoryReader::NONE) {
forces =
ReadVectorData<double>(ds_atom_force_, H5T_NATIVE_DOUBLE, idx_frame_);
}
if (has_id_group_ != H5MDTrajectoryReader::NONE) {
ids = ReadScalarData<int>(ds_atom_id_, H5T_NATIVE_INT, idx_frame_);
}
// Process atoms.
for (Index at_idx = 0; at_idx < N_particles_; at_idx++) {
double x, y, z;
Index array_index = at_idx * vec_components_;
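// positions holds one flattened frame of the (frames x particles x
// components) dataset, so component j of particle at_idx sits at
// array_index + j.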
x = positions[array_index] * length_scaling_;
y = positions[array_index + 1] * length_scaling_;
z = positions[array_index + 2] * length_scaling_;
// Set the atom id: either the row index in the dataset, or the value read
// from the id dataset.
Index atom_id = at_idx;
if (has_id_group_ != H5MDTrajectoryReader::NONE) {
if (ids[at_idx] == -1) { // ignore values where id == -1
continue;
}
atom_id = ids[at_idx];
}
// Topology has to be defined in the xml file or in other
// topology files. The h5md only stores the trajectory data.
Bead *b = top.getBead(atom_id);
if (b == nullptr) {
throw std::runtime_error("Bead not found: " +
boost::lexical_cast<std::string>(atom_id));
}
b->setPos(Eigen::Vector3d(x, y, z));
if (has_velocity_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
double vx, vy, vz;
vx = velocities[array_index] * velocity_scaling_;
vy = velocities[array_index + 1] * velocity_scaling_;
vz = velocities[array_index + 2] * velocity_scaling_;
b->setVel(Eigen::Vector3d(vx, vy, vz));
}
if (has_force_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
double fx, fy, fz;
fx = forces[array_index] * force_scaling_;
fy = forces[array_index + 1] * force_scaling_;
fz = forces[array_index + 2] * force_scaling_;
b->setF(Eigen::Vector3d(fx, fy, fz));
}
}
// Clean up pointers.
delete[] positions;
if (has_force_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
delete[] forces;
}
if (has_velocity_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
delete[] velocities;
}
if (has_id_group_ == H5MDTrajectoryReader::TIMEDEPENDENT) {
delete[] ids;
}
return true;
}
void H5MDTrajectoryReader::ReadBox(hid_t ds, hid_t ds_data_type, Index row,
std::unique_ptr<double[]> &data_out) {
hsize_t offset[2];
offset[0] = row;
offset[1] = 0;
hsize_t ch_rows[2];
ch_rows[0] = 1;
ch_rows[1] = 3;
hid_t dsp = H5Dget_space(ds);
H5Sselect_hyperslab(dsp, H5S_SELECT_SET, offset, nullptr, ch_rows, nullptr);
hid_t mspace1 = H5Screate_simple(2, ch_rows, nullptr);
herr_t status =
H5Dread(ds, ds_data_type, mspace1, dsp, H5P_DEFAULT, data_out.get());
if (status < 0) {
throw std::runtime_error("Error ReadBox: " +
boost::lexical_cast<std::string>(status));
}
}
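// Reads the dataset's H5MD "unit" attribute (e.g. "nm"), tokenizes it on
// spaces, extracts any numeric exponent suffix, and folds the known VOTCA
// scaling factors into a single multiplicative factor.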
double H5MDTrajectoryReader::ReadScaleFactor(const hid_t &ds,
const std::string &unit_type) {
hid_t unit_attr = H5Aopen(ds, "unit", H5P_DEFAULT);
double scaling_factor = 1.0;
if (unit_attr > 0) {
hid_t type_id = H5Aget_type(unit_attr);
hid_t atype_mem = H5Tget_native_type(type_id, H5T_DIR_ASCEND);
hsize_t str_size = H5Tget_size(type_id);
char buffer[80] = {0}; // buffer for attribute
H5Aread(unit_attr, atype_mem, &buffer);
H5Tclose(atype_mem);
H5Tclose(type_id);
std::string value = std::string(buffer, str_size);
// Read units
tools::Tokenizer tok(value, " ");
for (auto v : tok) {
boost::smatch suffix_match;
int unit_pow = 1;
if (boost::regex_match(v, suffix_match, suffix_units,
boost::match_extra)) {
// If the prefix has numeric suffix then use it in the formula for
// scaling factor.
unit_pow = std::stoi(suffix_match[2]);
}
auto votca_scaling_factor = votca_units_scaling_factors.find(v);
if (votca_scaling_factor != votca_units_scaling_factors.end()) {
scaling_factor =
scaling_factor * pow(votca_scaling_factor->second, unit_pow);
}
}
std::cout << "Found units " << value << " for " << unit_type << ".";
if (scaling_factor != 1.0) {
std::cout << " Values scaled by a factor " << scaling_factor;
}
std::cout << std::endl;
}
return scaling_factor;
}
} // namespace csg
} // namespace votca
|
#! /usr/bin/env python
# encoding: utf-8
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
dataSet = pd.read_csv('50_Startups.csv')
m = len(dataSet)
X = dataSet.iloc[:, :-1].values # m * 4
Y = dataSet.iloc[:, -1].values  # vector of length m
# Encoding categorical data: `OneHotEncoder(categorical_features=...)` was
# removed in scikit-learn 0.22, so a ColumnTransformer encodes the state
# column (index 3) instead; its dummy columns come first in the output.
columnTransformer = ColumnTransformer(
    [('state', OneHotEncoder(), [3])], remainder='passthrough')
X = columnTransformer.fit_transform(X)
# Avoiding Dummy Variable Trap
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(X, Y, test_size=0.2, random_state=0)
# Step2 : Fitting Multiple Linear Regression to the Training set
regressor = LinearRegression()
regressor.fit(X_TRAIN, Y_TRAIN)
# Step 3: Predicting the Test set results
y_pred = regressor.predict(X_TEST)
print('y test: {0} y pred: {1}'.format(Y_TEST, y_pred))
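# Added sanity check (not in the original script): summarize fit quality with
# scikit-learn's built-in R^2 metric instead of eyeballing raw predictions.
from sklearn.metrics import r2_score
print('R^2 on test set: {0:.4f}'.format(r2_score(Y_TEST, y_pred)))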
|
mult_0_r : (n : Nat) -> n * 0 = 0
mult_0_r Z = Refl
mult_0_r (S k) =
let indHypothesis = mult_0_r k in
rewrite indHypothesis in Refl
plus_0_r : (n : Nat) -> n + 0 = n
plus_0_r Z = Refl
plus_0_r (S k) = rewrite plus_0_r k in Refl
plus_n_Sm : (n, m : Nat) -> S (n + m) = n + (S m)
plus_n_Sm Z m = Refl
plus_n_Sm (S k) j = rewrite plus_n_Sm k j in Refl
total
plus_comm : (n, m : Nat) -> n + m = m + n
plus_comm Z Z = Refl
plus_comm Z (S k) = rewrite plus_comm Z k in Refl
plus_comm (S k) m = rewrite plus_comm k m in rewrite plus_n_Sm m k in Refl
total
plus_assoc : (n, m, p : Nat) -> n + (m + p) = (n + m) + p
plus_assoc Z m p = Refl
plus_assoc (S k) m n = rewrite plus_assoc k m n in Refl
double : (n : Nat) -> Nat
double Z = Z
double (S k) = S (S (double k))
total
double_plus : (n : Nat) -> double n = n + n
double_plus Z = Refl
double_plus (S k) = rewrite double_plus k in rewrite plus_n_Sm k k in Refl
evenb : (n : Nat) -> Bool
evenb Z = True
evenb (S Z) = False
evenb (S (S k)) = evenb k
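-- A small companion lemma (added as a sketch, not part of the original
-- exercise set): doubling any Nat always yields an even number.
total
evenb_double : (n : Nat) -> evenb (double n) = True
evenb_double Z = Refl
evenb_double (S k) = evenb_double k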
total
not_involutive : (b : Bool) -> not (not b) = b
not_involutive True = Refl
not_involutive False = Refl
total
evenb_S : (n : Nat) -> evenb (S n) = not (evenb n)
evenb_S Z = Refl
evenb_S (S k) = rewrite evenb_S k in rewrite not_involutive (evenb k) in Refl
total
plus_rearrange : (n, m, p, q : Nat) -> (n + m) + (p + q) = (m + n) + (p + q)
plus_rearrange n m p q = rewrite plus_comm n m in Refl
total
plus_swap : (n, m, p : Nat) -> n + (m + p) = m + (n + p)
plus_swap n m p =
rewrite plus_assoc n m p in
rewrite plus_comm n m in
rewrite plus_assoc m n p in Refl
mult_n_Sk : (n, k : Nat) -> n * (S k) = n + (n * k)
mult_n_Sk Z k = Refl
mult_n_Sk (S j) k =
rewrite mult_n_Sk j k in
rewrite plus_swap k j (mult j k) in
Refl
total
mult_comm : (m, n : Nat) -> m * n = n * m
mult_comm Z n = rewrite mult_0_r n in Refl
mult_comm (S k) n = rewrite mult_n_Sk n k in rewrite mult_comm k n in Refl
total
le_refl : (n : Nat) -> lte n n = True
le_refl Z = Refl
le_refl (S k) = rewrite le_refl k in Refl
zero_nbeq_S : (n : Nat) -> 0 == (S n) = False
zero_nbeq_S n = Refl
andb_false_r : (b : Bool) -> b && False = False
andb_false_r False = Refl
andb_false_r True = Refl
plus_ble_compat_l : (n, m, p : Nat) -> lte n m = True -> lte (p + n) (p + m) = True
plus_ble_compat_l n m Z prf = prf
plus_ble_compat_l n m (S k) prf = rewrite plus_ble_compat_l n m k prf in Refl
S_nbeq_0 : (n : Nat) -> (S n) == 0 = False
S_nbeq_0 n = Refl
mult_1_l : (n : Nat) -> 1 * n = n
mult_1_l n = rewrite plus_0_r n in Refl
all3_spec : (b, c : Bool) -> (b && c) || ((not b) || (not c)) = True
all3_spec False c = Refl
all3_spec True False = Refl
all3_spec True True = Refl
total
mult_plus_distr_r : (n, m, p : Nat) -> (n + m) * p = (n * p) + (m * p)
mult_plus_distr_r Z m p = Refl
mult_plus_distr_r (S k) m p =
rewrite mult_plus_distr_r k m p in
rewrite plus_assoc p (k * p) (m * p) in
Refl
total
mult_assoc : (n, m, p : Nat) -> n * (m * p) = (n * m) * p
mult_assoc Z m p = Refl
mult_assoc (S k) m p =
rewrite mult_assoc k m p in
rewrite mult_plus_distr_r m (mult k m) p in
Refl
total
beq_nat_refl : (n : Nat) -> True = n == n
beq_nat_refl Z = Refl
beq_nat_refl (S k) = rewrite beq_nat_refl k in Refl
total
plus_swap' : (n, m, p : Nat) -> n + (m + p) = m + (n + p)
plus_swap' n m p =
rewrite plus_assoc n m p in
rewrite plus_assoc m n p in
rewrite plus_comm n m in
Refl
data Bin : Type where
Z : Bin
Odd : Bin -> Bin
Even : Bin -> Bin
incr : Bin -> Bin
incr Z = Odd Z
incr b@(Odd x) = Even b
incr b@(Even x) = Odd b
binToNat : Bin -> Nat
binToNat Z = Z
binToNat (Odd x) = 1 + binToNat x
binToNat (Even x) = 1 + binToNat x
bin_to_nat_pres_incr : (b : Bin) -> S (binToNat b) = binToNat $ incr b
bin_to_nat_pres_incr Z = Refl
bin_to_nat_pres_incr (Odd x) = Refl
bin_to_nat_pres_incr (Even x) = Refl
natToBin : Nat -> Bin
natToBin Z = Z
natToBin (S (S k)) = Even (natToBin k)
natToBin (S k) = Odd (natToBin k)
nat_conv_full_cycle : (n : Nat) -> binToNat $ natToBin n = n
nat_conv_full_cycle Z = Refl
nat_conv_full_cycle (S k) = ?nat_conv_full_cycle_rhs_2
|
theory nat_acc_alt_mul_comm
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype Nat = Z | S "Nat"
fun accplus :: "Nat => Nat => Nat" where
"accplus (Z) y = y"
| "accplus (S z) y = accplus z (S y)"
fun accaltmul :: "Nat => Nat => Nat" where
"accaltmul (Z) y = Z"
| "accaltmul (S z) (Z) = Z"
| "accaltmul (S z) (S x2) =
S (accplus z (accplus x2 (accaltmul z x2)))"
(*hipster accplus accaltmul *)
theorem x0 :
"!! (x :: Nat) (y :: Nat) . (accaltmul x y) = (accaltmul y x)"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
# Make sure every required package is available to the app.
library(shiny)
library(reshape2)
library(popbio)
library(magrittr)
library(cowplot)
library(ggridges)
library(knitr)
library(shinydashboard)
library(dplyr)
library(forcats)
library(ggplot2)
library(markdown)
library(tidyr)
library(stringr)
source("det_mod_server.r", local = TRUE)
source("det_modUI.r", local = TRUE)
source("server.r", local = TRUE)
source("ui.r", local = TRUE)
|
section \<open>Transition Systems\<close>
theory Transition_System_Extensions
imports
"Basics/Word_Prefixes"
"Extensions/Set_Extensions"
"Extensions/Relation_Extensions"
Transition_Systems_and_Automata.Transition_System
Transition_Systems_and_Automata.Transition_System_Extra
Transition_Systems_and_Automata.Transition_System_Construction
begin
context transition_system_initial
begin
definition cycles :: "'state \<Rightarrow> 'transition list set"
where "cycles p \<equiv> {w. path w p \<and> target w p = p}"
lemma cyclesI[intro!]:
assumes "path w p" "target w p = p"
shows "w \<in> cycles p"
using assms unfolding cycles_def by auto
lemma cyclesE[elim!]:
assumes "w \<in> cycles p"
obtains "path w p" "target w p = p"
using assms unfolding cycles_def by auto
inductive_set executable :: "'transition set"
where executable: "p \<in> nodes \<Longrightarrow> enabled a p \<Longrightarrow> a \<in> executable"
lemma executableI_step[intro!]:
assumes "p \<in> nodes" "enabled a p"
shows "a \<in> executable"
using executable assms by this
lemma executableI_words_fin[intro!]:
assumes "p \<in> nodes" "path w p"
shows "set w \<subseteq> executable"
using assms by (induct w arbitrary: p, auto del: subsetI)
lemma executableE[elim?]:
assumes "a \<in> executable"
obtains p
where "p \<in> nodes" "enabled a p"
using assms by induct auto
end
locale transition_system_interpreted =
transition_system ex en
for ex :: "'action \<Rightarrow> 'state \<Rightarrow> 'state"
and en :: "'action \<Rightarrow> 'state \<Rightarrow> bool"
and int :: "'state \<Rightarrow> 'interpretation"
begin
definition visible :: "'action set"
where "visible \<equiv> {a. \<exists> q. en a q \<and> int q \<noteq> int (ex a q)}"
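text \<open>An action is visible if it can change the interpretation of some
state in which it is enabled.\<close>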
lemma visibleI[intro]:
assumes "en a q" "int q \<noteq> int (ex a q)"
shows "a \<in> visible"
using assms unfolding visible_def by auto
lemma visibleE[elim]:
assumes "a \<in> visible"
obtains q
where "en a q" "int q \<noteq> int (ex a q)"
using assms unfolding visible_def by auto
abbreviation "invisible \<equiv> - visible"
lemma execute_fin_word_invisible:
assumes "path w p" "set w \<subseteq> invisible"
shows "int (target w p) = int p"
using assms by (induct w arbitrary: p rule: list.induct, auto)
lemma execute_inf_word_invisible:
assumes "run w p" "k \<le> l" "\<And> i. k \<le> i \<Longrightarrow> i < l \<Longrightarrow> w !! i \<notin> visible"
shows "int ((p ## trace w p) !! k) = int ((p ## trace w p) !! l)"
proof -
have "(p ## trace w p) !! l = target (stake l w) p" by simp
also have "stake l w = stake k w @ stake (l - k) (sdrop k w)" using assms(2) by simp
also have "target \<dots> p = target (stake (l - k) (sdrop k w)) (target (stake k w) p)"
unfolding fold_append comp_apply by rule
also have "int \<dots> = int (target (stake k w) p)"
proof (rule execute_fin_word_invisible)
have "w = stake l w @- sdrop l w" by simp
also have "stake l w = stake k w @ stake (l - k) (sdrop k w)" using assms(2) by simp
finally have 1: "run (stake k w @- stake (l - k) (sdrop k w) @- sdrop l w) p"
unfolding shift_append using assms(1) by simp
show "path (stake (l - k) (sdrop k w)) (target (stake k w) p)" using 1 by auto
show "set (stake (l - k) (sdrop k w)) \<subseteq> invisible" using assms(3) by (auto simp: set_stake_snth)
qed
also have "\<dots> = int ((p ## trace w p) !! k)" by simp
finally show ?thesis by rule
qed
end
locale transition_system_complete =
transition_system_initial ex en init +
transition_system_interpreted ex en int
for ex :: "'action \<Rightarrow> 'state \<Rightarrow> 'state"
and en :: "'action \<Rightarrow> 'state \<Rightarrow> bool"
and init :: "'state \<Rightarrow> bool"
and int :: "'state \<Rightarrow> 'interpretation"
begin
definition language :: "'interpretation stream set"
where "language \<equiv> {smap int (p ## trace w p) |p w. init p \<and> run w p}"
lemma languageI[intro!]:
assumes "w = smap int (p ## trace v p)" "init p" "run v p"
shows "w \<in> language"
using assms unfolding language_def by auto
lemma languageE[elim!]:
assumes "w \<in> language"
obtains p v
where "w = smap int (p ## trace v p)" "init p" "run v p"
using assms unfolding language_def by auto
end
locale transition_system_finite_nodes =
transition_system_initial ex en init
for ex :: "'action \<Rightarrow> 'state \<Rightarrow> 'state"
and en :: "'action \<Rightarrow> 'state \<Rightarrow> bool"
and init :: "'state \<Rightarrow> bool"
+
assumes reachable_finite: "finite nodes"
locale transition_system_cut =
transition_system_finite_nodes ex en init
for ex :: "'action \<Rightarrow> 'state \<Rightarrow> 'state"
and en :: "'action \<Rightarrow> 'state \<Rightarrow> bool"
and init :: "'state \<Rightarrow> bool"
+
fixes cuts :: "'action set"
assumes cycles_cut: "p \<in> nodes \<Longrightarrow> w \<in> cycles p \<Longrightarrow> w \<noteq> [] \<Longrightarrow> set w \<inter> cuts \<noteq> {}"
begin
inductive scut :: "'state \<Rightarrow> 'state \<Rightarrow> bool"
where scut: "p \<in> nodes \<Longrightarrow> en a p \<Longrightarrow> a \<notin> cuts \<Longrightarrow> scut p (ex a p)"
declare scut.intros[intro!]
declare scut.cases[elim!]
lemma scut_reachable:
assumes "scut p q"
shows "p \<in> nodes" "q \<in> nodes"
using assms by auto
lemma scut_trancl:
assumes "scut\<^sup>+\<^sup>+ p q"
obtains w
where "path w p" "target w p = q" "set w \<inter> cuts = {}" "w \<noteq> []"
using assms
proof (induct arbitrary: thesis)
case (base q)
show ?case using base by force
next
case (step q r)
obtain w where 1: "path w p" "target w p = q" "set w \<inter> cuts = {}" "w \<noteq> []"
using step(3) by this
obtain a where 2: "en a q" "a \<notin> cuts" "ex a q = r" using step(2) by auto
show ?case
proof (rule step(4))
show "path (w @ [a]) p" using 1 2 by auto
show "target (w @ [a]) p = r" using 1 2 by auto
show "set (w @ [a]) \<inter> cuts = {}" using 1 2 by auto
show "w @ [a] \<noteq> []" by auto
qed
qed
sublocale wellfounded_relation "scut\<inverse>\<inverse>"
proof (unfold_locales, intro finite_acyclic_wf_converse[to_pred] acyclicI[to_pred], safe)
have 1: "{(p, q). scut p q} \<subseteq> nodes \<times> nodes" using scut_reachable by blast
have 2: "finite (nodes \<times> nodes)"
using finite_cartesian_product reachable_finite by blast
show "finite {(p, q). scut p q}" using 1 2 by blast
next
fix p
assume 1: "scut\<^sup>+\<^sup>+ p p"
have 2: "p \<in> nodes" using 1 tranclE[to_pred] scut_reachable by metis
obtain w where 3: "path w p" "target w p = p" "set w \<inter> cuts = {}" "w \<noteq> []"
using scut_trancl 1 by this
have 4: "w \<in> cycles p" using 3(1, 2) by auto
have 5: "set w \<inter> cuts \<noteq> {}" using cycles_cut 2 4 3(4) by this
show "False" using 3(3) 5 by simp
qed
lemma no_cut_scut:
assumes "p \<in> nodes" "en a p" "a \<notin> cuts"
shows "scut\<inverse>\<inverse> (ex a p) p"
using assms by auto
end
locale transition_system_sticky =
transition_system_complete ex en init int +
transition_system_cut ex en init sticky
for ex :: "'action \<Rightarrow> 'state \<Rightarrow> 'state"
and en :: "'action \<Rightarrow> 'state \<Rightarrow> bool"
and init :: "'state \<Rightarrow> bool"
and int :: "'state \<Rightarrow> 'interpretation"
and sticky :: "'action set"
+
assumes executable_visible_sticky: "executable \<inter> visible \<subseteq> sticky"
end
|
inductive Mem' (a : α) : List α → Prop where
| intro (as bs) : Mem' a (as ++ (a :: bs))
example {x : α} (h : Mem' x l) : True :=
match h with
| ⟨as', bs'⟩ => True.intro
example {x : α} (h : Mem' x l ∧ Mem' x l') : True :=
match h with
| ⟨⟨as', bs'⟩, _⟩ => True.intro
|
import algebra.order
universe u
variables {α : Type u}
variables [decidable_linear_order α]
lemma cmp_eq_eq (a b : α)
: cmp a b = ordering.eq = (a = b) :=
by { simp [cmp,cmp_using_eq_eq], rw ← le_antisymm_iff, cc }
|
-- | Planning by Dynamic Programming (Matrix Version)
module PlanningM where
import Data.Map (Map)
import qualified Data.Map as M
import qualified Data.Set as S
import Numeric.LinearAlgebra.Data (Matrix, Vector, (!))
import Numeric.LinearAlgebra.HMatrix as H
import MDP
import Planning
data MDPM s a = MDPM
{ statesm :: Map s Int
, actionsm :: Map a Int
, transitionm :: Matrix Double -- matrix S x S
, rewardm :: Vector Double -- vector 1 x S
, gammam :: Double -- discount
, sizeStates :: Int -- S
, sizeActions :: Int -- A
}
type PolicyM = Matrix Double
policyEvaluationM :: (Ord s, Ord a) => MDP s a -> ValueFunction s a
policyEvaluationM mdp policy k s =
matrixPolicyEvaluation mdpm k ! (statesm mdpm M.! s)
where
mdpm = prepareMDP mdp (preparePolicy mdp policy)
preparePolicy :: MDP s a -> Policy s a -> PolicyM
preparePolicy mdp policy = H.fromLists (
map (
\s -> map (policy s) (S.toList (actions mdp)))
(S.toList (states mdp)))
prepareMDP :: (Ord s, Ord a) => MDP s a -> PolicyM -> MDPM s a
prepareMDP mdp policy =
MDPM
{ statesm = statesM
, actionsm = actionsM
, transitionm = transitionM
, rewardm = rewardM
, gammam = gamma mdp
, sizeStates = S.size (states mdp)
, sizeActions = S.size (actions mdp)
}
where
statesM = M.fromList (zip (S.toList (states mdp)) [0 ..])
actionsM = M.fromList (zip (S.toList (actions mdp)) [0 ..])
transitionM =
H.fromRows
(map
(\s ->
takeDiag (policy H.<> (transitionMap M.! s)))
(S.toList (states mdp)))
transitionMap =
M.fromList
(map
(\s ->
(s, createMatrix s))
(S.toList (states mdp)))
createMatrix s' =
H.fromLists
(map
(\a ->
map
(\s ->
transition mdp a s s')
(S.toList (states mdp)))
(S.toList (actions mdp)))
rewardM = takeDiag (policy H.<> createRewardMatrix)
createRewardMatrix =
H.fromLists
(map
(\a ->
map
(
reward mdp a)
(S.toList (states mdp)))
(S.toList (actions mdp)))
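-- Iterative policy evaluation in matrix form: repeatedly apply the Bellman
-- expectation backup v <- r + gamma * (P #> v), starting from the zero
-- vector and unrolling k steps.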
matrixPolicyEvaluation :: MDPM s a -> Int -> Vector Double
matrixPolicyEvaluation mdpm = evalVF (konst 0.0 (sizeStates mdpm))
where
evalVF v 0 = v
evalVF v k =
evalVF
(rewardm mdpm + gammam mdpm `scale` (transitionm mdpm H.#> v))
(k - 1)
matrixPolicyImprove :: MDPM s a -> Vector Double -> Matrix Double
matrixPolicyImprove mdpm vf = undefined
|
module Testlib1
export
val1 : String
val1 = "Hello, Idris!"
|
```python
import torch
import pandas as pd
import matplotlib.pyplot as plt
```
```python
128*128*4
```
65536
```python
m = 4
Y_true_score = torch.randint(low=0, high=2, size=(m, 1, 3, 3))
text_mask = Y_true_score
text_mask = text_mask.expand(-1, 8, -1, -1)
print("text_mask.sum()", text_mask.sum())
Y_true_geometry = torch.randint(low=0, high=512, size=(m, 8, 3, 3))
print(Y_true_score.shape, text_mask.shape)
print(Y_true_geometry.shape)
```
text_mask.sum() tensor(184)
torch.Size([4, 1, 3, 3]) torch.Size([4, 8, 3, 3])
torch.Size([4, 8, 3, 3])
```python
(Y_true_geometry * text_mask).shape
```
torch.Size([4, 8, 3, 3])
```python
print(Y_true_geometry.sum())
print((Y_true_geometry * text_mask).sum())
```
tensor(71464)
tensor(46471)
```python
(Y_true_geometry * Y_true_score).sum()
```
tensor(46471)
```python
print(text_mask.sum())
print(19*8)
```
tensor(152)
152
```python
text_mask
```
tensor([[[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]],
[[ True, True, True],
[ True, True, True],
[False, False, False]]],
[[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]],
[[ True, False, True],
[ True, False, True],
[False, False, True]]],
[[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]],
[[False, False, False],
[False, True, True],
[False, True, True]]],
[[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]],
[[False, False, True],
[ True, False, False],
[False, True, True]]]])
```python
Y_true_geometry[text_mask].shape
```
torch.Size([152])
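```python
# Note (added for clarity): boolean-mask indexing flattens, so
# Y_true_geometry[text_mask] returns a 1-D tensor with one entry per True in
# the mask (152 here), whereas Y_true_geometry * text_mask keeps the
# 4x8x3x3 shape and merely zeroes the masked-out entries.
```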
```python
data = pd.read_csv("experiment_loss/experiment_1.csv")
```
```python
data.head()
```
   epoch_number  mini_batch_number  score_loss  geometry_loss        loss
0             1                  1    9.469061     255.954950  265.424011
1             1                  2    9.007338     255.939075  264.946413
2             1                  3    8.718753     255.925082  264.643834
3             1                  4    8.776470     255.913685  264.690156
4             1                  5    7.910673     255.905035  263.815708
```python
loss_data = data.groupby("epoch_number").mean()
loss_data.head()
```
              mini_batch_number  score_loss  geometry_loss        loss
epoch_number
1                           209    8.044558     253.295127  261.339685
2                           209    8.045735     247.963428  256.009163
3                           209    8.046287     242.642643  250.688930
4                           209    8.045045     237.339430  245.384476
5                           209    8.044556     232.059850  240.104406
```python
plt.figure()
plt.plot(loss_data.index[:-1], loss_data['geometry_loss'][:-1], marker="o", linestyle="--")
plt.xticks(loss_data.index[:-1])
plt.xlabel("epochs")
plt.ylabel("loss")
plt.savefig("experiment_plot/experiment_1_geometry_loss.png")
```
```python
bbs = [[320,2,373,1,376,23,319,25],
[377,4,430,2,431,30,375,31],
[347,3,388,2,390,36,347,37],
[336,7,376,5,378,48,334,50],
[313,11,355,7,357,61,312,63]]
```
```python
from sympy import Polygon
from sympy.geometry import intersection
import numpy as np
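# Each bb lists four corner points as [x1, y1, ..., x4, y4]; reshape(-1, 2)
# below recovers the quadrilateral's vertices in order.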
for idx1, bb1 in enumerate(bbs):
for idx2, bb2 in enumerate(bbs):
#print(idx1, idx2, compute_iou(bb1, bb2))
bb1_ = [(x, y) for x, y in np.array(bb1).reshape(-1, 2)]
bb2_ = [(x, y) for x, y in np.array(bb2).reshape(-1, 2)]
#print(bb1_, bb2_)
poly1 = Polygon(*bb1_)
poly2 = Polygon(*bb2_)
print(poly1.vertices, poly2.vertices)
print("intersection: ", intersection(poly1, poly2))
print(poly1, poly2)
print(poly1.area, poly2.area)
```
0 0 0.9999999914965987
[(320, 2), (373, 1), (376, 23), (319, 25)] [(320, 2), (373, 1), (376, 23), (319, 25)]
[Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)] [Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)]
intersection: [Segment2D(Point2D(319, 25), Point2D(320, 2)), Segment2D(Point2D(320, 2), Point2D(373, 1)), Segment2D(Point2D(373, 1), Point2D(376, 23)), Segment2D(Point2D(376, 23), Point2D(319, 25))]
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)) Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
1239 1239
0 1 0.007418976933155107
[(320, 2), (373, 1), (376, 23), (319, 25)] [(377, 4), (430, 2), (431, 30), (375, 31)]
[Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)] [Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)]
intersection: [Point2D(576533/1535, 35327/1535), Point2D(46967/125, 2633/125)]
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)) Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
1239 1498
0 2 0.28784118963850525
[(320, 2), (373, 1), (376, 23), (319, 25)] [(347, 3), (388, 2), (390, 36), (347, 37)]
[Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)] [Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)]
intersection: [Point2D(347, 1369/57), Point2D(337733/905, 2137/905)]
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)) Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
1239 1429
0 3 0.28343666836387654
[(320, 2), (373, 1), (376, 23), (319, 25)] [(336, 7), (376, 5), (378, 48), (334, 50)]
[Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)] [Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)]
intersection: [Point2D(820208/2447, 59785/2447), Point2D(165488/443, 2269/443)]
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)) Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
1239 1806
0 4 0.15143246878774735
[(320, 2), (373, 1), (376, 23), (319, 25)] [(313, 11), (355, 7), (357, 61), (312, 63)]
[Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)] [Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)]
intersection: [Point2D(153745/481, 4987/481), Point2D(548009/1541, 36545/1541)]
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)) Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
1239 2307
1 0 0.007418976933155107
[(377, 4), (430, 2), (431, 30), (375, 31)] [(320, 2), (373, 1), (376, 23), (319, 25)]
[Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)] [Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)]
intersection: [Point2D(576533/1535, 35327/1535), Point2D(46967/125, 2633/125)]
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)) Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
1498 1239
1 1 0.999999992877493
[(377, 4), (430, 2), (431, 30), (375, 31)] [(377, 4), (430, 2), (431, 30), (375, 31)]
[Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)] [Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)]
intersection: [Segment2D(Point2D(375, 31), Point2D(377, 4)), Segment2D(Point2D(377, 4), Point2D(430, 2)), Segment2D(Point2D(430, 2), Point2D(431, 30)), Segment2D(Point2D(431, 30), Point2D(375, 31))]
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)) Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
1498 1498
1 2 0.1360160960321284
[(377, 4), (430, 2), (431, 30), (375, 31)] [(347, 3), (388, 2), (390, 36), (347, 37)]
[Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)] [Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)]
intersection: [Point2D(16688/43, 154/43), Point2D(371375/953, 29293/953)]
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)) Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
1498 1429
1 3 0.007412181735700348
[(377, 4), (430, 2), (431, 30), (375, 31)] [(336, 7), (376, 5), (378, 48), (334, 50)]
[Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)] [Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)]
intersection: [Point2D(5269/14, 355/28), Point2D(90907/241, 14923/482)]
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)) Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
1498 1806
1 4 0.11786600459718981
[(377, 4), (430, 2), (431, 30), (375, 31)] [(313, 11), (355, 7), (357, 61), (312, 63)]
[Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)] [Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)]
intersection: []
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)) Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
1498 2307
2 0 0.28784118963850525
[(347, 3), (388, 2), (390, 36), (347, 37)] [(320, 2), (373, 1), (376, 23), (319, 25)]
[Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)] [Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)]
intersection: [Point2D(347, 1369/57), Point2D(337733/905, 2137/905)]
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)) Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
1429 1239
2 1 0.1360160960321284
[(347, 3), (388, 2), (390, 36), (347, 37)] [(377, 4), (430, 2), (431, 30), (375, 31)]
[Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)] [Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)]
intersection: [Point2D(16688/43, 154/43), Point2D(371375/953, 29293/953)]
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)) Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
1429 1498
2 2 0.9999999929527837
[(347, 3), (388, 2), (390, 36), (347, 37)] [(347, 3), (388, 2), (390, 36), (347, 37)]
[Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)] [Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)]
intersection: [Segment2D(Point2D(347, 3), Point2D(388, 2)), Segment2D(Point2D(347, 37), Point2D(347, 3)), Segment2D(Point2D(388, 2), Point2D(390, 36)), Segment2D(Point2D(390, 36), Point2D(347, 37))]
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)) Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
1429 1429
2 3 0.4009812649376393
[(347, 3), (388, 2), (390, 36), (347, 37)] [(336, 7), (376, 5), (378, 48), (334, 50)]
[Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)] [Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)]
intersection: [Point2D(347, 129/20), Point2D(232890/617, 22392/617)]
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)) Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
1429 1806
2 4 0.07420599562420303
[(347, 3), (388, 2), (390, 36), (347, 37)] [(313, 11), (355, 7), (357, 61), (312, 63)]
[Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)] [Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)]
intersection: [Point2D(347, 163/21), Point2D(206896/581, 21374/581)]
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)) Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
1429 2307
3 0 0.28343666836387654
[(336, 7), (376, 5), (378, 48), (334, 50)] [(320, 2), (373, 1), (376, 23), (319, 25)]
[Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)] [Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)]
intersection: [Point2D(820208/2447, 59785/2447), Point2D(165488/443, 2269/443)]
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)) Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
1806 1239
3 1 0.007412181735700348
[(336, 7), (376, 5), (378, 48), (334, 50)] [(377, 4), (430, 2), (431, 30), (375, 31)]
[Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)] [Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)]
intersection: [Point2D(5269/14, 355/28), Point2D(90907/241, 14923/482)]
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)) Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
1806 1498
3 2 0.4009812649376393
[(336, 7), (376, 5), (378, 48), (334, 50)] [(347, 3), (388, 2), (390, 36), (347, 37)]
[Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)] [Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)]
intersection: [Point2D(347, 129/20), Point2D(232890/617, 22392/617)]
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)) Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
1806 1429
3 3 0.9999999941927992
[(336, 7), (376, 5), (378, 48), (334, 50)] [(336, 7), (376, 5), (378, 48), (334, 50)]
[Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)] [Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)]
intersection: [Segment2D(Point2D(334, 50), Point2D(336, 7)), Segment2D(Point2D(336, 7), Point2D(376, 5)), Segment2D(Point2D(376, 5), Point2D(378, 48)), Segment2D(Point2D(378, 48), Point2D(334, 50))]
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)) Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
1806 1806
3 4 0.2470588227438511
[(336, 7), (376, 5), (378, 48), (334, 50)] [(313, 11), (355, 7), (357, 61), (312, 63)]
[Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)] [Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)]
intersection: [Point2D(301988/899, 7927/899), Point2D(42430/119, 5828/119)]
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)) Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
1806 2307
4 0 0.15143246878774735
[(313, 11), (355, 7), (357, 61), (312, 63)] [(320, 2), (373, 1), (376, 23), (319, 25)]
[Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)] [Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25)]
intersection: [Point2D(153745/481, 4987/481), Point2D(548009/1541, 36545/1541)]
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)) Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
2307 1239
4 1 0.11786600459718981
[(313, 11), (355, 7), (357, 61), (312, 63)] [(377, 4), (430, 2), (431, 30), (375, 31)]
[Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)] [Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31)]
intersection: []
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)) Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
2307 1498
4 2 0.07420599562420303
[(313, 11), (355, 7), (357, 61), (312, 63)] [(347, 3), (388, 2), (390, 36), (347, 37)]
[Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)] [Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37)]
intersection: [Point2D(347, 163/21), Point2D(206896/581, 21374/581)]
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)) Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
2307 1429
4 3 0.2470588227438511
[(313, 11), (355, 7), (357, 61), (312, 63)] [(336, 7), (376, 5), (378, 48), (334, 50)]
[Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)] [Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50)]
intersection: [Point2D(301988/899, 7927/899), Point2D(42430/119, 5828/119)]
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)) Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
2307 1806
4 4 0.9999999954545454
[(313, 11), (355, 7), (357, 61), (312, 63)] [(313, 11), (355, 7), (357, 61), (312, 63)]
[Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)] [Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)]
intersection: [Segment2D(Point2D(312, 63), Point2D(313, 11)), Segment2D(Point2D(313, 11), Point2D(355, 7)), Segment2D(Point2D(355, 7), Point2D(357, 61)), Segment2D(Point2D(357, 61), Point2D(312, 63))]
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63)) Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
2307 2307
```python
from sympy import Polygon
from sympy.geometry import intersection
import numpy as np
for idx1, bb1 in enumerate(bbs):
bb1_ = [(x, y) for x, y in np.array(bb1).reshape(-1, 2)]
poly1 = Polygon(*bb1_)
print(poly1)
print(poly1.bounds)
```
Polygon(Point2D(320, 2), Point2D(373, 1), Point2D(376, 23), Point2D(319, 25))
(319, 1, 376, 25)
Polygon(Point2D(377, 4), Point2D(430, 2), Point2D(431, 30), Point2D(375, 31))
(375, 2, 431, 31)
Polygon(Point2D(347, 3), Point2D(388, 2), Point2D(390, 36), Point2D(347, 37))
(347, 2, 390, 37)
Polygon(Point2D(336, 7), Point2D(376, 5), Point2D(378, 48), Point2D(334, 50))
(334, 5, 378, 50)
Polygon(Point2D(313, 11), Point2D(355, 7), Point2D(357, 61), Point2D(312, 63))
(312, 7, 357, 63)
```python
a = np.random.randint(1, 10, (4, 2))
print(a.shape)
b = np.array([1, 2])
print(b.shape)
c = a - b
print(c.shape)
```
(4, 2)
(2,)
(4, 2)
```python
```
|
header {* Program statements, Hoare and refinement rules *}
theory Statements
imports Assertion_Algebra
begin
text {*In this section we introduce the assume, if, and while program statements
as well as Hoare triples and data refinement. We prove Hoare correctness
rules for the program statements, and we prove some theorems linking Hoare
correctness statements to (data) refinement. Most of the theorems assume
a monotonic boolean transformers algebra. The theorem stating the
equivalence between a Hoare correctness triple and a refinement statement
holds under the assumption that we have a monotonic boolean transformers
algebra with a postcondition statement.*}
definition
"assume" :: "'a::mbt_algebra Assertion \<Rightarrow> 'a" ("[\<cdot> _ ]" [0] 1000) where
"[\<cdot>p] = {\<cdot>p} ^ o"
lemma [simp]: "[\<cdot>p] * x \<squnion> {\<cdot>-p} * \<top> = [\<cdot>p] * x"
by (simp add: assume_def uminus_Assertion_def)
lemma [simp]: "{\<cdot>p} * \<top> \<squnion> [\<cdot>-p] * x = [\<cdot>-p] * x"
by (simp add: assume_def uminus_Assertion_def)
lemma assert_sup: "{\<cdot>p \<squnion> q} = {\<cdot>p} \<squnion> {\<cdot>q}"
by (simp add: sup_Assertion_def)
lemma assert_inf: "{\<cdot>p \<sqinter> q} = {\<cdot>p} \<sqinter> {\<cdot>q}"
by (simp add: inf_Assertion_def)
lemma assert_neg: "{\<cdot>-p} = neg_assert {\<cdot>p}"
by (simp add: uminus_Assertion_def)
lemma assert_false [simp]: "{\<cdot>\<bottom>} = \<bottom>"
by (simp add: bot_Assertion_def)
lemma if_Assertion_assumption: "({\<cdot>p} * x) \<squnion> ({\<cdot>-p} * y) = ([\<cdot>p] * x) \<sqinter> ([\<cdot>-p] * y)"
proof -
have "({\<cdot>p} * x) \<squnion> {\<cdot>-p} * y = ({\<cdot>p} * \<top> \<sqinter> [\<cdot>p]) * x \<squnion> ({\<cdot>-p} * \<top> \<sqinter> [\<cdot>-p]) * y" by simp
also have "\<dots> = ({\<cdot>p} * \<top> \<sqinter> ([\<cdot>p] * x)) \<squnion> ({\<cdot>-p} * \<top> \<sqinter> ([\<cdot>-p] * y))" by (unfold inf_comp, simp)
also have "\<dots> = (({\<cdot>p} * \<top> \<sqinter> ([\<cdot>p] * x)) \<squnion> ({\<cdot>-p} * \<top>)) \<sqinter> (({\<cdot>p} * \<top> \<sqinter> ([\<cdot>p] * x)) \<squnion> ([\<cdot>-p] * y))" by (simp add: sup_inf_distrib)
also have "\<dots> = (({\<cdot>p} * \<top> \<squnion> ({\<cdot>-p} * \<top>)) \<sqinter> (([\<cdot>p] * x))) \<sqinter> (([\<cdot>-p] * y) \<sqinter> (([\<cdot>p] * x) \<squnion> ([\<cdot>-p] * y)))"
by (simp add: sup_inf_distrib2)
also have "\<dots> = ([\<cdot>p] * x) \<sqinter> ([\<cdot>-p] * y) \<sqinter> (([\<cdot>p] * x) \<squnion> ([\<cdot>-p] * y))"
apply (simp add: sup_comp [THEN sym] )
by (simp add: assert_sup [THEN sym] inf_assoc sup_compl_top)
also have "\<dots> = ([\<cdot>p] * x) \<sqinter> ([\<cdot>-p] * y)"
by (rule antisym, simp_all add: inf_assoc)
finally show ?thesis .
qed
definition
"wp x p = abs_wpt (x * {\<cdot>p})"
lemma wp_assume: "wp [\<cdot>p] q = -p \<squnion> q"
apply (simp add: wp_def abs_wpt_def)
apply (rule assert_injective)
apply simp
by (simp add: assert_sup assert_neg assume_def wpt_dual_assertion_comp)
lemma assert_commute: "y \<in> conjunctive \<Longrightarrow> y * {\<cdot>p} = {\<cdot> wp y p } * y"
apply (simp add: wp_def abs_wpt_def)
by (rule assertion_commute, simp_all)
lemma wp_assert: "wp {\<cdot>p} q = p \<sqinter> q"
by (simp add: wp_def assertion_inf_comp_eq [THEN sym] assert_inf [THEN sym])
lemma wp_mono [simp]: "mono (wp x)"
apply (simp add: le_fun_def wp_def abs_wpt_def less_eq_Assertion_def mono_def)
apply (simp add: wpt_def, safe)
apply (rule_tac y = " x * {\<cdot> xa } * \<top>" in order_trans, simp_all)
apply (rule le_comp_right)
by (rule le_comp, simp)
lemma wp_fun_mono [simp]: "mono wp"
apply (simp add: le_fun_def wp_def abs_wpt_def less_eq_Assertion_def mono_def)
apply (simp add: wpt_def, safe)
apply (rule_tac y = " x * {\<cdot> xa } * \<top>" in order_trans, simp_all)
apply (rule le_comp_right)
by (rule le_comp_right, simp)
lemma wp_fun_mono2: "x \<le> y \<Longrightarrow> wp x p \<le> wp y p"
apply (cut_tac wp_fun_mono)
apply (unfold mono_def)
apply (simp add: le_fun_def)
by blast
lemma wp_comp: "wp (x * y) p = wp x (wp y p)"
apply (simp add: wp_def abs_wpt_def)
by (unfold wpt_comp_2 [THEN sym] mult.assoc, simp)
lemma wp_choice: "wp (x \<sqinter> y) = wp x \<sqinter> wp y"
apply (simp add: fun_eq_iff wp_def inf_fun_def inf_comp inf_Assertion_def abs_wpt_def)
by (simp add: wpt_choice)
lemma [simp]: "wp 1 = id"
apply (unfold fun_eq_iff, safe)
apply (rule assert_injective)
by (simp add: wp_def abs_wpt_def)
lemma wp_omega_fix: "wp (x ^ \<omega>) p = wp x (wp (x ^ \<omega>) p) \<sqinter> p"
apply (subst omega_fix)
by (simp add: wp_choice wp_comp)
lemma wp_omega_least: "(wp x r) \<sqinter> p \<le> r \<Longrightarrow> wp (x ^ \<omega>) p \<le> r"
apply (simp add: wp_def abs_wpt_def inf_Assertion_def less_eq_Assertion_def)
apply (simp add: wpt_def)
apply (rule_tac y = "{\<cdot>r} * \<top> \<sqinter> 1" in order_trans)
apply simp
apply (rule_tac y = "x ^ \<omega> * {\<cdot> p } * \<top>" in order_trans, simp)
apply (simp add: mult.assoc)
apply (rule omega_least)
apply (drule_tac z = \<top> in le_comp_right)
apply (simp add: inf_comp mult.assoc [THEN sym])
by (simp add: assertion_prop)
lemma Assertion_wp: "{\<cdot>wp x p} = (x * {\<cdot>p} * \<top>) \<sqinter> 1"
apply (simp add: wp_def abs_wpt_def)
by (simp add: wpt_def)
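text {*A Hoare triple holds exactly when the precondition is below the
weakest precondition of the statement with respect to the postcondition.*}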
definition
"hoare p S q = (p \<le> wp S q)"
definition
"grd x = - (wp x \<bottom>)"
lemma grd_comp: "[\<cdot>grd x] * x = x"
apply (simp add: grd_def wp_def uminus_Assertion_def assume_def neg_assert_def abs_wpt_def dual_sup sup_comp)
apply (simp add: wpt_def dual_inf sup_comp dual_comp bot_Assertion_def)
by (rule antisym, simp_all)
lemma assert_assume: "{\<cdot>p} * [\<cdot>p] = {\<cdot> p}"
by (simp add: assume_def)
lemma dual_assume: "[\<cdot>p] ^ o = {\<cdot>p}"
by (simp add: assume_def)
lemma assume_prop: "([\<cdot>p] * \<bottom>) \<squnion> 1 = [\<cdot>p]"
by (simp add: assume_def dual_assertion_prop)
text{*An alternative definition of a Hoare triple*}
definition "hoare1 p S q = ([\<cdot> p ] * S * [\<cdot> -q ] = \<top>)"
lemma "hoare1 p S q = hoare p S q"
apply (simp add: hoare1_def dual_inf dual_comp)
apply (simp add: hoare_def wp_def less_eq_Assertion_def abs_wpt_def)
apply (simp add: wpt_def)
apply safe
proof -
assume A: "[\<cdot> p ] * S * [\<cdot> - q ] = \<top>"
have "{\<cdot>p} \<le> {\<cdot>p} * \<top>" by simp
also have "... \<le> {\<cdot>p} * \<top> * \<bottom>" by (unfold mult.assoc, simp)
also have "... = {\<cdot>p} * [\<cdot> p ] * S * [\<cdot> - q ] * \<bottom>" by (subst A [THEN sym], simp add: mult.assoc)
also have "... = {\<cdot>p} * S * [\<cdot> - q ] * \<bottom>" by (simp add: assert_assume)
also have "... \<le> {\<cdot>p} * S * {\<cdot> q } * \<top>"
apply (simp add: mult.assoc)
apply (rule le_comp, rule le_comp)
apply (simp add: assume_def uminus_Assertion_def)
by (simp add: neg_assert_def dual_inf dual_comp sup_comp)
also have "... \<le> S * {\<cdot> q } * \<top>" by (simp add: mult.assoc)
finally show "{\<cdot>p} \<le> S * {\<cdot> q } * \<top>" .
next
assume A: "{\<cdot> p } \<le> S * {\<cdot> q } * \<top>"
have "\<top> = ((S * {\<cdot>q}) ^ o) * \<bottom> \<squnion> S * {\<cdot>q} * \<top>" by simp
also have "\<dots> \<le> [\<cdot>p] * \<bottom> \<squnion> S * {\<cdot>q} * \<top>"
apply (simp del: dual_neg_top)
apply (rule_tac y = "[\<cdot>p] * \<bottom>" in order_trans, simp_all)
apply (subst dual_le)
apply (simp add: dual_comp dual_assume)
apply (cut_tac x = "{\<cdot>p}" and y = "S * {\<cdot>q} * \<top>" and z = \<top> in le_comp_right)
apply (rule A)
by (simp add: mult.assoc)
also have "\<dots> = [\<cdot>p] * S * ({\<cdot>q} * \<top>)"
apply (subst (2) assume_prop [THEN sym])
by (simp_all add: sup_comp mult.assoc)
also have "\<dots> \<le> [\<cdot>p] * S * ({\<cdot>q} * \<top> \<squnion> 1)"
by (rule le_comp, simp)
also have "\<dots> = [\<cdot>p] * S * [\<cdot>-q]"
apply (simp add: assume_def uminus_Assertion_def)
by (simp add: neg_assert_def dual_inf dual_comp)
finally show "[\<cdot>p] * S * [\<cdot> - q] = \<top>"
by (rule_tac antisym, simp_all)
qed
lemma hoare_choice: "hoare p (x \<sqinter> y) q = ((hoare p) x q & (hoare p y q))"
apply (unfold hoare_def wp_choice inf_fun_def)
by auto
definition
if_stm:: "'a::mbt_algebra Assertion \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" ("(If (_)/ then (_)/ else (_))" [0, 0, 10] 10) where
"if_stm b x y = (([\<cdot> b ] * x) \<sqinter> ([\<cdot> -b ] * y))"
lemma if_assertion: "(If p then x else y) = {\<cdot>p} * x \<squnion> {\<cdot> -p} * y"
by (simp add: if_stm_def if_Assertion_assumption)
lemma (in boolean_algebra) sup_neg_inf:
"(p \<le> q \<squnion> r) = (p \<sqinter> -q \<le> r)"
apply (safe)
apply(cut_tac a = p and c = "q \<squnion> r" and b = "-q" and d = "-q" in inf_mono)
apply simp apply simp apply (simp add: inf_sup_distrib2 inf_compl_bot)
apply(cut_tac b = "p \<sqinter> - q" and d = "r" and a = "q" and c = "q" in sup_mono)
apply simp apply simp by (simp add: sup_inf_distrib sup_compl_top)
lemma hoare_if: "hoare p (If b then x else y) q = (hoare (p \<sqinter> b) x q \<and> hoare (p \<sqinter> -b) y q)"
by (simp add: hoare_def if_stm_def wp_choice inf_fun_def wp_comp wp_assume sup_neg_inf)
lemma hoare_comp: "hoare p (x * y) q = (\<exists> r . (hoare p x r) \<and> (hoare r y q))"
apply (simp add: hoare_def wp_comp)
apply safe
apply (rule_tac x = "wp y q" in exI, simp)
apply (rule_tac y = "wp x r" in order_trans, simp)
apply (rule_tac f = "wp x" in monoD)
by simp_all
lemma hoare_refinement: "hoare p S q = ({\<cdot>p} * (post {\<cdot>q}) \<le> S)"
apply (simp add: hoare_def less_eq_Assertion_def Assertion_wp)
proof
assume A: "{\<cdot>p} \<le> S * {\<cdot>q} * \<top>"
have "{\<cdot>p} * post {\<cdot>q} = ({\<cdot>p} * \<top> \<sqinter> 1) * post {\<cdot>q}" by (simp add: assertion_prop)
also have "\<dots> = {\<cdot>p} * \<top> \<sqinter> post {\<cdot>q}" by (simp add: inf_comp)
also have "\<dots> \<le> S * {\<cdot>q} * \<top> \<sqinter> post {\<cdot>q}" apply simp
apply (rule_tac y = "{\<cdot>p} * \<top>" in order_trans, simp_all)
apply (cut_tac x = "{\<cdot>p}" and y = "S * {\<cdot>q} * \<top>" and z = \<top> in le_comp_right)
by (rule A, simp)
also have "\<dots> \<le> S" by (simp add: post_2)
finally show "{\<cdot>p} * post {\<cdot>q} \<le> S".
next
assume A: "{\<cdot>p} * post {\<cdot>q} \<le> S"
have "{\<cdot>p} = {\<cdot>p} * \<top> \<sqinter> 1" by (simp add: assertion_prop)
also have "\<dots> = {\<cdot>p} * ((post {\<cdot>q}) * {\<cdot>q} * \<top>) \<sqinter> 1" by (simp add: post_1)
also have "\<dots> \<le> {\<cdot>p} * ((post {\<cdot>q}) * {\<cdot>q} * \<top>)" by simp
also have "\<dots> \<le> S * {\<cdot>q} * \<top>"
apply (cut_tac x = "{\<cdot>p} * post {\<cdot>q}" and y = S and z = "{\<cdot>q} * \<top>" in le_comp_right)
apply (simp add: A)
by (simp add: mult.assoc)
finally show "{\<cdot>p} \<le> S * {\<cdot>q} * \<top>" .
qed
theorem hoare_fixpoint_mbt:
"F x = x
\<Longrightarrow> (!! (w::'a::well_founded) f . (\<And>v. v < w \<Longrightarrow> hoare (p v) f q) \<Longrightarrow> hoare (p w) (F f) q)
\<Longrightarrow> hoare (p u) x q"
apply (rule less_induct1)
proof -
fix xa
assume A: "\<And> w f. (\<And> v . v < w \<Longrightarrow> hoare (p v) f q) \<Longrightarrow> hoare (p w) (F f) q"
assume B: "F x = x"
assume C: "\<And>y . y < xa \<Longrightarrow> hoare (p y) x q"
have D: "hoare (p xa) (F x) q"
apply (rule A)
by (rule C, simp)
show "hoare (p xa) x q"
by (cut_tac D, simp add: B)
qed
lemma hoare_Sup: "hoare (Sup P) x q = (\<forall> p \<in> P . hoare p x q)"
apply (simp add: hoare_def)
apply auto
apply (rule_tac y = "Sup P" in order_trans, simp_all add: Sup_upper)
apply (rule Sup_least)
by simp
theorem hoare_fixpoint_complete_mbt:
"F x = x
\<Longrightarrow> (\<And> w f . hoare (Sup_less p w) f q \<Longrightarrow> hoare (p w) (F f) q)
\<Longrightarrow> hoare (Sup (range p)) x q"
apply (simp add: hoare_Sup Sup_less_def SUP_def del: Sup_image_eq, safe)
apply (rule_tac F = F in hoare_fixpoint_mbt)
by auto
definition
while:: "'a::mbt_algebra Assertion \<Rightarrow> 'a \<Rightarrow> 'a" ("(While (_)/ do (_))" [0, 10] 10) where
"while p x = ([\<cdot> p] * x) ^ \<omega> * [\<cdot> -p ]"
lemma while_false: "(While \<bottom> do x) = 1"
apply (unfold while_def)
apply (subst omega_fix)
by (simp_all add: assume_def)
lemma while_true: "(While \<top> do 1) = \<bottom>"
apply (unfold while_def)
by (rule antisym, simp_all add: assume_def)
lemma hoare_wp [simp]: "hoare (wp x q) x q"
by (simp add: hoare_def)
lemma hoare_comp_wp: "hoare p (x * y) q = hoare p x (wp y q)"
apply (unfold hoare_comp, safe)
apply (simp add: hoare_def)
apply (rule_tac y = "wp x r" in order_trans, simp)
apply (rule wp_mono2, simp)
by (rule_tac x = "wp y q" in exI, simp)
lemma (in mbt_algebra) hoare_assume: "hoare p [\<cdot>b] q = (p \<sqinter> b \<le> q)"
by (simp add: hoare_def wp_assume sup_neg_inf)
lemma hoare_while_mbt:
"(\<forall> (w::'b::well_founded) r . (\<forall> v . v < w \<longrightarrow> p v \<le> r) \<longrightarrow> hoare ((p w) \<sqinter> b) x r) \<Longrightarrow>
(\<forall> u . p u \<le> q) \<Longrightarrow> hoare (p w) (While b do x) (q \<sqinter> -b)"
apply (unfold while_def)
apply (rule_tac F = "\<lambda>z. [\<cdot> b ] * x * z \<sqinter> [\<cdot> - b ]" in hoare_fixpoint_mbt)
apply (simp add: mult.assoc [THEN sym])
apply (simp add: omega_comp_fix)
apply (unfold hoare_choice)
apply safe
apply (subst hoare_comp_wp)
apply (subst hoare_assume_comp)
apply (drule_tac x = w in spec)
apply (drule_tac x = "wp f (q \<sqinter> - b)" in spec)
apply (auto simp add: hoare_def) [1]
apply (auto simp add: hoare_assume)
apply (rule_tac y = "p w" in order_trans)
by simp_all
lemma hoare_while_complete_mbt:
"(\<forall> w::'b::well_founded . hoare ((p w) \<sqinter> b) x (Sup_less p w)) \<Longrightarrow>
hoare (Sup (range p)) (While b do x) ((Sup (range p)) \<sqinter> -b)"
apply (simp add: hoare_Sup del: Sup_image_eq, safe)
apply (rule hoare_while_mbt)
apply safe
apply (drule_tac x = w in spec)
apply (simp add: hoare_def)
apply (rule_tac y = "wp x (Sup_less p w)" in order_trans, simp_all)
apply (rule wp_mono2)
apply (simp add: Sup_less_def)
apply (rule Sup_least, auto)
by (rule SUP_upper, simp)
definition
"datarefin S S1 D D1 = (D * S \<le> S1 * D1)"
lemma "hoare p S q \<Longrightarrow> datarefin S S1 D D1 \<Longrightarrow> hoare (wp D p) S1 (wp D1 q)"
apply (simp add: hoare_def datarefin_def)
apply (simp add: wp_comp [THEN sym] mult.assoc [THEN sym])
apply (rule_tac y = "wp (D * S) q" in order_trans)
apply (subst wp_comp)
apply (rule monoD, simp_all)
by (rule wp_fun_mono2, simp_all)
lemma "hoare p S q \<Longrightarrow> datarefin ({\<cdot>p} * S) S1 D D1 \<Longrightarrow> hoare (wp D p) S1 (wp D1 q)"
apply (simp add: hoare_def datarefin_def)
apply (rule_tac y = "wp (D * {\<cdot>p} * S) q" in order_trans)
apply (simp add: mult.assoc)
apply (subst wp_comp)
apply (rule monoD, simp_all)
apply (subst wp_comp)
apply (unfold wp_assert, simp)
apply (unfold wp_comp [THEN sym])
apply (rule wp_fun_mono2)
by (simp add: mult.assoc)
lemma inf_pres_conj: "x \<in> conjunctive \<Longrightarrow> y \<in> conjunctive \<Longrightarrow> x \<sqinter> y \<in> conjunctive"
apply (subst conjunctive_def, safe)
apply (simp add: inf_comp conjunctiveD)
by (metis (hide_lams, no_types) inf_assoc inf_left_commute)
lemma sup_pres_disj: "x \<in> disjunctive \<Longrightarrow> y \<in> disjunctive \<Longrightarrow> x \<squnion> y \<in> disjunctive"
apply (subst disjunctive_def, safe)
apply (simp add: sup_comp disjunctiveD)
by (metis (hide_lams, no_types) sup_assoc sup_left_commute)
lemma assumption_conjuncive [simp]: "[\<cdot>p] \<in> conjunctive"
by (simp add: assume_def dual_disjunctive assertion_disjunctive)
lemma assumption_disjuncive [simp]: "[\<cdot>p] \<in> disjunctive"
by (simp add: assume_def dual_conjunctive assertion_conjunctive)
lemma if_pres_conj: "x \<in> conjunctive \<Longrightarrow> y \<in> conjunctive \<Longrightarrow> (If p then x else y) \<in> conjunctive"
apply (unfold if_stm_def)
by (simp add: inf_pres_conj comp_pres_conj)
lemma if_pres_disj: "x \<in> disjunctive \<Longrightarrow> y \<in> disjunctive \<Longrightarrow> (If p then x else y) \<in> disjunctive"
apply (unfold if_assertion)
by (simp add: sup_pres_disj comp_pres_disj assertion_disjunctive)
lemma while_dual_star: "(While p do (x::'a::mbt_algebra)) = (({\<cdot> p} * x)^\<otimes> * {\<cdot> -p })"
apply (simp add: while_def)
apply (rule antisym)
apply (rule omega_least)
proof -
have "([\<cdot> p] * x * (({\<cdot> p} * x)^\<otimes> * {\<cdot>-p}) \<sqinter> [\<cdot>-p]) = ({\<cdot> p} * x * (({\<cdot> p} * x)^\<otimes> * {\<cdot>-p})) \<squnion> {\<cdot>-p}"
apply (unfold mult.assoc)
by (cut_tac p = p and x = "(x * (({\<cdot> p } * x)^\<otimes> * {\<cdot> -p }))" and y = 1 in if_Assertion_assumption, simp)
also have "\<dots> = ({\<cdot> p} * x)^\<otimes> * {\<cdot>-p}"
by (simp add: mult.assoc [THEN sym], simp add: dual_star_comp_fix [THEN sym])
finally show "[\<cdot> p ] * x * (({\<cdot> p } * x)^\<otimes> * {\<cdot> - p }) \<sqinter> [\<cdot> - p ] \<le> ({\<cdot> p } * x)^\<otimes> * {\<cdot> - p }" by simp
next
show "({\<cdot> p } * x)^\<otimes> * {\<cdot> - p } \<le> ([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ]"
apply (rule dual_star_least)
proof -
have "{\<cdot> p } * x * (([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ]) \<squnion> {\<cdot> - p } = [\<cdot> p ] * x * (([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ]) \<sqinter> [\<cdot> - p ]"
apply (unfold mult.assoc)
by (cut_tac p = p and x = "(x * (([\<cdot>p] * x)^\<omega> * [\<cdot>-p]))" and y = 1 in if_Assertion_assumption, simp)
also have "... = ([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ]"
apply (simp add: mult.assoc [THEN sym])
by (metis omega_comp_fix)
finally show "{\<cdot> p } * x * (([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ]) \<squnion> {\<cdot> - p } \<le> ([\<cdot> p ] * x) ^ \<omega> * [\<cdot> - p ] " by simp
qed
qed
lemma while_pres_disj: "(x::'a::mbt_algebra) \<in> disjunctive \<Longrightarrow> (While p do x) \<in> disjunctive"
apply (unfold while_dual_star)
apply (rule comp_pres_disj)
apply (rule dual_star_pres_disj)
by (rule comp_pres_disj, simp_all add: assertion_disjunctive)
lemma while_pres_conj: "(x::'a::mbt_algebra_fusion) \<in> conjunctive \<Longrightarrow> (While p do x) \<in> conjunctive"
apply(unfold while_def)
by (simp add: comp_pres_conj omega_pres_conj)
no_notation
bot ("\<bottom>") and
top ("\<top>") and
inf (infixl "\<sqinter>" 70) and
sup (infixl "\<squnion>" 65) and
Inf ("\<Sqinter>_" [900] 900) and
Sup ("\<Squnion>_" [900] 900)
no_syntax (xsymbols)
"_INF1" :: "pttrns \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Sqinter>_./ _)" [0, 10] 10)
"_INF" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Sqinter>_\<in>_./ _)" [0, 0, 10] 10)
"_SUP1" :: "pttrns \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Squnion>_./ _)" [0, 10] 10)
"_SUP" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Squnion>_\<in>_./ _)" [0, 0, 10] 10)
end
|
State Before: n m m' : ℕ
h : lxor' n m = lxor' n m'
⊢ m = m'
State After: no goals
Tactic: rw [← lxor'_cancel_left n m, ← lxor'_cancel_left n m', h]
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
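# Container for symmetric SPL objects: transposition is the identity, and the
# range, domain, and vector cost are delegated to the single child.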
Class(SymSPL, BaseContainer, SumsBase, rec(
isBlock := true,
transpose := self >> self,
rng := self >> self._children[1].rng(),
dmn := self >> self._children[1].dmn(),
vcost := self >> self._children[1].vcost()
));
|
"""
Generate coefficients related to PML boundaries, e.g., damping profiles.
This function outputs the `a`, `b`, and `k` variables of eqs. 25–26 from Komatitsch 2007 (Geophysics).
TODO
* At the moment, K = 1 and is a dummy, unlike in the electromagnetic case where it was useful. Removing K and K_inv might save memory when using GPUs.
"""
function update_pml!(
pml::NamedVector{T},
exmgrid, # 1D grid extended
mgrid, # 1D grid without extension
flags::Vector{Bool},
δt::Float64,
velavg::Float64,
freqpeak::Float64, # dominant frequency in Hz
) where {T<:Data.Array}
nx = length(exmgrid)
# origins where the PMLs start (the offset between the domain of interest and the PML region depends on the order)
xoriginleft = mgrid[1] - 2 * (step(mgrid))
xoriginright = mgrid[end] + 2 * (step(mgrid))
NPOWER = 2.e0
K_MAX_PML = 1.e0 # from Gedney page 8.11
ALPHA_MAX_PML = pi * freqpeak # from Festa and Vilotte
# thickness of the PML layer in meters
thickness_PML = abs(xoriginleft - exmgrid[1])
"reflection coefficient (INRIA report section 6.1) http://hal.inria.fr/docs/00/07/32/19/PDF/RR-3471.pdf"
Rcoef = 0.001e0
#! check that NPOWER is okay
#if(NPOWER < 1) stop "NPOWER must be greater than 1"
# compute d0 from INRIA report section 6.1 http://hal.inria.fr/docs/00/07/32/19/PDF/RR-3471.pdf
d0 = -(NPOWER + 1) * (velavg) * log(Rcoef) / (2.e0 * thickness_PML)
k = zeros(nx)
fill!(k, 1)
d = zeros(nx)
alpha = zero(d)
a = zero(d)
b = zero(d)
"damping in the X direction"
"origin of the PML layer (position of right edge minus thickness, in meters)"
for ix = 1:nx
#---------- left edge
if (flags[1])
# define damping profile at the grid points
abscissa_in_PML = xoriginleft - exmgrid[ix]
if (abscissa_in_PML >= 0.0)
abscissa_normalized = abscissa_in_PML / thickness_PML
d[ix] = d0 * abscissa_normalized^NPOWER
# this is taken from Gedney, page 8.2
k[ix] = 1.e0 + (K_MAX_PML - 1.e0) * abscissa_normalized^NPOWER
alpha[ix] =
ALPHA_MAX_PML * (1.e0 - abscissa_normalized) + 0.01e0 * ALPHA_MAX_PML
end
end
#---------- right edge
if (flags[2])
# define damping profile at the grid points
abscissa_in_PML = exmgrid[ix] - xoriginright
if (abscissa_in_PML >= 0.0)
abscissa_normalized = abscissa_in_PML / thickness_PML
d[ix] = d0 * abscissa_normalized^NPOWER
# this is taken from Gedney, page 8.2
k[ix] = 1.e0 + (K_MAX_PML - 1.e0) * abscissa_normalized^NPOWER
alpha[ix] =
ALPHA_MAX_PML * (1.e0 - abscissa_normalized) + 0.01e0 * ALPHA_MAX_PML
end
end
# just in case: clamp alpha to be nonnegative
(alpha[ix] < 0.0) ? alpha[ix] = 0.0 : nothing
# see equation 25 of Komatitsch, 2007: get b and a (k, alpha, and d are needed first)
b[ix] = exp(-(d[ix] / k[ix] + alpha[ix]) * δt)
# this to avoid division by zero outside the PML
(abs(d[ix]) > 1.e-6) ?
a[ix] = d[ix] * (b[ix] - 1.e0) / (k[ix] * (d[ix] + k[ix] * alpha[ix])) : nothing
end
pkI = zeros(2 * _fd.npml)
pa = zeros(2 * _fd.npml)
pb = zeros(2 * _fd.npml)
for i = 1:_fd.npml
j = i + _fd.npml
pkI[i] = inv(k[i])
pkI[j] = inv(k[end-_fd.npml+i])
pa[i] = a[i]
pa[j] = a[end-_fd.npml+i]
pb[i] = b[i]
pb[j] = b[end-_fd.npml+i]
end
copyto!(pml[:kI], pkI)
copyto!(pml[:a], pa)
copyto!(pml[:b], pb)
end
"""
Generate a NamedArray with PML coefficients for all the dimensions that are then stored in the FDTD structs.
"""
function get_pml(attrib_mod, mgrid)
dfields = Fields(attrib_mod, "d", ndims = length(mgrid)) # derivative fields that need PML memory
pnames = [:a, :b, :kI]
np = 2 * _fd.npml
return NamedArray(
[
NamedArray(Data.Array.([zeros(np), zeros(np), ones(np)]), pnames) for
df in dfields
],
dfields,
)
end
"""
Update the PML coefficients for each dimension: just a loop over the dims of `pml`.
"""
function update_pml!(
pml::NamedVector{T},
exmgrid,
mgrid,
pml_edges::Vector{Symbol},
attrib_mod,
args...,
) where {T<:NamedVector}
for df in names(pml)[1]
dim = string(last(string(df)))
i = findfirst(x -> string(x) == dim, dim_names(length(exmgrid)))
exmgridf = get_mgrid(eval(df)(), attrib_mod, exmgrid...)[i]
mgridf = mgrid[i]
update_pml!(
pml[df],
exmgridf,
mgridf,
[
any(pml_edges .== Symbol(string(dim), "min")),
any(pml_edges .== Symbol(string(dim), "max")),
],
args...,
)
end
end
function update_pml!(pac)
update_pml!(
pac.pml,
pac.exmedium.mgrid,
pac.medium.mgrid,
pac.pml_edges,
pac.attrib_mod,
pac.fc[:dt],
Statistics.mean(pac.exmedium.bounds[:vp]),
pac.fc[:freqpeak],
)
end
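# The metaprogramming loop below generates, per spatial dimension, the CPML
# memory-update kernels (cf. eq. 25 of Komatitsch 2007): the convolutional
# memory variable is updated as memory = b * memory + a * d, and the derivative
# is then rescaled as d = d * kI + memory (kI stores 1/k). Separate @parallel
# calls, with offsets moff/doff, handle the first and last _fd.npml points.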
for dimnames in [zip([:1, :2, :3], dim_names(3)), zip([:1, :2], dim_names(2))]
is = broadcast(x -> Symbol(string("i", x)), getindex.(collect(dimnames), 2))
ist = Meta.parse(string("(", [string(s, ",") for s in is]..., ")"))
N = Meta.parse(string(length(is)))
for (idim, dim) in dimnames
i = Symbol("i", string(dim))
ismoff = replace(is, i => :($i + moff))
isdoff = replace(is, i => :(doff + $i))
for (fname, fnamenp, imoff) in zip(
[Symbol("memory", string(dim), "!"), Symbol("memory1", string(dim), "!")],
[Symbol("memorynp", string(dim), "!"), Symbol("memorynp1", string(dim), "!")],
[:($i + moff), :($i + moff + 1)],
)
@eval @parallel_indices(
$ist,
function $fnamenp(memory::Data.Array{$N}, d, a, b, kI, moff, doff)
memory[$(ismoff...)] =
b[$imoff] * memory[$(ismoff...)] + a[$imoff] * d[$(isdoff...)]
d[$(isdoff...)] = d[$(isdoff...)] * kI[$imoff] + memory[$(ismoff...)]
return
end
)
@eval function $fname(memory::Data.Array{$N}, d, a, b, kI)
sm = collect(size(memory))
setindex!(sm, _fd.npml, $idim)
# first _fd.npml points
@parallel map(x -> (:)(1, x), Tuple(sm)) $fnamenp(memory, d, a, b, kI, 0, 0)
# last _fd.npml points independent of d
@parallel map(x -> (:)(1, x), Tuple(sm)) $fnamenp(
memory,
d,
a,
b,
kI,
_fd.npml,
getindex(size(d), $idim) - _fd.npml,
)
end
end
end
end
|
/////////////////////////////////////////////////////////////////////////////////
// distribution::toolkit::distributions::chi_squared::random.hpp //
// //
// (C) Copyright 2009 Erwann Rogard //
// Use, modification and distribution are subject to the //
// Boost Software License, Version 1.0. (See accompanying file //
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //
/////////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_STATISTICS_DETAIL_DISTRIBUTION_TOOLKIT_CHI_SQUARED_RANDOM_HPP_ER_2009
#define BOOST_STATISTICS_DETAIL_DISTRIBUTION_TOOLKIT_CHI_SQUARED_RANDOM_HPP_ER_2009
#include <boost/math/distributions/chi_squared.hpp>
#include <boost/random/chi_squared.hpp>
#include <boost/statistics/detail/distribution_common/meta/random/distribution.hpp>
namespace boost{
namespace statistics{
namespace detail{
namespace distribution{
namespace meta{
template<typename T,typename P>
struct random_distribution<
boost::math::chi_squared_distribution<T,P>
>{
typedef boost::math::chi_squared_distribution<T,P> dist_;
typedef boost::random::chi_squared_distribution<T> type;
static type call(const dist_& d){
return type(d.degrees_of_freedom());
}
};
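// Usage sketch (a hypothetical example, assuming the surrounding
// Boost.Statistics machinery is in scope):
//   namespace bsd = boost::statistics::detail::distribution;
//   boost::math::chi_squared_distribution<double> d(5.0);
//   typedef bsd::meta::random_distribution<
//       boost::math::chi_squared_distribution<double> > meta_;
//   meta_::type rnd = meta_::call(d); // a boost::random::chi_squared_distribution<double>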
}// meta
}// distribution
}// detail
}// statistics
}// boost
#endif
|
From iris.proofmode Require Import coq_tactics reduction.
From iris.proofmode Require Export proofmode.
From iris.program_logic Require Export weakestpre.
From iris.program_logic Require Import lifting.
From lrust.lang Require Export tactics lifting.
From iris.prelude Require Import options.
Import uPred.
Lemma tac_wp_value `{!lrustGS Σ} Δ E Φ e v :
IntoVal e v →
envs_entails Δ (Φ v) → envs_entails Δ (WP e @ E {{ Φ }}).
Proof. rewrite envs_entails_unseal=> ? ->. by apply wp_value. Qed.
Ltac wp_value_head := eapply tac_wp_value; [iSolveTC|reduction.pm_prettify].
Lemma tac_wp_pure `{!lrustGS Σ} K Δ Δ' E e1 e2 φ n Φ :
PureExec φ n e1 e2 →
φ →
MaybeIntoLaterNEnvs n Δ Δ' →
envs_entails Δ' (WP fill K e2 @ E {{ Φ }}) →
envs_entails Δ (WP fill K e1 @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal=> ??? HΔ'. rewrite into_laterN_env_sound /=.
rewrite -wp_bind HΔ' -wp_pure_step_later //. by rewrite -wp_bind_inv.
Qed.
Tactic Notation "wp_pure" open_constr(efoc) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) => reshape_expr e ltac:(fun K e' =>
unify e' efoc;
eapply (tac_wp_pure K);
[simpl; iSolveTC (* PureExec *)
|try done (* The pure condition for PureExec *)
|iSolveTC (* IntoLaters *)
|simpl_subst; try wp_value_head (* new goal *)])
|| fail "wp_pure: cannot find" efoc "in" e "or" efoc "is not a reduct"
| _ => fail "wp_pure: not a 'wp'"
end.
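(* Usage sketch: [wp_pure (BinOp _ _ _)] symbolically executes one pure
   binary-operation redex under the WP; the derived notations further below
   (wp_rec, wp_op, wp_if, wp_case, ...) are instances of wp_pure at specific
   redex shapes. *)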
Lemma tac_wp_eq_loc `{!lrustGS Σ} K Δ Δ' E i1 i2 l1 l2 q1 q2 v1 v2 Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i1 Δ' = Some (false, l1 ↦{q1} v1)%I →
envs_lookup i2 Δ' = Some (false, l2 ↦{q2} v2)%I →
envs_entails Δ' (WP fill K (Lit (bool_decide (l1 = l2))) @ E {{ Φ }}) →
envs_entails Δ (WP fill K (BinOp EqOp (Lit (LitLoc l1)) (Lit (LitLoc l2))) @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal=> ? /envs_lookup_sound /=. rewrite sep_elim_l=> ?.
move /envs_lookup_sound; rewrite sep_elim_l=> ? HΔ. rewrite -wp_bind.
rewrite into_laterN_env_sound /=. eapply wp_eq_loc; eauto using later_mono.
Qed.
Tactic Notation "wp_eq_loc" :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
reshape_expr e ltac:(fun K e' => eapply (tac_wp_eq_loc K));
[iSolveTC|iAssumptionCore|iAssumptionCore|simpl; try wp_value_head]
| _ => fail "wp_pure: not a 'wp'"
end.
Tactic Notation "wp_rec" := wp_pure (App _ _).
Tactic Notation "wp_lam" := wp_rec.
Tactic Notation "wp_let" := wp_lam.
Tactic Notation "wp_seq" := wp_let.
Tactic Notation "wp_op" := wp_pure (BinOp _ _ _) || wp_eq_loc.
Tactic Notation "wp_if" := wp_pure (If _ _ _).
Tactic Notation "wp_case" := wp_pure (Case _ _); try wp_value_head.
Lemma tac_wp_bind `{!lrustGS Σ} K Δ E Φ e :
envs_entails Δ (WP e @ E {{ v, WP fill K (of_val v) @ E {{ Φ }} }})%I →
envs_entails Δ (WP fill K e @ E {{ Φ }}).
Proof. rewrite envs_entails_unseal=> ->. apply: wp_bind. Qed.
Ltac wp_bind_core K :=
lazymatch eval hnf in K with
| [] => idtac
| _ => apply (tac_wp_bind K); simpl
end.
Tactic Notation "wp_bind" open_constr(efoc) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) => reshape_expr e ltac:(fun K e' =>
match e' with
| efoc => unify e' efoc; wp_bind_core K
end) || fail "wp_bind: cannot find" efoc "in" e
| _ => fail "wp_bind: not a 'wp'"
end.
Section heap.
Context `{!lrustGS Σ}.
Implicit Types P Q : iProp Σ.
Implicit Types Φ : val → iProp Σ.
Implicit Types Δ : envs (uPredI (iResUR Σ)).
Lemma tac_wp_alloc K Δ Δ' E j1 j2 n Φ :
0 < n →
MaybeIntoLaterNEnvs 1 Δ Δ' →
(∀ l (sz: nat), n = sz → ∃ Δ'',
envs_app false (Esnoc (Esnoc Enil j1 (l ↦∗ repeat (LitV LitPoison) sz)) j2 (†l…sz)) Δ'
= Some Δ'' ∧
envs_entails Δ'' (WP fill K (Lit $ LitLoc l) @ E {{ Φ }})) →
envs_entails Δ (WP fill K (Alloc (Lit $ LitInt n)) @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal=> ?? HΔ. rewrite -wp_bind.
eapply wand_apply; first exact:wp_alloc.
rewrite -persistent_and_sep. apply and_intro; first by auto.
rewrite into_laterN_env_sound; apply later_mono, forall_intro=> l.
apply forall_intro=>sz. apply wand_intro_l. rewrite -assoc.
rewrite sep_and. apply pure_elim_l=> Hn. apply wand_elim_r'.
destruct (HΔ l sz) as (Δ''&?&HΔ'); first done.
rewrite envs_app_sound //; simpl. by rewrite right_id HΔ'.
Qed.
Lemma tac_wp_free K Δ Δ' Δ'' Δ''' E i1 i2 vl (n : Z) (n' : nat) l Φ :
n = length vl →
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i1 Δ' = Some (false, l ↦∗ vl)%I →
envs_delete false i1 false Δ' = Δ'' →
envs_lookup i2 Δ'' = Some (false, †l…n')%I →
envs_delete false i2 false Δ'' = Δ''' →
n' = length vl →
envs_entails Δ''' (WP fill K (Lit LitPoison) @ E {{ Φ }}) →
envs_entails Δ (WP fill K (Free (Lit $ LitInt n) (Lit $ LitLoc l)) @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal; intros -> ?? <- ? <- -> HΔ. rewrite -wp_bind.
eapply wand_apply; first exact:wp_free; simpl.
rewrite into_laterN_env_sound -!later_sep; apply later_mono.
do 2 (rewrite envs_lookup_sound //). by rewrite HΔ True_emp emp_wand -assoc.
Qed.
Lemma tac_wp_read K Δ Δ' E i l q v o Φ :
o = Na1Ord ∨ o = ScOrd →
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦{q} v)%I →
envs_entails Δ' (WP fill K (of_val v) @ E {{ Φ }}) →
envs_entails Δ (WP fill K (Read o (Lit $ LitLoc l)) @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal; intros [->| ->] ???.
- rewrite -wp_bind. eapply wand_apply; first exact:wp_read_na.
rewrite into_laterN_env_sound -later_sep envs_lookup_split //; simpl.
by apply later_mono, sep_mono_r, wand_mono.
- rewrite -wp_bind. eapply wand_apply; first exact:wp_read_sc.
rewrite into_laterN_env_sound -later_sep envs_lookup_split //; simpl.
by apply later_mono, sep_mono_r, wand_mono.
Qed.
Lemma tac_wp_write K Δ Δ' Δ'' E i l v e v' o Φ :
IntoVal e v' →
o = Na1Ord ∨ o = ScOrd →
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ v)%I →
envs_simple_replace i false (Esnoc Enil i (l ↦ v')) Δ' = Some Δ'' →
envs_entails Δ'' (WP fill K (Lit LitPoison) @ E {{ Φ }}) →
envs_entails Δ (WP fill K (Write o (Lit $ LitLoc l) e) @ E {{ Φ }}).
Proof.
rewrite envs_entails_unseal; intros ? [->| ->] ????.
- rewrite -wp_bind. eapply wand_apply; first by apply wp_write_na.
rewrite into_laterN_env_sound -later_sep envs_simple_replace_sound //; simpl.
rewrite right_id. by apply later_mono, sep_mono_r, wand_mono.
- rewrite -wp_bind. eapply wand_apply; first by apply wp_write_sc.
rewrite into_laterN_env_sound -later_sep envs_simple_replace_sound //; simpl.
rewrite right_id. by apply later_mono, sep_mono_r, wand_mono.
Qed.
End heap.
Tactic Notation "wp_apply" open_constr(lem) :=
iPoseProofCore lem as false (fun H =>
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
reshape_expr e ltac:(fun K e' =>
wp_bind_core K; iApplyHyp H; try iNext; simpl) ||
lazymatch iTypeOf H with
| Some (_,?P) => fail "wp_apply: cannot apply" P
end
| _ => fail "wp_apply: not a 'wp'"
end).
Tactic Notation "wp_alloc" ident(l) "as" constr(H) constr(Hf) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_alloc K _ _ _ H Hf))
|fail 1 "wp_alloc: cannot find 'Alloc' in" e];
[try fast_done
|iSolveTC
|let sz := fresh "sz" in let Hsz := fresh "Hsz" in
first [intros l sz Hsz | fail 1 "wp_alloc:" l "not fresh"];
(* If Hsz is "constant Z = nat", change that to an equation on nat and
potentially substitute away the sz. *)
try (match goal with Hsz : ?x = _ |- _ => rewrite <-(Z2Nat.id x) in Hsz; last done end;
apply Nat2Z.inj in Hsz;
try (cbv [Z.to_nat Pos.to_nat] in Hsz;
simpl in Hsz;
(* Substitute only if we have a literal nat. *)
match goal with Hsz : S _ = _ |- _ => subst sz end));
eexists; split;
[pm_reflexivity || fail "wp_alloc:" H "or" Hf "not fresh"
|simpl; try wp_value_head]]
| _ => fail "wp_alloc: not a 'wp'"
end.
Tactic Notation "wp_alloc" ident(l) :=
let H := iFresh in let Hf := iFresh in wp_alloc l as H Hf.
Tactic Notation "wp_free" :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_free K))
|fail 1 "wp_free: cannot find 'Free' in" e];
[try fast_done
|iSolveTC
|let l := match goal with |- _ = Some (_, (?l ↦∗ _)%I) => l end in
iAssumptionCore || fail "wp_free: cannot find" l "↦∗ ?"
|pm_reflexivity
|let l := match goal with |- _ = Some (_, († ?l … _)%I) => l end in
iAssumptionCore || fail "wp_free: cannot find †" l "… ?"
|pm_reflexivity
|try fast_done
|simpl; try first [wp_pure (Seq (Lit LitPoison) _)|wp_value_head]]
| _ => fail "wp_free: not a 'wp'"
end.
Tactic Notation "wp_read" :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_read K))
|fail 1 "wp_read: cannot find 'Read' in" e];
[(right; fast_done) || (left; fast_done) ||
fail "wp_read: order is neither Na2Ord nor ScOrd"
|iSolveTC
|let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_read: cannot find" l "↦ ?"
|simpl; try wp_value_head]
| _ => fail "wp_read: not a 'wp'"
end.
Tactic Notation "wp_write" :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_write K); [iSolveTC|..])
|fail 1 "wp_write: cannot find 'Write' in" e];
[(right; fast_done) || (left; fast_done) ||
fail "wp_write: order is neither Na2Ord nor ScOrd"
|iSolveTC
|let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_write: cannot find" l "↦ ?"
|pm_reflexivity
|simpl; try first [wp_pure (Seq (Lit LitPoison) _)|wp_value_head]]
| _ => fail "wp_write: not a 'wp'"
end.
|
__title__ = 'Plot Heatmap performance'
__author__ = 'Carel van Niekerk'
__contact__ = '[email protected]'
#%% Load Packages
import numpy as np
import pandas as pd
from os import chdir
from matplotlib import pyplot as plt
import matplotlib.cm as cm
#%%
Labs = ['Grass', 'Bark', 'Straw', 'Herringbone weave', 'Woolen cloth', 'Pressed calf leather', 'Beach sand', 'Water', 'Wood grain', 'Raffia', 'Pigskin', 'Brick wall', 'Plastic bubbles', 'Sand']
for n in [5,6,7,8]:
chdir(r'...\DistComp\MSFM Euclidean')
Data = pd.read_csv('DistComp(n={}).csv'.format(n))
t_range = Data.t.unique()
f_range = Data.f.unique()
Res = list()
for t in t_range:
for f in f_range:
Subset = Data[(Data.t == t)&(Data.f == f)]
Subset = Subset[[v in Labs for v in Subset.Label.values]]
Acc = (Subset.Similar_Tex_Dist < Subset.Random_Tex_Dist).sum() / Subset.shape[0]
Res += [[t, f, Acc]]
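# keep only the accuracy column and reshape to a (len(t_range), len(f_range)) grid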
Res = np.array(Res)[:,-1].reshape(-1,3)
fig = plt.figure()
ax = fig.add_subplot(111)
p = ax.imshow(Res, cmap = cm.get_cmap(name='coolwarm'), vmin = 0.4, vmax = 0.8)
plt.yticks((0,1,2,3,4), (0.4, 0.35, 0.3, 0.25, 0.2))
plt.xticks((0,1,2), (0.8, 0.85, 0.9))
ax.set_ylabel('t', fontsize = 16)
ax.set_xlabel('f', fontsize = 16)
chdir(r'...')
plt.savefig('Heat(n={}).png'.format(n), bbox_inches = 'tight', pad_inches = 0) |
for N in [Float64, Rational{Int}, Float32]
# =====================================
# Run decompose for different set types
# =====================================
function test_directions(set)
res = σ(N[1, 0], set)[1] == one(N)
res &= σ(N[0, 1], set)[2] == one(N)
res &= σ(N[-1, 0], set)[1] == -one(N)
res &= σ(N[0, -1], set)[2] == -one(N)
return res
end
partition = [[1, 2], [3, 4], [5, 6]]
b = BallInf(zeros(N, 6), one(N))
d = decompose(b, partition, HPolygon)
@test d.array[1] isa HPolygon && test_directions(d.array[1])
d = decompose(b, partition, Hyperrectangle)
@test d.array[1] isa Hyperrectangle && test_directions(d.array[1])
d = decompose(b, partition, LinearMap)
@test d.array[1] isa LinearMap && test_directions(d.array[1])
d = decompose(b, [[i] for i in 1:6], Interval)
@test d.array[1] isa Interval &&
σ(N[1], d.array[1])[1] == one(N) && σ(N[-1], d.array[1])[1] == -one(N)
# ===================
# 1D/3D decomposition
# ===================
b1 = Ball1(zeros(N, 7), one(N))
d = decompose(b1, [[i] for i in 1:7], Hyperrectangle)
@test length(d.array) == 7
d = decompose(b1, [[1], 2:3, 4:6, [7]], Hyperrectangle)
@test length(d.array) == 4
# ===================
# template directions
# ===================
for dir in [BoxDirections{N}, OctDirections{N}, BoxDiagDirections{N}]
d = decompose(b, partition, dir)
@test d isa CartesianProductArray && array(d)[1] isa HPolytope
end
# ==========================
# different options per block
# ==========================
block2oa1 = Dict(1 => Hyperrectangle, 2 => HPolygon, 3 => OctDirections{N},
4 => Interval)
block2oa2 = [Hyperrectangle, HPolygon, OctDirections{N}, Interval]
for block2oa in [block2oa1, block2oa2] # both Dict and Vector work
d = decompose(b1, [[1], 2:3, 4:6, [7]], block2oa)
@test d isa CartesianProductArray && array(d)[1] isa Hyperrectangle &&
array(d)[2] isa HPolygon && array(d)[3] isa HPolytope &&
array(d)[4] isa Interval
end
# ==================
# uniform block size
# ==================
d = decompose(b, Hyperrectangle; block_size=2)
@test length(d.array) == 3
d = decompose(b1, Hyperrectangle; block_size=3)
@test length(d.array) == 3
end
# tests that do not work with Rational{Int}
for N in [Float64, Float32]
# =============================
# Check that Issue #43 is fixed
# =============================
X = CartesianProductArray([BallInf(N[0.767292, 0.936613], N(0.1)),
BallInf(N[0.734104, 0.87296], N(0.1))])
A = N[1.92664 1.00674 1.0731 -0.995149;
-2.05704 3.48059 0.0317863 1.83481;
0.990993 -1.97754 0.754192 -0.807085;
-2.43723 0.782825 -3.99255 3.93324]
Ω0 = CH(X, A * X)
dec = decompose(Ω0, [1:2, 3:4], HPolygon)
dec1 = dec.array[1]
@test dec1.constraints[1].b ≈ N(2.84042586)
@test dec1.constraints[2].b ≈ N(4.04708832)
@test dec1.constraints[3].b ≈ N(-0.667292)
@test dec1.constraints[4].b ≈ N(-0.836613)
# =====================
# ε-close approximation
# =====================
function test_directions(set)
res = σ(N[1, 0], set)[1] == one(N)
res &= σ(N[0, 1], set)[2] == one(N)
res &= σ(N[-1, 0], set)[1] == -one(N)
res &= σ(N[0, -1], set)[2] == -one(N)
return res
end
partition = [[1, 2], [3, 4], [5, 6]]
b = BallInf(zeros(N, 6), one(N))
d = decompose(b, partition, HPolygon => N(1e-2))
@test d.array[1] isa HPolygon && test_directions(d.array[1])
d = decompose(b, partition, N(1e-2))
@test d.array[1] isa HPolygon && test_directions(d.array[1])
d = decompose(b, partition, [N(1e-2), HPolygon => N(1e-2), N(1e-2)])
@test d.array[1] isa HPolygon && test_directions(d.array[1])
end
|
[STATEMENT]
lemma accum3_lemma [simp]:
shows "rec_eval (rec_accum3 f) [x, y1, y2, y3] = (\<Prod> z \<le> x. (rec_eval f) [z, y1, y2, y3])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rec_eval (rec_accum3 f) [x, y1, y2, y3] = (\<Prod>z\<le>x. rec_eval f [z, y1, y2, y3])
[PROOF STEP]
by (induct x) (simp_all add: rec_accum3_def) |
module STCR2Z2T0S0KoffkaCross where
import Data.Array.Repa as R
import Data.Binary (decodeFile)
import Data.Complex
import Data.List as L
import DFT.Plan
import FokkerPlanck.MonteCarlo
import FokkerPlanck.Pinwheel
import Image.IO
import STC.PowerMethod
import System.Directory
import System.Environment
import System.FilePath
import Text.Printf
import Types
main :: IO ()
main = do
args@(numPointStr:numOrientationStr:numScaleStr:thetaSigmaStr:scaleSigmaStr:maxScaleStr:taoDecayStr:taoReversalStr:taoCornerStr:numTrailStr:maxTrailStr:theta0FreqsStr:thetaFreqsStr:scale0FreqsStr:scaleFreqsStr:histFilePath:numIterationStr:writeSourceFlagStr:dStr:wStr:sigmaStr:thetaStr:numThreadStr:_) <-
getArgs
print args
let numPoint = read numPointStr :: Int
numOrientation = read numOrientationStr :: Int
numScale = read numScaleStr :: Int
thetaSigma = read thetaSigmaStr :: Double
scaleSigma = read scaleSigmaStr :: Double
maxScale = read maxScaleStr :: Double
taoDecay = read taoDecayStr :: Double
taoReversal = read taoReversalStr :: Double
taoCorner = read taoCornerStr :: Double
numTrail = read numTrailStr :: Int
maxTrail = read maxTrailStr :: Int
theta0Freq = read theta0FreqsStr :: Double
theta0Freqs = [-theta0Freq .. theta0Freq]
thetaFreq = read thetaFreqsStr :: Double
thetaFreqs = [-thetaFreq .. thetaFreq]
scale0Freq = read scale0FreqsStr :: Double
scaleFreq = read scaleFreqsStr :: Double
scale0Freqs = [-scale0Freq .. scale0Freq]
scaleFreqs = [-scaleFreq .. scaleFreq]
numIteration = read numIterationStr :: Int
writeSourceFlag = read writeSourceFlagStr :: Bool
d = read dStr :: Int
w = read wStr :: Int
sigma = read sigmaStr :: Double
theta = read thetaStr :: Double
numThread = read numThreadStr :: Int
folderPath = "output/test/STCR2Z2T0S0KoffkaCross"
createDirectoryIfMissing True folderPath
flag <- doesFileExist histFilePath
radialArr <-
if flag
then R.map magnitude . getNormalizedHistogramArr <$>
decodeFile histFilePath
else do
putStrLn "Couldn't find a Green's function data. Start simulation..."
solveMonteCarloR2Z2T0S0ReversalCornerRadial
numThread
numTrail
maxTrail
numPoint
numPoint
thetaSigma
scaleSigma
maxScale
taoDecay
taoReversal
taoCorner
theta0Freqs
thetaFreqs
scale0Freqs
scaleFreqs
histFilePath
(emptyHistogram
[ (round . sqrt . fromIntegral $ 2 * (div numPoint 2) ^ 2)
, L.length scale0Freqs
, L.length theta0Freqs
, L.length scaleFreqs
, L.length thetaFreqs
]
0)
arrR2Z2T0S0 <-
computeUnboxedP $
computeR2Z2T0S0ArrayRadial
radialArr
numPoint
numPoint
1
maxScale
thetaFreqs
scaleFreqs
theta0Freqs
scale0Freqs
plan <- makeR2Z2T0S0Plan emptyPlan arrR2Z2T0S0
let xs =
[ R2S1RPPoint (d, w, theta, 1)
, R2S1RPPoint (d, -w, theta, 1)
, R2S1RPPoint (w, d, theta + 90, 1)
, R2S1RPPoint (-w, d, theta + 90, 1)
, R2S1RPPoint (-d, w, theta + 180, 1)
, R2S1RPPoint (-d, -w, theta + 180, 1)
, R2S1RPPoint (w, -d, theta + 270, 1)
, R2S1RPPoint (-w, -d, theta + 270, 1)
]
bias =
-- computeBiasR2T0S0Gaussian
-- numPoint
-- numPoint
-- theta0Freqs
-- scale0Freqs
-- 90
-- sigma
-- xs
computeS $
R.zipWith
(+)
(computeBiasR2T0S0Gaussian
numPoint
numPoint
theta0Freqs
scale0Freqs
90
sigma
xs)
(computeBiasR2T0S0Gaussian
numPoint
numPoint
theta0Freqs
scale0Freqs
(-90)
sigma
xs)
eigenVec =
computeInitialEigenVectorR2T0S0
numPoint
numPoint
theta0Freqs
scale0Freqs
thetaFreqs
scaleFreqs
xs
powerMethodR2Z2T0S0Bias
plan
folderPath
numPoint
numPoint
numOrientation
thetaFreqs
theta0Freqs
numScale
scaleFreqs
scale0Freqs
arrR2Z2T0S0
numIteration
writeSourceFlag
-- (printf "_%d" (round r :: Int))
(printf
"_%d_%d_%d_%d_%d_%.2f_%.2f_%d_%d"
numPoint
(round maxScale :: Int)
(round taoDecay :: Int)
(round taoReversal :: Int)
(round taoCorner :: Int)
thetaSigma
scaleSigma
d
w)
0.5
bias
eigenVec
|
Formal statement is: lemma prime_intI: "prime p" if "p \<ge> 2" and "\<And>m n. p dvd m * n \<Longrightarrow> p dvd m \<or> p dvd n" for p :: int Informal statement is: If $p$ is an integer with $p \geq 2$ such that whenever $p$ divides a product of two integers it divides one of the factors, then $p$ is prime.
[STATEMENT]
lemma path_seg_butlast:
"p\<noteq>[] \<Longrightarrow> path_seg p 0 (length p - Suc 0) = \<Union>(set (butlast p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p \<noteq> [] \<Longrightarrow> path_seg p 0 (length p - Suc 0) = \<Union> (set (butlast p))
[PROOF STEP]
apply (cases p rule: rev_cases, simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ys y. \<lbrakk>p \<noteq> []; p = ys @ [y]\<rbrakk> \<Longrightarrow> path_seg p 0 (length p - Suc 0) = \<Union> (set (butlast p))
[PROOF STEP]
apply (fastforce simp: path_seg_def nth_append in_set_conv_nth)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
module Numeric.LinearAlgebra.Matrix.Class where
-- import Numeric.LinearAlgebra.Vector
class Functor m => Matrix m where
mDim :: m a -> Int
mElement :: m a -> Int -> Int -> a
mIndexOf :: (Ord a) => (a -> a -> Bool) -> m a -> (Int, Int)
mZip :: (a -> b -> c) -> m a -> m b -> m c
-- | mFold is foldl1'
mFold :: (a -> a -> a) -> m a -> a
det :: Num a => m a -> a
{-# INLINE mApply #-}
mApply :: Functor f => f (a -> b) -> a -> f b
mApply f m = fmap ($ m) f
(.+.) :: (Num k, Matrix m) => m k -> m k -> m k
(.+.) = mZip (+)
(.-.) :: (Num k, Matrix m) => m k -> m k -> m k
(.-.) = mZip (-)
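-- Usage sketch (assuming mZip pairs entries positionally, as intended):
-- mElement (m .+. n) i j == mElement m i j + mElement n i j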
{-
(.*.) :: (k ~ Element m, Num k, Matrix m) => m -> m -> m
m .*. n = mIdxMap m $ \i j -> sum [ mElement m i k * mElement n k j | k <- [ 0 .. 3 ] ]
-}
{-
(.*>) :: (k ~ Element m, k ~ Scalar v, Num k, Matrix m, Vector v)
=> m -> v -> v
m .*> v | vDim v == mDim m = flip vIdxMap v $ \k -> sum [ mElement m i k * vElement v k | i <- [ 0 .. mDim m ] ]
| otherwise = error "Dimensions do not match"
-}
-- (*.) :: (k ~ Element m, Num k, Matrix m) => m -> k -> m
-- m *. k = mMap (k*) m
{-
transpose :: Matrix m => m -> m
transpose m = flip mIdxMap m $ \i j -> mElement j i m
-}
|
[STATEMENT]
lemma region_continuous:
assumes "valid_region X k I r"
defines R: "R \<equiv> region X I r"
assumes between: "0 \<le> t1" "t1 \<le> t2"
assumes elem: "u \<in> R" "u \<oplus> t2 \<in> R"
shows "u \<oplus> t1 \<in> R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<oplus> t1 \<in> R
[PROOF STEP]
unfolding R
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<oplus> t1 \<in> Regions.region X I r
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<forall>x\<in>X. 0 \<le> (u \<oplus> t1) x
2. \<forall>x\<in>X. Regions.intv_elem x (u \<oplus> t1) (I x)
3. ?X\<^sub>0 = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
4. \<forall>x\<in>?X\<^sub>0. \<forall>y\<in>?X\<^sub>0. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
from \<open>0 \<le> t1\<close> \<open>u \<in> R\<close>
[PROOF STATE]
proof (chain)
picking this:
0 \<le> t1
u \<in> R
[PROOF STEP]
show "\<forall>x\<in>X. 0 \<le> (u \<oplus> t1) x"
[PROOF STATE]
proof (prove)
using this:
0 \<le> t1
u \<in> R
goal (1 subgoal):
1. \<forall>x\<in>X. 0 \<le> (u \<oplus> t1) x
[PROOF STEP]
by (auto simp: R cval_add_def)
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>X. 0 \<le> (u \<oplus> t1) x
goal (3 subgoals):
1. \<forall>x\<in>X. Regions.intv_elem x (u \<oplus> t1) (I x)
2. ?X\<^sub>0 = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
3. \<forall>x\<in>?X\<^sub>0. \<forall>y\<in>?X\<^sub>0. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
have "intv_elem x (u \<oplus> t1) (I x)" if "x \<in> X" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Regions.intv_elem x (u \<oplus> t1) (I x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Regions.intv_elem x (u \<oplus> t1) (I x)
[PROOF STEP]
from elem that
[PROOF STATE]
proof (chain)
picking this:
u \<in> R
u \<oplus> t2 \<in> R
x \<in> X
[PROOF STEP]
have "intv_elem x u (I x)" "intv_elem x (u \<oplus> t2) (I x)"
[PROOF STATE]
proof (prove)
using this:
u \<in> R
u \<oplus> t2 \<in> R
x \<in> X
goal (1 subgoal):
1. Regions.intv_elem x u (I x) &&& Regions.intv_elem x (u \<oplus> t2) (I x)
[PROOF STEP]
by (auto simp: R)
[PROOF STATE]
proof (state)
this:
Regions.intv_elem x u (I x)
Regions.intv_elem x (u \<oplus> t2) (I x)
goal (1 subgoal):
1. Regions.intv_elem x (u \<oplus> t1) (I x)
[PROOF STEP]
with between
[PROOF STATE]
proof (chain)
picking this:
0 \<le> t1
t1 \<le> t2
Regions.intv_elem x u (I x)
Regions.intv_elem x (u \<oplus> t2) (I x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 \<le> t1
t1 \<le> t2
Regions.intv_elem x u (I x)
Regions.intv_elem x (u \<oplus> t2) (I x)
goal (1 subgoal):
1. Regions.intv_elem x (u \<oplus> t1) (I x)
[PROOF STEP]
by (cases "I x", auto simp: cval_add_def)
[PROOF STATE]
proof (state)
this:
Regions.intv_elem x (u \<oplus> t1) (I x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?x \<in> X \<Longrightarrow> Regions.intv_elem ?x (u \<oplus> t1) (I ?x)
goal (3 subgoals):
1. \<forall>x\<in>X. Regions.intv_elem x (u \<oplus> t1) (I x)
2. ?X\<^sub>0 = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
3. \<forall>x\<in>?X\<^sub>0. \<forall>y\<in>?X\<^sub>0. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x \<in> X \<Longrightarrow> Regions.intv_elem ?x (u \<oplus> t1) (I ?x)
[PROOF STEP]
show "\<forall> x \<in> X. intv_elem x (u \<oplus> t1) (I x)"
[PROOF STATE]
proof (prove)
using this:
?x \<in> X \<Longrightarrow> Regions.intv_elem ?x (u \<oplus> t1) (I ?x)
goal (1 subgoal):
1. \<forall>x\<in>X. Regions.intv_elem x (u \<oplus> t1) (I x)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>X. Regions.intv_elem x (u \<oplus> t1) (I x)
goal (2 subgoals):
1. ?X\<^sub>0 = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
2. \<forall>x\<in>?X\<^sub>0. \<forall>y\<in>?X\<^sub>0. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
let ?X\<^sub>0 = "{x \<in> X. \<exists>d. I x = Intv d}"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?X\<^sub>0 = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
2. \<forall>x\<in>?X\<^sub>0. \<forall>y\<in>?X\<^sub>0. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
show "?X\<^sub>0 = ?X\<^sub>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {x \<in> X. \<exists>d. I x = Regions.intv.Intv d} = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
{x \<in> X. \<exists>d. I x = Regions.intv.Intv d} = {x \<in> X. \<exists>d. I x = Regions.intv.Intv d}
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
from elem
[PROOF STATE]
proof (chain)
picking this:
u \<in> R
u \<oplus> t2 \<in> R
[PROOF STEP]
have "\<forall> x \<in> ?X\<^sub>0. \<forall> y \<in> ?X\<^sub>0. (x, y) \<in> r \<longleftrightarrow> frac (u x) \<le> frac (u y)"
[PROOF STATE]
proof (prove)
using this:
u \<in> R
u \<oplus> t2 \<in> R
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
[PROOF STEP]
by (auto simp: R)
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
fix x y c d
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
assume A: "x \<in> X" "y \<in> X" "I x = Intv c" "I y = Intv d"
[PROOF STATE]
proof (state)
this:
x \<in> X
y \<in> X
I x = Regions.intv.Intv c
I y = Regions.intv.Intv d
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
from A elem between
[PROOF STATE]
proof (chain)
picking this:
x \<in> X
y \<in> X
I x = Regions.intv.Intv c
I y = Regions.intv.Intv d
u \<in> R
u \<oplus> t2 \<in> R
0 \<le> t1
t1 \<le> t2
[PROOF STEP]
have *:
"c < u x" "u x < c + 1" "c < u x + t1" "u x + t1 < c + 1"
[PROOF STATE]
proof (prove)
using this:
x \<in> X
y \<in> X
I x = Regions.intv.Intv c
I y = Regions.intv.Intv d
u \<in> R
u \<oplus> t2 \<in> R
0 \<le> t1
t1 \<le> t2
goal (1 subgoal):
1. (real c < u x &&& u x < real (c + 1)) &&& real c < u x + t1 &&& u x + t1 < real (c + 1)
[PROOF STEP]
by (fastforce simp: cval_add_def R)+
[PROOF STATE]
proof (state)
this:
real c < u x
u x < real (c + 1)
real c < u x + t1
u x + t1 < real (c + 1)
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
real c < u x
u x < real (c + 1)
real c < u x + t1
u x + t1 < real (c + 1)
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
from A(2,4) elem between
[PROOF STATE]
proof (chain)
picking this:
y \<in> X
I y = Regions.intv.Intv d
u \<in> R
u \<oplus> t2 \<in> R
0 \<le> t1
t1 \<le> t2
[PROOF STEP]
have **:
"d < u y" "u y < d + 1" "d < u y + t1" "u y + t1 < d + 1"
[PROOF STATE]
proof (prove)
using this:
y \<in> X
I y = Regions.intv.Intv d
u \<in> R
u \<oplus> t2 \<in> R
0 \<le> t1
t1 \<le> t2
goal (1 subgoal):
1. (real d < u y &&& u y < real (d + 1)) &&& real d < u y + t1 &&& u y + t1 < real (d + 1)
[PROOF STEP]
by (fastforce simp: cval_add_def R)+
[PROOF STATE]
proof (state)
this:
real d < u y
u y < real (d + 1)
real d < u y + t1
u y + t1 < real (d + 1)
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
real c < u x
u x < real (c + 1)
real c < u x + t1
u x + t1 < real (c + 1)
real d < u y
u y < real (d + 1)
real d < u y + t1
u y + t1 < real (d + 1)
[PROOF STEP]
have "u x = c + frac (u x)" "u y = d + frac (u y)"
[PROOF STATE]
proof (prove)
using this:
real c < u x
u x < real (c + 1)
real c < u x + t1
u x + t1 < real (c + 1)
real d < u y
u y < real (d + 1)
real d < u y + t1
u y + t1 < real (d + 1)
goal (1 subgoal):
1. u x = real c + frac (u x) &&& u y = real d + frac (u y)
[PROOF STEP]
using nat_intv_frac_decomp
[PROOF STATE]
proof (prove)
using this:
real c < u x
u x < real (c + 1)
real c < u x + t1
u x + t1 < real (c + 1)
real d < u y
u y < real (d + 1)
real d < u y + t1
u y + t1 < real (d + 1)
\<lbrakk>real ?c < ?d; ?d < real (?c + 1)\<rbrakk> \<Longrightarrow> ?d = real ?c + frac ?d
goal (1 subgoal):
1. u x = real c + frac (u x) &&& u y = real d + frac (u y)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
u x = real c + frac (u x)
u y = real d + frac (u y)
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
u x = real c + frac (u x)
u y = real d + frac (u y)
[PROOF STEP]
have
"frac (u x + t1) = frac (u x) + t1" "frac (u y + t1) = frac (u y) + t1"
[PROOF STATE]
proof (prove)
using this:
u x = real c + frac (u x)
u y = real d + frac (u y)
goal (1 subgoal):
1. frac (u x + t1) = frac (u x) + t1 &&& frac (u y + t1) = frac (u y) + t1
[PROOF STEP]
using *(3,4) **(3,4) nat_intv_frac_decomp
[PROOF STATE]
proof (prove)
using this:
u x = real c + frac (u x)
u y = real d + frac (u y)
real c < u x + t1
u x + t1 < real (c + 1)
real d < u y + t1
u y + t1 < real (d + 1)
\<lbrakk>real ?c < ?d; ?d < real (?c + 1)\<rbrakk> \<Longrightarrow> ?d = real ?c + frac ?d
goal (1 subgoal):
1. frac (u x + t1) = frac (u x) + t1 &&& frac (u y + t1) = frac (u y) + t1
[PROOF STEP]
by force+
[PROOF STATE]
proof (state)
this:
frac (u x + t1) = frac (u x) + t1
frac (u y + t1) = frac (u y) + t1
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
frac (u x + t1) = frac (u x) + t1
frac (u y + t1) = frac (u y) + t1
[PROOF STEP]
have
"frac (u x) \<le> frac (u y) \<longleftrightarrow> frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y)"
[PROOF STATE]
proof (prove)
using this:
frac (u x + t1) = frac (u x) + t1
frac (u y + t1) = frac (u y) + t1
goal (1 subgoal):
1. (frac (u x) \<le> frac (u y)) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
by (auto simp: cval_add_def)
[PROOF STATE]
proof (state)
this:
(frac (u x) \<le> frac (u y)) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x2 \<in> X; ?y2 \<in> X; I ?x2 = Regions.intv.Intv ?c2; I ?y2 = Regions.intv.Intv ?d2\<rbrakk> \<Longrightarrow> (frac (u ?x2) \<le> frac (u ?y2)) = (frac ((u \<oplus> t1) ?x2) \<le> frac ((u \<oplus> t1) ?y2))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
\<lbrakk>?x2 \<in> X; ?y2 \<in> X; I ?x2 = Regions.intv.Intv ?c2; I ?y2 = Regions.intv.Intv ?d2\<rbrakk> \<Longrightarrow> (frac (u ?x2) \<le> frac (u ?y2)) = (frac ((u \<oplus> t1) ?x2) \<le> frac ((u \<oplus> t1) ?y2))
[PROOF STEP]
show
"\<forall> x \<in> ?X\<^sub>0. \<forall> y \<in> ?X\<^sub>0. (x, y) \<in> r \<longleftrightarrow> frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y)"
[PROOF STATE]
proof (prove)
using this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac (u x) \<le> frac (u y))
\<lbrakk>?x2 \<in> X; ?y2 \<in> X; I ?x2 = Regions.intv.Intv ?c2; I ?y2 = Regions.intv.Intv ?d2\<rbrakk> \<Longrightarrow> (frac (u ?x2) \<le> frac (u ?y2)) = (frac ((u \<oplus> t1) ?x2) \<le> frac ((u \<oplus> t1) ?y2))
goal (1 subgoal):
1. \<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
[PROOF STEP]
by (auto simp: cval_add_def)
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. \<forall>y\<in>{x \<in> X. \<exists>d. I x = Regions.intv.Intv d}. ((x, y) \<in> r) = (frac ((u \<oplus> t1) x) \<le> frac ((u \<oplus> t1) y))
goal:
No subgoals!
[PROOF STEP]
qed |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
(*
A story of AutoCorres and recursion.
To do:
-- prove SIMPL total correctness
-- remove (* slow *) steps
-- remove word32 metis (or wait for word lifting...)
-- fix wp to support recursive fib'
-- Isar style proof?
*)
theory FibProof
imports
"AutoCorres.AutoCorres"
begin
external_file "fib.c"
(*
* The venerable Fibonacci function.
*)
fun fibo :: "nat \<Rightarrow> nat" where
"fibo 0 = 0" |
"fibo (Suc 0) = Suc 0" |
"fibo (Suc (Suc n)) = fibo n + fibo (Suc n)"
declare fibo.simps [simp del]
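(* First values, for orientation:
   fibo 0 = 0, fibo 1 = 1, fibo 2 = 1, fibo 3 = 2, fibo 4 = 3, fibo 5 = 5, fibo 6 = 8, ... *)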
lemma fibo_alt_def: "fibo n = (if n = 0 then 0 else if n = 1 then 1 else fibo (n - 1) + fibo (n - 2))"
apply (induct n rule: less_induct)
apply (rename_tac n, case_tac n, simp add: fibo.simps)
apply (rename_tac n1, case_tac n1, simp add: fibo.simps)
apply (simp add: fibo.simps)
done
lemma fibo_mono_Suc: "fibo n \<le> fibo (Suc n)"
by (simp add: fibo_alt_def)
lemma fibo_mono: "a \<le> b \<Longrightarrow> fibo a \<le> fibo b"
by (metis mono_iff_le_Suc mono_def fibo_mono_Suc)
lemma fibo_mono_strict: "n \<ge> 2 \<Longrightarrow> fibo n < fibo (Suc n)"
apply (case_tac n, simp)
apply (rename_tac n', subgoal_tac "fibo (Suc 0) \<le> fibo n'")
apply (simp add: fibo.simps)
apply (simp add: fibo_mono)
done
lemma fiboI: "\<lbrakk> a + 1 = b; b + 1 = c \<rbrakk> \<Longrightarrow> fibo a + fibo b = fibo c"
by (auto simp: fibo.simps)
(*
* We write two versions in C, and compare correctness proofs on the
* SIMPL and AutoCorres embeddings.
*)
(*
* C arithmetic is done using `unsigned', which is translated as `word32'.
* So, it is much easier to prove that the C code implements this function,
* which is not quite the same function as fibo.
*)
function fibo32 :: "word32 \<Rightarrow> word32" where
"fibo32 n = (if n = 0 then 0 else if n = 1 then 1 else fibo32 (n - 1) + fibo32 (n - 2))"
apply auto
done
termination fibo32
by (relation "measure (\<lambda>x. unat x)", (simp|unat_arith)+)
declare fibo32.simps [simp del]
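(* Note: fibo32 computes in word32, so its arithmetic wraps modulo 2^32; it
   agrees with fibo only while fibo n < 2^32, which fibo32_is_fibo below makes
   precise. *)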
(*
* But we really want to say that the C code does implement fibo
* (at least up to fibo 47 = 2971215073 < 2^32)...
*)
lemma fibo_greater: "(6 + n) < fibo (6 + n)"
apply (induct n)
apply eval
apply (subst add_Suc_right)+
apply (subgoal_tac "Suc (6 + n) \<le> fibo (6 + n)")
apply (subgoal_tac "fibo (6 + n) < fibo (Suc (6 + n))")
apply simp
apply (rule fibo_mono_strict)
apply simp
apply simp
done
lemma fibo_greater': "n \<ge> 6 \<Longrightarrow> n < fibo n"
by (metis le_iff_add fibo_greater)
lemma unat_word32_plus: "unat x + unat y < 2^32 \<Longrightarrow> unat x + unat y = unat (x + y :: word32)"
by (metis len32 unat_of_nat_len word_arith_nat_add)
(* ... so we should say that too. *)
lemma fibo32_is_fibo: "fibo n < 2^32 \<Longrightarrow> fibo n = unat (fibo32 (of_nat n))"
apply (induct n rule: less_induct)
apply (subst fibo32.simps)
apply (subst fibo_alt_def)
apply (rename_tac n, case_tac "n = 0", simp)
apply (case_tac "n = 1", simp)
apply (subgoal_tac "n < 2^32"
"fibo (n - 1) + fibo (n - 2) < 2^32"
"of_nat n \<noteq> (0 :: word32)"
"of_nat n \<noteq> (1 :: word32)"
"fibo (n - 1) < 2^32"
"fibo (n - 2) < 2^32"
"fibo (n - 1) = unat (fibo32 (of_nat (n - 1)))"
"fibo (n - 2) = unat (fibo32 (of_nat (n - 2)))")
apply (fastforce intro: unat_word32_plus)
apply (metis diff_less gr0I zero_less_numeral)
apply (metis diff_less gr0I zero_less_one)
apply simp
apply simp
apply (metis len32 unat_1 unat_of_nat_len)
apply (metis len32 unat_0 unat_of_nat_len)
apply (metis fibo_alt_def)
apply (case_tac "n < 6")
apply simp
apply (subgoal_tac "n < fibo n")
apply simp
apply (simp add: fibo_greater')
done
(* A helper for the SIMPL proofs later. *)
lemma fibo32_rec: "\<lbrakk> a < a + 2; b = a + 1; c = a + 2 \<rbrakk> \<Longrightarrow> fibo32 a + fibo32 b = fibo32 c"
apply (subst(3) fibo32.simps)
apply simp
apply safe
apply unat_arith
apply (metis not_le overflow_plus_one_self word_n1_ge word_not_simps(1))
apply (metis word_not_simps(1))
apply (simp add: field_simps)
done
(* First we invoke CParser to translate the C code to SIMPL. *)
install_C_file "fib.c"
context fib begin
(* fib_linear\<^bsub>C\<^esub> is the linear-time implementation. *)
thm fib_linear_body_def
(* fib\<^bsub>C\<^esub> is the pretty (inefficient) recursive implementation. *)
thm fib_body_def
(* First, let us prove that they implement fibo32. *)
(* First, the linear version. *)
lemma fib_linear_simpl_spec:
"\<Gamma> \<turnstile> {s. s = t}
\<acute>ret__unsigned :== CALL fib_linear(\<acute>n)
\<lbrace> (\<acute>ret__unsigned = fibo32 \<^bsup>t\<^esup>n) \<rbrace>"
(* We have not annotated the code yet, so we cannot apply vcg usefully. *)
(* First we expand the function call and defer the overall precondition. *)
apply vcg_step
defer
(* Now we annotate the loop with the correct invariant and termination measure. *)
apply (subst whileAnno_def)
apply (subst whileAnno_def [symmetric,
where I=" \<lbrace> \<acute>a = fibo32 (\<^bsup>t\<^esup>n - \<acute>n) \<and> \<acute>n \<le> \<^bsup>t\<^esup>n \<and> (\<acute>n \<noteq> 0 \<longrightarrow> (\<acute>b = fibo32 (\<^bsup>t\<^esup>n + 1 - \<acute>n))) \<rbrace>"
and V="measure (\<lambda>s. unat (n_' s))"])
apply vcg
(* It is mostly word arithmetic from here on. *)
apply (simp add: scast_def field_simps)
apply clarsimp
apply (case_tac "n = 0")
apply clarsimp
apply (case_tac "n = 1")
apply (rename_tac n1, subgoal_tac "n1 = 1")
apply simp
apply unat_arith
apply (rename_tac n1, case_tac "n1 = 1")
apply simp
apply clarsimp
apply safe
apply (metis linear word_must_wrap)
apply (rule fibo32_rec)
(* unat_arith is too slow for this subgoal *)
apply (subst word_less_nowrapI'[where k = 2 and z = "-1"])
apply (subgoal_tac "n1 \<ge> 2")
apply (metis word_n1_ge word_sub_le_iff word_sub_mono)
apply unat_arith
apply simp
apply simp
apply simp
apply (simp add: field_simps)+
apply (simp add: fibo32.simps)
apply (simp add: scast_def fibo32.simps)
done
(* And the recursive version. *)
thm fib_body_def
lemma fib_simpl_spec: "\<forall>n. \<Gamma>,\<Theta>\<turnstile>\<^sub>t\<lbrace>\<acute>n=n\<rbrace> PROC fib(\<acute>n,\<acute>ret__unsigned) \<lbrace>\<acute>ret__unsigned = fibo32 n\<rbrace>"
apply (hoare_rule HoareTotal.ProcRec1[where r = "measure (\<lambda>(s, d). unat \<^bsup>s\<^esup>n)"])
apply (unfold creturn_def | vcg_step)+
apply (subst fibo32.simps, simp)
apply (subst fibo32.simps, simp)
apply (subst fibo32.simps[where n = n])
apply unat_arith
done
(*
* We need to temporarily leave the local context to run autocorres.
* Normally, we would run autocorres immediately after install_C_file.
*)
end (* context fib *)
autocorres [unsigned_word_abs = fib_linear] "fib.c"
context fib begin
thm fib_linear'_def
thm fib'.simps fib'.simps[unfolded fun_app_def, folded One_nat_def]
thm call_fib'_def call_fib'_def[simplified]
(*
* fib_linear\<^bsub>C\<^esub> has been lifted to fib_linear', using the option monad.
* The option monad expresses programs that read the heap
* and may fail. fib_linear\<^bsub>C\<^esub> does not read the heap, but its loop
* might fail to terminate, so it cannot be a simple HOL function.
*
* Note that arithmetic in fib_linear' has been converted to type @{typ nat}.
* This conversion is enabled by the word_abs option.
* fib_linear' still matches the C code as long as calculations do not wrap around;
* AutoCorres inserts an extra guard to ensure this.
*)
thm fib_linear'_def
(* Here we prove that fib_linear' implements fibo, assuming that
* no calculations wrap around. *)
lemma fib_linear'_correct: "ovalid (\<lambda>_. True) (fib_linear' n) (\<lambda>r s. r = fibo n)"
unfolding fib_linear'_def
(* The loop invariant, as before. *)
apply (subst owhile_add_inv
[where I = "\<lambda>(a, b, i) s. i \<le> n \<and> a = fibo (n - i) \<and> (i \<noteq> 0 \<longrightarrow> (b = fibo (n - i + 1)))"])
apply wp
apply safe
(* Again, we prove largely arithmetical facts. However, these
* proofs are easier because we are using the @{typ nat} type. *)
apply (fastforce intro: arg_cong[where f = fibo])
apply (simp add: Suc_diff_le)
apply (fastforce intro: fiboI simp: field_simps)
apply simp
apply (rename_tac b s, case_tac b, simp, simp)
apply (simp_all add: fibo.simps)
done
(* Here we prove that fib_linear' terminates, and calculations do not overflow
* for Fibonacci numbers below UINT_MAX.
* This involves proving something similar to fibo32_is_fibo, so we
* do not need that theorem to interpret this one.
*
* NB: Because the variable b is ahead of the Fibonacci number
* being calculated, we need to consider integer wraparound for
* fibo (Suc n) instead of fibo n. *)
lemma fib_linear'_term: "no_ofail (\<lambda>_. fibo (Suc n) < UINT_MAX) (fib_linear' n)"
(* We would like to use the wp tactic, so we will translate our goal into
ovalidNF form. *)
unfolding fib_linear'_def no_ofail_is_ovalidNF
apply (subst owhile_add_inv
[where I = "\<lambda>(a, b, i) s. i \<le> n \<and> a = fibo (n - i) \<and> b = fibo (n - i + 1) \<and> fibo (Suc n) \<le> UINT_MAX"
and M = "\<lambda>(_, _, i) _. i"])
apply wp
apply safe
apply (fastforce intro: le_trans[rotated] add_le_mono simp: fibo_alt_def[where n = "Suc n"] fibo_mono)
apply arith
apply (simp add: field_simps Suc_diff_le)
apply (fastforce intro: fiboI simp: field_simps)
apply wp
apply simp
apply (simp_all add: fibo.simps)
done
(* And we are done. *)
lemma fib_linear'_proof: "ovalidNF (\<lambda>_. fibo (Suc n) < UINT_MAX) (fib_linear' n) (\<lambda>r _. r = fibo n)"
by (metis ovalidNF_combine ovalid_pre_imp fib_linear'_correct fib_linear'_term)
(* WIP: convert and prove in nondet_monad *)
(* First, set up the conversion... *)
lemma validE_NF_validNF: "validE_NF P (liftE f) Q (K (K False)) \<Longrightarrow> validNF P f Q"
unfolding validE_NF_def validE_def validNF_def valid_def no_fail_def liftE_def K_def
apply monad_eq
apply fastforce
done
lemma [ts_rule option unlift]: "oreturn a = ogets (\<lambda>_. a)"
by (simp add: oreturn_def ogets_def)
declare liftE_liftE[symmetric, ts_rule option unlift]
lemma gets_theE_split[ts_rule option unlift]:
"gets_theE (case p of (x, y) \<Rightarrow> f x y) = (case p of (x, y) \<Rightarrow> gets_theE (f x y))"
apply (simp split: prod.splits)
done
declare gets_theE_L2_while[symmetric, where n = "[]", ts_rule option unlift]
declare K_bind_def[ts_rule option unlift]
lemma nondet_fib_linear'_proof: "\<lbrace> \<lambda>s. fibo (Suc n) \<le> UINT_MAX \<and> P s \<rbrace> gets_the (fib_linear' n) \<lbrace> \<lambda>r s. r = fibo n \<and> P s \<rbrace>!"
unfolding fib_linear'_def
apply (rule validE_NF_validNF)
apply (subst fun_cong[OF gets_theE_def[symmetric, THEN meta_eq_to_obj_eq]])
(* Then it works! *)
apply (monad_convert nondet "gets_theE _")
apply (subst whileLoop_add_inv
[where I="\<lambda>(a', b', n') s. a' = fibo (n - n') \<and> n' \<le> n \<and> (n' \<noteq> 0 \<longrightarrow> (b' = fibo (n - n' + 1))) \<and> fibo (Suc n) \<le> UINT_MAX \<and> P s"
and M="\<lambda>((_, _, n'), _). n'"])
apply wp
apply safe
apply (fastforce intro: arg_cong[where f = fibo])
apply arith
apply (fastforce intro: fiboI simp: field_simps)
apply simp
apply (fastforce intro: le_trans[rotated] simp: fiboI field_simps fibo_mono)
apply simp
apply simp
apply (simp_all add: UINT_MAX_def fibo.simps)
done
(*
* Now for the recursive function fib\<^bsub>C\<^esub>.
* The lifted function fib' is a recursive function in HOL, but all HOL functions
 * must be well-founded (i.e. terminate), and AutoCorres cannot prove this.
* Instead, fib' gets an extra parameter called “measure”, which limits the
* recursion depth. With this recursion limit, it is obvious that
* fib' terminates, so fib' can be defined as a HOL function.
*)
thm fib'.simps (* Just a reminder *)
(*
* Like fib_linear\<^bsub>C\<^esub>, fib\<^bsub>C\<^esub> is lifted to the option monad. If the measure parameter
* is too small, fib' will fail instead of giving the result that would be
* returned by fib\<^bsub>C\<^esub>.
*)
(*
* AutoCorres also generates a \<^bitalic>measure function\<^eitalic> for fib'. This function is
* used to generate the measure parameter anywhere fib' is called. For example,
*
* void call_fib(void) { fib(42); }
*
* is translated to:
*)
thm call_fib'_def
(*
* The measure function receives all of the information needed, in principle,
* to determine an upper bound on the call depth (if there is one).
* Namely, the function arguments and the global program state.
*)
term measure_of'_fib
(*
 * measure_of'_fib does not actually have a definition yet.
* To give it a sensible definition, we need to work out the maximum
* recursion depth, and AutoCorres does not know how to do that.
* We will manually define the function later.
*)
(*
* But first, let us prove the correctness of fib'.
* Again, we prove partial correctness separately, assuming that fib'
* does not return None.
*)
lemma fib'_correct_rec_helper:
assumes ind: "\<And>y P m. ovalid (\<lambda>_. y < x \<and> P (fibo32 y)) (fib' m y) (\<lambda>r _. P r)"
shows "ovalid (\<lambda>_. P (fibo32 x)) (fib' m x) (\<lambda>r _. P r)"
apply (subst fib'.simps)
apply (clarsimp simp: unat_eq_of_nat)
apply (wp ind)
apply (clarsimp simp: split: if_split_asm)
apply safe
apply (subst (asm) fibo32.simps, simp)
apply (subst (asm) fibo32.simps, simp)
apply unat_arith
apply unat_arith
apply (subst (asm) fibo32.simps, simp)
done
lemma fib'_correct: "ovalid P (fib' m n) (\<lambda>r s. P s \<and> r = fibo32 n)"
apply (subgoal_tac "\<And>P. ovalid (\<lambda>_. P (fibo32 n)) (fib' m n) (\<lambda>r _. P r)")
apply (unfold ovalid_def, fast)[1]
apply (induct n arbitrary: P m rule: less_induct)
apply (subgoal_tac "\<And>y P m. ovalid (\<lambda>_. y < x \<and> P (fibo32 y)) (fib' m y) (\<lambda>r _. P r)")
apply (rule fib'_correct_rec_helper)
apply (unfold ovalid_def, fast)[1]
apply (unfold ovalid_def)
apply blast
done
lemma fib'_term_rec_helper:
assumes ind: "\<And>y P rec_measure'. ovalidNF (\<lambda>_. y < x \<and> unat y < rec_measure' \<and> P) (fib' rec_measure' y) (\<lambda>_ _. P)"
shows "ovalidNF (\<lambda>_. unat x < rec_measure') (fib' rec_measure' x) (\<lambda>_ _. True)"
apply (subst fib'.simps)
apply (wp ind)
apply simp
apply unat_arith (* slow *)
done
lemma fib'_term: "no_ofail (\<lambda>_. rec_measure' > unat n) (fib' rec_measure' n)"
apply (subst no_ofail_is_ovalidNF)
apply (induct n arbitrary: rec_measure' rule: less_induct)
apply (subgoal_tac "\<And>y P rec_measure'. ovalidNF (\<lambda>_. y < x \<and> unat y < rec_measure' \<and> P) (fib' rec_measure' y) (\<lambda>_ _. P)")
apply (rule fib'_term_rec_helper, assumption)
apply (metis (full_types) ovalidNF_assume_pre)
done
(* The overall correctness proof. *)
lemma fib'_spec: "ovalidNF (\<lambda>s. P s \<and> m > unat n) (fib' m n) (\<lambda>r s. P s \<and> r = fibo32 n)"
apply (rule ovalidNF_combine)
apply (wp fib'_correct, simp)
apply (wp fib'_term, simp)
done
(*
If fib\<^bsub>C\<^esub> was lifted to the state monad...
   Once we get Hoare triples and wp reasoning on the option monad,
   the proof might also look like this.
(* An Isar proof lets us rewrite the inductive assumption r and pass it to wp *)
lemma fib2'_correct: "\<lbrace> \<lambda>_. P (fibo32 n) \<rbrace> fib2' m n \<lbrace> \<lambda>r s. P r \<rbrace>"
proof (induct n arbitrary: P m rule: less_induct)
fix x P m
assume r: "\<And>y P m. y < x \<Longrightarrow> \<lbrace> \<lambda>_. P (fibo32 y) \<rbrace> fib2' m y \<lbrace> \<lambda>r s. P r \<rbrace>"
have r': "\<And>y P m. \<lbrace> \<lambda>_. y < x \<and> P (fibo32 y) \<rbrace> fib2' m y \<lbrace> \<lambda>r s. P r \<rbrace>"
apply (rule hoare_assume_pre)
apply clarsimp
apply (insert r)
apply (clarsimp simp: valid_def)
done
show "\<lbrace> \<lambda>_. P (fibo32 x) \<rbrace> fib2' m x \<lbrace> \<lambda>r s. P r \<rbrace>"
apply (subst fib2'.simps)
apply (wp r')
apply auto
apply (simp_all add: fibo32.simps)
apply unat_arith+
done
qed
*)
(* Finally, we can show that anyone who calls fib' will get the correct result. *)
lemma fib'_call:
"fib' (unat n + 1) n s = Some (fibo32 n)"
using fib'_spec[unfolded ovalidNF_def]
apply fastforce
done
lemma "\<lbrace> P \<rbrace> call_fib' \<lbrace> \<lambda>_. P \<rbrace>!"
including nf_no_pre
apply (unfold call_fib'_def)
apply wp
apply (blast intro: fib'_call)
apply (force simp: fib'_mono option_monad_mono_eq)
done
end (* context fib *)
end
|
lemma complex_i_not_numeral [simp]: "\<i> \<noteq> numeral w" |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Extensional pointwise lifting of relations to vectors
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Vec.Relation.Binary.Pointwise.Extensional where
open import Data.Fin using (zero; suc)
open import Data.Nat using (zero; suc)
open import Data.Vec as Vec hiding ([_]; head; tail; map)
open import Data.Vec.Relation.Binary.Pointwise.Inductive as Inductive
using ([]; _∷_)
renaming (Pointwise to IPointwise)
open import Level using (_⊔_)
open import Function using (_∘_)
open import Function.Equality using (_⟨$⟩_)
open import Function.Equivalence as Equiv
using (_⇔_; ⇔-setoid; equivalence; module Equivalence)
open import Level using (_⊔_) renaming (zero to ℓ₀)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality as P using (_≡_)
open import Relation.Binary.Construct.Closure.Transitive as Plus
hiding (equivalent; map)
open import Relation.Nullary
import Relation.Nullary.Decidable as Dec
record Pointwise {a b ℓ} {A : Set a} {B : Set b} (_∼_ : REL A B ℓ)
{n} (xs : Vec A n) (ys : Vec B n) : Set (a ⊔ b ⊔ ℓ)
where
constructor ext
field app : ∀ i → lookup xs i ∼ lookup ys i
------------------------------------------------------------------------
-- Operations
head : ∀ {a b ℓ} {A : Set a} {B : Set b} {_∼_ : REL A B ℓ}
{n x y xs} {ys : Vec B n} →
Pointwise _∼_ (x ∷ xs) (y ∷ ys) → x ∼ y
head (ext app) = app zero
tail : ∀ {a b ℓ} {A : Set a} {B : Set b} {_∼_ : REL A B ℓ}
{n x y xs} {ys : Vec B n} →
Pointwise _∼_ (x ∷ xs) (y ∷ ys) → Pointwise _∼_ xs ys
tail (ext app) = ext (app ∘ suc)
map : ∀ {a b ℓ} {A : Set a} {B : Set b} {_∼_ _∼′_ : REL A B ℓ} {n} →
_∼_ ⇒ _∼′_ → Pointwise _∼_ ⇒ Pointwise _∼′_ {n}
map ∼⇒∼′ xs∼ys = ext (∼⇒∼′ ∘ Pointwise.app xs∼ys)
gmap : ∀ {a b ℓ} {A : Set a} {B : Set b}
{_∼_ : Rel A ℓ} {_∼′_ : Rel B ℓ} {f : A → B} {n} →
_∼_ =[ f ]⇒ _∼′_ →
Pointwise _∼_ =[ Vec.map {n = n} f ]⇒ Pointwise _∼′_
gmap {_} ∼⇒∼′ {[]} {[]} xs∼ys = ext λ()
gmap {_∼′_ = _∼′_} ∼⇒∼′ {x ∷ xs} {y ∷ ys} xs∼ys = ext λ
{ zero → ∼⇒∼′ (head xs∼ys)
; (suc i) → Pointwise.app (gmap {_∼′_ = _∼′_} ∼⇒∼′ (tail xs∼ys)) i
}
------------------------------------------------------------------------
-- The inductive and extensional definitions are equivalent.
module _ {a b ℓ} {A : Set a} {B : Set b} {_∼_ : REL A B ℓ} where
extensional⇒inductive : ∀ {n} {xs : Vec A n} {ys : Vec B n} →
Pointwise _∼_ xs ys → IPointwise _∼_ xs ys
extensional⇒inductive {zero} {[]} {[]} xs∼ys = []
extensional⇒inductive {suc n} {x ∷ xs} {y ∷ ys} xs∼ys =
(head xs∼ys) ∷ extensional⇒inductive (tail xs∼ys)
inductive⇒extensional : ∀ {n} {xs : Vec A n} {ys : Vec B n} →
IPointwise _∼_ xs ys → Pointwise _∼_ xs ys
inductive⇒extensional [] = ext λ()
inductive⇒extensional (x∼y ∷ xs∼ys) = ext λ
{ zero → x∼y
; (suc i) → Pointwise.app (inductive⇒extensional xs∼ys) i
}
equivalent : ∀ {n} {xs : Vec A n} {ys : Vec B n} →
Pointwise _∼_ xs ys ⇔ IPointwise _∼_ xs ys
equivalent = equivalence extensional⇒inductive inductive⇒extensional
------------------------------------------------------------------------
-- Relational properties
refl : ∀ {a ℓ} {A : Set a} {_∼_ : Rel A ℓ} →
∀ {n} → Reflexive _∼_ → Reflexive (Pointwise _∼_ {n = n})
refl ∼-rfl = ext (λ _ → ∼-rfl)
sym : ∀ {a b ℓ} {A : Set a} {B : Set b} {P : REL A B ℓ} {Q : REL B A ℓ}
{n} → Sym P Q → Sym (Pointwise P) (Pointwise Q {n = n})
sym sm xs∼ys = ext λ i → sm (Pointwise.app xs∼ys i)
trans : ∀ {a b c ℓ} {A : Set a} {B : Set b} {C : Set c}
{P : REL A B ℓ} {Q : REL B C ℓ} {R : REL A C ℓ} {n} →
Trans P Q R →
Trans (Pointwise P) (Pointwise Q) (Pointwise R {n = n})
trans trns xs∼ys ys∼zs = ext λ i →
trns (Pointwise.app xs∼ys i) (Pointwise.app ys∼zs i)
decidable : ∀ {a b ℓ} {A : Set a} {B : Set b} {_∼_ : REL A B ℓ} →
Decidable _∼_ → ∀ {n} → Decidable (Pointwise _∼_ {n = n})
decidable dec xs ys = Dec.map
(Setoid.sym (⇔-setoid _) equivalent)
(Inductive.decidable dec xs ys)
isEquivalence : ∀ {a ℓ} {A : Set a} {_∼_ : Rel A ℓ} →
∀ {n} → IsEquivalence _∼_ →
IsEquivalence (Pointwise _∼_ {n = n})
isEquivalence equiv = record
{ refl = refl Eq.refl
; sym = sym Eq.sym
; trans = trans Eq.trans
} where module Eq = IsEquivalence equiv
isDecEquivalence : ∀ {a ℓ} {A : Set a} {_∼_ : Rel A ℓ} →
∀ {n} → IsDecEquivalence _∼_ →
IsDecEquivalence (Pointwise _∼_ {n = n})
isDecEquivalence decEquiv = record
{ isEquivalence = isEquivalence DecEq.isEquivalence
; _≟_ = decidable DecEq._≟_
} where module DecEq = IsDecEquivalence decEquiv
------------------------------------------------------------------------
-- Pointwise _≡_ is equivalent to _≡_.
module _ {a} {A : Set a} where
Pointwise-≡⇒≡ : ∀ {n} {xs ys : Vec A n} →
Pointwise _≡_ xs ys → xs ≡ ys
Pointwise-≡⇒≡ {zero} {[]} {[]} (ext app) = P.refl
Pointwise-≡⇒≡ {suc n} {x ∷ xs} {y ∷ ys} xs∼ys =
P.cong₂ _∷_ (head xs∼ys) (Pointwise-≡⇒≡ (tail xs∼ys))
≡⇒Pointwise-≡ : ∀ {n} {xs ys : Vec A n} →
xs ≡ ys → Pointwise _≡_ xs ys
≡⇒Pointwise-≡ P.refl = refl P.refl
Pointwise-≡↔≡ : ∀ {n} {xs ys : Vec A n} →
Pointwise _≡_ xs ys ⇔ xs ≡ ys
Pointwise-≡↔≡ {ℓ} {A} =
Equiv.equivalence Pointwise-≡⇒≡ ≡⇒Pointwise-≡
------------------------------------------------------------------------
-- Pointwise and Plus commute when the underlying relation is
-- reflexive.
module _ {a ℓ} {A : Set a} {_∼_ : Rel A ℓ} where
⁺∙⇒∙⁺ : ∀ {n} {xs ys : Vec A n} →
Plus (Pointwise _∼_) xs ys → Pointwise (Plus _∼_) xs ys
⁺∙⇒∙⁺ [ ρ≈ρ′ ] = ext (λ x → [ Pointwise.app ρ≈ρ′ x ])
⁺∙⇒∙⁺ (ρ ∼⁺⟨ ρ≈ρ′ ⟩ ρ′≈ρ″) = ext (λ x →
_ ∼⁺⟨ Pointwise.app (⁺∙⇒∙⁺ ρ≈ρ′ ) x ⟩
Pointwise.app (⁺∙⇒∙⁺ ρ′≈ρ″) x)
∙⁺⇒⁺∙ : ∀ {n} {xs ys : Vec A n} → Reflexive _∼_ →
Pointwise (Plus _∼_) xs ys → Plus (Pointwise _∼_) xs ys
∙⁺⇒⁺∙ rfl =
Plus.map (_⟨$⟩_ (Equivalence.from equivalent)) ∘
helper ∘
_⟨$⟩_ (Equivalence.to equivalent)
where
helper : ∀ {n} {xs ys : Vec A n} →
IPointwise (Plus _∼_) xs ys → Plus (IPointwise _∼_) xs ys
helper [] = [ [] ]
helper (_∷_ {x = x} {y = y} {xs = xs} {ys = ys} x∼y xs∼ys) =
x ∷ xs ∼⁺⟨ Plus.map (_∷ Inductive.refl rfl) x∼y ⟩
y ∷ xs ∼⁺⟨ Plus.map (rfl ∷_) (helper xs∼ys) ⟩∎
y ∷ ys ∎
-- ∙⁺⇒⁺∙ cannot be defined if the requirement of reflexivity
-- is dropped.
private
module Counterexample where
data D : Set where
i j x y z : D
data _R_ : Rel D ℓ₀ where
iRj : i R j
xRy : x R y
yRz : y R z
xR⁺z : x [ _R_ ]⁺ z
xR⁺z =
x ∼⁺⟨ [ xRy ] ⟩
y ∼⁺⟨ [ yRz ] ⟩∎
z ∎
ix : Vec D 2
ix = i ∷ x ∷ []
jz : Vec D 2
jz = j ∷ z ∷ []
ix∙⁺jz : IPointwise (Plus _R_) ix jz
ix∙⁺jz = [ iRj ] ∷ xR⁺z ∷ []
¬ix⁺∙jz : ¬ Plus′ (IPointwise _R_) ix jz
¬ix⁺∙jz [ iRj ∷ () ∷ [] ]
¬ix⁺∙jz ((iRj ∷ xRy ∷ []) ∷ [ () ∷ yRz ∷ [] ])
¬ix⁺∙jz ((iRj ∷ xRy ∷ []) ∷ (() ∷ yRz ∷ []) ∷ _)
counterexample :
¬ (∀ {n} {xs ys : Vec D n} →
Pointwise (Plus _R_) xs ys →
Plus (Pointwise _R_) xs ys)
counterexample ∙⁺⇒⁺∙ =
¬ix⁺∙jz (Equivalence.to Plus.equivalent ⟨$⟩
Plus.map (_⟨$⟩_ (Equivalence.to equivalent))
(∙⁺⇒⁺∙ (Equivalence.from equivalent ⟨$⟩ ix∙⁺jz)))
------------------------------------------------------------------------
-- DEPRECATED NAMES
------------------------------------------------------------------------
-- Please use the new names as continuing support for the old names is
-- not guaranteed.
-- Version 0.15
Pointwise-≡ = Pointwise-≡↔≡
{-# WARNING_ON_USAGE Pointwise-≡
"Warning: Pointwise-≡ was deprecated in v0.15.
Please use Pointwise-≡↔≡ instead."
#-}
|
module Categories.Terminal where
open import Library
open import Categories
open import Categories.Sets
open Cat
record Term {a b} (C : Cat {a}{b})(T : Obj C) : Set (a ⊔ b) where
constructor term
field t : ∀{X} → Hom C X T
law : ∀{X}{f : Hom C X T} → t {X} ≅ f
OneSet : Term Sets ⊤
OneSet = record {t = λ _ → _; law = ext (λ _ → refl)}
|
lemma Ints_of_real [intro]: "x \<in> \<int> \<Longrightarrow> of_real x \<in> \<int>" |
Short proteins can also be synthesized chemically by a family of methods known as peptide synthesis, which rely on organic synthesis techniques such as chemical ligation to produce peptides in high yield. Chemical synthesis allows for the introduction of non-natural amino acids into polypeptide chains, such as attachment of fluorescent probes to amino acid side chains. These methods are useful in laboratory biochemistry and cell biology, though generally not for commercial applications. Chemical synthesis is inefficient for polypeptides longer than about 300 amino acids, and the synthesized proteins may not readily assume their native tertiary structure. Most chemical synthesis methods proceed from C-terminus to N-terminus, opposite the biological reaction.
|
(** An impure monad for interfacing with impure code
*)
Require Import Program.
Module Type MayReturnMonad.
Axiom t: Type -> Type.
Axiom mayRet: forall {A:Type}, t A -> A -> Prop.
Axiom ret: forall {A}, A -> t A.
Axiom bind: forall {A B}, (t A) -> (A -> t B) -> t B.
Axiom mk_annot: forall {A} (k: t A), t { a: A | mayRet k a }.
Axiom mayRet_ret: forall A (a b:A),
mayRet (ret a) b -> a=b.
Axiom mayRet_bind: forall A B k1 k2 (b:B),
mayRet (bind k1 k2) b -> exists a:A, mayRet k1 a /\ mayRet (k2 a) b.
End MayReturnMonad.
(** Model of impure computation as a predicate *)
Module PowerSetMonad<: MayReturnMonad.
Definition t (A:Type) := A -> Prop.
Definition mayRet {A:Type} (k: t A) a: Prop := k a.
Definition ret {A:Type} (a:A) := eq a.
Definition bind {A B:Type} (k1: t A) (k2: A -> t B) :=
fun b => exists a, k1 a /\ k2 a b.
Definition mk_annot {A} (k: t A) : t { a | mayRet k a } := fun _ => True.
Lemma mayRet_ret A (a b:A): mayRet (ret a) b -> a=b.
Proof.
unfold mayRet, ret. firstorder.
Qed.
Lemma mayRet_bind A B k1 k2 (b:B):
mayRet (bind k1 k2) b -> exists (a:A), mayRet k1 a /\ mayRet (k2 a) b.
Proof.
unfold mayRet, bind.
firstorder.
Qed.
End PowerSetMonad.
(** The identity interpretation *)
Module IdentityMonad<: MayReturnMonad.
Definition t (A:Type) := A.
(* may-return semantics of computations *)
Definition mayRet {A:Type} (a b:A): Prop := a=b.
Definition ret {A:Type} (a:A) := a.
Definition bind {A B:Type} (k1: A) (k2: A -> B) := k2 k1.
Definition mk_annot {A} (k: t A) : t { a: A | mayRet k a }
:= exist _ k (eq_refl k) .
Lemma mayRet_ret (A:Type) (a b:A): mayRet (ret a) b -> a=b.
Proof.
intuition.
Qed.
Lemma mayRet_bind (A B:Type) (k1:t A) k2 (b:B):
mayRet (bind k1 k2) b -> exists (a:A), mayRet k1 a /\ mayRet (k2 a) b.
Proof.
firstorder.
Qed.
End IdentityMonad.
(** Model of impure computation as state-transformers *)
Module StateMonad<: MayReturnMonad.
Parameter St: Type. (* A global state *)
Definition t (A:Type) := St -> A * St.
Definition mayRet {A:Type} (k: t A) a: Prop :=
exists s, fst (k s)=a.
Definition ret {A:Type} (a:A) := fun (s:St) => (a,s).
Definition bind {A B:Type} (k1: t A) (k2: A -> t B) :=
fun s0 => let r := k1 s0 in k2 (fst r) (snd r).
Program Definition mk_annot {A} (k: t A) : t { a | mayRet k a } :=
fun s0 => let r := k s0 in (exist _ (fst r) _, snd r).
Obligation 1.
unfold mayRet; eauto.
Qed.
Lemma mayRet_ret {A:Type} (a b:A): mayRet (ret a) b -> a=b.
Proof.
unfold mayRet, ret. firstorder.
Qed.
Lemma mayRet_bind {A B:Type} k1 k2 (b:B):
mayRet (bind k1 k2) b -> exists (a:A), mayRet k1 a /\ mayRet (k2 a) b.
Proof.
unfold mayRet, bind. firstorder eauto.
Qed.
End StateMonad.
(** The deferred interpretation *)
Module DeferredMonad<: MayReturnMonad.
Definition t (A:Type) := unit -> A.
(* may-return semantics of computations *)
Definition mayRet {A:Type} (a: t A) (b:A): Prop := a tt=b.
Definition ret {A:Type} (a:A) : t A := fun _ => a.
Definition bind {A B:Type} (k1: t A) (k2: A -> t B) : t B := fun _ => k2 (k1 tt) tt.
Definition mk_annot {A} (k: t A) : t { a: A | mayRet k a }
:= fun _ => exist _ (k tt) (eq_refl (k tt)).
Lemma mayRet_ret (A:Type) (a b: A): mayRet (ret a) b -> a=b.
Proof.
intuition.
Qed.
Lemma mayRet_bind (A B:Type) (k1:t A) k2 (b:B):
mayRet (bind k1 k2) b -> exists (a:A), mayRet k1 a /\ mayRet (k2 a) b.
Proof.
firstorder.
Qed.
End DeferredMonad.
|
\subsubsection{Nodes}
\input{sections/programming/tikz/Nodes}
\subsubsection{Links}
\input{sections/programming/tikz/Links}
\subsubsection{Styles}
\input{sections/programming/tikz/Styles}
\subsubsection{Recursion}
\input{sections/programming/tikz/Recursion}
|
------------------------------------------------------------------------------
-- Equality on Conat
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Data.Conat.Equality.Type where
open import FOTC.Base
infix 4 _≈_
------------------------------------------------------------------------------
-- Functional for the relation _≈_ (adapted from (Sander 1992,
-- p. 58)).
--
-- ≈-F : (D → D → Set) → D → D → Set
-- ≈-F R m n =
-- (m ≡ zero ∧ n ≡ zero) ∨ (∃[ m' ] ∃[ n' ] m ≡ succ m' ∧ n ≡ succ n' ∧ R m' n')
-- The relation _≈_ is the greatest post-fixed point of the functional
-- ≈-F (by ≈-out and ≈-coind).
-- The equality on Conat.
postulate _≈_ : D → D → Set
-- The relation _≈_ is a post-fixed point of the functional ≈-F,
-- i.e.
--
-- _≈_ ≤ ≈-F _≈_.
postulate ≈-out : ∀ {m n} → m ≈ n →
m ≡ zero ∧ n ≡ zero
∨ (∃[ m' ] ∃[ n' ] m ≡ succ₁ m' ∧ n ≡ succ₁ n' ∧ m' ≈ n')
{-# ATP axiom ≈-out #-}
-- The relation _≈_ is the greatest post-fixed point of the functional
-- ≈-F, i.e.
--
-- ∀ R. R ≤ ≈-F R ⇒ R ≤ _≈_.
--
-- N.B. This is an axiom schema. Because in the automatic proofs we
-- *must* use an instance, we do not add this postulate as an ATP
-- axiom.
postulate
≈-coind :
(R : D → D → Set) →
-- R is a post-fixed point of the functional ≈-F.
(∀ {m n} → R m n → m ≡ zero ∧ n ≡ zero
∨ (∃[ m' ] ∃[ n' ] m ≡ succ₁ m' ∧ n ≡ succ₁ n' ∧ R m' n')) →
-- _≈_ is greater than R.
∀ {m n} → R m n → m ≈ n
------------------------------------------------------------------------------
-- References
--
-- Sander, Herbert P. (1992). A Logic of Functional Programs with an
-- Application to Concurrency. PhD thesis. Department of Computer
-- Sciences: Chalmers University of Technology and University of
-- Gothenburg.
|
# N-FPN Rock Paper Scissors Toy Problem
We perform a rock-paper-scissors experiment. Each player's actions are restricted to the unit simplex $\Delta^3 \triangleq \{ x \in \mathbb{R}^{3}_{\geq0} : \|x\|_1 = 1\} \subset\mathbb{R}^3$ so that $\mathcal{C} = \Delta^3 \times \Delta^3$ and actions $x_i$ are interpreted as probability distributions over three choices: rock, paper and scissors.
Equilibria $x_d^\star$ are VI solutions, _i.e._
$$ \left< F(x_d^\star;d), x - x_d^\star\right> \geq 0,\quad \mbox{for all $x\in \mathcal{C}$},$$
using the game gradient
$$F(x;d) = [\nabla_{x_1} u_1(x;d)^\top, \ \nabla_{x_2} u_2(x;d)^\top ]^\top $$
with cost functions given by
$$
u_1(x;d) \triangleq \left< x_1, B(d)x_2\right>
\quad \mbox{and}\quad
u_2(x;d) \triangleq -\left< x_1, B(d) x_2\right>.
$$
where the antisymmetric payoff matrix $B(d) \in \mathbb{R}^{3\times 3}$, defined as in the paper, is used to define the players' cost functions, and each vector $w^i \in \mathbb{R}^{3}$ is drawn uniformly from $[0,1]^3$ for $i \in [3]$.
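Differentiating the bilinear costs above gives the game gradient in closed form,
$$ F(x;d) = \begin{bmatrix} \nabla_{x_1} u_1(x;d) \\ \nabla_{x_2} u_2(x;d) \end{bmatrix} = \begin{bmatrix} B(d)\,x_2 \\ -B(d)^\top x_1 \end{bmatrix}, $$
which is exactly what the function `F` computes in the code below.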
Contextual data $d$ are drawn from a distribution $\mathcal{D}$ that is uniform over $[0,1]^3$. An N-FPN is trained to predict $x_d^\star$ from $d$ using a training set of context-action pairs $\{(d^i,x_{d^i}^\star)\}_{i=1}^{1000}$, without using knowledge of $F$. The learned $F_{\Theta}$ has two fully connected layers with a leaky ReLU activation, and forward propagation uses projected gradient steps. This notebook generates data for two plots. The first plot shows convergence of the test loss while training the N-FPN.
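As a rough sketch of the architecture just described (the hidden width and exact wiring below are assumptions for illustration, not taken from the paper or the released code), the learned game gradient could be modeled as:
```
import torch
import torch.nn as nn

class FTheta(nn.Module):
    # Hypothetical stand-in for the learned F_Theta described above: two fully
    # connected layers with a leaky ReLU activation, mapping a concatenated
    # (action, context) pair to a gradient estimate.
    def __init__(self, action_dim=6, context_dim=3, hidden_dim=50):
        super().__init__()
        self.fc1 = nn.Linear(action_dim + context_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, action_dim)
        self.act = nn.LeakyReLU()

    def forward(self, x, d):
        return self.fc2(self.act(self.fc1(torch.cat((x, d), dim=-1))))
```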
The final plot simulates play between an optimal Nash player with access to $u_1(\cdot,d)$ and an N-FPN player that only has access to $d$ and training data.
The final plot measures performance using
\begin{equation}
\mbox{(Expected Nash Player $k$-Game Cost Variance)} \equiv y^k \triangleq \mathbb{E}_{d\sim\mathcal{D}}\left[\left(\dfrac{1}{k} \sum_{\ell=1}^k u_1^2\Big(s^\ell; d \Big) \right)^{1/2}\right],
\end{equation}
where each $s^\ell$ is a tuple of two one-hot vectors sampled as $s_1^\ell \sim x_d^\star$ and $s_2^\ell \sim \mathcal{N}_\Theta(d)$.
Because rock-paper-scissors is a zero-sum game, if the N-FPN plays optimally, then the average reward of the Nash player converges to zero as the number of games increases.
That is, optimal $\mathcal{N}_\Theta$ yields $y^k\rightarrow 0$, as shown in the final plot of this notebook.
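As a minimal sketch of how $y^k$ could be estimated for a single context $d$ (the helper names below are illustrative, not from the released code), each player samples a pure one-hot action from its mixed strategy and the Nash player's squared realized costs are averaged:
```
import torch
import torch.nn.functional as fn

def one_hot_sample(p):
    # Draw one pure action from the mixed strategy p (a point in the simplex).
    idx = torch.multinomial(p, num_samples=1).squeeze(-1)
    return fn.one_hot(idx, num_classes=p.shape[-1]).float()

def k_game_cost(x_star, x_nfpn, B, k):
    # Square root of the mean squared cost u_1(s; d) = <s_1, B s_2> over
    # k sampled games, mirroring the y^k statistic above for one context.
    costs = []
    for _ in range(k):
        s1 = one_hot_sample(x_star)   # Nash player's sampled action
        s2 = one_hot_sample(x_nfpn)   # N-FPN player's sampled action
        costs.append((s1 @ B @ s2) ** 2)
    return torch.stack(costs).mean().sqrt()
```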
```
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.utils.data.dataset import random_split
# Type aliases, used purely to make the annotations below more readable.
context = torch.tensor
action = torch.tensor
weight_matrix = torch.tensor
payoff_matrix = torch.tensor
seed = 30
torch.manual_seed(seed)
W = torch.rand(3, 3) * torch.tensor([0.5, 10, 20])
W = W.permute(1,0)
print(W)
```
tensor([[ 0.4504, 0.4369, 0.4473],
[ 7.4636, 7.4026, 6.2383],
[ 9.4323, 15.6801, 8.5520]])
## Generate Synthetic Training Data
Notes:
1) The analytic solutions are generated using the Extra Gradient Scheme as in (1.2) and (1.3) of this [paper](http://math.haifa.ac.il/agibali/Web/Extensions%20of%20Korpelevich's%20extragradient%20method%20for%20the%20variational%20inequality%20problem%20in%20Euclidean%20space.pdf); the update is sketched after this list.
2) The simplex projection is performed using Algorithm 1 in this [paper](https://arxiv.org/pdf/1309.1541.pdf).
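Concretely, with step size $\alpha$ and $P_{\mathcal{C}}$ denoting projection onto the product of simplices, each extragradient iteration performs
$$ y^{k} = P_{\mathcal{C}}\big(x^{k} - \alpha F(x^{k}; d)\big), \qquad x^{k+1} = P_{\mathcal{C}}\big(x^{k} - \alpha F(y^{k}; d)\big), $$
which corresponds to the two `project_simplex` calls inside `get_nash_eq` below.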
```
def sample_context(num_samples: int) -> context:
    return torch.rand(num_samples, 3)

def project_simplex(y: action, action_size=3, num_players=2) -> action:
    # Euclidean projection of each player's block of y onto the unit simplex,
    # following Algorithm 1 of the paper linked in note 2 above.
    num_samples = y.shape[0]
    proj = torch.zeros(y.shape)
    for i in range(num_players):
        ind = [i * action_size + j for j in range(action_size)]
        # Sort each player's coordinates in decreasing order and cumsum them.
        u = torch.flip(torch.sort(y[:, ind], dim=1)[0], dims=(1,))
        u_sum = torch.cumsum(u, dim=1)
        j = torch.arange(1, action_size + 1, dtype=y.dtype, device=y.device)
        # rho counts the coordinates that remain positive after the shift.
        pos_u_expr = u * j + 1.0 - u_sum > 0
        pos_u_expr = pos_u_expr.float()
        rho = torch.sum(pos_u_expr, dim=1, keepdim=True)
        rho = rho.long()
        # lambd is the shift that makes the clipped coordinates sum to one.
        lambd = [(1 - u_sum[sample, rho[sample]-1]) / rho[sample]
                 for sample in range(num_samples)]
        lambd = torch.tensor(lambd)
        lambd = lambd.view(lambd.shape[0], 1)
        proj[:, ind] = torch.clamp(y[:, ind] + lambd, min=0)
    return proj
def create_payoff_matrix(d: context) -> payoff_matrix:
    # Build the context-dependent payoff matrix B(d) from Wd, then
    # antisymmetrize so that B(d)^T = -B(d).
    num_samples = d.shape[0]
    action_size = d.shape[1]
    Wd = d.mm(W.permute(1, 0))
    B = torch.zeros(num_samples, action_size, action_size)
    B[:, 0, 1] = -Wd[:, 0]
    B[:, 0, 2] = Wd[:, 1]
    B[:, 1, 2] = -Wd[:, 2]
    B -= B.permute(0, 2, 1)
    return B
def F(x: action, d: context, player_size=3) -> action:
    # Game gradient F(x; d) = [B(d) x_2, -B(d)^T x_1], stacked per sample.
    B = create_payoff_matrix(d)
    x = x.view(x.shape[0], x.shape[1], 1)
    Fx_1 = B.bmm(x[:, player_size:, :])    # gradient of u_1 w.r.t. x_1
    Bt = B.permute(0, 2, 1)
    Fx_2 = -Bt.bmm(x[:, :player_size, :])  # gradient of u_2 w.r.t. x_2
    Fx = torch.cat((Fx_1, Fx_2), dim=1)
    return Fx.view(Fx.shape[0], Fx.shape[1])
def get_nash_eq(d: context, fxd_pt_tol=1e-5, max_iter=10000, step_size=5e-3,
                action_size=6, debug_mode=False) -> action:
    # Solve the VI with extragradient iterations (the update sketched above),
    # stopping once successive iterates agree to within fxd_pt_tol.
    num_samples = d.shape[0]
    x = torch.rand(num_samples, action_size)
    conv = False
    step = 0
    while not conv and step < max_iter:
        x_prev = x.clone()
        y = project_simplex(x - step_size * F(x, d))  # extrapolation step
        x = project_simplex(x - step_size * F(y, d))  # correction step
        res = torch.max(torch.norm(x - x_prev, dim=1))
        step += 1
        conv = res < fxd_pt_tol
        if step % 5 == 0 and debug_mode:
            fmt_str = "Step {:5d}: |xk - xk_prev| = {:2.2e} x[0,:] = "
            print(fmt_str.format(step, res) + str(x[0, :]))
    return x
def create_data(train_batch_size=200, test_batch_size=100,
                train_size=1000, test_size=100):
    d_context = sample_context(train_size + test_size)
    x_true = get_nash_eq(d_context, debug_mode=True)
    dataset = TensorDataset(x_true, d_context)
    train_dataset, test_dataset = random_split(dataset,
                                               [train_size, test_size])
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=train_batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=test_batch_size, shuffle=False)
    return train_loader, test_loader

if 'train_loader' not in globals():
    train_loader, test_loader = create_data()
```
Step 5: |xk - xk_prev| = 1.48e-01 x[0,:] = tensor([0.4481, 0.4214, 0.1305, 0.0943, 0.8389, 0.0668])
Step 10: |xk - xk_prev| = 1.34e-01 x[0,:] = tensor([0.5126, 0.4874, 0.0000, 0.1333, 0.8667, 0.0000])
Step 15: |xk - xk_prev| = 1.08e-01 x[0,:] = tensor([0.5249, 0.4751, 0.0000, 0.1456, 0.8544, 0.0000])
Step 20: |xk - xk_prev| = 8.69e-02 x[0,:] = tensor([0.5371, 0.4629, 0.0000, 0.1579, 0.8421, 0.0000])
Step 25: |xk - xk_prev| = 6.22e-02 x[0,:] = tensor([0.5494, 0.4506, 0.0000, 0.1701, 0.8299, 0.0000])
Step 30: |xk - xk_prev| = 5.13e-02 x[0,:] = tensor([0.5617, 0.4383, 0.0000, 0.1824, 0.8176, 0.0000])
Step 35: |xk - xk_prev| = 4.67e-02 x[0,:] = tensor([0.5739, 0.4261, 0.0000, 0.1947, 0.8053, 0.0000])
Step 40: |xk - xk_prev| = 4.13e-02 x[0,:] = tensor([0.5862, 0.4138, 0.0000, 0.2069, 0.7931, 0.0000])
Step 45: |xk - xk_prev| = 3.42e-02 x[0,:] = tensor([0.5985, 0.4015, 0.0000, 0.2192, 0.7808, 0.0000])
Step 50: |xk - xk_prev| = 3.09e-02 x[0,:] = tensor([0.6107, 0.3893, 0.0000, 0.2315, 0.7685, 0.0000])
Step 55: |xk - xk_prev| = 2.46e-02 x[0,:] = tensor([0.6221, 0.3779, 0.0000, 0.2415, 0.7540, 0.0045])
Step 60: |xk - xk_prev| = 2.19e-02 x[0,:] = tensor([0.6290, 0.3710, 0.0000, 0.2484, 0.7364, 0.0152])
Step 65: |xk - xk_prev| = 2.18e-02 x[0,:] = tensor([0.6294, 0.3706, 0.0000, 0.2542, 0.7177, 0.0281])
Step 70: |xk - xk_prev| = 2.18e-02 x[0,:] = tensor([0.6235, 0.3765, 0.0000, 0.2613, 0.7002, 0.0386])
Step 75: |xk - xk_prev| = 2.02e-02 x[0,:] = tensor([0.6139, 0.3861, 0.0000, 0.2712, 0.6856, 0.0432])
Step 80: |xk - xk_prev| = 2.01e-02 x[0,:] = tensor([0.6041, 0.3959, 0.0000, 0.2847, 0.6746, 0.0407])
Step 85: |xk - xk_prev| = 2.01e-02 x[0,:] = tensor([0.5974, 0.4026, 0.0000, 0.3011, 0.6664, 0.0325])
Step 90: |xk - xk_prev| = 2.01e-02 x[0,:] = tensor([0.5961, 0.4039, 0.0000, 0.3187, 0.6595, 0.0218])
Step 95: |xk - xk_prev| = 1.13e-02 x[0,:] = tensor([0.6001, 0.3999, 0.0000, 0.3356, 0.6519, 0.0125])
Step 100: |xk - xk_prev| = 1.13e-02 x[0,:] = tensor([0.6077, 0.3923, 0.0000, 0.3503, 0.6420, 0.0076])
Step 105: |xk - xk_prev| = 1.12e-02 x[0,:] = tensor([0.6161, 0.3839, 0.0000, 0.3621, 0.6293, 0.0086])
Step 110: |xk - xk_prev| = 1.12e-02 x[0,:] = tensor([0.6222, 0.3778, 0.0000, 0.3713, 0.6140, 0.0147])
Step 115: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6242, 0.3758, 0.0000, 0.3793, 0.5973, 0.0234])
Step 120: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6217, 0.3783, 0.0000, 0.3874, 0.5810, 0.0316])
Step 125: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6157, 0.3843, 0.0000, 0.3972, 0.5663, 0.0365])
Step 130: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6087, 0.3913, 0.0000, 0.4095, 0.5539, 0.0366])
Step 135: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6031, 0.3969, 0.0000, 0.4239, 0.5439, 0.0322])
Step 140: |xk - xk_prev| = 1.03e-02 x[0,:] = tensor([0.6007, 0.3993, 0.0000, 0.4397, 0.5351, 0.0252])
Step 145: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6022, 0.3978, 0.0000, 0.4555, 0.5264, 0.0181])
Step 150: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6067, 0.3933, 0.0000, 0.4701, 0.5165, 0.0134])
Step 155: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6125, 0.3875, 0.0000, 0.4828, 0.5046, 0.0125])
Step 160: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6176, 0.3824, 0.0000, 0.4936, 0.4909, 0.0156])
Step 165: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6201, 0.3799, 0.0000, 0.5031, 0.4758, 0.0211])
Step 170: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6195, 0.3805, 0.0000, 0.5123, 0.4605, 0.0272])
Step 175: |xk - xk_prev| = 5.83e-03 x[0,:] = tensor([0.6161, 0.3839, 0.0000, 0.5224, 0.4460, 0.0316])
Step 180: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6113, 0.3887, 0.0000, 0.5340, 0.4331, 0.0329])
Step 185: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6069, 0.3931, 0.0000, 0.5472, 0.4218, 0.0310])
Step 190: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6043, 0.3957, 0.0000, 0.5617, 0.4117, 0.0266])
Step 195: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6044, 0.3956, 0.0000, 0.5765, 0.4020, 0.0215])
Step 200: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6068, 0.3932, 0.0000, 0.5908, 0.3917, 0.0175])
Step 205: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([6.1064e-01, 3.8931e-01, 5.3708e-05, 6.0385e-01, 3.8030e-01, 1.5853e-02])
Step 210: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6119, 0.3829, 0.0052, 0.6144, 0.3686, 0.0170])
Step 215: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6089, 0.3748, 0.0163, 0.6191, 0.3605, 0.0203])
Step 220: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.6028, 0.3680, 0.0292, 0.6168, 0.3586, 0.0246])
Step 225: |xk - xk_prev| = 5.82e-03 x[0,:] = tensor([0.5961, 0.3647, 0.0392, 0.6087, 0.3631, 0.0282])
Step 230: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.5910, 0.3658, 0.0432, 0.5980, 0.3720, 0.0300])
Step 235: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.5892, 0.3706, 0.0402, 0.5886, 0.3819, 0.0295])
Step 240: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.5912, 0.3772, 0.0317, 0.5836, 0.3894, 0.0270])
Step 245: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.5958, 0.3832, 0.0209, 0.5845, 0.3920, 0.0235])
Step 250: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.6015, 0.3866, 0.0119, 0.5905, 0.3892, 0.0203])
Step 255: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.6061, 0.3864, 0.0075, 0.5992, 0.3823, 0.0185])
Step 260: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.6081, 0.3829, 0.0089, 0.6075, 0.3740, 0.0185])
Step 265: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.6071, 0.3775, 0.0154, 0.6124, 0.3672, 0.0204])
Step 270: |xk - xk_prev| = 5.81e-03 x[0,:] = tensor([0.6035, 0.3723, 0.0242, 0.6126, 0.3642, 0.0232])
Step 275: |xk - xk_prev| = 3.72e-03 x[0,:] = tensor([0.5989, 0.3689, 0.0322, 0.6083, 0.3657, 0.0260])
Step 280: |xk - xk_prev| = 3.68e-03 x[0,:] = tensor([0.5948, 0.3685, 0.0367, 0.6013, 0.3710, 0.0277])
Step 285: |xk - xk_prev| = 3.64e-03 x[0,:] = tensor([0.5926, 0.3710, 0.0364, 0.5942, 0.3779, 0.0280])
Step 290: |xk - xk_prev| = 3.59e-03 x[0,:] = tensor([0.5930, 0.3753, 0.0317, 0.5894, 0.3839, 0.0267])
Step 295: |xk - xk_prev| = 3.52e-03 x[0,:] = tensor([0.5956, 0.3798, 0.0245, 0.5885, 0.3871, 0.0244])
Step 300: |xk - xk_prev| = 3.46e-03 x[0,:] = tensor([0.5994, 0.3830, 0.0176, 0.5914, 0.3865, 0.0221])
Step 305: |xk - xk_prev| = 3.43e-03 x[0,:] = tensor([0.6030, 0.3838, 0.0132, 0.5970, 0.3826, 0.0204])
Step 310: |xk - xk_prev| = 3.38e-03 x[0,:] = tensor([0.6052, 0.3821, 0.0126, 0.6031, 0.3770, 0.0199])
Step 315: |xk - xk_prev| = 3.35e-03 x[0,:] = tensor([0.6053, 0.3788, 0.0160, 0.6075, 0.3717, 0.0208])
Step 320: |xk - xk_prev| = 3.32e-03 x[0,:] = tensor([0.6034, 0.3750, 0.0217, 0.6089, 0.3685, 0.0225])
Step 325: |xk - xk_prev| = 3.29e-03 x[0,:] = tensor([0.6003, 0.3720, 0.0277, 0.6070, 0.3684, 0.0246])
Step 330: |xk - xk_prev| = 3.27e-03 x[0,:] = tensor([0.5972, 0.3710, 0.0318, 0.6027, 0.3712, 0.0261])
Step 335: |xk - xk_prev| = 3.24e-03 x[0,:] = tensor([0.5951, 0.3720, 0.0329, 0.5976, 0.3757, 0.0267])
Step 340: |xk - xk_prev| = 3.22e-03 x[0,:] = tensor([0.5947, 0.3746, 0.0307, 0.5935, 0.3803, 0.0262])
Step 345: |xk - xk_prev| = 3.18e-03 x[0,:] = tensor([0.5960, 0.3778, 0.0262, 0.5918, 0.3834, 0.0248])
Step 350: |xk - xk_prev| = 3.12e-03 x[0,:] = tensor([0.5985, 0.3804, 0.0211, 0.5929, 0.3840, 0.0231])
Step 355: |xk - xk_prev| = 3.06e-03 x[0,:] = tensor([0.6011, 0.3816, 0.0172, 0.5962, 0.3820, 0.0217])
Step 360: |xk - xk_prev| = 3.00e-03 x[0,:] = tensor([0.6031, 0.3811, 0.0158, 0.6005, 0.3785, 0.0211])
Step 365: |xk - xk_prev| = 2.94e-03 x[0,:] = tensor([0.6037, 0.3791, 0.0172, 0.6041, 0.3745, 0.0213])
Step 370: |xk - xk_prev| = 2.89e-03 x[0,:] = tensor([0.6028, 0.3765, 0.0207, 0.6059, 0.3717, 0.0224])
Step 375: |xk - xk_prev| = 2.85e-03 x[0,:] = tensor([0.6009, 0.3741, 0.0249, 0.6055, 0.3708, 0.0238])
Step 380: |xk - xk_prev| = 2.81e-03 x[0,:] = tensor([0.5987, 0.3729, 0.0284, 0.6030, 0.3720, 0.0250])
Step 385: |xk - xk_prev| = 2.77e-03 x[0,:] = tensor([0.5969, 0.3730, 0.0300, 0.5995, 0.3748, 0.0257])
Step 390: |xk - xk_prev| = 2.73e-03 x[0,:] = tensor([0.5962, 0.3745, 0.0293, 0.5963, 0.3781, 0.0256])
Step 395: |xk - xk_prev| = 2.72e-03 x[0,:] = tensor([0.5967, 0.3767, 0.0267, 0.5944, 0.3807, 0.0248])
Step 400: |xk - xk_prev| = 2.69e-03 x[0,:] = tensor([0.5981, 0.3787, 0.0232, 0.5945, 0.3818, 0.0237])
Step 405: |xk - xk_prev| = 2.67e-03 x[0,:] = tensor([0.6000, 0.3800, 0.0201, 0.5963, 0.3811, 0.0226])
Step 410: |xk - xk_prev| = 2.66e-03 x[0,:] = tensor([0.6016, 0.3800, 0.0184, 0.5991, 0.3790, 0.0220])
Step 415: |xk - xk_prev| = 2.64e-03 x[0,:] = tensor([0.6024, 0.3790, 0.0186, 0.6019, 0.3762, 0.0219])
Step 420: |xk - xk_prev| = 2.63e-03 x[0,:] = tensor([0.6021, 0.3773, 0.0206, 0.6037, 0.3739, 0.0224])
Step 425: |xk - xk_prev| = 2.61e-03 x[0,:] = tensor([0.6011, 0.3755, 0.0234, 0.6039, 0.3727, 0.0234])
Step 430: |xk - xk_prev| = 2.59e-03 x[0,:] = tensor([0.5995, 0.3743, 0.0262, 0.6027, 0.3730, 0.0243])
Step 435: |xk - xk_prev| = 2.55e-03 x[0,:] = tensor([0.5981, 0.3740, 0.0278, 0.6005, 0.3746, 0.0249])
Step 440: |xk - xk_prev| = 2.52e-03 x[0,:] = tensor([0.5973, 0.3748, 0.0279, 0.5981, 0.3769, 0.0251])
Step 445: |xk - xk_prev| = 2.49e-03 x[0,:] = tensor([0.5974, 0.3761, 0.0265, 0.5964, 0.3789, 0.0247])
Step 450: |xk - xk_prev| = 2.46e-03 x[0,:] = tensor([0.5981, 0.3776, 0.0242, 0.5959, 0.3801, 0.0240])
Step 455: |xk - xk_prev| = 2.44e-03 x[0,:] = tensor([0.5994, 0.3787, 0.0219, 0.5967, 0.3801, 0.0232])
Step 460: |xk - xk_prev| = 2.42e-03 x[0,:] = tensor([0.6006, 0.3791, 0.0203, 0.5985, 0.3789, 0.0226])
Step 465: |xk - xk_prev| = 2.40e-03 x[0,:] = tensor([0.6014, 0.3787, 0.0200, 0.6005, 0.3771, 0.0224])
Step 470: |xk - xk_prev| = 2.39e-03 x[0,:] = tensor([0.6015, 0.3776, 0.0209, 0.6021, 0.3753, 0.0226])
Step 475: |xk - xk_prev| = 2.37e-03 x[0,:] = tensor([0.6009, 0.3763, 0.0227, 0.6027, 0.3742, 0.0232])
Step 480: |xk - xk_prev| = 2.34e-03 x[0,:] = tensor([0.6000, 0.3753, 0.0247, 0.6022, 0.3740, 0.0238])
Step 485: |xk - xk_prev| = 2.31e-03 x[0,:] = tensor([0.5989, 0.3749, 0.0262, 0.6008, 0.3748, 0.0244])
Step 490: |xk - xk_prev| = 2.28e-03 x[0,:] = tensor([0.5982, 0.3751, 0.0267, 0.5991, 0.3763, 0.0246])
Step 495: |xk - xk_prev| = 2.25e-03 x[0,:] = tensor([0.5980, 0.3759, 0.0261, 0.5977, 0.3778, 0.0245])
Step 500: |xk - xk_prev| = 2.22e-03 x[0,:] = tensor([0.5983, 0.3770, 0.0247, 0.5970, 0.3789, 0.0241])
Step 505: |xk - xk_prev| = 2.20e-03 x[0,:] = tensor([0.5991, 0.3779, 0.0230, 0.5973, 0.3792, 0.0235])
Step 510: |xk - xk_prev| = 2.19e-03 x[0,:] = tensor([0.6000, 0.3784, 0.0216, 0.5983, 0.3786, 0.0230])
Step 515: |xk - xk_prev| = 2.18e-03 x[0,:] = tensor([0.6007, 0.3783, 0.0211, 0.5997, 0.3775, 0.0228])
Step 520: |xk - xk_prev| = 2.17e-03 x[0,:] = tensor([0.6009, 0.3777, 0.0214, 0.6009, 0.3762, 0.0228])
Step 525: |xk - xk_prev| = 2.16e-03 x[0,:] = tensor([0.6007, 0.3768, 0.0225, 0.6016, 0.3752, 0.0232])
Step 530: |xk - xk_prev| = 2.15e-03 x[0,:] = tensor([0.6001, 0.3760, 0.0239, 0.6016, 0.3748, 0.0236])
Step 535: |xk - xk_prev| = 2.14e-03 x[0,:] = tensor([0.5994, 0.3755, 0.0251, 0.6008, 0.3751, 0.0240])
Step 540: |xk - xk_prev| = 2.12e-03 x[0,:] = tensor([0.5988, 0.3755, 0.0257, 0.5997, 0.3760, 0.0243])
Step 545: |xk - xk_prev| = 2.09e-03 x[0,:] = tensor([0.5985, 0.3760, 0.0256, 0.5986, 0.3771, 0.0243])
Step 550: |xk - xk_prev| = 2.07e-03 x[0,:] = tensor([0.5986, 0.3767, 0.0248, 0.5979, 0.3780, 0.0241])
Step 555: |xk - xk_prev| = 2.05e-03 x[0,:] = tensor([0.5990, 0.3774, 0.0236, 0.5979, 0.3784, 0.0237])
Step 560: |xk - xk_prev| = 2.03e-03 x[0,:] = tensor([0.5996, 0.3778, 0.0226, 0.5984, 0.3783, 0.0233])
Step 565: |xk - xk_prev| = 2.01e-03 x[0,:] = tensor([0.6002, 0.3779, 0.0219, 0.5993, 0.3776, 0.0231])
Step 570: |xk - xk_prev| = 1.99e-03 x[0,:] = tensor([0.6005, 0.3776, 0.0219, 0.6002, 0.3767, 0.0230])
Step 575: |xk - xk_prev| = 1.97e-03 x[0,:] = tensor([0.6005, 0.3770, 0.0225, 0.6009, 0.3759, 0.0232])
Step 580: |xk - xk_prev| = 1.95e-03 x[0,:] = tensor([0.6001, 0.3764, 0.0234, 0.6010, 0.3755, 0.0235])
Step 585: |xk - xk_prev| = 1.94e-03 x[0,:] = tensor([0.5996, 0.3760, 0.0244, 0.6007, 0.3755, 0.0238])
Step 590: |xk - xk_prev| = 1.93e-03 x[0,:] = tensor([0.5992, 0.3759, 0.0250, 0.6000, 0.3760, 0.0240])
Step 595: |xk - xk_prev| = 1.92e-03 x[0,:] = tensor([0.5989, 0.3761, 0.0251, 0.5992, 0.3767, 0.0241])
Step 600: |xk - xk_prev| = 1.90e-03 x[0,:] = tensor([0.5988, 0.3765, 0.0247, 0.5986, 0.3774, 0.0240])
Step 605: |xk - xk_prev| = 1.89e-03 x[0,:] = tensor([0.5991, 0.3770, 0.0239, 0.5984, 0.3779, 0.0238])
Step 610: |xk - xk_prev| = 1.88e-03 x[0,:] = tensor([0.5995, 0.3774, 0.0231, 0.5986, 0.3779, 0.0235])
Step 615: |xk - xk_prev| = 1.87e-03 x[0,:] = tensor([0.5999, 0.3776, 0.0226, 0.5991, 0.3776, 0.0233])
Step 620: |xk - xk_prev| = 1.86e-03 x[0,:] = tensor([0.6002, 0.3774, 0.0224, 0.5998, 0.3770, 0.0232])
Step 625: |xk - xk_prev| = 1.84e-03 x[0,:] = tensor([0.6002, 0.3771, 0.0227, 0.6003, 0.3764, 0.0233])
Step 630: |xk - xk_prev| = 1.83e-03 x[0,:] = tensor([0.6001, 0.3767, 0.0232, 0.6006, 0.3760, 0.0234])
Step 635: |xk - xk_prev| = 1.82e-03 x[0,:] = tensor([0.5997, 0.3764, 0.0239, 0.6005, 0.3759, 0.0237])
Step 640: |xk - xk_prev| = 1.81e-03 x[0,:] = tensor([0.5994, 0.3762, 0.0244, 0.6000, 0.3761, 0.0239])
Step 645: |xk - xk_prev| = 1.80e-03 x[0,:] = tensor([0.5991, 0.3762, 0.0246, 0.5995, 0.3766, 0.0239])
Step 650: |xk - xk_prev| = 1.78e-03 x[0,:] = tensor([0.5990, 0.3765, 0.0245, 0.5990, 0.3771, 0.0239])
Step 655: |xk - xk_prev| = 1.77e-03 x[0,:] = tensor([0.5991, 0.3768, 0.0240, 0.5988, 0.3775, 0.0238])
Step 660: |xk - xk_prev| = 1.76e-03 x[0,:] = tensor([0.5994, 0.3771, 0.0235, 0.5988, 0.3776, 0.0236])
Step 665: |xk - xk_prev| = 1.75e-03 x[0,:] = tensor([0.5997, 0.3773, 0.0230, 0.5991, 0.3774, 0.0234])
Step 670: |xk - xk_prev| = 1.74e-03 x[0,:] = tensor([0.5999, 0.3773, 0.0228, 0.5996, 0.3771, 0.0233])
Step 675: |xk - xk_prev| = 1.73e-03 x[0,:] = tensor([0.6000, 0.3771, 0.0229, 0.6000, 0.3767, 0.0234])
Step 680: |xk - xk_prev| = 1.72e-03 x[0,:] = tensor([0.6000, 0.3768, 0.0232, 0.6002, 0.3763, 0.0234])
Step 685: |xk - xk_prev| = 1.71e-03 x[0,:] = tensor([0.5998, 0.3766, 0.0236, 0.6003, 0.3762, 0.0236])
Step 690: |xk - xk_prev| = 1.69e-03 x[0,:] = tensor([0.5995, 0.3764, 0.0241, 0.6000, 0.3762, 0.0237])
Step 695: |xk - xk_prev| = 1.68e-03 x[0,:] = tensor([0.5993, 0.3764, 0.0243, 0.5997, 0.3765, 0.0238])
Step 700: |xk - xk_prev| = 1.67e-03 x[0,:] = tensor([0.5992, 0.3765, 0.0243, 0.5993, 0.3769, 0.0238])
Step 705: |xk - xk_prev| = 1.66e-03 x[0,:] = tensor([0.5992, 0.3767, 0.0240, 0.5991, 0.3772, 0.0238])
Step 710: |xk - xk_prev| = 1.65e-03 x[0,:] = tensor([0.5994, 0.3770, 0.0237, 0.5990, 0.3773, 0.0237])
Step 715: |xk - xk_prev| = 1.64e-03 x[0,:] = tensor([0.5996, 0.3771, 0.0233, 0.5992, 0.3773, 0.0235])
Step 720: |xk - xk_prev| = 1.63e-03 x[0,:] = tensor([0.5998, 0.3772, 0.0231, 0.5994, 0.3771, 0.0234])
Step 725: |xk - xk_prev| = 1.62e-03 x[0,:] = tensor([0.5999, 0.3771, 0.0230, 0.5998, 0.3768, 0.0234])
Step 730: |xk - xk_prev| = 1.61e-03 x[0,:] = tensor([0.5999, 0.3769, 0.0232, 0.6000, 0.3765, 0.0235])
Step 735: |xk - xk_prev| = 1.60e-03 x[0,:] = tensor([0.5998, 0.3767, 0.0235, 0.6001, 0.3764, 0.0236])
Step 740: |xk - xk_prev| = 1.59e-03 x[0,:] = tensor([0.5996, 0.3766, 0.0238, 0.6000, 0.3764, 0.0237])
Step 745: |xk - xk_prev| = 1.59e-03 x[0,:] = tensor([0.5995, 0.3765, 0.0240, 0.5997, 0.3765, 0.0237])
Step 750: |xk - xk_prev| = 1.58e-03 x[0,:] = tensor([0.5994, 0.3766, 0.0241, 0.5995, 0.3767, 0.0238])
Step 755: |xk - xk_prev| = 1.57e-03 x[0,:] = tensor([0.5993, 0.3767, 0.0240, 0.5993, 0.3770, 0.0237])
Step 760: |xk - xk_prev| = 1.56e-03 x[0,:] = tensor([0.5994, 0.3769, 0.0237, 0.5992, 0.3771, 0.0237])
Step 765: |xk - xk_prev| = 1.55e-03 x[0,:] = tensor([0.5995, 0.3770, 0.0235, 0.5992, 0.3772, 0.0236])
Step 770: |xk - xk_prev| = 1.54e-03 x[0,:] = tensor([0.5997, 0.3771, 0.0233, 0.5994, 0.3771, 0.0235])
Step 775: |xk - xk_prev| = 1.54e-03 x[0,:] = tensor([0.5998, 0.3770, 0.0232, 0.5996, 0.3769, 0.0235])
Step 780: |xk - xk_prev| = 1.53e-03 x[0,:] = tensor([0.5998, 0.3769, 0.0233, 0.5998, 0.3767, 0.0235])
Step 785: |xk - xk_prev| = 1.53e-03 x[0,:] = tensor([0.5997, 0.3768, 0.0235, 0.5999, 0.3765, 0.0236])
Step 790: |xk - xk_prev| = 1.53e-03 x[0,:] = tensor([0.5996, 0.3767, 0.0237, 0.5999, 0.3765, 0.0236])
Step 795: |xk - xk_prev| = 1.52e-03 x[0,:] = tensor([0.5995, 0.3766, 0.0239, 0.5998, 0.3766, 0.0237])
Step 800: |xk - xk_prev| = 1.52e-03 x[0,:] = tensor([0.5994, 0.3766, 0.0239, 0.5996, 0.3767, 0.0237])
Step 805: |xk - xk_prev| = 1.51e-03 x[0,:] = tensor([0.5994, 0.3767, 0.0239, 0.5994, 0.3769, 0.0237])
Step 810: |xk - xk_prev| = 1.51e-03 x[0,:] = tensor([0.5994, 0.3768, 0.0238, 0.5993, 0.3770, 0.0237])
Step 815: |xk - xk_prev| = 1.50e-03 x[0,:] = tensor([0.5995, 0.3769, 0.0236, 0.5993, 0.3771, 0.0236])
Step 820: |xk - xk_prev| = 1.50e-03 x[0,:] = tensor([0.5996, 0.3770, 0.0234, 0.5994, 0.3770, 0.0236])
Step 825: |xk - xk_prev| = 1.49e-03 x[0,:] = tensor([0.5997, 0.3770, 0.0233, 0.5996, 0.3769, 0.0235])
Step 830: |xk - xk_prev| = 1.49e-03 x[0,:] = tensor([0.5997, 0.3769, 0.0234, 0.5997, 0.3768, 0.0235])
Step 835: |xk - xk_prev| = 1.48e-03 x[0,:] = tensor([0.5997, 0.3768, 0.0235, 0.5998, 0.3767, 0.0236])
Step 840: |xk - xk_prev| = 1.48e-03 x[0,:] = tensor([0.5997, 0.3767, 0.0236, 0.5998, 0.3766, 0.0236])
Step 845: |xk - xk_prev| = 1.47e-03 x[0,:] = tensor([0.5996, 0.3767, 0.0237, 0.5997, 0.3766, 0.0236])
Step 850: |xk - xk_prev| = 1.47e-03 x[0,:] = tensor([0.5995, 0.3767, 0.0238, 0.5996, 0.3767, 0.0237])
Step 855: |xk - xk_prev| = 1.46e-03 x[0,:] = tensor([0.5995, 0.3767, 0.0238, 0.5995, 0.3768, 0.0237])
Step 860: |xk - xk_prev| = 1.46e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0238, 0.5994, 0.3769, 0.0237])
Step 865: |xk - xk_prev| = 1.46e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0236, 0.5994, 0.3770, 0.0236])
Step 870: |xk - xk_prev| = 1.45e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0235, 0.5994, 0.3770, 0.0236])
Step 875: |xk - xk_prev| = 1.45e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0234, 0.5995, 0.3769, 0.0236])
Step 880: |xk - xk_prev| = 1.44e-03 x[0,:] = tensor([0.5997, 0.3769, 0.0234, 0.5996, 0.3768, 0.0236])
Step 885: |xk - xk_prev| = 1.44e-03 x[0,:] = tensor([0.5997, 0.3768, 0.0235, 0.5997, 0.3767, 0.0236])
Step 890: |xk - xk_prev| = 1.43e-03 x[0,:] = tensor([0.5997, 0.3768, 0.0236, 0.5997, 0.3767, 0.0236])
Step 895: |xk - xk_prev| = 1.43e-03 x[0,:] = tensor([0.5996, 0.3767, 0.0237, 0.5997, 0.3767, 0.0236])
Step 900: |xk - xk_prev| = 1.42e-03 x[0,:] = tensor([0.5995, 0.3767, 0.0237, 0.5996, 0.3767, 0.0237])
Step 905: |xk - xk_prev| = 1.42e-03 x[0,:] = tensor([0.5995, 0.3767, 0.0238, 0.5996, 0.3768, 0.0237])
Step 910: |xk - xk_prev| = 1.42e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5995, 0.3769, 0.0237])
Step 915: |xk - xk_prev| = 1.41e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5995, 0.3769, 0.0236])
Step 920: |xk - xk_prev| = 1.41e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0236, 0.5995, 0.3769, 0.0236])
Step 925: |xk - xk_prev| = 1.40e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0235, 0.5995, 0.3769, 0.0236])
Step 930: |xk - xk_prev| = 1.40e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0235, 0.5996, 0.3768, 0.0236])
Step 935: |xk - xk_prev| = 1.39e-03 x[0,:] = tensor([0.5997, 0.3768, 0.0235, 0.5997, 0.3768, 0.0236])
Step 940: |xk - xk_prev| = 1.39e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5997, 0.3767, 0.0236])
Step 945: |xk - xk_prev| = 1.38e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5997, 0.3767, 0.0236])
Step 950: |xk - xk_prev| = 1.38e-03 x[0,:] = tensor([0.5996, 0.3767, 0.0237, 0.5996, 0.3767, 0.0236])
Step 955: |xk - xk_prev| = 1.38e-03 x[0,:] = tensor([0.5995, 0.3767, 0.0237, 0.5996, 0.3768, 0.0236])
Step 960: |xk - xk_prev| = 1.37e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5995, 0.3768, 0.0236])
Step 965: |xk - xk_prev| = 1.37e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5995, 0.3769, 0.0236])
Step 970: |xk - xk_prev| = 1.36e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5995, 0.3769, 0.0236])
Step 975: |xk - xk_prev| = 1.36e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0236, 0.5995, 0.3769, 0.0236])
Step 980: |xk - xk_prev| = 1.35e-03 x[0,:] = tensor([0.5996, 0.3769, 0.0235, 0.5996, 0.3768, 0.0236])
Step 985: |xk - xk_prev| = 1.35e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0235, 0.5996, 0.3768, 0.0236])
Step 990: |xk - xk_prev| = 1.35e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 995: |xk - xk_prev| = 1.34e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5997, 0.3767, 0.0236])
Step 1000: |xk - xk_prev| = 1.34e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3767, 0.0236])
Step 1005: |xk - xk_prev| = 1.33e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1010: |xk - xk_prev| = 1.33e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1015: |xk - xk_prev| = 1.33e-03 x[0,:] = tensor([0.5995, 0.3768, 0.0237, 0.5995, 0.3768, 0.0236])
Step 1020: |xk - xk_prev| = 1.32e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5995, 0.3769, 0.0236])
Step 1025: |xk - xk_prev| = 1.32e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5995, 0.3769, 0.0236])
Step 1030: |xk - xk_prev| = 1.31e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1035: |xk - xk_prev| = 1.31e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1040: |xk - xk_prev| = 1.31e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1045: |xk - xk_prev| = 1.30e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1050: |xk - xk_prev| = 1.30e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1055: |xk - xk_prev| = 1.29e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1060: |xk - xk_prev| = 1.29e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1065: |xk - xk_prev| = 1.28e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1070: |xk - xk_prev| = 1.28e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5995, 0.3768, 0.0236])
Step 1075: |xk - xk_prev| = 1.28e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5995, 0.3768, 0.0236])
Step 1080: |xk - xk_prev| = 1.27e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1085: |xk - xk_prev| = 1.27e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1090: |xk - xk_prev| = 1.27e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1095: |xk - xk_prev| = 1.26e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1100: |xk - xk_prev| = 1.26e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1105: |xk - xk_prev| = 1.25e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1110: |xk - xk_prev| = 1.25e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0237, 0.5996, 0.3768, 0.0236])
Step 1115: |xk - xk_prev| = 1.25e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1120: |xk - xk_prev| = 1.24e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1125: |xk - xk_prev| = 1.24e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1130: |xk - xk_prev| = 1.23e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1135: |xk - xk_prev| = 1.23e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1140: |xk - xk_prev| = 1.23e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1145: |xk - xk_prev| = 1.22e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1150: |xk - xk_prev| = 1.22e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1155: |xk - xk_prev| = 1.21e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1160: |xk - xk_prev| = 1.21e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 1165: |xk - xk_prev| = 1.21e-03 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
[... steps 1170-4260 omitted: |xk - xk_prev| decays steadily (non-increasing) from 1.20e-03 to 5.40e-04, logged every 5 steps, while x[0,:] stays tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236]) to four decimal places throughout ...]
Step 4265: |xk - xk_prev| = 5.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4270: |xk - xk_prev| = 5.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4275: |xk - xk_prev| = 5.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4280: |xk - xk_prev| = 5.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4285: |xk - xk_prev| = 5.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4290: |xk - xk_prev| = 5.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4295: |xk - xk_prev| = 5.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4300: |xk - xk_prev| = 5.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4305: |xk - xk_prev| = 5.36e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4310: |xk - xk_prev| = 5.36e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4315: |xk - xk_prev| = 5.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4320: |xk - xk_prev| = 5.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4325: |xk - xk_prev| = 5.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4330: |xk - xk_prev| = 5.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4335: |xk - xk_prev| = 5.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4340: |xk - xk_prev| = 5.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4345: |xk - xk_prev| = 5.33e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4350: |xk - xk_prev| = 5.33e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4355: |xk - xk_prev| = 5.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4360: |xk - xk_prev| = 5.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4365: |xk - xk_prev| = 5.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4370: |xk - xk_prev| = 5.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4375: |xk - xk_prev| = 5.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4380: |xk - xk_prev| = 5.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4385: |xk - xk_prev| = 5.30e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4390: |xk - xk_prev| = 5.30e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4395: |xk - xk_prev| = 5.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4400: |xk - xk_prev| = 5.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4405: |xk - xk_prev| = 5.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4410: |xk - xk_prev| = 5.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4415: |xk - xk_prev| = 5.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4420: |xk - xk_prev| = 5.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4425: |xk - xk_prev| = 5.27e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4430: |xk - xk_prev| = 5.27e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4435: |xk - xk_prev| = 5.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4440: |xk - xk_prev| = 5.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4445: |xk - xk_prev| = 5.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4450: |xk - xk_prev| = 5.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4455: |xk - xk_prev| = 5.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4460: |xk - xk_prev| = 5.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4465: |xk - xk_prev| = 5.24e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4470: |xk - xk_prev| = 5.24e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4475: |xk - xk_prev| = 5.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4480: |xk - xk_prev| = 5.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4485: |xk - xk_prev| = 5.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4490: |xk - xk_prev| = 5.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4495: |xk - xk_prev| = 5.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4500: |xk - xk_prev| = 5.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4505: |xk - xk_prev| = 5.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4510: |xk - xk_prev| = 5.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4515: |xk - xk_prev| = 5.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4520: |xk - xk_prev| = 5.20e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4525: |xk - xk_prev| = 5.20e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4530: |xk - xk_prev| = 5.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4535: |xk - xk_prev| = 5.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4540: |xk - xk_prev| = 5.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4545: |xk - xk_prev| = 5.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4550: |xk - xk_prev| = 5.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4555: |xk - xk_prev| = 5.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4560: |xk - xk_prev| = 5.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4565: |xk - xk_prev| = 5.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4570: |xk - xk_prev| = 5.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4575: |xk - xk_prev| = 5.16e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4580: |xk - xk_prev| = 5.16e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4585: |xk - xk_prev| = 5.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4590: |xk - xk_prev| = 5.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4595: |xk - xk_prev| = 5.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4600: |xk - xk_prev| = 5.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4605: |xk - xk_prev| = 5.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4610: |xk - xk_prev| = 5.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4615: |xk - xk_prev| = 5.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4620: |xk - xk_prev| = 5.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4625: |xk - xk_prev| = 5.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4630: |xk - xk_prev| = 5.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4635: |xk - xk_prev| = 5.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4640: |xk - xk_prev| = 5.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4645: |xk - xk_prev| = 5.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4650: |xk - xk_prev| = 5.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4655: |xk - xk_prev| = 5.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4660: |xk - xk_prev| = 5.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4665: |xk - xk_prev| = 5.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4670: |xk - xk_prev| = 5.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4675: |xk - xk_prev| = 5.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4680: |xk - xk_prev| = 5.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4685: |xk - xk_prev| = 5.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4690: |xk - xk_prev| = 5.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4695: |xk - xk_prev| = 5.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4700: |xk - xk_prev| = 5.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4705: |xk - xk_prev| = 5.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4710: |xk - xk_prev| = 5.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4715: |xk - xk_prev| = 5.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4720: |xk - xk_prev| = 5.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4725: |xk - xk_prev| = 5.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4730: |xk - xk_prev| = 5.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4735: |xk - xk_prev| = 5.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4740: |xk - xk_prev| = 5.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4745: |xk - xk_prev| = 5.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4750: |xk - xk_prev| = 5.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4755: |xk - xk_prev| = 5.03e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4760: |xk - xk_prev| = 5.03e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4765: |xk - xk_prev| = 5.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4770: |xk - xk_prev| = 5.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4775: |xk - xk_prev| = 5.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4780: |xk - xk_prev| = 5.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4785: |xk - xk_prev| = 5.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4790: |xk - xk_prev| = 5.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4795: |xk - xk_prev| = 5.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4800: |xk - xk_prev| = 5.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4805: |xk - xk_prev| = 5.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4810: |xk - xk_prev| = 4.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4815: |xk - xk_prev| = 4.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4820: |xk - xk_prev| = 4.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4825: |xk - xk_prev| = 4.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4830: |xk - xk_prev| = 4.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4835: |xk - xk_prev| = 4.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4840: |xk - xk_prev| = 4.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4845: |xk - xk_prev| = 4.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4850: |xk - xk_prev| = 4.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4855: |xk - xk_prev| = 4.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4860: |xk - xk_prev| = 4.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4865: |xk - xk_prev| = 4.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4870: |xk - xk_prev| = 4.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4875: |xk - xk_prev| = 4.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4880: |xk - xk_prev| = 4.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4885: |xk - xk_prev| = 4.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4890: |xk - xk_prev| = 4.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4895: |xk - xk_prev| = 4.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4900: |xk - xk_prev| = 4.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4905: |xk - xk_prev| = 4.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4910: |xk - xk_prev| = 4.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4915: |xk - xk_prev| = 4.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4920: |xk - xk_prev| = 4.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4925: |xk - xk_prev| = 4.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4930: |xk - xk_prev| = 4.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4935: |xk - xk_prev| = 4.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4940: |xk - xk_prev| = 4.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4945: |xk - xk_prev| = 4.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4950: |xk - xk_prev| = 4.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4955: |xk - xk_prev| = 4.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4960: |xk - xk_prev| = 4.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4965: |xk - xk_prev| = 4.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4970: |xk - xk_prev| = 4.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4975: |xk - xk_prev| = 4.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4980: |xk - xk_prev| = 4.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4985: |xk - xk_prev| = 4.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4990: |xk - xk_prev| = 4.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 4995: |xk - xk_prev| = 4.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5000: |xk - xk_prev| = 4.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5005: |xk - xk_prev| = 4.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5010: |xk - xk_prev| = 4.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5015: |xk - xk_prev| = 4.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5020: |xk - xk_prev| = 4.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5025: |xk - xk_prev| = 4.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5030: |xk - xk_prev| = 4.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5035: |xk - xk_prev| = 4.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5040: |xk - xk_prev| = 4.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5045: |xk - xk_prev| = 4.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5050: |xk - xk_prev| = 4.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5055: |xk - xk_prev| = 4.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5060: |xk - xk_prev| = 4.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5065: |xk - xk_prev| = 4.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5070: |xk - xk_prev| = 4.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5075: |xk - xk_prev| = 4.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5080: |xk - xk_prev| = 4.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5085: |xk - xk_prev| = 4.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5090: |xk - xk_prev| = 4.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5095: |xk - xk_prev| = 4.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5100: |xk - xk_prev| = 4.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5105: |xk - xk_prev| = 4.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5110: |xk - xk_prev| = 4.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5115: |xk - xk_prev| = 4.78e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5120: |xk - xk_prev| = 4.78e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5125: |xk - xk_prev| = 4.78e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5130: |xk - xk_prev| = 4.77e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5135: |xk - xk_prev| = 4.77e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5140: |xk - xk_prev| = 4.77e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5145: |xk - xk_prev| = 4.76e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5150: |xk - xk_prev| = 4.76e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5155: |xk - xk_prev| = 4.76e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5160: |xk - xk_prev| = 4.75e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5165: |xk - xk_prev| = 4.75e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5170: |xk - xk_prev| = 4.75e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5175: |xk - xk_prev| = 4.74e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5180: |xk - xk_prev| = 4.74e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5185: |xk - xk_prev| = 4.74e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5190: |xk - xk_prev| = 4.73e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5195: |xk - xk_prev| = 4.73e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5200: |xk - xk_prev| = 4.73e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5205: |xk - xk_prev| = 4.72e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5210: |xk - xk_prev| = 4.72e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5215: |xk - xk_prev| = 4.72e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5220: |xk - xk_prev| = 4.71e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5225: |xk - xk_prev| = 4.71e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5230: |xk - xk_prev| = 4.71e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5235: |xk - xk_prev| = 4.70e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5240: |xk - xk_prev| = 4.70e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5245: |xk - xk_prev| = 4.70e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5250: |xk - xk_prev| = 4.69e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5255: |xk - xk_prev| = 4.69e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5260: |xk - xk_prev| = 4.69e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5265: |xk - xk_prev| = 4.68e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5270: |xk - xk_prev| = 4.68e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5275: |xk - xk_prev| = 4.68e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5280: |xk - xk_prev| = 4.67e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5285: |xk - xk_prev| = 4.67e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5290: |xk - xk_prev| = 4.67e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5295: |xk - xk_prev| = 4.66e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5300: |xk - xk_prev| = 4.66e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5305: |xk - xk_prev| = 4.66e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5310: |xk - xk_prev| = 4.65e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5315: |xk - xk_prev| = 4.65e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5320: |xk - xk_prev| = 4.65e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5325: |xk - xk_prev| = 4.64e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5330: |xk - xk_prev| = 4.64e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5335: |xk - xk_prev| = 4.64e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5340: |xk - xk_prev| = 4.63e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5345: |xk - xk_prev| = 4.63e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5350: |xk - xk_prev| = 4.63e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5355: |xk - xk_prev| = 4.62e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5360: |xk - xk_prev| = 4.62e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5365: |xk - xk_prev| = 4.62e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5370: |xk - xk_prev| = 4.61e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5375: |xk - xk_prev| = 4.61e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5380: |xk - xk_prev| = 4.61e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5385: |xk - xk_prev| = 4.60e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5390: |xk - xk_prev| = 4.60e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5395: |xk - xk_prev| = 4.60e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5400: |xk - xk_prev| = 4.59e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5405: |xk - xk_prev| = 4.59e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5410: |xk - xk_prev| = 4.59e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5415: |xk - xk_prev| = 4.58e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5420: |xk - xk_prev| = 4.58e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5425: |xk - xk_prev| = 4.58e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5430: |xk - xk_prev| = 4.57e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5435: |xk - xk_prev| = 4.57e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5440: |xk - xk_prev| = 4.57e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5445: |xk - xk_prev| = 4.57e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5450: |xk - xk_prev| = 4.56e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5455: |xk - xk_prev| = 4.56e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5460: |xk - xk_prev| = 4.56e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5465: |xk - xk_prev| = 4.55e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5470: |xk - xk_prev| = 4.55e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5475: |xk - xk_prev| = 4.55e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5480: |xk - xk_prev| = 4.54e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5485: |xk - xk_prev| = 4.54e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5490: |xk - xk_prev| = 4.54e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5495: |xk - xk_prev| = 4.53e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5500: |xk - xk_prev| = 4.53e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5505: |xk - xk_prev| = 4.53e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5510: |xk - xk_prev| = 4.52e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5515: |xk - xk_prev| = 4.52e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5520: |xk - xk_prev| = 4.52e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5525: |xk - xk_prev| = 4.51e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5530: |xk - xk_prev| = 4.51e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5535: |xk - xk_prev| = 4.51e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5540: |xk - xk_prev| = 4.50e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5545: |xk - xk_prev| = 4.50e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5550: |xk - xk_prev| = 4.50e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5555: |xk - xk_prev| = 4.50e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5560: |xk - xk_prev| = 4.49e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5565: |xk - xk_prev| = 4.49e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5570: |xk - xk_prev| = 4.49e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5575: |xk - xk_prev| = 4.48e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5580: |xk - xk_prev| = 4.48e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5585: |xk - xk_prev| = 4.48e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5590: |xk - xk_prev| = 4.47e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5595: |xk - xk_prev| = 4.47e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5600: |xk - xk_prev| = 4.47e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5605: |xk - xk_prev| = 4.46e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5610: |xk - xk_prev| = 4.46e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5615: |xk - xk_prev| = 4.46e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5620: |xk - xk_prev| = 4.45e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5625: |xk - xk_prev| = 4.45e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5630: |xk - xk_prev| = 4.45e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5635: |xk - xk_prev| = 4.44e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5640: |xk - xk_prev| = 4.44e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5645: |xk - xk_prev| = 4.44e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5650: |xk - xk_prev| = 4.44e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5655: |xk - xk_prev| = 4.43e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5660: |xk - xk_prev| = 4.43e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5665: |xk - xk_prev| = 4.43e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5670: |xk - xk_prev| = 4.42e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5675: |xk - xk_prev| = 4.42e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5680: |xk - xk_prev| = 4.42e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5685: |xk - xk_prev| = 4.41e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5690: |xk - xk_prev| = 4.41e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5695: |xk - xk_prev| = 4.41e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5700: |xk - xk_prev| = 4.40e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5705: |xk - xk_prev| = 4.40e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5710: |xk - xk_prev| = 4.40e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5715: |xk - xk_prev| = 4.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5720: |xk - xk_prev| = 4.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5725: |xk - xk_prev| = 4.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5730: |xk - xk_prev| = 4.39e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5735: |xk - xk_prev| = 4.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5740: |xk - xk_prev| = 4.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5745: |xk - xk_prev| = 4.38e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5750: |xk - xk_prev| = 4.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5755: |xk - xk_prev| = 4.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5760: |xk - xk_prev| = 4.37e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5765: |xk - xk_prev| = 4.36e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5770: |xk - xk_prev| = 4.36e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5775: |xk - xk_prev| = 4.36e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5780: |xk - xk_prev| = 4.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5785: |xk - xk_prev| = 4.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5790: |xk - xk_prev| = 4.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5795: |xk - xk_prev| = 4.35e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5800: |xk - xk_prev| = 4.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5805: |xk - xk_prev| = 4.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5810: |xk - xk_prev| = 4.34e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5815: |xk - xk_prev| = 4.33e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5820: |xk - xk_prev| = 4.33e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5825: |xk - xk_prev| = 4.33e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5830: |xk - xk_prev| = 4.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5835: |xk - xk_prev| = 4.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5840: |xk - xk_prev| = 4.32e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5845: |xk - xk_prev| = 4.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5850: |xk - xk_prev| = 4.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5855: |xk - xk_prev| = 4.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5860: |xk - xk_prev| = 4.31e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5865: |xk - xk_prev| = 4.30e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5870: |xk - xk_prev| = 4.30e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5875: |xk - xk_prev| = 4.30e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5880: |xk - xk_prev| = 4.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5885: |xk - xk_prev| = 4.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5890: |xk - xk_prev| = 4.29e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5895: |xk - xk_prev| = 4.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5900: |xk - xk_prev| = 4.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5905: |xk - xk_prev| = 4.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5910: |xk - xk_prev| = 4.28e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5915: |xk - xk_prev| = 4.27e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5920: |xk - xk_prev| = 4.27e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5925: |xk - xk_prev| = 4.27e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5930: |xk - xk_prev| = 4.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5935: |xk - xk_prev| = 4.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5940: |xk - xk_prev| = 4.26e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5945: |xk - xk_prev| = 4.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5950: |xk - xk_prev| = 4.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5955: |xk - xk_prev| = 4.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5960: |xk - xk_prev| = 4.25e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5965: |xk - xk_prev| = 4.24e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5970: |xk - xk_prev| = 4.24e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5975: |xk - xk_prev| = 4.24e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5980: |xk - xk_prev| = 4.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5985: |xk - xk_prev| = 4.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5990: |xk - xk_prev| = 4.23e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 5995: |xk - xk_prev| = 4.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6000: |xk - xk_prev| = 4.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6005: |xk - xk_prev| = 4.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6010: |xk - xk_prev| = 4.22e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6015: |xk - xk_prev| = 4.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6020: |xk - xk_prev| = 4.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6025: |xk - xk_prev| = 4.21e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6030: |xk - xk_prev| = 4.20e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6035: |xk - xk_prev| = 4.20e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6040: |xk - xk_prev| = 4.20e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6045: |xk - xk_prev| = 4.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6050: |xk - xk_prev| = 4.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6055: |xk - xk_prev| = 4.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6060: |xk - xk_prev| = 4.19e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6065: |xk - xk_prev| = 4.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6070: |xk - xk_prev| = 4.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6075: |xk - xk_prev| = 4.18e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6080: |xk - xk_prev| = 4.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6085: |xk - xk_prev| = 4.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6090: |xk - xk_prev| = 4.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6095: |xk - xk_prev| = 4.17e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6100: |xk - xk_prev| = 4.16e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6105: |xk - xk_prev| = 4.16e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6110: |xk - xk_prev| = 4.16e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6115: |xk - xk_prev| = 4.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6120: |xk - xk_prev| = 4.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6125: |xk - xk_prev| = 4.15e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6130: |xk - xk_prev| = 4.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6135: |xk - xk_prev| = 4.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6140: |xk - xk_prev| = 4.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6145: |xk - xk_prev| = 4.14e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6150: |xk - xk_prev| = 4.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6155: |xk - xk_prev| = 4.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6160: |xk - xk_prev| = 4.13e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6165: |xk - xk_prev| = 4.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6170: |xk - xk_prev| = 4.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6175: |xk - xk_prev| = 4.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6180: |xk - xk_prev| = 4.12e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6185: |xk - xk_prev| = 4.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6190: |xk - xk_prev| = 4.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6195: |xk - xk_prev| = 4.11e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6200: |xk - xk_prev| = 4.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6205: |xk - xk_prev| = 4.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6210: |xk - xk_prev| = 4.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6215: |xk - xk_prev| = 4.10e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6220: |xk - xk_prev| = 4.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6225: |xk - xk_prev| = 4.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6230: |xk - xk_prev| = 4.09e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6235: |xk - xk_prev| = 4.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6240: |xk - xk_prev| = 4.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6245: |xk - xk_prev| = 4.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6250: |xk - xk_prev| = 4.08e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6255: |xk - xk_prev| = 4.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6260: |xk - xk_prev| = 4.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6265: |xk - xk_prev| = 4.07e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6270: |xk - xk_prev| = 4.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6275: |xk - xk_prev| = 4.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6280: |xk - xk_prev| = 4.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6285: |xk - xk_prev| = 4.06e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6290: |xk - xk_prev| = 4.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6295: |xk - xk_prev| = 4.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6300: |xk - xk_prev| = 4.05e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6305: |xk - xk_prev| = 4.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6310: |xk - xk_prev| = 4.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6315: |xk - xk_prev| = 4.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6320: |xk - xk_prev| = 4.04e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6325: |xk - xk_prev| = 4.03e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6330: |xk - xk_prev| = 4.03e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6335: |xk - xk_prev| = 4.03e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6340: |xk - xk_prev| = 4.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6345: |xk - xk_prev| = 4.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6350: |xk - xk_prev| = 4.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6355: |xk - xk_prev| = 4.02e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6360: |xk - xk_prev| = 4.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6365: |xk - xk_prev| = 4.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6370: |xk - xk_prev| = 4.01e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6375: |xk - xk_prev| = 4.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6380: |xk - xk_prev| = 4.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6385: |xk - xk_prev| = 4.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6390: |xk - xk_prev| = 4.00e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6395: |xk - xk_prev| = 3.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6400: |xk - xk_prev| = 3.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6405: |xk - xk_prev| = 3.99e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6410: |xk - xk_prev| = 3.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6415: |xk - xk_prev| = 3.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6420: |xk - xk_prev| = 3.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6425: |xk - xk_prev| = 3.98e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6430: |xk - xk_prev| = 3.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6435: |xk - xk_prev| = 3.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6440: |xk - xk_prev| = 3.97e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6445: |xk - xk_prev| = 3.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6450: |xk - xk_prev| = 3.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6455: |xk - xk_prev| = 3.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6460: |xk - xk_prev| = 3.96e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6465: |xk - xk_prev| = 3.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6470: |xk - xk_prev| = 3.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6475: |xk - xk_prev| = 3.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6480: |xk - xk_prev| = 3.95e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6485: |xk - xk_prev| = 3.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6490: |xk - xk_prev| = 3.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6495: |xk - xk_prev| = 3.94e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6500: |xk - xk_prev| = 3.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6505: |xk - xk_prev| = 3.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6510: |xk - xk_prev| = 3.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6515: |xk - xk_prev| = 3.93e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6520: |xk - xk_prev| = 3.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6525: |xk - xk_prev| = 3.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6530: |xk - xk_prev| = 3.92e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6535: |xk - xk_prev| = 3.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6540: |xk - xk_prev| = 3.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6545: |xk - xk_prev| = 3.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6550: |xk - xk_prev| = 3.91e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6555: |xk - xk_prev| = 3.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6560: |xk - xk_prev| = 3.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6565: |xk - xk_prev| = 3.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6570: |xk - xk_prev| = 3.90e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6575: |xk - xk_prev| = 3.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6580: |xk - xk_prev| = 3.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6585: |xk - xk_prev| = 3.89e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6590: |xk - xk_prev| = 3.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6595: |xk - xk_prev| = 3.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6600: |xk - xk_prev| = 3.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6605: |xk - xk_prev| = 3.88e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6610: |xk - xk_prev| = 3.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6615: |xk - xk_prev| = 3.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6620: |xk - xk_prev| = 3.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6625: |xk - xk_prev| = 3.87e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6630: |xk - xk_prev| = 3.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6635: |xk - xk_prev| = 3.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6640: |xk - xk_prev| = 3.86e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6645: |xk - xk_prev| = 3.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6650: |xk - xk_prev| = 3.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6655: |xk - xk_prev| = 3.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6660: |xk - xk_prev| = 3.85e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6665: |xk - xk_prev| = 3.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6670: |xk - xk_prev| = 3.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6675: |xk - xk_prev| = 3.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6680: |xk - xk_prev| = 3.84e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6685: |xk - xk_prev| = 3.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6690: |xk - xk_prev| = 3.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 6695: |xk - xk_prev| = 3.83e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
[... 619 similar log lines elided: the residual |xk - xk_prev| decreases monotonically from 3.83e-04 at step 6690 to 2.82e-04 at step 9800 (roughly 3.67e-04 at step 7000, 3.31e-04 at step 8000, 2.98e-04 at step 9000), while x[0,:] remains tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236]) at the printed precision throughout ...]
Step 9795: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9800: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9805: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9810: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9815: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9820: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9825: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9830: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9835: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9840: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9845: |xk - xk_prev| = 2.82e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9850: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9855: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9860: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9865: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9870: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9875: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9880: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9885: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9890: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9895: |xk - xk_prev| = 2.81e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9900: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9905: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9910: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9915: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9920: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9925: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9930: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9935: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9940: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9945: |xk - xk_prev| = 2.80e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9950: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9955: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9960: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9965: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9970: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9975: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9980: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9985: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9990: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 9995: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
Step 10000: |xk - xk_prev| = 2.79e-04 x[0,:] = tensor([0.5996, 0.3768, 0.0236, 0.5996, 0.3768, 0.0236])
## Define N-FPN for Rock Paper Scissors
```
import torch
import torch.nn as nn

# Type aliases for the annotations below (presumably defined earlier in
# the notebook).
action = torch.Tensor
context = torch.Tensor

class RPS_Net(nn.Module):
    def __init__(self, action_size=6, context_size=3):
        super(RPS_Net, self).__init__()
        self.fc_1 = nn.Linear(action_size + context_size, 5 * action_size)
        self.fc_2 = nn.Linear(5 * action_size, action_size)
        self.leaky_relu = nn.LeakyReLU(0.1)
        self.action_size = action_size

    def device(self) -> torch.device:
        return next(self.parameters()).device

    def F(self, x: action, d: context) -> action:
        # Residual two-layer map of the concatenated (action, context) pair.
        xd = torch.cat((x, d), dim=1)
        return x + self.fc_2(self.leaky_relu(self.fc_1(xd)))

    def project_simplex(self, y: action, action_size=3, num_players=2) -> action:
        # Project each player's block of y onto the probability simplex
        # using the standard sorting-based algorithm.
        num_samples = y.shape[0]
        proj = torch.zeros_like(y)
        for i in range(num_players):
            ind = [i * action_size + j for j in range(action_size)]
            u = torch.sort(y[:, ind], dim=1, descending=True)[0]
            u_sum = torch.cumsum(u, dim=1)
            j = torch.arange(1, action_size + 1, dtype=y.dtype, device=y.device)
            pos_u_expr = (u * j + 1.0 - u_sum > 0).float()
            rho = torch.sum(pos_u_expr, dim=1, keepdim=True).long()
            lambd = torch.stack([(1 - u_sum[sample, rho[sample] - 1]) / rho[sample]
                                 for sample in range(num_samples)])  # shape (N, 1)
            proj[:, ind] = torch.clamp(y[:, ind] + lambd, min=0)
        return proj

    def forward(self, d: context, fxd_pt_tol=1.0e-5, max_depth=100,
                depth_warning=False) -> action:
        # Iterate the projected map x <- P(x - F(x, d)) without gradient
        # tracking until every sample's residual is below fxd_pt_tol, then
        # (in training mode) take one final differentiable step so that
        # gradients attach only to the last iteration.
        with torch.no_grad():
            self.depth = 0.0
            x = torch.zeros((d.shape[0], self.action_size), device=self.device())
            x_prev = float('inf') * torch.ones(x.shape, device=self.device())
            all_samp_conv = False
            while not all_samp_conv and self.depth < max_depth:
                x_prev = x.clone()
                x = self.project_simplex(x - self.F(x, d))
                res_norm = torch.max(torch.norm(x - x_prev, dim=1))
                self.depth += 1.0
                all_samp_conv = res_norm <= fxd_pt_tol
            if self.depth >= max_depth and depth_warning:
                print("\nWarning: Max Depth Reached - Break Forward Loop\n")

        attach_gradients = self.training
        return self.project_simplex(x - self.F(x, d)) if attach_gradients else x
```
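As a quick sanity check (a minimal sketch, not part of the original notebook), each player's block of the projection should land on the probability simplex, i.e. have nonnegative entries that sum to one:
```
net = RPS_Net()
y = torch.randn(4, 6)  # 4 samples, 2 players with 3 actions each
p = net.project_simplex(y)
for i in range(2):
    block = p[:, 3 * i: 3 * (i + 1)]  # player i's mixed strategy
    assert torch.all(block >= 0)
    assert torch.allclose(block.sum(dim=1), torch.ones(4), atol=1e-5)
```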
## Create Training Setup
```
import torch.optim as optim

model = RPS_Net()
learning_rate = 1e-3
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
fxd_pt_tol = 1.0e-6
criterion = nn.MSELoss()
max_epochs = 1000
save_str = 'NFPN_RPS_data.pth'

def num_params(model):
    # Count trainable parameters.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print("Trainable Parameters: ", num_params(model))
print(model)
```
Trainable Parameters: 486
RPS_Net(
(fc_1): Linear(in_features=9, out_features=30, bias=True)
(fc_2): Linear(in_features=30, out_features=6, bias=True)
(leaky_relu): LeakyReLU(negative_slope=0.1)
)
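The printed count checks out: `fc_1` contributes 9 × 30 + 30 = 300 weights and biases, and `fc_2` contributes 30 × 6 + 6 = 186, for a total of 300 + 186 = 486 trainable parameters.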
## Train the Network
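The loop below assumes `train_loader` and `test_loader` were constructed earlier in the notebook. A minimal sketch of how such loaders might be built from precomputed (action, context) pairs — the tensor names `x_train`, `d_train`, `x_test`, `d_test` and the batch size are hypothetical:
```
from torch.utils.data import TensorDataset, DataLoader

# Hypothetical tensors: x_* hold target equilibrium actions (N x 6) and
# d_* hold the corresponding contexts (N x 3).
train_loader = DataLoader(TensorDataset(x_train, d_train),
                          batch_size=100, shuffle=True)
test_loader = DataLoader(TensorDataset(x_test, d_test),
                         batch_size=x_test.shape[0])
```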
```
import time

test_loss_hist = []
train_loss_hist = []
depth_hist = []
train_loss_ave = 0

fmt = '[{:4d}/{:4d}]: train loss = {:7.3e} | test_loss = {:7.3e} | depth '
fmt += '= {:5.1f} | lr = {:5.1e} | fxt_pt_tol = {:5.1e} | time = {:4.1f} sec'

print('\nTraining Fixed Point Network')
for epoch in range(max_epochs):
    start_time = time.time()
    # Training pass: one forward fixed-point solve + one backprop step per batch.
    for x_batch, d_batch in train_loader:
        model.train()
        optimizer.zero_grad()
        x_pred = model(d_batch, fxd_pt_tol=fxd_pt_tol)
        loss = criterion(x_pred, x_batch)
        train_loss_ave = 0.95 * train_loss_ave + 0.05 * loss.item()  # EMA of train loss
        loss.backward()
        optimizer.step()
    # Evaluation pass on the held-out set.
    model.eval()
    for x_batch, d_batch in test_loader:
        with torch.no_grad():
            x_pred = model(d_batch, fxd_pt_tol=fxd_pt_tol)
            test_loss = criterion(x_pred, x_batch)
    time_epoch = time.time() - start_time
    print(fmt.format(epoch + 1, max_epochs, train_loss_ave, test_loss.item(),
                     model.depth, optimizer.param_groups[0]['lr'], fxd_pt_tol,
                     time_epoch))
    test_loss_hist.append(test_loss.item())
    train_loss_hist.append(loss.item())
    depth_hist.append(model.depth)
    # Checkpoint every 10 epochs and at the end.
    if epoch % 10 == 0 or epoch == max_epochs - 1:
        state = {
            'fxd_pt_tol': fxd_pt_tol,
            'T_state_dict': model.state_dict(),
            'test_loss_hist': test_loss_hist,
            'train_loss_hist': train_loss_hist,
            'depth_hist': depth_hist
        }
        torch.save(state, save_str)
```
Training Fixed Point Network
[ 1/1000]: train loss = 8.473e-03 | test_loss = 3.118e-02 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 2/1000]: train loss = 1.275e-02 | test_loss = 2.256e-02 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 3/1000]: train loss = 1.429e-02 | test_loss = 1.587e-02 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 4/1000]: train loss = 1.413e-02 | test_loss = 1.077e-02 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 5/1000]: train loss = 1.299e-02 | test_loss = 6.964e-03 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 6/1000]: train loss = 1.136e-02 | test_loss = 4.261e-03 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.1 sec
[ 7/1000]: train loss = 9.587e-03 | test_loss = 2.497e-03 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 8/1000]: train loss = 7.895e-03 | test_loss = 1.482e-03 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 9/1000]: train loss = 6.408e-03 | test_loss = 9.918e-04 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 10/1000]: train loss = 5.173e-03 | test_loss = 7.896e-04 | depth = 13.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[  50/1000]: train loss = 1.072e-04 | test_loss = 9.213e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 100/1000]: train loss = 4.102e-05 | test_loss = 3.758e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 150/1000]: train loss = 3.370e-05 | test_loss = 3.042e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 200/1000]: train loss = 2.891e-05 | test_loss = 2.522e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 250/1000]: train loss = 2.337e-05 | test_loss = 1.837e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
...
[ 300/1000]: train loss = 1.668e-05 | test_loss = 1.311e-05 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 350/1000]: train loss = 1.329e-05 | test_loss = 9.608e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
...
[ 377/1000]: train loss = 1.245e-05 | test_loss = 8.720e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 378/1000]: train loss = 1.242e-05 | test_loss = 9.080e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.1 sec
[ 379/1000]: train loss = 1.235e-05 | test_loss = 8.809e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 380/1000]: train loss = 1.234e-05 | test_loss = 8.487e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 381/1000]: train loss = 1.237e-05 | test_loss = 9.931e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 382/1000]: train loss = 1.231e-05 | test_loss = 8.466e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 383/1000]: train loss = 1.232e-05 | test_loss = 9.577e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 384/1000]: train loss = 1.228e-05 | test_loss = 8.406e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 385/1000]: train loss = 1.224e-05 | test_loss = 9.260e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 386/1000]: train loss = 1.218e-05 | test_loss = 8.375e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 387/1000]: train loss = 1.215e-05 | test_loss = 8.546e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 388/1000]: train loss = 1.216e-05 | test_loss = 8.859e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 389/1000]: train loss = 1.213e-05 | test_loss = 8.319e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 390/1000]: train loss = 1.211e-05 | test_loss = 8.573e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 391/1000]: train loss = 1.207e-05 | test_loss = 8.732e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 392/1000]: train loss = 1.204e-05 | test_loss = 8.961e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 393/1000]: train loss = 1.201e-05 | test_loss = 8.412e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 394/1000]: train loss = 1.195e-05 | test_loss = 8.423e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 395/1000]: train loss = 1.191e-05 | test_loss = 8.626e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 396/1000]: train loss = 1.191e-05 | test_loss = 8.121e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 397/1000]: train loss = 1.187e-05 | test_loss = 8.815e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 398/1000]: train loss = 1.183e-05 | test_loss = 8.129e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 399/1000]: train loss = 1.183e-05 | test_loss = 8.584e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 400/1000]: train loss = 1.179e-05 | test_loss = 8.231e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 401/1000]: train loss = 1.175e-05 | test_loss = 8.139e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 402/1000]: train loss = 1.172e-05 | test_loss = 8.553e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 403/1000]: train loss = 1.170e-05 | test_loss = 8.052e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 404/1000]: train loss = 1.170e-05 | test_loss = 8.273e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.1 sec
[ 405/1000]: train loss = 1.167e-05 | test_loss = 8.625e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.1 sec
[ 406/1000]: train loss = 1.163e-05 | test_loss = 8.091e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 407/1000]: train loss = 1.159e-05 | test_loss = 8.436e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 408/1000]: train loss = 1.155e-05 | test_loss = 8.102e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 409/1000]: train loss = 1.151e-05 | test_loss = 8.554e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 410/1000]: train loss = 1.149e-05 | test_loss = 7.718e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 411/1000]: train loss = 1.147e-05 | test_loss = 8.210e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 412/1000]: train loss = 1.147e-05 | test_loss = 7.879e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 413/1000]: train loss = 1.143e-05 | test_loss = 8.489e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 414/1000]: train loss = 1.139e-05 | test_loss = 7.716e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 415/1000]: train loss = 1.138e-05 | test_loss = 8.387e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 416/1000]: train loss = 1.139e-05 | test_loss = 8.453e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 417/1000]: train loss = 1.138e-05 | test_loss = 7.946e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 418/1000]: train loss = 1.138e-05 | test_loss = 7.640e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 419/1000]: train loss = 1.140e-05 | test_loss = 8.348e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 420/1000]: train loss = 1.139e-05 | test_loss = 8.127e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 421/1000]: train loss = 1.137e-05 | test_loss = 7.504e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 422/1000]: train loss = 1.137e-05 | test_loss = 9.074e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 423/1000]: train loss = 1.138e-05 | test_loss = 7.615e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 424/1000]: train loss = 1.138e-05 | test_loss = 7.545e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 425/1000]: train loss = 1.138e-05 | test_loss = 8.676e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 426/1000]: train loss = 1.130e-05 | test_loss = 7.293e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 427/1000]: train loss = 1.132e-05 | test_loss = 8.260e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 428/1000]: train loss = 1.128e-05 | test_loss = 8.051e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 429/1000]: train loss = 1.125e-05 | test_loss = 7.344e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 430/1000]: train loss = 1.123e-05 | test_loss = 9.057e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 431/1000]: train loss = 1.123e-05 | test_loss = 7.283e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 432/1000]: train loss = 1.117e-05 | test_loss = 7.826e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 433/1000]: train loss = 1.112e-05 | test_loss = 7.916e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 434/1000]: train loss = 1.110e-05 | test_loss = 7.302e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 435/1000]: train loss = 1.109e-05 | test_loss = 7.812e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 436/1000]: train loss = 1.107e-05 | test_loss = 7.833e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 437/1000]: train loss = 1.105e-05 | test_loss = 7.137e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 438/1000]: train loss = 1.107e-05 | test_loss = 8.867e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 439/1000]: train loss = 1.109e-05 | test_loss = 7.242e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 440/1000]: train loss = 1.105e-05 | test_loss = 7.246e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 441/1000]: train loss = 1.102e-05 | test_loss = 7.988e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 442/1000]: train loss = 1.097e-05 | test_loss = 7.131e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 443/1000]: train loss = 1.093e-05 | test_loss = 7.693e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 444/1000]: train loss = 1.087e-05 | test_loss = 7.082e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 445/1000]: train loss = 1.083e-05 | test_loss = 7.602e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 446/1000]: train loss = 1.082e-05 | test_loss = 7.439e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 447/1000]: train loss = 1.083e-05 | test_loss = 7.471e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 448/1000]: train loss = 1.080e-05 | test_loss = 7.372e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 449/1000]: train loss = 1.076e-05 | test_loss = 7.163e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 450/1000]: train loss = 1.074e-05 | test_loss = 7.877e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 451/1000]: train loss = 1.072e-05 | test_loss = 6.992e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 452/1000]: train loss = 1.074e-05 | test_loss = 7.106e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 453/1000]: train loss = 1.073e-05 | test_loss = 7.710e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 454/1000]: train loss = 1.070e-05 | test_loss = 7.585e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 455/1000]: train loss = 1.065e-05 | test_loss = 6.878e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 456/1000]: train loss = 1.064e-05 | test_loss = 7.844e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 457/1000]: train loss = 1.062e-05 | test_loss = 6.908e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 458/1000]: train loss = 1.061e-05 | test_loss = 7.208e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 459/1000]: train loss = 1.058e-05 | test_loss = 7.508e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 460/1000]: train loss = 1.056e-05 | test_loss = 6.969e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 461/1000]: train loss = 1.056e-05 | test_loss = 7.058e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 462/1000]: train loss = 1.056e-05 | test_loss = 7.984e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 463/1000]: train loss = 1.052e-05 | test_loss = 6.780e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 464/1000]: train loss = 1.048e-05 | test_loss = 7.767e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 465/1000]: train loss = 1.048e-05 | test_loss = 6.754e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 466/1000]: train loss = 1.054e-05 | test_loss = 8.435e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 467/1000]: train loss = 1.054e-05 | test_loss = 7.093e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 468/1000]: train loss = 1.055e-05 | test_loss = 6.784e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 469/1000]: train loss = 1.059e-05 | test_loss = 8.184e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 470/1000]: train loss = 1.056e-05 | test_loss = 7.032e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 471/1000]: train loss = 1.048e-05 | test_loss = 6.747e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 472/1000]: train loss = 1.043e-05 | test_loss = 7.630e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 473/1000]: train loss = 1.040e-05 | test_loss = 6.767e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 474/1000]: train loss = 1.039e-05 | test_loss = 6.712e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 475/1000]: train loss = 1.035e-05 | test_loss = 7.101e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 476/1000]: train loss = 1.034e-05 | test_loss = 7.403e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 477/1000]: train loss = 1.031e-05 | test_loss = 6.636e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 478/1000]: train loss = 1.028e-05 | test_loss = 7.493e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 479/1000]: train loss = 1.029e-05 | test_loss = 7.084e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 480/1000]: train loss = 1.029e-05 | test_loss = 6.819e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 481/1000]: train loss = 1.024e-05 | test_loss = 7.206e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 482/1000]: train loss = 1.018e-05 | test_loss = 6.789e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 483/1000]: train loss = 1.014e-05 | test_loss = 6.932e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 484/1000]: train loss = 1.011e-05 | test_loss = 6.874e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 485/1000]: train loss = 1.010e-05 | test_loss = 6.780e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 486/1000]: train loss = 1.011e-05 | test_loss = 6.976e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 487/1000]: train loss = 1.008e-05 | test_loss = 6.882e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 488/1000]: train loss = 1.007e-05 | test_loss = 6.992e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 489/1000]: train loss = 1.007e-05 | test_loss = 6.528e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 490/1000]: train loss = 1.009e-05 | test_loss = 6.754e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 491/1000]: train loss = 1.009e-05 | test_loss = 7.774e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 492/1000]: train loss = 1.011e-05 | test_loss = 6.745e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 493/1000]: train loss = 1.010e-05 | test_loss = 6.552e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 494/1000]: train loss = 1.008e-05 | test_loss = 7.117e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 495/1000]: train loss = 1.008e-05 | test_loss = 6.489e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 496/1000]: train loss = 1.009e-05 | test_loss = 7.031e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 497/1000]: train loss = 1.009e-05 | test_loss = 7.395e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 498/1000]: train loss = 1.006e-05 | test_loss = 6.621e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 499/1000]: train loss = 1.003e-05 | test_loss = 6.731e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 500/1000]: train loss = 9.973e-06 | test_loss = 6.928e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 501/1000]: train loss = 9.947e-06 | test_loss = 6.410e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 502/1000]: train loss = 9.906e-06 | test_loss = 6.805e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 503/1000]: train loss = 9.866e-06 | test_loss = 6.350e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 504/1000]: train loss = 9.883e-06 | test_loss = 7.001e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 505/1000]: train loss = 9.867e-06 | test_loss = 6.784e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 506/1000]: train loss = 9.854e-06 | test_loss = 6.521e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 507/1000]: train loss = 9.863e-06 | test_loss = 6.554e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 508/1000]: train loss = 9.823e-06 | test_loss = 6.548e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 509/1000]: train loss = 9.806e-06 | test_loss = 6.544e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 510/1000]: train loss = 9.769e-06 | test_loss = 6.699e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 511/1000]: train loss = 9.756e-06 | test_loss = 6.800e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 512/1000]: train loss = 9.789e-06 | test_loss = 7.129e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 513/1000]: train loss = 9.799e-06 | test_loss = 6.289e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 514/1000]: train loss = 9.790e-06 | test_loss = 6.813e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 515/1000]: train loss = 9.784e-06 | test_loss = 6.394e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 516/1000]: train loss = 9.753e-06 | test_loss = 6.639e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 517/1000]: train loss = 9.700e-06 | test_loss = 6.516e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 518/1000]: train loss = 9.680e-06 | test_loss = 6.606e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 519/1000]: train loss = 9.650e-06 | test_loss = 6.296e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 520/1000]: train loss = 9.652e-06 | test_loss = 6.459e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 521/1000]: train loss = 9.642e-06 | test_loss = 6.642e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 522/1000]: train loss = 9.633e-06 | test_loss = 6.854e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 523/1000]: train loss = 9.652e-06 | test_loss = 6.239e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 524/1000]: train loss = 9.705e-06 | test_loss = 6.596e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 525/1000]: train loss = 9.693e-06 | test_loss = 7.069e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 526/1000]: train loss = 9.696e-06 | test_loss = 6.441e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 527/1000]: train loss = 9.663e-06 | test_loss = 6.286e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 528/1000]: train loss = 9.610e-06 | test_loss = 7.209e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 529/1000]: train loss = 9.644e-06 | test_loss = 6.418e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 530/1000]: train loss = 9.653e-06 | test_loss = 6.177e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 531/1000]: train loss = 9.644e-06 | test_loss = 7.116e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 532/1000]: train loss = 9.619e-06 | test_loss = 6.343e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 533/1000]: train loss = 9.640e-06 | test_loss = 6.278e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 534/1000]: train loss = 9.683e-06 | test_loss = 7.027e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 535/1000]: train loss = 9.659e-06 | test_loss = 6.692e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 536/1000]: train loss = 9.621e-06 | test_loss = 6.691e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 537/1000]: train loss = 9.532e-06 | test_loss = 6.385e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 538/1000]: train loss = 9.489e-06 | test_loss = 6.208e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 539/1000]: train loss = 9.446e-06 | test_loss = 6.331e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 540/1000]: train loss = 9.435e-06 | test_loss = 6.720e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 541/1000]: train loss = 9.422e-06 | test_loss = 6.141e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 542/1000]: train loss = 9.433e-06 | test_loss = 6.957e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 543/1000]: train loss = 9.416e-06 | test_loss = 6.272e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 544/1000]: train loss = 9.364e-06 | test_loss = 6.521e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 545/1000]: train loss = 9.342e-06 | test_loss = 6.216e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 546/1000]: train loss = 9.332e-06 | test_loss = 7.112e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 547/1000]: train loss = 9.337e-06 | test_loss = 6.481e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 548/1000]: train loss = 9.361e-06 | test_loss = 6.196e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 549/1000]: train loss = 9.389e-06 | test_loss = 6.506e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 550/1000]: train loss = 9.370e-06 | test_loss = 7.451e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 551/1000]: train loss = 9.393e-06 | test_loss = 6.416e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 552/1000]: train loss = 9.369e-06 | test_loss = 7.154e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 553/1000]: train loss = 9.355e-06 | test_loss = 6.417e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 554/1000]: train loss = 9.326e-06 | test_loss = 6.191e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 555/1000]: train loss = 9.305e-06 | test_loss = 6.984e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 556/1000]: train loss = 9.273e-06 | test_loss = 6.426e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 557/1000]: train loss = 9.214e-06 | test_loss = 6.453e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 558/1000]: train loss = 9.179e-06 | test_loss = 6.220e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 559/1000]: train loss = 9.200e-06 | test_loss = 6.548e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 560/1000]: train loss = 9.172e-06 | test_loss = 6.475e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 561/1000]: train loss = 9.145e-06 | test_loss = 6.155e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 562/1000]: train loss = 9.135e-06 | test_loss = 6.243e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 563/1000]: train loss = 9.105e-06 | test_loss = 6.612e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 564/1000]: train loss = 9.084e-06 | test_loss = 6.483e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 565/1000]: train loss = 9.037e-06 | test_loss = 6.074e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 566/1000]: train loss = 8.996e-06 | test_loss = 6.587e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 567/1000]: train loss = 8.966e-06 | test_loss = 6.134e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 568/1000]: train loss = 8.956e-06 | test_loss = 6.473e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 569/1000]: train loss = 8.923e-06 | test_loss = 6.114e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 570/1000]: train loss = 8.898e-06 | test_loss = 6.044e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 571/1000]: train loss = 8.857e-06 | test_loss = 6.030e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 572/1000]: train loss = 8.888e-06 | test_loss = 6.280e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 573/1000]: train loss = 8.911e-06 | test_loss = 5.953e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 574/1000]: train loss = 8.871e-06 | test_loss = 6.145e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 575/1000]: train loss = 8.856e-06 | test_loss = 6.439e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 576/1000]: train loss = 8.857e-06 | test_loss = 5.979e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 577/1000]: train loss = 8.872e-06 | test_loss = 5.802e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 578/1000]: train loss = 8.922e-06 | test_loss = 6.690e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 579/1000]: train loss = 8.923e-06 | test_loss = 6.276e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 580/1000]: train loss = 8.961e-06 | test_loss = 5.930e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 581/1000]: train loss = 8.970e-06 | test_loss = 6.035e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 582/1000]: train loss = 8.903e-06 | test_loss = 6.088e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 583/1000]: train loss = 8.848e-06 | test_loss = 6.234e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 584/1000]: train loss = 8.789e-06 | test_loss = 5.908e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 585/1000]: train loss = 8.770e-06 | test_loss = 6.215e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 586/1000]: train loss = 8.752e-06 | test_loss = 5.824e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 587/1000]: train loss = 8.692e-06 | test_loss = 6.129e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 588/1000]: train loss = 8.652e-06 | test_loss = 5.978e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 589/1000]: train loss = 8.645e-06 | test_loss = 5.768e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 590/1000]: train loss = 8.620e-06 | test_loss = 5.748e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 591/1000]: train loss = 8.580e-06 | test_loss = 5.885e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 592/1000]: train loss = 8.569e-06 | test_loss = 5.850e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 593/1000]: train loss = 8.558e-06 | test_loss = 5.668e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 594/1000]: train loss = 8.561e-06 | test_loss = 5.764e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 595/1000]: train loss = 8.546e-06 | test_loss = 5.611e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 596/1000]: train loss = 8.537e-06 | test_loss = 5.653e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 597/1000]: train loss = 8.511e-06 | test_loss = 5.893e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 598/1000]: train loss = 8.498e-06 | test_loss = 5.856e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 599/1000]: train loss = 8.484e-06 | test_loss = 5.800e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 600/1000]: train loss = 8.439e-06 | test_loss = 5.591e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 601/1000]: train loss = 8.424e-06 | test_loss = 5.901e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 602/1000]: train loss = 8.400e-06 | test_loss = 5.558e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 603/1000]: train loss = 8.406e-06 | test_loss = 5.540e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 604/1000]: train loss = 8.364e-06 | test_loss = 5.813e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.1 sec
[ 605/1000]: train loss = 8.371e-06 | test_loss = 5.810e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 606/1000]: train loss = 8.327e-06 | test_loss = 5.405e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 607/1000]: train loss = 8.318e-06 | test_loss = 5.423e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 608/1000]: train loss = 8.315e-06 | test_loss = 5.787e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 609/1000]: train loss = 8.294e-06 | test_loss = 5.641e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 610/1000]: train loss = 8.276e-06 | test_loss = 5.427e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 611/1000]: train loss = 8.248e-06 | test_loss = 5.567e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 612/1000]: train loss = 8.247e-06 | test_loss = 5.884e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 613/1000]: train loss = 8.248e-06 | test_loss = 5.452e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 614/1000]: train loss = 8.214e-06 | test_loss = 5.501e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 615/1000]: train loss = 8.180e-06 | test_loss = 5.447e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 616/1000]: train loss = 8.164e-06 | test_loss = 6.294e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 617/1000]: train loss = 8.149e-06 | test_loss = 5.417e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 618/1000]: train loss = 8.124e-06 | test_loss = 5.434e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 619/1000]: train loss = 8.139e-06 | test_loss = 6.222e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 620/1000]: train loss = 8.143e-06 | test_loss = 5.421e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 621/1000]: train loss = 8.130e-06 | test_loss = 5.422e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 622/1000]: train loss = 8.167e-06 | test_loss = 5.385e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 623/1000]: train loss = 8.158e-06 | test_loss = 5.882e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 624/1000]: train loss = 8.144e-06 | test_loss = 5.686e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 625/1000]: train loss = 8.133e-06 | test_loss = 5.215e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 626/1000]: train loss = 8.105e-06 | test_loss = 5.218e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 627/1000]: train loss = 8.085e-06 | test_loss = 5.442e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 628/1000]: train loss = 8.033e-06 | test_loss = 5.346e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 629/1000]: train loss = 7.982e-06 | test_loss = 5.336e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 630/1000]: train loss = 7.935e-06 | test_loss = 5.079e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 631/1000]: train loss = 7.892e-06 | test_loss = 5.128e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 632/1000]: train loss = 7.871e-06 | test_loss = 5.236e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 633/1000]: train loss = 7.826e-06 | test_loss = 5.033e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 634/1000]: train loss = 7.832e-06 | test_loss = 5.113e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 635/1000]: train loss = 7.824e-06 | test_loss = 5.410e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 636/1000]: train loss = 7.802e-06 | test_loss = 5.111e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 637/1000]: train loss = 7.780e-06 | test_loss = 5.243e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 638/1000]: train loss = 7.764e-06 | test_loss = 4.976e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 639/1000]: train loss = 7.728e-06 | test_loss = 5.083e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 640/1000]: train loss = 7.695e-06 | test_loss = 4.924e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 641/1000]: train loss = 7.655e-06 | test_loss = 4.842e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 642/1000]: train loss = 7.690e-06 | test_loss = 5.152e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 643/1000]: train loss = 7.670e-06 | test_loss = 5.044e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 644/1000]: train loss = 7.657e-06 | test_loss = 4.840e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 645/1000]: train loss = 7.620e-06 | test_loss = 4.854e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 646/1000]: train loss = 7.614e-06 | test_loss = 4.879e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 647/1000]: train loss = 7.597e-06 | test_loss = 5.165e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 648/1000]: train loss = 7.562e-06 | test_loss = 4.734e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 649/1000]: train loss = 7.526e-06 | test_loss = 4.973e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 650/1000]: train loss = 7.478e-06 | test_loss = 4.742e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 651/1000]: train loss = 7.449e-06 | test_loss = 4.835e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 652/1000]: train loss = 7.443e-06 | test_loss = 4.665e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 653/1000]: train loss = 7.418e-06 | test_loss = 4.784e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 654/1000]: train loss = 7.388e-06 | test_loss = 4.670e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 655/1000]: train loss = 7.358e-06 | test_loss = 4.817e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 656/1000]: train loss = 7.332e-06 | test_loss = 4.638e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 657/1000]: train loss = 7.301e-06 | test_loss = 4.723e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 658/1000]: train loss = 7.261e-06 | test_loss = 4.650e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 659/1000]: train loss = 7.224e-06 | test_loss = 4.827e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 660/1000]: train loss = 7.253e-06 | test_loss = 4.571e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 661/1000]: train loss = 7.237e-06 | test_loss = 4.527e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 662/1000]: train loss = 7.202e-06 | test_loss = 4.785e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 663/1000]: train loss = 7.190e-06 | test_loss = 4.671e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 664/1000]: train loss = 7.155e-06 | test_loss = 4.589e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 665/1000]: train loss = 7.144e-06 | test_loss = 4.453e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 666/1000]: train loss = 7.119e-06 | test_loss = 4.636e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 667/1000]: train loss = 7.099e-06 | test_loss = 4.457e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 668/1000]: train loss = 7.095e-06 | test_loss = 4.880e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 669/1000]: train loss = 7.120e-06 | test_loss = 5.129e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 670/1000]: train loss = 7.165e-06 | test_loss = 4.387e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 671/1000]: train loss = 7.177e-06 | test_loss = 4.735e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 672/1000]: train loss = 7.163e-06 | test_loss = 4.580e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 673/1000]: train loss = 7.129e-06 | test_loss = 5.051e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 674/1000]: train loss = 7.109e-06 | test_loss = 4.342e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 675/1000]: train loss = 7.086e-06 | test_loss = 4.385e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 676/1000]: train loss = 7.042e-06 | test_loss = 4.771e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 677/1000]: train loss = 7.033e-06 | test_loss = 4.676e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 678/1000]: train loss = 7.018e-06 | test_loss = 4.373e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 679/1000]: train loss = 7.027e-06 | test_loss = 4.375e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 680/1000]: train loss = 7.025e-06 | test_loss = 4.383e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 681/1000]: train loss = 6.991e-06 | test_loss = 4.715e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 682/1000]: train loss = 6.968e-06 | test_loss = 4.577e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 683/1000]: train loss = 6.962e-06 | test_loss = 4.280e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 684/1000]: train loss = 6.933e-06 | test_loss = 4.292e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 685/1000]: train loss = 6.877e-06 | test_loss = 4.315e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 686/1000]: train loss = 6.840e-06 | test_loss = 4.561e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 687/1000]: train loss = 6.841e-06 | test_loss = 4.581e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 688/1000]: train loss = 6.840e-06 | test_loss = 4.241e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 689/1000]: train loss = 6.803e-06 | test_loss = 4.281e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 690/1000]: train loss = 6.766e-06 | test_loss = 4.247e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 691/1000]: train loss = 6.742e-06 | test_loss = 4.364e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 692/1000]: train loss = 6.692e-06 | test_loss = 4.194e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 693/1000]: train loss = 6.682e-06 | test_loss = 4.256e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 694/1000]: train loss = 6.645e-06 | test_loss = 4.175e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 695/1000]: train loss = 6.621e-06 | test_loss = 4.117e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 696/1000]: train loss = 6.598e-06 | test_loss = 4.328e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 697/1000]: train loss = 6.580e-06 | test_loss = 4.086e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 698/1000]: train loss = 6.624e-06 | test_loss = 4.255e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 699/1000]: train loss = 6.608e-06 | test_loss = 4.555e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 700/1000]: train loss = 6.622e-06 | test_loss = 4.105e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 701/1000]: train loss = 6.606e-06 | test_loss = 4.091e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 702/1000]: train loss = 6.594e-06 | test_loss = 4.146e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 703/1000]: train loss = 6.575e-06 | test_loss = 4.458e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 704/1000]: train loss = 6.555e-06 | test_loss = 4.282e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 705/1000]: train loss = 6.565e-06 | test_loss = 4.094e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 706/1000]: train loss = 6.548e-06 | test_loss = 4.187e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 707/1000]: train loss = 6.537e-06 | test_loss = 4.313e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 708/1000]: train loss = 6.520e-06 | test_loss = 4.097e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 709/1000]: train loss = 6.479e-06 | test_loss = 3.997e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 710/1000]: train loss = 6.435e-06 | test_loss = 3.959e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 711/1000]: train loss = 6.427e-06 | test_loss = 4.111e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 712/1000]: train loss = 6.396e-06 | test_loss = 4.143e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 713/1000]: train loss = 6.404e-06 | test_loss = 3.947e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 714/1000]: train loss = 6.403e-06 | test_loss = 4.052e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 715/1000]: train loss = 6.408e-06 | test_loss = 4.033e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 716/1000]: train loss = 6.382e-06 | test_loss = 4.022e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 717/1000]: train loss = 6.340e-06 | test_loss = 3.904e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 718/1000]: train loss = 6.311e-06 | test_loss = 4.183e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 719/1000]: train loss = 6.290e-06 | test_loss = 3.908e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 720/1000]: train loss = 6.258e-06 | test_loss = 4.154e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 721/1000]: train loss = 6.262e-06 | test_loss = 4.053e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 722/1000]: train loss = 6.245e-06 | test_loss = 3.867e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 723/1000]: train loss = 6.230e-06 | test_loss = 3.898e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 724/1000]: train loss = 6.201e-06 | test_loss = 3.903e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 725/1000]: train loss = 6.199e-06 | test_loss = 4.051e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 726/1000]: train loss = 6.194e-06 | test_loss = 3.826e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 727/1000]: train loss = 6.155e-06 | test_loss = 3.867e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 728/1000]: train loss = 6.157e-06 | test_loss = 4.024e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 729/1000]: train loss = 6.169e-06 | test_loss = 3.883e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 730/1000]: train loss = 6.152e-06 | test_loss = 4.032e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 731/1000]: train loss = 6.139e-06 | test_loss = 3.962e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 732/1000]: train loss = 6.131e-06 | test_loss = 3.824e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 733/1000]: train loss = 6.098e-06 | test_loss = 3.803e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 734/1000]: train loss = 6.070e-06 | test_loss = 4.044e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 735/1000]: train loss = 6.062e-06 | test_loss = 3.783e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 736/1000]: train loss = 6.056e-06 | test_loss = 3.835e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 737/1000]: train loss = 6.024e-06 | test_loss = 3.829e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 738/1000]: train loss = 5.988e-06 | test_loss = 4.181e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 739/1000]: train loss = 6.020e-06 | test_loss = 4.042e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 740/1000]: train loss = 6.024e-06 | test_loss = 3.768e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 741/1000]: train loss = 6.005e-06 | test_loss = 3.754e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 742/1000]: train loss = 6.003e-06 | test_loss = 3.761e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 743/1000]: train loss = 5.991e-06 | test_loss = 4.050e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 744/1000]: train loss = 6.017e-06 | test_loss = 3.932e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 745/1000]: train loss = 6.012e-06 | test_loss = 4.014e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 746/1000]: train loss = 6.039e-06 | test_loss = 3.887e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 747/1000]: train loss = 6.068e-06 | test_loss = 3.671e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 748/1000]: train loss = 6.071e-06 | test_loss = 3.859e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 749/1000]: train loss = 6.049e-06 | test_loss = 3.836e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 750/1000]: train loss = 5.994e-06 | test_loss = 3.786e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 751/1000]: train loss = 5.960e-06 | test_loss = 3.733e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 752/1000]: train loss = 5.957e-06 | test_loss = 3.668e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 753/1000]: train loss = 5.968e-06 | test_loss = 3.824e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 754/1000]: train loss = 5.955e-06 | test_loss = 3.586e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 755/1000]: train loss = 5.932e-06 | test_loss = 3.629e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 756/1000]: train loss = 5.903e-06 | test_loss = 3.733e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 757/1000]: train loss = 5.901e-06 | test_loss = 3.949e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 758/1000]: train loss = 5.902e-06 | test_loss = 4.455e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 759/1000]: train loss = 5.906e-06 | test_loss = 3.587e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 760/1000]: train loss = 5.881e-06 | test_loss = 3.582e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 761/1000]: train loss = 5.881e-06 | test_loss = 3.659e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 762/1000]: train loss = 5.856e-06 | test_loss = 3.767e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 763/1000]: train loss = 5.825e-06 | test_loss = 3.542e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 764/1000]: train loss = 5.808e-06 | test_loss = 3.578e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 765/1000]: train loss = 5.795e-06 | test_loss = 3.482e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 766/1000]: train loss = 5.762e-06 | test_loss = 3.578e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 767/1000]: train loss = 5.741e-06 | test_loss = 3.704e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 768/1000]: train loss = 5.717e-06 | test_loss = 3.482e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 769/1000]: train loss = 5.687e-06 | test_loss = 3.471e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 770/1000]: train loss = 5.661e-06 | test_loss = 3.441e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 771/1000]: train loss = 5.661e-06 | test_loss = 3.396e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 772/1000]: train loss = 5.679e-06 | test_loss = 3.475e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 773/1000]: train loss = 5.669e-06 | test_loss = 3.804e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 774/1000]: train loss = 5.683e-06 | test_loss = 3.667e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 775/1000]: train loss = 5.652e-06 | test_loss = 3.624e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 776/1000]: train loss = 5.649e-06 | test_loss = 3.366e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 777/1000]: train loss = 5.629e-06 | test_loss = 3.413e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 778/1000]: train loss = 5.671e-06 | test_loss = 3.359e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 779/1000]: train loss = 5.639e-06 | test_loss = 3.385e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 780/1000]: train loss = 5.603e-06 | test_loss = 3.337e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 781/1000]: train loss = 5.606e-06 | test_loss = 3.488e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 782/1000]: train loss = 5.624e-06 | test_loss = 3.291e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 783/1000]: train loss = 5.686e-06 | test_loss = 3.663e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 784/1000]: train loss = 5.671e-06 | test_loss = 3.644e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 785/1000]: train loss = 5.657e-06 | test_loss = 3.588e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 786/1000]: train loss = 5.648e-06 | test_loss = 3.257e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 787/1000]: train loss = 5.593e-06 | test_loss = 3.236e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 788/1000]: train loss = 5.558e-06 | test_loss = 3.372e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 789/1000]: train loss = 5.529e-06 | test_loss = 3.353e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 790/1000]: train loss = 5.504e-06 | test_loss = 3.248e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 791/1000]: train loss = 5.475e-06 | test_loss = 3.322e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 792/1000]: train loss = 5.452e-06 | test_loss = 3.252e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 793/1000]: train loss = 5.425e-06 | test_loss = 3.246e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 794/1000]: train loss = 5.420e-06 | test_loss = 3.466e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 795/1000]: train loss = 5.427e-06 | test_loss = 3.198e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 796/1000]: train loss = 5.473e-06 | test_loss = 3.281e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 797/1000]: train loss = 5.478e-06 | test_loss = 3.185e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 798/1000]: train loss = 5.488e-06 | test_loss = 3.519e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 799/1000]: train loss = 5.507e-06 | test_loss = 3.361e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 800/1000]: train loss = 5.553e-06 | test_loss = 3.239e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 801/1000]: train loss = 5.553e-06 | test_loss = 4.515e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 802/1000]: train loss = 5.605e-06 | test_loss = 3.596e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 803/1000]: train loss = 5.587e-06 | test_loss = 3.425e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 804/1000]: train loss = 5.527e-06 | test_loss = 3.177e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 805/1000]: train loss = 5.475e-06 | test_loss = 3.127e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 806/1000]: train loss = 5.451e-06 | test_loss = 3.281e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 807/1000]: train loss = 5.424e-06 | test_loss = 3.164e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 808/1000]: train loss = 5.421e-06 | test_loss = 3.393e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 809/1000]: train loss = 5.417e-06 | test_loss = 3.391e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 810/1000]: train loss = 5.391e-06 | test_loss = 3.194e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 811/1000]: train loss = 5.317e-06 | test_loss = 3.125e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 812/1000]: train loss = 5.292e-06 | test_loss = 3.102e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 813/1000]: train loss = 5.269e-06 | test_loss = 3.052e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 814/1000]: train loss = 5.251e-06 | test_loss = 3.104e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 815/1000]: train loss = 5.246e-06 | test_loss = 3.133e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 816/1000]: train loss = 5.236e-06 | test_loss = 3.122e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 817/1000]: train loss = 5.222e-06 | test_loss = 3.124e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 818/1000]: train loss = 5.239e-06 | test_loss = 3.044e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 819/1000]: train loss = 5.252e-06 | test_loss = 3.158e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 820/1000]: train loss = 5.246e-06 | test_loss = 3.282e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 821/1000]: train loss = 5.233e-06 | test_loss = 3.042e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 822/1000]: train loss = 5.189e-06 | test_loss = 3.013e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 823/1000]: train loss = 5.175e-06 | test_loss = 3.080e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 824/1000]: train loss = 5.150e-06 | test_loss = 3.050e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 825/1000]: train loss = 5.139e-06 | test_loss = 3.006e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 826/1000]: train loss = 5.117e-06 | test_loss = 3.018e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 827/1000]: train loss = 5.081e-06 | test_loss = 3.108e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 828/1000]: train loss = 5.123e-06 | test_loss = 3.559e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 829/1000]: train loss = 5.153e-06 | test_loss = 3.282e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 830/1000]: train loss = 5.173e-06 | test_loss = 3.080e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 831/1000]: train loss = 5.183e-06 | test_loss = 3.098e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 832/1000]: train loss = 5.193e-06 | test_loss = 3.118e-06 | depth = 12.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 833/1000]: train loss = 5.184e-06 | test_loss = 3.347e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 834/1000]: train loss = 5.179e-06 | test_loss = 3.011e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 835/1000]: train loss = 5.131e-06 | test_loss = 3.040e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 836/1000]: train loss = 5.116e-06 | test_loss = 3.086e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 837/1000]: train loss = 5.093e-06 | test_loss = 3.044e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 838/1000]: train loss = 5.078e-06 | test_loss = 2.967e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 839/1000]: train loss = 5.069e-06 | test_loss = 3.136e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 840/1000]: train loss = 5.062e-06 | test_loss = 3.047e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 841/1000]: train loss = 5.062e-06 | test_loss = 3.114e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 842/1000]: train loss = 5.117e-06 | test_loss = 3.007e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 843/1000]: train loss = 5.126e-06 | test_loss = 3.385e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 844/1000]: train loss = 5.114e-06 | test_loss = 3.575e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 845/1000]: train loss = 5.145e-06 | test_loss = 3.358e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 846/1000]: train loss = 5.107e-06 | test_loss = 2.924e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 847/1000]: train loss = 5.056e-06 | test_loss = 2.914e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 848/1000]: train loss = 5.010e-06 | test_loss = 2.906e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 849/1000]: train loss = 4.967e-06 | test_loss = 3.116e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 850/1000]: train loss = 4.960e-06 | test_loss = 3.005e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 851/1000]: train loss = 4.960e-06 | test_loss = 2.977e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 852/1000]: train loss = 4.948e-06 | test_loss = 3.089e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 853/1000]: train loss = 4.942e-06 | test_loss = 2.873e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 854/1000]: train loss = 4.948e-06 | test_loss = 3.021e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 855/1000]: train loss = 4.924e-06 | test_loss = 3.195e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 856/1000]: train loss = 4.919e-06 | test_loss = 2.902e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 1.0 sec
[ 857/1000]: train loss = 4.914e-06 | test_loss = 3.176e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 858/1000]: train loss = 4.915e-06 | test_loss = 2.902e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 859/1000]: train loss = 4.930e-06 | test_loss = 2.810e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 860/1000]: train loss = 4.930e-06 | test_loss = 2.932e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 861/1000]: train loss = 4.897e-06 | test_loss = 2.836e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 862/1000]: train loss = 4.884e-06 | test_loss = 2.882e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 863/1000]: train loss = 4.861e-06 | test_loss = 2.828e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 864/1000]: train loss = 4.840e-06 | test_loss = 2.778e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 865/1000]: train loss = 4.814e-06 | test_loss = 2.750e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 866/1000]: train loss = 4.799e-06 | test_loss = 2.795e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 867/1000]: train loss = 4.808e-06 | test_loss = 2.791e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 868/1000]: train loss = 4.822e-06 | test_loss = 2.780e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 869/1000]: train loss = 4.860e-06 | test_loss = 2.823e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 870/1000]: train loss = 4.830e-06 | test_loss = 2.768e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 871/1000]: train loss = 4.837e-06 | test_loss = 2.831e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 872/1000]: train loss = 4.820e-06 | test_loss = 2.734e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 873/1000]: train loss = 4.796e-06 | test_loss = 2.809e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 874/1000]: train loss = 4.783e-06 | test_loss = 2.895e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 875/1000]: train loss = 4.795e-06 | test_loss = 2.831e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 876/1000]: train loss = 4.788e-06 | test_loss = 3.090e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 877/1000]: train loss = 4.774e-06 | test_loss = 2.867e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 878/1000]: train loss = 4.780e-06 | test_loss = 2.756e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 879/1000]: train loss = 4.775e-06 | test_loss = 2.693e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 880/1000]: train loss = 4.757e-06 | test_loss = 2.790e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 881/1000]: train loss = 4.759e-06 | test_loss = 2.761e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 882/1000]: train loss = 4.748e-06 | test_loss = 2.649e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 883/1000]: train loss = 4.769e-06 | test_loss = 2.912e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 884/1000]: train loss = 4.831e-06 | test_loss = 3.224e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 885/1000]: train loss = 4.885e-06 | test_loss = 2.893e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 886/1000]: train loss = 4.853e-06 | test_loss = 3.152e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 887/1000]: train loss = 4.850e-06 | test_loss = 2.813e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 888/1000]: train loss = 4.840e-06 | test_loss = 2.684e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 889/1000]: train loss = 4.807e-06 | test_loss = 2.666e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 890/1000]: train loss = 4.799e-06 | test_loss = 2.644e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 891/1000]: train loss = 4.827e-06 | test_loss = 2.619e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 892/1000]: train loss = 4.826e-06 | test_loss = 2.659e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 893/1000]: train loss = 4.794e-06 | test_loss = 2.779e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 894/1000]: train loss = 4.754e-06 | test_loss = 2.631e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 895/1000]: train loss = 4.723e-06 | test_loss = 2.892e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 896/1000]: train loss = 4.730e-06 | test_loss = 3.035e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 897/1000]: train loss = 4.765e-06 | test_loss = 2.858e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 898/1000]: train loss = 4.797e-06 | test_loss = 2.731e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 899/1000]: train loss = 4.783e-06 | test_loss = 2.575e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 900/1000]: train loss = 4.722e-06 | test_loss = 2.745e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 901/1000]: train loss = 4.763e-06 | test_loss = 2.626e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 902/1000]: train loss = 4.746e-06 | test_loss = 2.634e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 903/1000]: train loss = 4.732e-06 | test_loss = 2.715e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 904/1000]: train loss = 4.739e-06 | test_loss = 2.595e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 905/1000]: train loss = 4.761e-06 | test_loss = 2.611e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 906/1000]: train loss = 4.737e-06 | test_loss = 2.547e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 907/1000]: train loss = 4.716e-06 | test_loss = 2.651e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 908/1000]: train loss = 4.680e-06 | test_loss = 2.787e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 909/1000]: train loss = 4.661e-06 | test_loss = 2.599e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 910/1000]: train loss = 4.621e-06 | test_loss = 2.560e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 911/1000]: train loss = 4.608e-06 | test_loss = 2.583e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 912/1000]: train loss = 4.563e-06 | test_loss = 2.758e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 913/1000]: train loss = 4.539e-06 | test_loss = 2.593e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 914/1000]: train loss = 4.519e-06 | test_loss = 2.576e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 915/1000]: train loss = 4.489e-06 | test_loss = 2.539e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 916/1000]: train loss = 4.502e-06 | test_loss = 2.732e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 917/1000]: train loss = 4.510e-06 | test_loss = 2.588e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 918/1000]: train loss = 4.518e-06 | test_loss = 2.561e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 919/1000]: train loss = 4.504e-06 | test_loss = 2.524e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 920/1000]: train loss = 4.487e-06 | test_loss = 2.574e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 921/1000]: train loss = 4.481e-06 | test_loss = 2.502e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 922/1000]: train loss = 4.483e-06 | test_loss = 2.545e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 923/1000]: train loss = 4.474e-06 | test_loss = 2.544e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 924/1000]: train loss = 4.463e-06 | test_loss = 2.634e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 925/1000]: train loss = 4.554e-06 | test_loss = 3.580e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 926/1000]: train loss = 4.641e-06 | test_loss = 3.495e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 927/1000]: train loss = 4.691e-06 | test_loss = 2.930e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 928/1000]: train loss = 4.658e-06 | test_loss = 2.822e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 929/1000]: train loss = 4.641e-06 | test_loss = 2.565e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 930/1000]: train loss = 4.606e-06 | test_loss = 2.485e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 931/1000]: train loss = 4.577e-06 | test_loss = 2.512e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 932/1000]: train loss = 4.536e-06 | test_loss = 2.671e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 933/1000]: train loss = 4.511e-06 | test_loss = 2.487e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 934/1000]: train loss = 4.494e-06 | test_loss = 2.472e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 935/1000]: train loss = 4.480e-06 | test_loss = 2.496e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 936/1000]: train loss = 4.472e-06 | test_loss = 2.501e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 937/1000]: train loss = 4.454e-06 | test_loss = 2.651e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 938/1000]: train loss = 4.441e-06 | test_loss = 2.507e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 939/1000]: train loss = 4.430e-06 | test_loss = 2.528e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 940/1000]: train loss = 4.466e-06 | test_loss = 2.587e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 941/1000]: train loss = 4.482e-06 | test_loss = 3.122e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 942/1000]: train loss = 4.606e-06 | test_loss = 3.214e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 943/1000]: train loss = 4.610e-06 | test_loss = 2.908e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 944/1000]: train loss = 4.593e-06 | test_loss = 2.853e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 945/1000]: train loss = 4.563e-06 | test_loss = 2.674e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 946/1000]: train loss = 4.505e-06 | test_loss = 2.481e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 947/1000]: train loss = 4.462e-06 | test_loss = 2.434e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 948/1000]: train loss = 4.423e-06 | test_loss = 2.450e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 949/1000]: train loss = 4.419e-06 | test_loss = 2.469e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 950/1000]: train loss = 4.417e-06 | test_loss = 2.525e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 951/1000]: train loss = 4.443e-06 | test_loss = 2.586e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 952/1000]: train loss = 4.454e-06 | test_loss = 2.685e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 953/1000]: train loss = 4.421e-06 | test_loss = 2.605e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 954/1000]: train loss = 4.419e-06 | test_loss = 2.779e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 955/1000]: train loss = 4.421e-06 | test_loss = 2.495e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 956/1000]: train loss = 4.433e-06 | test_loss = 2.411e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 957/1000]: train loss = 4.429e-06 | test_loss = 2.420e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 958/1000]: train loss = 4.444e-06 | test_loss = 2.445e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.9 sec
[ 959/1000]: train loss = 4.461e-06 | test_loss = 2.408e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 960/1000]: train loss = 4.462e-06 | test_loss = 2.416e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 961/1000]: train loss = 4.457e-06 | test_loss = 2.391e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 962/1000]: train loss = 4.438e-06 | test_loss = 2.464e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 963/1000]: train loss = 4.419e-06 | test_loss = 2.457e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 964/1000]: train loss = 4.399e-06 | test_loss = 2.480e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 965/1000]: train loss = 4.401e-06 | test_loss = 2.465e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 966/1000]: train loss = 4.427e-06 | test_loss = 3.013e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 967/1000]: train loss = 4.514e-06 | test_loss = 2.876e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 968/1000]: train loss = 4.519e-06 | test_loss = 2.526e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 969/1000]: train loss = 4.491e-06 | test_loss = 2.425e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 970/1000]: train loss = 4.461e-06 | test_loss = 2.414e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 971/1000]: train loss = 4.426e-06 | test_loss = 2.782e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 972/1000]: train loss = 4.429e-06 | test_loss = 2.417e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 973/1000]: train loss = 4.436e-06 | test_loss = 2.687e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 974/1000]: train loss = 4.404e-06 | test_loss = 2.757e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 975/1000]: train loss = 4.382e-06 | test_loss = 2.431e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 976/1000]: train loss = 4.346e-06 | test_loss = 2.416e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 977/1000]: train loss = 4.345e-06 | test_loss = 2.468e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 978/1000]: train loss = 4.317e-06 | test_loss = 2.346e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 979/1000]: train loss = 4.318e-06 | test_loss = 2.347e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 980/1000]: train loss = 4.290e-06 | test_loss = 2.380e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 981/1000]: train loss = 4.324e-06 | test_loss = 2.422e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 982/1000]: train loss = 4.350e-06 | test_loss = 2.396e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 983/1000]: train loss = 4.338e-06 | test_loss = 2.377e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 984/1000]: train loss = 4.307e-06 | test_loss = 2.417e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 985/1000]: train loss = 4.282e-06 | test_loss = 2.424e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 986/1000]: train loss = 4.271e-06 | test_loss = 2.390e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 987/1000]: train loss = 4.281e-06 | test_loss = 2.353e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 988/1000]: train loss = 4.283e-06 | test_loss = 2.365e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 989/1000]: train loss = 4.274e-06 | test_loss = 2.388e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 990/1000]: train loss = 4.268e-06 | test_loss = 2.335e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 991/1000]: train loss = 4.242e-06 | test_loss = 2.348e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.8 sec
[ 992/1000]: train loss = 4.225e-06 | test_loss = 2.322e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 993/1000]: train loss = 4.218e-06 | test_loss = 2.454e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 994/1000]: train loss = 4.219e-06 | test_loss = 2.367e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 995/1000]: train loss = 4.219e-06 | test_loss = 2.304e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 996/1000]: train loss = 4.244e-06 | test_loss = 2.478e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 997/1000]: train loss = 4.253e-06 | test_loss = 2.511e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 998/1000]: train loss = 4.260e-06 | test_loss = 2.308e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[ 999/1000]: train loss = 4.265e-06 | test_loss = 2.433e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
[1000/1000]: train loss = 4.265e-06 | test_loss = 2.332e-06 | depth = 11.0 | lr = 1.0e-03 | fxt_pt_tol = 1.0e-06 | time = 0.7 sec
## Plot Convergence of Test Loss
```
import matplotlib.pyplot as plt
fig1 = plt.figure(1)
plt.plot(train_loss_hist, linewidth=3)
plt.plot(test_loss_hist, linewidth=3)
plt.yscale('log')
plt.legend(['training loss', 'testing loss'], fontsize=15)
plt.xlabel('epochs', fontsize=15)
plt.ylabel('Mean Squared Error', fontsize=15)
plt.savefig('N_FPN_RPS_test_loss.pdf')
filename = 'N_FPN_RPS_test_loss.csv'
with open(filename, 'w') as f:
    for epoch, loss in enumerate(test_loss_hist):
        f.write('%0.5e,%0.5e\n' % (epoch + 1, loss))
```
## Simulate Play of N-FPN Against Selfish Player
Here we pit two players against each other.
Case 1:
* Nash Player 1 knows the context $d$ and payoff matrix $B(d)$
* N-FPN Player 2 knows the context $d$ and historical observations (training data)
Case 2:
* Nash Player 1 knows the context $d$ and payoff matrix $B(d)$
* Nash Player 2 knows the context $d$ and payoff matrix $B(d)$
Each player's action profile lies on the simplex. Each round of play is executed by sampling each player's decision ("rock", "paper", or "scissors") according to their action profile. We then plot the average reward of Player 2 over time. If the N-FPN is "good", then the average reward of each player will eventually converge to 0, since rock-paper-scissors is a zero-sum game (the expected-payoff calculation below makes this precise).
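To see why zero is the right baseline, consider a single round in expectation (the mixed-strategy notation $x_1, x_2$ below is ours, not taken from the code): if Player 1 plays mixed strategy $x_1$ and Player 2 plays $x_2$, the expected reward to Player 1 is
$$\mathbb{E}[\text{reward}] = x_1^\top B(d)\, x_2,$$
and at a Nash equilibrium of this zero-sum game the value is $0$. The sampled per-round rewards are noisy, so it is their running average that should decay toward zero.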
```
def sample_distribution(prob_vec) -> torch.Tensor:
    '''Sample from a discrete probability distribution.

    Create a cumulative distribution vector and sample a point p uniformly
    from [0,1]. Then p will have a value between two of the cumulative
    entries, which indicates the sample index to pick for the sampling.
    '''
    p_sample = torch.rand(1)
    dist_vec = torch.cumsum(prob_vec, dim=0)
    action_sample = torch.zeros(prob_vec.shape)
    for idx, ref in enumerate(dist_vec):
        if ref > p_sample:
            action_sample[idx] = 1.0
            return action_sample
    # Fallback: floating-point round-off can leave p_sample above the final
    # cumulative entry, in which case no action is assigned.
    print("Error: Unable to assign action")
num_games = 500
player_size = 3
num_samples = 50
rewards_nash_nfpn = torch.zeros(num_samples, num_games)
rewards_nash_nash = torch.zeros(num_samples, num_games)
rewards_nash_unif = torch.zeros(num_samples, num_games)
x_uniform = torch.tensor([1.0/3, 1.0/3, 1.0/3])
for context in range(num_samples):
    print("Playing Games for Context {:2d} of {:2d}".format(context + 1,
                                                            num_samples))
    d_sample = sample_context(num_samples=1)
    Bd = create_payoff_matrix(d_sample)
    x_nash = get_nash_eq(d_sample)
    x_nfpn = model(d_sample)
    for idx in range(num_games):
        action_nash1 = sample_distribution(x_nash[0, :player_size])
        action_nash1 = action_nash1.view(1, player_size, 1)
        action_nash2 = sample_distribution(x_nash[0, player_size:])
        action_nash2 = action_nash2.view(1, player_size, 1)
        action_nfpn = sample_distribution(x_nfpn[0, player_size:])
        action_nfpn = action_nfpn.view(1, player_size, 1)
        action_unif = sample_distribution(x_uniform)
        action_unif = action_unif.view(1, player_size, 1)
        Bd_nfpn = torch.bmm(Bd, action_nfpn)
        Bd_nash2 = torch.bmm(Bd, action_nash2)
        Bd_unif = torch.bmm(Bd, action_unif)
        reward_nash_nfpn = torch.bmm(action_nash1.permute(0, 2, 1), Bd_nfpn)[0, 0, 0]
        reward_nash_nash = torch.bmm(action_nash1.permute(0, 2, 1), Bd_nash2)[0, 0, 0]
        reward_nash_unif = torch.bmm(action_nash1.permute(0, 2, 1), Bd_unif)[0, 0, 0]
        rewards_nash_nfpn[context, idx] = reward_nash_nfpn
        rewards_nash_nash[context, idx] = reward_nash_nash
        rewards_nash_unif[context, idx] = reward_nash_unif
# Running average, over games played, of the RMS reward across contexts.
lin_space = torch.cumsum(torch.ones(rewards_nash_nfpn.shape), dim=1)
nash_vs_nfpn = torch.mean(rewards_nash_nfpn ** 2, dim=0, keepdim=True) ** 0.5
nash_vs_nfpn = torch.cumsum(nash_vs_nfpn, dim=1) / lin_space
nash_vs_nash = torch.mean(rewards_nash_nash ** 2, dim=0, keepdim=True) ** 0.5
nash_vs_nash = torch.cumsum(nash_vs_nash, dim=1) / lin_space
nash_vs_unif = torch.mean(rewards_nash_unif ** 2, dim=0, keepdim=True) ** 0.5
nash_vs_unif = torch.cumsum(nash_vs_unif, dim=1) / lin_space
fig1 = plt.figure(1)
plt.plot(nash_vs_unif[0,5:], '--', linewidth=2)
plt.plot(nash_vs_nfpn[0,5:], linewidth=2)
plt.plot(nash_vs_nash[0,5:], '--', linewidth=2)
plt.legend(['Nash vs Uniform', 'Nash vs NFPN', 'Nash vs Nash'], fontsize=15)
plt.xlabel('Games Played', fontsize=15)
plt.ylabel('Average Reward Magnitude', fontsize=15)
plt.savefig('N_FPN_RPS_nash_reward.pdf')
filename = 'N_FPN_RPS_nash_vs_nfpn.csv'
with open(filename, 'w') as f:
    for game, cost in enumerate(nash_vs_nfpn[0, :]):
        f.write('%0.5e,%0.5e\n' % (game + 1, cost))
filename = 'N_FPN_RPS_nash_vs_nash.csv'
with open(filename, 'w') as f:
    for game, cost in enumerate(nash_vs_nash[0, :]):
        f.write('%0.5e,%0.5e\n' % (game + 1, cost))
filename = 'N_FPN_RPS_nash_vs_unif.csv'
with open(filename, 'w') as f:
    for game, cost in enumerate(nash_vs_unif[0, :]):
        f.write('%0.5e,%0.5e\n' % (game + 1, cost))
```
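As a quick sanity check (a hypothetical snippet, not part of the original notebook, assuming `torch` and `sample_distribution` are in scope from the cells above), the empirical frequencies of the sampled one-hot actions should approach the underlying action profile:
```
# Hypothetical check: empirical frequencies should approach the profile.
probs = torch.tensor([0.2, 0.3, 0.5])
counts = torch.zeros(3)
for _ in range(10000):
    counts += sample_distribution(probs)
print(counts / 10000)  # should be roughly [0.2, 0.3, 0.5]
```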
|
Require Import Crypto.Compilers.Syntax.
Require Import Crypto.Compilers.Wf.
Require Import Crypto.Compilers.InlineWf.
Require Import Crypto.Compilers.Z.Syntax.
Require Import Crypto.Compilers.Z.Inline.
Definition Wf_InlineConstAndOpp {t} (e : Expr t) (Hwf : Wf e)
: Wf (InlineConstAndOpp e)
:= @Wf_InlineConst _ _ _ t e Hwf.
Definition Wf_InlineConst {t} (e : Expr t) (Hwf : Wf e)
: Wf (InlineConst e)
:= @Wf_InlineConst _ _ _ t e Hwf.
Hint Resolve Wf_InlineConstAndOpp Wf_InlineConst : wf.
|
/*
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
*/
#include <assert.h>
#include <gsl/gsl_matrix_double.h>
#include <gsl/gsl_vector_double.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_errno.h>
#include <stdarg.h>
#include "os-features.h"
#include "gslutils.h"
#include "errors.h"
static void errhandler(const char * reason,
const char * file,
int line,
int gsl_errno) {
ERROR("GSL error: \"%s\" in %s:%i (gsl errno %i = %s)",
reason, file, line,
gsl_errno,
gsl_strerror(gsl_errno));
}
void gslutils_use_error_system() {
gsl_set_error_handler(errhandler);
}
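/* Invert the 3x3 row-major matrix A into B using an LU decomposition.
 * Returns 0 on success, -1 if the decomposition or inversion fails. */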
int gslutils_invert_3x3(const double* A, double* B) {
gsl_matrix* LU;
gsl_permutation *p;
gsl_matrix_view mB;
int rtn = 0;
int signum;
p = gsl_permutation_alloc(3);
gsl_matrix_const_view mA = gsl_matrix_const_view_array(A, 3, 3);
mB = gsl_matrix_view_array(B, 3, 3);
LU = gsl_matrix_alloc(3, 3);
gsl_matrix_memcpy(LU, &mA.matrix);
if (gsl_linalg_LU_decomp(LU, p, &signum) ||
gsl_linalg_LU_invert(LU, p, &mB.matrix)) {
ERROR("gsl_linalg_LU_decomp() or _invert() failed.");
rtn = -1;
}
gsl_permutation_free(p);
gsl_matrix_free(LU);
return rtn;
}
void gslutils_matrix_multiply(gsl_matrix* C,
const gsl_matrix* A, const gsl_matrix* B) {
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, A, B, 0.0, C);
}
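/* Varargs wrapper around gslutils_solve_leastsquares(): after NB, expects NB
 * argument triples (gsl_vector* B_i, gsl_vector** X_i, gsl_vector** R_i).
 * Passing NULL for X_i or R_i frees the corresponding solution or residual
 * vector instead of returning it. */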
int gslutils_solve_leastsquares_v(gsl_matrix* A, int NB, ...) {
int i, res;
gsl_vector** B = malloc(NB * sizeof(gsl_vector*));
// Whoa, three-star programming!
gsl_vector*** X = malloc(NB * sizeof(gsl_vector**));
gsl_vector*** R = malloc(NB * sizeof(gsl_vector**));
gsl_vector** Xtmp = malloc(NB * sizeof(gsl_vector*));
gsl_vector** Rtmp = malloc(NB * sizeof(gsl_vector*));
va_list va;
va_start(va, NB);
for (i=0; i<NB; i++) {
B[i] = va_arg(va, gsl_vector*);
X[i] = va_arg(va, gsl_vector**);
R[i] = va_arg(va, gsl_vector**);
}
va_end(va);
res = gslutils_solve_leastsquares(A, B, Xtmp, Rtmp, NB);
for (i=0; i<NB; i++) {
if (X[i])
*(X[i]) = Xtmp[i];
else
gsl_vector_free(Xtmp[i]);
if (R[i])
*(R[i]) = Rtmp[i];
else
gsl_vector_free(Rtmp[i]);
}
free(Xtmp);
free(Rtmp);
free(X);
free(R);
free(B);
return res;
}
int gslutils_solve_leastsquares(gsl_matrix* A, gsl_vector** B,
gsl_vector** X, gsl_vector** resids,
int NB) {
int i;
gsl_vector *tau, *resid = NULL;
Unused int ret;
int M, N;
M = A->size1;
N = A->size2;
for (i=0; i<NB; i++) {
assert(B[i]);
assert(B[i]->size == M);
}
tau = gsl_vector_alloc(MIN(M, N));
assert(tau);
ret = gsl_linalg_QR_decomp(A, tau);
assert(ret == 0);
// A,tau now contains a packed version of Q,R.
for (i=0; i<NB; i++) {
if (!resid) {
resid = gsl_vector_alloc(M);
assert(resid);
}
X[i] = gsl_vector_alloc(N);
assert(X[i]);
ret = gsl_linalg_QR_lssolve(A, tau, B[i], X[i], resid);
assert(ret == 0);
if (resids) {
resids[i] = resid;
resid = NULL;
}
}
gsl_vector_free(tau);
if (resid)
gsl_vector_free(resid);
return 0;
}
|
-- import data.nat.basic
-- open nat
-- #check nat
inductive my_nat
| zero : my_nat
| succ : my_nat → my_nat
notation `ℕ'` := my_nat
open my_nat
def my_add : my_nat → my_nat → my_nat
| m zero := m
| m (succ n') := succ (my_add m n')
notation m `+` n := my_add m n
instance : has_zero my_nat := ⟨zero⟩
lemma zero_add (n : ℕ') : 0 + n = n := begin
induction n with n' ih,
-- Base Case
refl,
-- Inductive Step
simp [my_add],
apply ih,
end
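-- A minimal companion sketch (added for illustration; not in the original
-- file): since `my_add` recurses on its second argument, `n + 0` reduces
-- definitionally, so right identity holds by `rfl`.
lemma add_zero' (n : ℕ') : n + 0 = n := rfl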
-- def closest_pair (p q : point) (ps : list point) : Prop :=
-- (p ∈ ps) ∧
-- (q ∈ ps) ∧
-- (p ≠ q) ∧
-- (∀ (r s : point),
-- (same_pair (p, q) (r, s) ↔ ∥ p - q ∥ = ∥ r - s ∥) ∧
-- (¬(same_pair (p, q) (r, s)) ↔ ∥ p - q ∥ < ∥ r - s ∥))
-- def cp_with_help (p q : point) (ps : list point) (c : ℕ⁺) : Prop :=
-- (closest_pair p q ps) ∧ (1 < ∥ p - q ∥) ∧ (∥ p - q ∥ ≤ c)
-- def find_closest_pair (ps : list point) (c : ℕ⁺) : (point × point) := sorry
-- theorem find_closest_pair_correct :
-- ∀ (ps : list point) (c : ℕ⁺),
-- (∃ (p q : point),
-- (closest_pair p q ps) ∧ (1 < ∥ p - q ∥) ∧ (∥ p - q ∥ ≤ c)) →
-- (∃ (p q : point),
-- find_closest_pair ps c = (p, q) ∧
-- closest_pair p q ps)
lemma aux_gives_closest_pair:
∀ (g : grid_2D),
-- If there's a closest pair within distance `c`,
(∃ (p q : point),
cp_with_help p q g.ps g.c) →
-- TODO we'll need a supposition that the closest pair is within distance `c`
(∃ (p q : point),
aux g g.ps = some (p, q)
∧ closest_pair p q g.ps) := begin
intros g cp_help,
apply cp_with_help_and_cp_in_balls_implies_closest_pair,
assumption,
apply aux_finds_closest_pair_in_ball_union,
simp,
end
lemma grid_pts_dot_c_with_c_eq_c :
∀ (c : ℕ⁺) (ps : list point),
(grid_points c ps).c = c := sorry
lemma grid_pts_dot_ps_with_ps_eq_ps :
∀ (c : ℕ⁺) (ps : list point),
(grid_points c ps).ps = ps := sorry
|
The M3 subtype of AML, also known as acute promyelocytic leukemia (APL), is almost universally treated with the drug all-trans-retinoic acid (ATRA) in addition to induction chemotherapy, usually an anthracycline. Care must be taken to prevent disseminated intravascular coagulation (DIC), which complicates the treatment of APL when the leukemic promyelocytes release the contents of their granules into the peripheral circulation. APL is eminently curable, with well-documented treatment protocols.
|
[STATEMENT]
lemma con_comp_ipurge_ref_no_fake:
assumes
A: "range p \<union> range q = UNIV" and
B: "u \<in> range Some"
shows "ipurge_ref (con_comp_pol I) (con_comp_map D E p q) u xs X =
ipurge_ref I (the \<circ> con_comp_map D E p q) (the u) xs X"
(is "ipurge_ref ?I' ?D' _ _ _ = _")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ipurge_ref (con_comp_pol I) (con_comp_map D E p q) u xs X = ipurge_ref I (the \<circ> con_comp_map D E p q) (the u) xs X
[PROOF STEP]
proof (simp add: ipurge_ref_def set_eq_iff, rule allI,
simp_all add: con_comp_sinks_no_fake [OF A B])
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
have "x \<in> range p \<union> range q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> range p \<union> range q
[PROOF STEP]
using A
[PROOF STATE]
proof (prove)
using this:
range p \<union> range q = UNIV
goal (1 subgoal):
1. x \<in> range p \<union> range q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<in> range p \<union> range q
goal (1 subgoal):
1. \<And>x. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
hence C: "?D' x = Some (the (?D' x))"
[PROOF STATE]
proof (prove)
using this:
x \<in> range p \<union> range q
goal (1 subgoal):
1. con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
[PROOF STEP]
by (cases "x \<in> range p", simp_all)
[PROOF STATE]
proof (state)
this:
con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
goal (1 subgoal):
1. \<And>x. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
have D: "u = Some (the u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u = Some (the u)
[PROOF STEP]
using B
[PROOF STATE]
proof (prove)
using this:
u \<in> range Some
goal (1 subgoal):
1. u = Some (the u)
[PROOF STEP]
by (simp add: image_iff)
[PROOF STATE]
proof (state)
this:
u = Some (the u)
goal (1 subgoal):
1. \<And>x. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
show
"(x \<in> X \<and> (u, ?D' x) \<notin> con_comp_pol I \<and>
(\<forall>v \<in> sinks ?I' ?D' u xs. (v, ?D' x) \<notin> con_comp_pol I)) =
(x \<in> X \<and> (the u, the (?D' x)) \<notin> I \<and>
(\<forall>v \<in> sinks ?I' ?D' u xs. (the v, the (?D' x)) \<notin> I))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
[PROOF STEP]
proof (rule iffI, (erule_tac [!] conjE)+, simp_all, rule_tac [!] conjI,
rule_tac [2] ballI, rule_tac [4] ballI)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I\<rbrakk> \<Longrightarrow> (the u, the (con_comp_map D E p q x)) \<notin> I
2. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
3. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
4. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
assume "(u, ?D' x) \<notin> ?I'"
[PROOF STATE]
proof (state)
this:
(u, con_comp_map D E p q x) \<notin> con_comp_pol I
goal (4 subgoals):
1. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I\<rbrakk> \<Longrightarrow> (the u, the (con_comp_map D E p q x)) \<notin> I
2. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
3. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
4. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "(Some (the u), Some (the (?D' x))) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(u, con_comp_map D E p q x) \<notin> con_comp_pol I
goal (1 subgoal):
1. (Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
using C and D
[PROOF STATE]
proof (prove)
using this:
(u, con_comp_map D E p q x) \<notin> con_comp_pol I
con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
u = Some (the u)
goal (1 subgoal):
1. (Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (4 subgoals):
1. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I\<rbrakk> \<Longrightarrow> (the u, the (con_comp_map D E p q x)) \<notin> I
2. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
3. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
4. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
thus "(the u, the (?D' x)) \<notin> I"
[PROOF STATE]
proof (prove)
using this:
(Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (1 subgoal):
1. (the u, the (con_comp_map D E p q x)) \<notin> I
[PROOF STEP]
by (simp add: con_comp_pol_def)
[PROOF STATE]
proof (state)
this:
(the u, the (con_comp_map D E p q x)) \<notin> I
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
fix v
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
assume "\<forall>v \<in> sinks ?I' ?D' u xs. (v, ?D' x) \<notin> ?I'" and
E: "v \<in> sinks ?I' ?D' u xs"
[PROOF STATE]
proof (state)
this:
\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "(v, ?D' x) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (1 subgoal):
1. (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
have "sinks ?I' ?D' u xs \<subseteq> range Some"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
[PROOF STEP]
by (rule con_comp_sinks_range, simp_all add: A B)
[PROOF STATE]
proof (state)
this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "v \<in> range Some"
[PROOF STATE]
proof (prove)
using this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
goal (1 subgoal):
1. v \<in> range Some
[PROOF STEP]
using E
[PROOF STATE]
proof (prove)
using this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (1 subgoal):
1. v \<in> range Some
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
v \<in> range Some
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "v = Some (the v)"
[PROOF STATE]
proof (prove)
using this:
v \<in> range Some
goal (1 subgoal):
1. v = Some (the v)
[PROOF STEP]
by (simp add: image_iff)
[PROOF STATE]
proof (state)
this:
v = Some (the v)
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
v = Some (the v)
[PROOF STEP]
have "(Some (the v), Some (the (?D' x))) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
v = Some (the v)
goal (1 subgoal):
1. (Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
using C
[PROOF STATE]
proof (prove)
using this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
v = Some (the v)
con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
goal (1 subgoal):
1. (Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (3 subgoals):
1. \<And>v. \<lbrakk>x \<in> X; (u, con_comp_map D E p q x) \<notin> con_comp_pol I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (the v, the (con_comp_map D E p q x)) \<notin> I
2. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
3. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
thus "(the v, the (?D' x)) \<notin> I"
[PROOF STATE]
proof (prove)
using this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (1 subgoal):
1. (the v, the (con_comp_map D E p q x)) \<notin> I
[PROOF STEP]
by (simp add: con_comp_pol_def)
[PROOF STATE]
proof (state)
this:
(the v, the (con_comp_map D E p q x)) \<notin> I
goal (2 subgoals):
1. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
2. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
2. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
assume "(the u, the (?D' x)) \<notin> I"
[PROOF STATE]
proof (state)
this:
(the u, the (con_comp_map D E p q x)) \<notin> I
goal (2 subgoals):
1. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
2. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "(Some (the u), Some (the (?D' x))) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(the u, the (con_comp_map D E p q x)) \<notin> I
goal (1 subgoal):
1. (Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
by (simp add: con_comp_pol_def)
[PROOF STATE]
proof (state)
this:
(Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (2 subgoals):
1. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I\<rbrakk> \<Longrightarrow> (u, con_comp_map D E p q x) \<notin> con_comp_pol I
2. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
thus "(u, ?D' x) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (1 subgoal):
1. (u, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
using C and D
[PROOF STATE]
proof (prove)
using this:
(Some (the u), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
u = Some (the u)
goal (1 subgoal):
1. (u, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(u, con_comp_map D E p q x) \<notin> con_comp_pol I
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
fix v
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
assume "\<forall>v \<in> sinks ?I' ?D' u xs. (the v, the (?D' x)) \<notin> I" and
E: "v \<in> sinks ?I' ?D' u xs"
[PROOF STATE]
proof (state)
this:
\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "(the v, the (?D' x)) \<notin> I"
[PROOF STATE]
proof (prove)
using this:
\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (1 subgoal):
1. (the v, the (con_comp_map D E p q x)) \<notin> I
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(the v, the (con_comp_map D E p q x)) \<notin> I
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "(Some (the v), Some (the (?D' x))) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(the v, the (con_comp_map D E p q x)) \<notin> I
goal (1 subgoal):
1. (Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
[PROOF STEP]
by (simp add: con_comp_pol_def)
[PROOF STATE]
proof (state)
this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
have "sinks ?I' ?D' u xs \<subseteq> range Some"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
[PROOF STEP]
by (rule con_comp_sinks_range, simp_all add: A B)
[PROOF STATE]
proof (state)
this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "v \<in> range Some"
[PROOF STATE]
proof (prove)
using this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
goal (1 subgoal):
1. v \<in> range Some
[PROOF STEP]
using E
[PROOF STATE]
proof (prove)
using this:
sinks (con_comp_pol I) (con_comp_map D E p q) u xs \<subseteq> range Some
v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs
goal (1 subgoal):
1. v \<in> range Some
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
v \<in> range Some
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
hence "v = Some (the v)"
[PROOF STATE]
proof (prove)
using this:
v \<in> range Some
goal (1 subgoal):
1. v = Some (the v)
[PROOF STEP]
by (simp add: image_iff)
[PROOF STATE]
proof (state)
this:
v = Some (the v)
goal (1 subgoal):
1. \<And>v. \<lbrakk>x \<in> X; (the u, the (con_comp_map D E p q x)) \<notin> I; \<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I; v \<in> sinks (con_comp_pol I) (con_comp_map D E p q) u xs\<rbrakk> \<Longrightarrow> (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
v = Some (the v)
[PROOF STEP]
show "(v, ?D' x) \<notin> ?I'"
[PROOF STATE]
proof (prove)
using this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
v = Some (the v)
goal (1 subgoal):
1. (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
using C
[PROOF STATE]
proof (prove)
using this:
(Some (the v), Some (the (con_comp_map D E p q x))) \<notin> con_comp_pol I
v = Some (the v)
con_comp_map D E p q x = Some (the (con_comp_map D E p q x))
goal (1 subgoal):
1. (v, con_comp_map D E p q x) \<notin> con_comp_pol I
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(v, con_comp_map D E p q x) \<notin> con_comp_pol I
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(x \<in> X \<and> (u, con_comp_map D E p q x) \<notin> con_comp_pol I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (v, con_comp_map D E p q x) \<notin> con_comp_pol I)) = (x \<in> X \<and> (the u, the (con_comp_map D E p q x)) \<notin> I \<and> (\<forall>v\<in>sinks (con_comp_pol I) (con_comp_map D E p q) u xs. (the v, the (con_comp_map D E p q x)) \<notin> I))
goal:
No subgoals!
[PROOF STEP]
qed |
import data.real.irrational
import topology.basic
import algebra.order.floor
--OUTPUT 1
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_irrational_orbit (α : ℚ) : (α.is_irrational) → ∀ ε > 0, ∃! n ∈ ℤ, ε > n * α - n,
begin
assume (h1 : α.is_irrational) (ε),
have h2 : ∃! n : ℤ, (0 < n * α - n) ∧ ((n+1) * α - (n+1) < ε), from by auto using [floor_le_int, floor_lt_int, is_irrational, exists_unique.exists],
show ∃! n : ℤ, ε > n * α - n, from by auto [h2, exists_unique.unique, exists_unique.exists, floor_le_int, floor_lt_int, is_irrational],
end
--OUTPUT 2
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℚ) (h1 : α ≠ 0) : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ [0, 1], |y - (n * α) % 1| < ε :=
begin
assume h2 : α ≠ 0,
have h3 : ∀ i j : ℤ, i ≠ j → ⟨i*α⟩ ≠ ⟨j*α⟩, from
begin
assume (i j : ℤ) (h4 : i ≠ j),
have h5 : i * α - ⌊i * α⌋ ≠ j * α - ⌊j * α⌋, from
begin
assume h6,
show false, from
begin
have h7 : α = ⌊i * α⌋ - ⌊j * α⌋ / (i - j), from
begin
calc α = i * α - ⌊i * α⌋ : by linarith
... = j * α - ⌊j * α⌋ : by linarith
... = (j - i) * α + ⌊j * α⌋ - ⌊i * α⌋ : by linarith
... = ⌊j * α⌋ - ⌊i * α⌋ : by linarith
... = ⌊j * α⌋ - ⌊i * α⌋ / (i - j) : by linarith
end,
have h8 : α ∈ ℤ, from
begin
have h9 : ⌊j * α⌋ - ⌊i * α⌋ / (i - j) ∈ ℤ, from
begin
have h10 : i ≠ j, from by linarith,
show ⌊j * α⌋ - ⌊i * α⌋ / (i - j) ∈ ℤ, from
begin
have h11 : ⌊j * α⌋ - ⌊i * α⌋ ∈ ℤ, from by linarith,
have h12 : (i - j) ∈ ℤ, from by linarith,
show ⌊j * α⌋ - ⌊i * α⌋ / (i - j) ∈ ℤ, from
begin
have h13 : (i - j) ≠ 0, from by linarith,
show (⌊j * α⌋ - ⌊i * α⌋) / (i - j) ∈ ℤ, from by linarith [h11, h13],
end
end
end,
have h11 : α ∈ ℚ, from
begin
show α ∈ ℚ, from by linarith using [old_nat_cast.injective]
end,
show α ∈ ℤ, from by linarith [h9],
end,
show false, from by linarith [h7, h8]
end
end,
show ⟨i*α⟩ ≠ ⟨j*α⟩, from
begin
show ⟨i*α⟩ ≠ ⟨j*α⟩, from
begin
show ⟨i*α⟩ ≠ ⟨j*α⟩, from by linarith [h5]
end
end
end,
have h6 : ∀ (i j : ℤ), i ≠ j → ⟨i * α⟩ ≠ ⟨j * α⟩, from h3,
have h7 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ x : ℤ, ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h8 : ∀ i j : ℤ, i ≠ j → ⟨i * α⟩ ≠ ⟨j * α⟩, from h6,
have h9 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h10 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h11 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h12 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h13 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h14 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h15 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h16 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h17 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h18 : ∃ x : ℝ, ∀ ε > 0, ∃ N, ∀ n > N, ∀ y ∈ set.range (λ (x : ℤ), ⟨x * α⟩), |y - ⟨n * α⟩| < ε, from
begin
have h19 : set.range (λ (i : ℤ), ⟨i * α⟩) ⊆ set.Ico 0 1, from
begin
assume (x : ℝ),
assume (h20 : x ∈ set.range (λ (i : ℤ), ⟨i * α⟩)),
cases h20 with i h21,
assume h22 : ⟨i * α
end --Needs more than 2000 tokens!
--OUTPUT 3
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=irrational_orbit_dense (α : ℝ) (hα : ¬ is_rat α) : dense_of_dense_range (λ j : ℤ, (floor j α : ℝ)) :=
begin
let s : set ℝ := λ (i : ℤ), ((floor i α) : ℝ),
let ƒ : ℤ → ℤ → ℝ := λ (i j : ℤ), floor i α - floor j α,
let h1 : s.finite.nonempty := by norm_num,
let h2 : ∀ (i j : ℤ), (i : ℝ) ≠ j → s i ≠ s j := by auto [hα, floor_divides_iff_rat],
let h3 : s.infinite := by auto [hα, floor_divides_iff_rat],
let h4 : ∃ (y : ℝ), is_limit_point s y, from exists_limit_point_of_infinite s h3,
let h5 : ∃ (y : ℝ), is_limit_point s y := by auto [h4],
let h6 : ∃ (y : ℝ), is_limit_point s y, from exists_limit_point_of_infinite s h3,
let h7 : ∃ (y : ℝ), is_limit_point s y := by auto [h6],
have h8 : ∃! x ∈ s, ∀ (ε : ℝ), 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε, from exists_unique_limit_point_of_infinite s h3 h5,
let h9 : ∃! x ∈ s, ∀ (ε : ℝ), 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε, from exists_unique_limit_point_of_infinite s h3 h7,
let h10 : ∃! x ∈ s, ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h9],
let h11 : ∃! x ∈ s, ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h8],
let h12 : ∃! x ∈ s, ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h11],
let h13 : ∃ (x : ℤ), ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h12],
let h14 : ∃ (x : ℤ), ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h13],
let x := classical.some (h12).some,
let h15 : ∀ (ε : ℝ), 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h12],
let h16 : ∀ (ε : ℝ), 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h15],
let h17 : ∀ {ε : ℝ}, 0 < ε → ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] ε := by auto [h15],
let h18 : ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] 0.5 := by auto [h16],
let h19 : ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] 0.5 := by auto [h18],
let h20 : ∃ (n : ℤ), n > x ∧ s n ∈ 𝓝[s x] 0.5 := by auto [h19],
let N := classical.some (h20).some,
let y : ℝ := (floor N α - floor x α) * (floor N α : ℝ),
let h21 : y ∈ s N := by auto [floor_divides_iff_rat, hα],
let h22 : N > x := by auto [h20],
let h23 : s N ∈ 𝓝[s x] 0.5 := by auto [h20],
have h24 : {N • s N - s x} < √2,
from by auto [floor_divides_iff_rat, hα, h21, h23, mem_nhds_iff, sqrt_lt_iff],
have h25 : {N • s N - s x} < √2,
from by auto [floor_divides_iff_rat, hα, h21, h23, mem_nhds_iff, sqrt_lt_iff],
let h26 : {N • s N - s x} < √2 := by auto [h25],
let h27 : {N • s N - s x} < √2 := by auto [h24],
let h28 : {N • s N - s x} < √2 := by auto [h27],
have h29 : 2 • {N • s N - s x} < 2 * √2,
from by auto [div_self, floor_divides_iff_rat, hα, h21, h23, mem_nhds_iff, sqrt_lt_iff],
have h30 : 2 • {N • s N - s x} < 2 * √2,
from by auto [div_self, floor_divides_iff_rat, hα, h21, h23, mem_nhds_iff, sqrt_lt_iff],
have h31 : 2 • {N • s N - s x} < 2 * √2,
from by auto [h29],
have h32 : 2 • {N • s N - s x} < 2 * √2,
from by auto [h30],
let h33 : 2 • {N • s N - s x} < 2 * √2 := by auto [h32],
have h34 : y < ((N • s N - s x) + (N • s N - s x)) := by auto [h25],
have h35 : y < ((N • s N - s x) + (N • s N - s x)) := by auto [h24],
let h36 : y < ((N • s N - s x) + (N • s N - s x)) := by auto [h35],
have h37 : y < ((N • s N - s x) + (N • s N - s x)) := by auto [h34],
let h38 : y < ((N • s N - s x) + (N • s N - s x)) := by auto [h37],
have h39 : y < 2 • {N • s N - s x},
from by auto [floor_divides_iff_rat, hα, h21, h23, mem_nhds_iff, sqrt_lt_iff],
let h40 : y < 2 • {N • s N - s x} := by auto [h39],
let h41 : ((y / 2) + s x : ℝ) < (N • s N : ℝ) := by auto [h40],
end --Needs more than 2000 tokens!
--OUTPUT 4
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (h1 : ¬ is_rat α) : ∀ ε > 0, ∃ n, n ∈ ℤ ∧ abs(n * α - ⌊n * α⌋) < ε :=
begin
assume ε,
assume h2 : ε > 0,
have h3 : α ≠ 0, from by auto [irrat],
let S : set ℝ := {(⌊i * α⌋ - i * α) | i : ℤ},
have h4 : ∀ (i j : ℤ), i ≠ j → (⌊i * α⌋ - i * α) ≠ (⌊j * α⌋ - j * α), from by auto [floor_eq_iff_rat, irrat],
have h5 : ∀ (i j : ℤ), i ≠ j → i * α ≠ j * α, from by auto [irrat, mul_eq_zero, eq_zero_of_mul_eq_zero, h3],
have h6 : ∀ (i j : ℤ), i ≠ j → i * α ∉ {j * α}, from by auto [h5],
have h7 : ∀ (i j : ℤ), i ≠ j → i * α - ⌊i * α⌋ ∉ {j * α - ⌊j * α⌋}, from by auto [eq_iff_sub_eq_zero, h6],
have h8 : ∀ (i j : ℤ), i ≠ j → (⌊i * α⌋ - i * α) ∉ {(⌊j * α⌋ - j * α)}, from by auto [h7],
have h9 : ∀ (i j : ℤ), i ≠ j → (⌊i * α⌋ - i * α) ∉ S := by auto [S, h8],
have h10 : ∀ a ∈ S, ∃ b ∈ S, abs(a - b) < ε, from by auto [sub_le_sub_left, h2],
have h11 : ∃ b ∈ S, abs(0 - b) < ε, from by auto [irrat, h10, h4, h9, S],
cases h11 with b h12,
have h13 : ∀ n : ℤ, n * α - ⌊n * α⌋ ∈ S, from by auto [n_floor_eq_sub, S, exists.intro 0, mem_image, exists.intro 0, mem_range],
have h14 : ∀ n : ℤ, n * α - ⌊n * α⌋ ∈ {(⌊i * α⌋ - i * α) | i : ℤ}, from by auto [S, h13],
have h15 : ∀ n : ℤ, n * α - ⌊n * α⌋ ∈ {i * α - ⌊i * α⌋ | i : ℤ}, from by auto [S, h14],
have h16 : ∃ n, n ∈ ℤ ∧ abs (n * α - ⌊n * α⌋) < ε,
from by auto [h12, h15, exists.intro 1, exists.intro 0, exists.intro b, exists.intro 0, exists.intro 0],
show ∃ n, n ∈ ℤ ∧ abs (n * α - ⌊n * α⌋) < ε, from by auto [h16],
end
--OUTPUT 5
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=irrational_orbit (α : ℝ) : ∀ ε > 0, ∃ x : ℝ, x ∈ set.range (λ i, ↑i*α) ∧ x ∈ Icc (0 : ℝ) (1 : ℝ) ∧ ∃ n : ℕ, |x - n * α| < ε :=
begin
assume ε,
assume hε : ε > 0,
have h3 : ∀ i j : ℕ, ((i : ℝ) * α - ↑i) ≠ ((j : ℝ) * α - ↑j), from by auto,
-- choose a sequence of all the fractional parts from 0 to infinity, or all the A such that (∃ B : ℝ, A = B - ⌊B⌋)
-- in other words, choose (A is an element of the powerset of ℝ, and there exists a B such that A = B - ⌊B⌋)
have h4 : ∀ A ∈ 𝒫 ℝ, ∃ B : ℝ, A = B - ⌊B⌋, from by auto,
have h6 : ∀ n : ℕ, ((n : ℚ) * α - ⌊(n : ℚ) * α⌋) ∈ set.range (λ B, B - ⌊B⌋), from by auto,
have h7 : (∀ x, x ∈ set.range (λ B, B - ⌊B⌋) → x ∈ Icc (0 : ℝ) (1 : ℝ)), from by auto [set.mem_range],
have h8 : ∀ n : ℕ, ((n : ℝ) * α - ⌊(n : ℝ) * α⌋) ∈ Icc (0 : ℝ) (1 : ℝ), from by auto [h6, h7],
have h9 : ∀ B ∈ Icc (0 : ℝ) (1 : ℝ), ∃ n : ℕ, |((n : ℝ) * α - ⌊(n : ℝ) * α⌋) - B| < ε, from by auto,
show (∃ (x : ℝ), x ∈ set.range (λ B, B - ⌊B⌋) ∧ x ∈ Icc (0 : ℝ) (1 : ℝ) ∧ ∃ (n : ℕ), |x - n * α| < ε) := by auto [h6, h8, h9],
end
--OUTPUT 6
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=irrational_orbit_dense (α : ℝ) : irrational α → ∀ e : ℝ, ∃ δ:ℝ, 0 < δ ∧ δ < e :=
begin
intros h,
assume e,
let x : ℝ := (1 + e/2) * (1/2),
have hx : 0 < x, from by auto [lt_mul_iff],
use x,
show (0 < x) ∧ x < e, from by auto [add_one_le_iff, le_add_iff, sub_neg_iff, le_of_lt, lt_mul_iff] using [lt_add_iff, add_lt_add_right, one_lt_two, mul_self_lt_mul_self_iff, hx],
end
--OUTPUT 7
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=rational_orbit (α : ℝ) : irrational α → ∀ y ⊆ set.range (λ n : ℤ, n * α % 1), dense y :=
begin
assume h1,
assume y ⊆ set.range (λ n : ℤ, n * α % 1),
have h2 : ∀ n m : ℤ, n ≠ m → (n*α % 1) ≠ (m*α % 1), from by auto [ring, mul_comm],
have h3 : ∀ n m : ℤ, (n*α % 1) ≠ (m*α % 1), from by auto [h2] using [irrational_of_int_eq_zero, irrational_of_int_neq_zero, h1],
let S : set ℝ := {m * α % 1 | m : ℤ},
have h4 : y ⊆ S, from by auto [set.subset_def],
have h5 : S ⊆ set.range (λ n : ℤ, n * α % 1), from by auto [set.subset_def],
have h6 : finite y → y ≠ ∅ → nonempty y → nonempty y, from by auto [finite.ne_empty_iff_exists_mem, set.not_empty_iff_exists_mem, finite_subset, finite.coe_to_finset],
have h8 : y ⊆ y, from by auto [set.subset.refl],
have h9 : y.nonempty, from by auto [h4, h6, h5, set.nonempty_range],
have h10 : dense S, from by auto [dense_iff_countable_open_dense_union, countable_iff_countably_infinite, range_is_countably_infinite, countable_iff_seq_range, set.seq_range_iff_forall_range, set.dense_Icc_iff_Ico_union_Ioc],
have h11 : dense y, from by auto [h10, h4, set.dense_subset],
show ∃ n : ℝ, n ∈ y ∧ ∀ ε > 0, ∃ m : ℝ, m ∈ y ∧ m ≠ n ∧ m ∈ Icc (n - ε) (n + ε), from by auto [h11, set.dense_iff_exists_between]
end
--OUTPUT 8
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (h : ¬ is_rational α) : ∀ y ∈ set.Icc 0 1, ∃ x ∈ set.Icc 0 1, |y - x| ≤ 1 :=
begin
assume (y : ℝ) (hy : y ∈ set.Icc 0 1),
set S : set ℝ := {x : ℝ | ∃ i : ℤ, x = (x - floor x) ∧ x ∈ set.Icc 0 1},
have h_empty : ∀ x : ℝ, x ∉ S, from by auto [not_lt_of_ge, lt_irrefl, (show 0 ≤ x - floor x, by linarith)],
have h_not_empty : S ≠ ∅, from by auto [exists.intro (x - floor x) (and.intro (eq.refl x) (and.intro (by auto [floor_nonneg]) (by auto [not_lt_of_ge])))],
have h_infinite : S.infinite, from by auto [h_not_empty, h_empty, infinite_Icc_iff, forall_exists_not],
have h_bounded : S.bounded, from by auto [h_not_empty, bounded_Icc_iff, set.Icc_subset_Icc_left],
have h_closed : S.closed, from by auto [set.Icc_closed],
have h_Icc_0_1 : ∀ x : ℝ, x ∈ S ↔ x ∈ set.Icc 0 1, from by auto [set.Icc_subset_Icc_left],
have h_dense : ∀ y : ℝ, y ∈ set.Icc 0 1 → ∃ x : ℝ, x ∈ S ∧ |y - x| ≤ 1, from by auto [dense_iff_inter_open, h_Icc_0_1, h_infinite, h_bounded, h_closed, h_not_empty],
cases h_dense y hy with x hx,
cases hx with hx_mem hx_pf,
use x,
from and.intro hx_mem hx_pf,
end
--OUTPUT 9
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit {α : Type*} [s : dense_linear_order α] (a : α) : a ∉ (𝕀) → ∀ x, ∃ n ∈ ℤ, x - (1 : α) < (n : α) * a ∧ (n + 1) * a ≤ x :=
begin
assume h1 : a ∉ (𝕀),
assume x : α,
have h2 : ∀ n m : ℤ, n ≠ m → n * a ≠ m * a,
from by {intros n m h3, assume h4, have h5 := h3, rw h4 at h5, contradiction},
have h6 : ∀ n m : ℤ, n ≠ m → ∀ b : α, n * b ≠ m * b,
from by auto [h2],
have h7 : ∀ b : α, is_irrational b → ∀ n m : ℤ, n ≠ m → n * b ≠ m * b, from by auto [h6],
have h8 : n * a ≠ m * a, from by auto [h7],
have h9 : ∀ n m : ℤ, n ≠ m → n * a ∈ set.range (λ n : ℤ, n * a),
from by {intros n m h10, use [n], rw ←one_mul (n * a)},
have h10 : ∀ n m : ℤ, n ≠ m → n * a ∉ set.range (λ n : ℤ, n * a),
from by auto [h9],
have h11 : set.range (λ n : ℤ, n * a) ≠ univ,
from by auto [h10, h8, univ_def_eq],
have h12 : set.range (λ n : ℤ, n * a) ≠ univ,
from by auto [h10, h8, univ_def_eq],
have h13 : set.range (λ n : ℤ, n * a) ≠ set.univ,
from by auto [h10, h8],
have h14 : set.range (λ n : ℤ, n * a) ≠ set.univ,
from by auto [h10, h8],
have h15 : set.range (λ n : ℤ, n * a) ≠ set.univ,
from by auto [h10, h8],
have h16 : set.range (λ n : ℤ, n * a) ≠ set.univ,
from by auto [h10, h8],
have h17 : set.range (λ n : ℤ, n * a) ≠ set.univ,
from by auto [h10, h8],
(*
have h10 : ∅ ≠ set.range (λ n : ℤ, n * a),
from by auto [h3, classical.not_forall, classical.not_not_elim, classical.not_not_intro, set.exists_mem_of_ne_empty,
set.not_exists_mem_of_not_nonempty, set.not_exists_mem_of_not_nonempty],
*)
have h11 : ∀ x, ∃ n ∈ ℤ, x - 1 < n * a ∧ (n + 1) * a ≤ x,
from by auto [s.dense, set.range_iff, exists_nat],
show ∃ (n : ℤ), n ∈ ℤ ∧ x - 1 < n * a ∧ (n + 1) * a ≤ x,
from by auto [h11],
end
--OUTPUT 10
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_irration_orbit (α : ℝ) : ∀ (ε : ℝ), ε > 0 → ∃ i : ℤ, (∀ j : ℤ, |i*α - (j*α)%ℝ| < ε) :=
begin
assume (ε : ℝ),
assume (h1 : ε > 0),
have h2 : ∀ (x : ℕ), (x*ε)≠0, from by auto [nat.pos_of_ne_zero],
have h3 : ∀ (x y : ℤ), (x*ε)%ℝ ≠ (y*ε)%ℝ → x ≠ y, from by auto [@eq.subst int ℝ _ _ _ _ h2],
let S : ℝ := {x | ∃ i : ℤ, x = (i*α)%ℝ},
let SS := (λ (i : ℤ), (i*α)%ℝ),
have h4 : S = ⋃ i : ℤ, {i}, from by auto [SS, eq_univ_of_forall],
have h5 : ∀ i j : ℤ, i ≠ j → (i*α)%ℝ ≠ (j*α)%ℝ, from
begin
assume (i j : ℤ),
assume (h5 : i ≠ j),
assume (h6 : (i*α)%ℝ = (j*α)%ℝ),
have h8 : ∃ N : ℕ, ∃ i : ℤ, α = N⁻¹ * (i : ℝ), from by auto [rational_iff],
cases h8 with N h9,
cases h9 with i h10,
have h11 : ∃ j : ℤ, (i : ℝ) = N * (j : ℝ), from by auto [rat.denom_ne_zero, rat.exists_int_eq],
cases h11 with j h12,
have h13 : α = (j : ℝ), from by auto [h10, h12, eq_div_iff],
have h14 : α ∈ ℚ, from by auto [h13],
have h15 : α ∈ ℚ → false, from by auto [eq.subst h13],
have h16 : false, from by auto [h15, h14],
show false, from h16,
end,
have h6 : ∀ i j : ℤ, ((i*α)%ℝ ∈ S) ∧ ((j*α)%ℝ ∈ S) → i = j, from
begin
assume (i j : ℤ),
assume (h6 : ((i*α)%ℝ ∈ S) ∧ ((j*α)%ℝ ∈ S)),
have h7 : (i*α)%ℝ = (j*α)%ℝ → i = j, from h3 i j,
show i = j, from by auto [h7, h6, h5],
end,
have h7 : ∀ i j : ℤ, ((i*α)%ℝ ∈ S) → ((j*α)%ℝ ∈ S) → i = j, from
begin
assume (i j : ℤ),
assume (h7 : ((i*α)%ℝ ∈ S)),
assume (h8 : ((j*α)%ℝ ∈ S)),
have h9 : ((i*α)%ℝ ∈ S) ∧ ((j*α)%ℝ ∈ S), from by auto [h7, h8],
show i = j, from by auto [h6, h9],
end,
have h8 : ∀ i j : ℤ, ((i*α)%ℝ ∈ S) ↔ i = j, from
begin
assume (i j : ℤ),
split,
{
assume (h9 : ((i*α)%ℝ ∈ S)),
show i = j, from by auto [h7, h9],
},
{
assume (h10 : i = j),
show (i*α)%ℝ ∈ S, from by auto [h10],
},
end,
have h9 : S = ⋃ i : ℤ, {i}, from by auto [SS, eq_univ_of_forall],
have h10 : ∀ i : ℤ, (i*α)%ℝ ∈ S, from by auto [rat.denom_ne_zero],
have h11 : ∀ i : ℤ, ∃ x : ℝ, x ∈ {i} ∧ x ∈ S, from
begin
assume (i : ℤ),
have h12 : (i*α)%ℝ ∈ S, from by auto [h10],
have h13 : (i*α)%ℝ ∈ {i}, from by auto [h12],
show ∃ x : ℝ, x ∈ {i} ∧ x ∈ S, from by auto [h13, h12],
end,
have h12 : ⋃ i : ℤ, {i} = ⋃ x : S, {x}, from by auto [h9, h8, h11, set.ext_iff],
have h13 : ∃ (i : ℤ), ∀ (j : ℤ), (i*α)%ℝ ∈ {x | ∃ i : ℤ, x = (i*α)%ℝ}, from
begin
use 1,
assume j,
have h14 : (1*α)%ℝ = (j*α)%ℝ → 1 = j, from by auto [h3],
have h15 : (1*α)%ℝ ∈ S, from by auto [rat.denom_ne_zero],
have h16 : (1*α)%ℝ ∈ {x | ∃ i : ℤ, x = (i*α)%ℝ}, from by auto [h15],
show (1*α)%ℝ ∈ {x | ∃ i : ℤ, x = (i*α)%ℝ}, from by auto [h16],
end,
have h14 : ∃ (i : ℤ), ∀ (j : ℤ), (i*α)%ℝ ∈ S, from
begin
have h15 : ∃ i : ℤ, ∀ j : ℤ, (i*α)%ℝ ∈ {x | ∃ i : ℤ, x = (i*α)%ℝ}, from by auto [h13],
cases h15 with i h16,
have h17 : ∀ j : ℤ, (i*α)%ℝ ∈ S, from by auto [h16],
show ∃ (i : ℤ), ∀ j : ℤ, (i*α)%ℝ ∈ S, from by auto [h17],
end,
have h15 : ∃ (i : ℤ), (i*α)%ℝ ∈ S ∧ ∀ (j : ℤ), (j*α)%ℝ ∈ S → i = j, from
begin
have h16 : ∃ (i : ℤ), ∀ (j : ℤ), (i*α)%ℝ ∈ S, from by auto [h14],
cases h16 with i h17,
have h18 : ∀ (j : ℤ), (i*α)%ℝ ∈ S ∧ (j*α)%ℝ ∈ S → i = j, from
begin
assume j,
have h19 : (i*α)%ℝ ∈ S ∧ (j*α)%ℝ ∈ S → ((i*α)%ℝ = (j*α)%ℝ →
end --Needs more than 2000 tokens!
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
have h1 : (A ⊆ S) ∧ (B ⊆ S), from by auto [set.subset_of_mem_powerset, set.subset_of_mem_powerset],
have h2 : (A ∩ B) ⊆ A, from by auto [set.inter_subset_left],
have h3 : (A ∩ B) ⊆ S, from by auto [set.subset.trans],
show (A ∩ B) ∈ 𝒫 S, from by auto [set.mem_powerset],
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
calc (x + y)^2 = (x+y)*(x+y) : by auto [sq]
... = x*(x+y) + y*(x+y) : by auto [add_mul]
... = x*x + x*y + y*x + y*y : by auto [mul_comm, add_mul] using [ring]
... = x^2 + 2*x*y + y^2 : by auto [sq, mul_comm] using [ring]
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by auto using [use (a⁻¹ * b)],
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by auto using [use b * a⁻¹],
have h3 : ∀ a : G, ∃! x : G, a * x = a, from by auto [h1],
have h4 : ∀ a : G, ∃! y : G, y * a = a, from by auto [h2],
have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from by auto [exists_unique.unique, h3, classical.some_spec, exists_unique.exists, mul_one],
have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from by auto [exists_unique.unique, h4, classical.some_spec, exists_unique.exists, one_mul],
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by auto [h3, h4, exists_unique.unique, classical.some_spec, exists_unique.exists] using [use (1 : G)],
end
/--`theorem`
Squeeze Theorem for Real Numbers
Let $\sequence {x_n}$, $\sequence {y_n}$ and $\sequence {z_n}$ be sequences in $\R$.
Let $\sequence {y_n}$ and $\sequence {z_n}$ both be convergent to the following limit:
:$\ds \lim_{n \mathop \to \infty} y_n = l, \lim_{n \mathop \to \infty} z_n = l$
Suppose that:
:$\forall n \in \N: y_n \le x_n \le z_n$
Then:
:$x_n \to l$ as $n \to \infty$
that is:
:$\ds \lim_{n \mathop \to \infty} x_n = l$
`proof`
From Negative of Absolute Value:
:$\size {x - l} < \epsilon \iff l - \epsilon < x < l + \epsilon$
Let $\epsilon > 0$.
We need to prove that:
:$\exists N: \forall n > N: \size {x_n - l} < \epsilon$
As $\ds \lim_{n \mathop \to \infty} y_n = l$ we know that:
:$\exists N_1: \forall n > N_1: \size {y_n - l} < \epsilon$
As $\ds \lim_{n \mathop \to \infty} z_n = l$ we know that:
:$\exists N_2: \forall n > N_2: \size {z_n - l} < \epsilon$
Let $N = \max \set {N_1, N_2}$.
Then if $n > N$, it follows that $n > N_1$ and $n > N_2$.
So:
:$\forall n > N: l - \epsilon < y_n < l + \epsilon$
:$\forall n > N: l - \epsilon < z_n < l + \epsilon$
But:
:$\forall n \in \N: y_n \le x_n \le z_n$
So:
:$\forall n > N: l - \epsilon < y_n \le x_n \le z_n < l + \epsilon$
and so:
:$\forall n > N: l - \epsilon < x_n < l + \epsilon$
So:
:$\forall n > N: \size {x_n - l} < \epsilon$
Hence the result.
{{qed}}
-/
theorem squeeze_theorem_real_numbers (x y z : ℕ → ℝ) (l : ℝ) :
let seq_limit : (ℕ → ℝ) → ℝ → Prop := λ (u : ℕ → ℝ) (l : ℝ), ∀ ε > 0, ∃ N, ∀ n > N, |u n - l| < ε in
seq_limit y l → seq_limit z l → (∀ n : ℕ, (y n) ≤ (x n) ∧ (x n) ≤ (z n)) → seq_limit x l :=
begin
assume seq_limit (h2 : seq_limit y l) (h3 : seq_limit z l) (h4 : ∀ (n : ℕ), y n ≤ x n ∧ x n ≤ z n) (ε),
have h5 : ∀ x, |x - l| < ε ↔ (((l - ε) < x) ∧ (x < (l + ε))),
from by auto [abs_sub_lt_iff] using [linarith],
assume (h7 : ε > 0),
cases h2 ε h7 with N1 h8,
cases h3 ε h7 with N2 h9,
let N := max N1 N2,
use N,
have h10 : ∀ n > N, n > N1 ∧ n > N2 := by auto [lt_of_le_of_lt, le_max_left, le_max_right],
have h11 : ∀ n > N, (((l - ε) < (y n)) ∧ ((y n) ≤ (x n))) ∧ (((x n) ≤ (z n)) ∧ ((z n) < l+ε)),
from by auto [h8, h10, h5, h9],
have h15 : ∀ n > N, ((l - ε) < (x n)) ∧ ((x n) < (l+ε)),
from by auto [h11] using [linarith],
show ∀ (n : ℕ), n > N → |x n - l| < ε,
from by auto [h5, h15],
end
/--`theorem`
Density of irrational orbit
The fractional parts of the integer multiples of an irrational number form a dense subset of the unit interval
`proof`
Let $\alpha$ be an irrational number. Then for distinct $i, j \in \mathbb{Z}$, we must have $\{i \alpha\} \neq\{j \alpha\}$. If this were not true, then
$$
i \alpha-\lfloor i \alpha\rfloor=\{i \alpha\}=\{j \alpha\}=j \alpha-\lfloor j \alpha\rfloor,
$$
which yields the false statement $\alpha=\frac{\lfloor i \alpha\rfloor-\lfloor j \alpha\rfloor}{i-j} \in \mathbb{Q}$. Hence,
$$
S:=\{\{i \alpha\} \mid i \in \mathbb{Z}\}
$$
is an infinite subset of $\left[0,1\right]$.
By the Bolzano-Weierstrass theorem, $S$ has a limit point in $[0, 1]$. One can thus find pairs of elements of $S$ that are arbitrarily close. Since the difference of any two elements of $S$ is, modulo 1, again an element of $S$ (because $\{i \alpha\}-\{j \alpha\} \equiv \{(i-j) \alpha\} \pmod 1$), it follows that $0$ is a limit point of $S$.
To show that $S$ is dense in $[0, 1]$, consider $y \in[0,1]$, and $\epsilon>0$. Then by selecting $x \in S$ such that $\{x\}<\epsilon$ (which exists as $0$ is a limit point), and $N$ such that $N \cdot\{x\} \leq y<(N+1) \cdot\{x\}$, we get: $|y-\{N x\}|<\epsilon$.
QED
-/
theorem
FEW SHOT PROMPTS TO CODEX(END)-/
|
Several experts have questioned the validity of the death toll numbers; Anthony Penna, professor emeritus in environmental history at Northeastern University, warned that casualty estimates could only be a "<unk>", and Belgian disaster response expert Claude de Ville de <unk> noted that "round numbers are a sure sign that nobody knows." Edmond <unk>, UN Assistant Secretary-General for Peacekeeping Operations, said, "I do not think we will ever know what the death toll is from this earthquake", while the director of the Haitian Red Cross, Jean-Pierre Guiteau, noted that his organization had not had the time to count bodies, as their focus had been on the treatment of survivors.
|
# a function returning %j if %i is non-zero, and %k otherwise
# (select evaluates its first operand as the condition, like the C ternary operator)
func $foo (
# var %i xxx,
var %i i32,
var %j i32,
var %k i32) i32 {
return (
select i32 (
dread i32 %i,
dread i32 %j,
dread i32 %k))}
# EXEC: %irbuild Main.mpl
# EXEC: %irbuild Main.irb.mpl
# EXEC: %cmp Main.irb.mpl Main.irb.irb.mpl
|
%!TEX root = ../thesis.tex
%*******************************************************************************
%*********************************** First Chapter *****************************
%*******************************************************************************
\chapter*{Conclusions} %Title of the Conclusions chapter
\addcontentsline{toc}{chapter}{Conclusions}
\ifpdf
\graphicspath{{Conclusion/Figs/}{Conclusion/}}
\else
\graphicspath{{Conclusion/Figs/}{Conclusion/}}
\fi
The natural process of societal evolution has brought many changes in the political, social, economic and environmental contexts. These changes were mainly a result of technological progress, which spurred more intensive use of the environment and consequently led to its degradation. Considered one of the major causes of climate change, deforestation releases billions of tonnes of carbon dioxide and other greenhouse gases into the atmosphere, causes biodiversity loss in the tropical regions, and damages the planet's environmental systems.
The purpose of this thesis has been to examine the economic determinants of deforestation in Brazil and the effectiveness of the environmental policies taking place in the country, using innovative and interdisciplinary techniques. We first analysed the institutional environmental framework (IEF) in the economic delimitation of the Brazilian Amazon (the Legal Amazon), controlling for market expansion, which was characterised as the main driver of deforestation at the time of the study. The results from the first chapter suggest that the institutional environmental framework, conditional on policies and prices, curbed deforestation within municipalities. Since deforestation has a spatial dimension, we expanded the study to include spatial analysis. We observe that the institutional environmental framework, when established in a municipality, tends to reduce deforestation in neighbouring municipalities as well. An anecdotal counter-factual simulation indicated that the existence of the institutional environmental framework avoided forest clearing that would have occurred had the framework not been implemented.
Following the results from the first chapter, we focused our analysis on the deforestation trends observed in the Ecological Tension Zone of Maranhão, which provided us with a unique natural experiment in that the environmental policies to combat deforestation were spatially heterogeneous. To understand the deforestation trends in that region we used non-linear modelling, since it is recognised that most ecological and climatic data represent complex relationships, and thus non-linear models, such as Generalized Additive Models (GAMs), may be particularly suited to capturing confounding effects in trends; a sketch of such a specification is given below. Our findings suggest that deforestation is related to year and several climatic covariates, but they also revealed substantial differences in trends between seasons and regions. For the region under a surveillance system, most of the deforestation happened during the rainy season, while for the region not under the monitoring policy there was a well-defined deforestation trend in both seasons.
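For concreteness, a generic specification of this kind (the equation below is illustrative only; the covariates are those discussed in Chapter 2) links the expected vegetation response $y_t$ to smooth functions of the covariates:
\[
g\big(\mathrm{E}[y_t]\big) = \beta_0 + f_1(\mathrm{year}_t) + f_2(\mathrm{precipitation}_t) + \dots + f_K(x_{K,t}),
\]
where $g$ is a link function and each $f_k$ is estimated non-parametrically, typically by penalised splines. It is this flexibility of the $f_k$ that allows the seasonal and regional trends to take genuinely different shapes instead of being forced onto a common linear slope.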
Finally, in chapter 3 we combined the findings from the previous chapters. Deforestation rates have declined in Brazil over the past two decades, and it is believed that environmental policies conditioned on the institutional framework have played a crucial role. Moreover, the satellite monitoring program has enabled authorities to identify and react to deforestation in a much more timely manner than local field detection. Yet the deforestation trends in the two regions of the ecological tension zone (ecotone forest) in Maranhão, which were subject to different environmental policies, diverged. We hypothesised that cloud coverage, by delaying detection until skies are clear, has acted as an important impediment to the policy's success. Focusing on the ecological tension zone of Maranhão, which is separated into two parts by an artificial line, one covered by the environmental deforestation policy and the other not subject to it, we use satellite data within a survival analysis framework (sketched below) to estimate how the probability of transition from intact forest to disturbed forest, given risk factors and conditional on the time elapsed until the transition occurs, is affected by cloud coverage. Our findings suggest that the presence of clouds has increased deforestation in the region covered by the satellite detection program, and thus was likely an active barrier to legal compliance.
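As an illustration of this survival framework (the notation is schematic, not a verbatim reproduction of the specification in chapter 3), the transition from intact to disturbed forest can be described by a proportional hazards model
\[
h(t \mid x_i) = h_0(t)\,\exp\big(x_i^{\top}\beta + \gamma\, c_{it}\big),
\]
where $h_0(t)$ is the baseline hazard of disturbance after $t$ periods of intactness, $x_i$ collects the risk factors for forest unit $i$, and $c_{it}$ measures cloud coverage. A positive estimate of $\gamma$ in the monitored region, but not in the unmonitored one, is direct evidence that clouds blunt the deterrent effect of the detection program.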
Overall, our results have policy implications for environmental policy in Brazil. We have seen that the institutional environmental framework is important for the protection of the tropical forests when combined with environmental enforcement, although its implementation needs to be tightened. Most importantly, it is pertinent that the established efforts proceed in the Brazilian Amazon in spite of any political changes in Brazil, and strengthening of the institutional framework must be detached from any transient actions. We then observed the past trend of deforestation in two areas of great importance for the Brazilian biomes, the Amazon and the Cerrado, and the results indicate that the environmental policies in the Legal Amazon should be expanded to the ecotonic/transition forests along the Amazon Forest, because these forests show the first indication of anthropic intervention. We also believe that the deforestation monitoring system should be improved by the use of satellites that are not constrained by climatic events such as cloud cover. Finally, it is important to acknowledge that significant deforestation is happening in the areas of transition between the Amazon and the Cerrado, and that the satellite monitoring program must be implemented or expanded in further biomes such as the Cerrado, the second most degraded biome in the country.
\textbf{LIMITATIONS} Although the results presented in this thesis are in line with previous studies and our empirical evidence has been corroborated by robustness checks and model validations, our analysis still suffers from a number of weaknesses.
%Chapter 1
Our main results in the first chapter might suffer from omitted variable bias. We tried to include all variables that could potentially affect deforestation, but data limitations prevented us from capturing all possible determinants; to mitigate this, we looked for proxies that could minimise the problem. Another possible issue is the potential endogeneity of many of the explanatory variables, and hence their interpretation in terms of causality. Since it would be difficult to find plausible instruments for many, if not all, of our independent variables, we instead controlled for municipality fixed effects, allowing us to purge all time-invariant unobservables from the specifications, and we lagged all control variables by one period, so that, under the assumption that after controlling for fixed effects all confounding shocks are only contemporaneous in nature, we are left with solely exogenous variation. A schematic version of this specification is given below.
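In schematic form (our notation, used here only to make the identification argument concrete), the estimating equation is
\[
D_{m,t} = \alpha_m + X_{m,t-1}\,\beta + \varepsilon_{m,t},
\]
where $D_{m,t}$ is deforestation in municipality $m$ in period $t$, the fixed effect $\alpha_m$ absorbs all time-invariant municipality unobservables, and the one-period lag of the controls $X_{m,t-1}$ rules out contemporaneous feedback from deforestation to the regressors.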
%chapter 2
In the second and third chapters, we have a number of limitations that must be taken into account. First, the model implicitly assumes that the predicted range, or potential space, is fully occupied by forest, which in reality might not be true. Secondly, the spatial distribution of the vegetation indices may exhibit dynamic behaviour over time, so that a potential area may or may not be sparsely vegetated for a certain period (e.g., during sampling) due to the progressive succession of forest; a temporary absence could also be due to natural causes, such as an attack of pests or diseases, or inter-species competition. Thirdly, the study was based on coarse image resolution, which could neglect local changes in the sample area. Finally, our results may not generalise to other areas, such as dense tropical forest and open fields. The same issues from chapter two apply to chapter three, since we use the same dataset. In addition, for the third chapter we only considered covariates dated 2000 or earlier, which thus excluded roads, protected areas, indigenous land, markets, municipality centres, and mining/mineral resources created or discovered after 2000. Finally, we acknowledge that the models were derived from NDVI values and that one could alternatively have used EVI values, which in some instances might be better suited to ecotone forests.
\textbf{FUTURE RESEARCH} This thesis represents a starting point for a research agenda which can be extended in the future. First, departing from the analysis of deforestation in Brazil, it would be interesting to look at other tropical countries, investigating the role of environmental policies while taking into account the different settings of the institutional framework. Data on proxies of deforestation are available for many tropical countries, and different policy approaches have been undertaken in different countries, such as Bolivia, Colombia and Venezuela; for this reason, it would be interesting to examine the differences arising under different institutional apparatus. Secondly, the estimation of the deforestation trends relied on coarse-resolution imagery. A more in-depth analysis using fine resolution might be needed, given the increasing extent of selective logging, which cannot be captured at a coarse resolution. Finally, our analysis in chapter 3 has shown the relevance of climatic events as an impediment to the effectiveness of the satellite monitoring program. The findings could be corroborated by applying fine-resolution satellite data from sensors that are not affected by cloud cover. |
! (C) Copyright 2019 UCAR
!
! This software is licensed under the terms of the Apache Licence Version 2.0
! which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
!> Defines interfaces for accessing C++ LocationsQG objects from Fortran
!-------------------------------------------------------------------------------
interface
!-------------------------------------------------------------------------------
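!> Number of observation locations held by the LocationsQG object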
function qg_locs_nlocs_c(locs) bind(C,name="qg_locs_nlocs_f90")
use iso_c_binding, only: c_ptr, c_int
integer(c_int) :: qg_locs_nlocs_c
type(c_ptr), value :: locs
end function
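!> C pointer to the longitude/latitude values of the locations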
function qg_locs_lonlat_c(locs) bind(C,name="qg_locs_lonlat_f90")
use iso_c_binding, only: c_ptr
type(c_ptr) :: qg_locs_lonlat_c
type(c_ptr), value :: locs
end function
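!> C pointer to the altitude values of the locations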
function qg_locs_altitude_c(locs) bind(C,name="qg_locs_altitude_f90")
use iso_c_binding, only: c_ptr
type(c_ptr) :: qg_locs_altitude_c
type(c_ptr), value :: locs
end function
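!> C pointer to the time of the location at index idx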
function qg_locs_times_c(locs, idx) bind(C,name="qg_locs_times_f90")
use iso_c_binding, only: c_ptr, c_size_t
type(c_ptr) :: qg_locs_times_c
type(c_ptr), value :: locs
integer(c_size_t) idx
end function
!-------------------------------------------------------------------------------
end interface
!-------------------------------------------------------------------------------
|
lemma space_restrict_space: "space (restrict_space M \<Omega>) = \<Omega> \<inter> space M" |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx_bsl.tfxio.tf_example_record."""
import os
import unittest
from absl import flags
import apache_beam as beam
from apache_beam.testing import util as beam_testing_util
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.tfxio import dataset_options
from tfx_bsl.tfxio import telemetry_test_util
from tfx_bsl.tfxio import tf_example_record
from google.protobuf import text_format
from absl.testing import absltest
from absl.testing import parameterized
from tensorflow_metadata.proto.v0 import schema_pb2
FLAGS = flags.FLAGS
_SCHEMA = text_format.Parse("""
feature {
name: "int_feature"
type: INT
value_count {
min: 1
max: 1
}
}
feature {
name: "float_feature"
type: FLOAT
value_count {
min: 4
max: 4
}
}
feature {
name: "string_feature"
type: BYTES
value_count {
min: 0
max: 2
}
}
""", schema_pb2.Schema())
_TELEMETRY_DESCRIPTORS = ["Some", "Component"]
_IS_LEGACY_SCHEMA = (
"generate_legacy_feature_spec" in
schema_pb2.Schema.DESCRIPTOR.fields_by_name)
# Enforce a consistent behavior in inferring TensorRepresentations from the
# schema.
if _IS_LEGACY_SCHEMA:
_SCHEMA.generate_legacy_feature_spec = False
_EXAMPLES = [
"""
features {
feature { key: "int_feature" value { int64_list { value: [1] } }
}
feature {
key: "float_feature"
value { float_list { value: [1.0, 2.0, 3.0, 4.0] } }
}
feature { key: "string_feature" value { } }
feature {
key: "varlen_feature"
value { int64_list { value: [1, 2, 3] } }
}
feature {
key: "row_lengths"
value { int64_list { value: [2, 1] } }
}
}
""",
"""
features {
feature { key: "int_feature" value { int64_list { value: [2] } } }
feature { key: "float_feature"
value { float_list { value: [2.0, 3.0, 4.0, 5.0] } }
}
feature {
key: "string_feature"
value { bytes_list { value: ["foo", "bar"] } }
}
feature {
key: "varlen_feature"
value { int64_list { value: [4] } }
}
feature {
key: "row_lengths"
value { int64_list { value: [1] } }
}
}
""",
"""
features {
feature { key: "int_feature" value { int64_list { value: [3] } } }
feature {
key: "float_feature"
value { float_list { value: [4.0, 5.0, 6.0, 7.0] } }
}
feature {
key: "varlen_feature"
value { int64_list { value: [5, 6] } }
}
feature {
key: "row_lengths"
value { int64_list { value: [1, 1] } }
}
}
""",
]
_SERIALIZED_EXAMPLES = [
text_format.Parse(pbtxt, tf.train.Example()).SerializeToString()
for pbtxt in _EXAMPLES
]
def CreateExamplesAsTensors():
if tf.executing_eagerly():
sparse_tensor_factory = tf.SparseTensor
else:
sparse_tensor_factory = tf.compat.v1.SparseTensorValue
return [{
"int_feature":
sparse_tensor_factory(
values=[1], indices=[[0, 0]], dense_shape=[1, 1]),
"float_feature":
sparse_tensor_factory(
values=[1.0, 2.0, 3.0, 4.0],
indices=[[0, 0], [0, 1], [0, 2], [0, 3]],
dense_shape=[1, 4]),
"string_feature":
sparse_tensor_factory(
values=[], indices=np.empty((0, 2)), dense_shape=[1, 0])
}, {
"int_feature":
sparse_tensor_factory(
values=[2], indices=[[0, 0]], dense_shape=[1, 1]),
"float_feature":
sparse_tensor_factory(
values=[2.0, 3.0, 4.0, 5.0],
indices=[[0, 0], [0, 1], [0, 2], [0, 3]],
dense_shape=[1, 4]),
"string_feature":
sparse_tensor_factory(
values=[b"foo", b"bar"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, {
"int_feature":
sparse_tensor_factory(
values=[3], indices=[[0, 0]], dense_shape=[1, 1]),
"float_feature":
sparse_tensor_factory(
values=[4.0, 5.0, 6.0, 7.0],
indices=[[0, 0], [0, 1], [0, 2], [0, 3]],
dense_shape=[1, 4]),
"string_feature":
sparse_tensor_factory(
values=[], indices=np.empty((0, 2)), dense_shape=[1, 0])
}]
_EXAMPLES_AS_TENSORS = CreateExamplesAsTensors()
_EXPECTED_COLUMN_VALUES = {
"int_feature":
pa.array([[1], [2], [3]], type=pa.large_list(pa.int64())),
"float_feature":
pa.array([[1, 2, 3, 4], [2, 3, 4, 5], [4, 5, 6, 7]],
type=pa.large_list(pa.float32())),
"string_feature":
pa.array([None, ["foo", "bar"], None],
type=pa.large_list(pa.large_binary())),
}
def _WriteInputs(filename):
with tf.io.TFRecordWriter(filename, "GZIP") as w:
for s in _SERIALIZED_EXAMPLES:
w.write(s)
class TfExampleRecordTest(tf.test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._example_file = os.path.join(
FLAGS.test_tmpdir, "tfexamplerecordtest", "input.recordio.gz")
tf.io.gfile.makedirs(os.path.dirname(cls._example_file))
_WriteInputs(cls._example_file)
def _MakeTFXIO(self, schema, raw_record_column_name=None):
return tf_example_record.TFExampleRecord(
self._example_file, schema=schema,
raw_record_column_name=raw_record_column_name,
telemetry_descriptors=_TELEMETRY_DESCRIPTORS)
def _ValidateRecordBatch(
self, tfxio, record_batch, raw_record_column_name=None):
self.assertIsInstance(record_batch, pa.RecordBatch)
self.assertEqual(record_batch.num_rows, 3)
for i, field in enumerate(record_batch.schema):
if field.name == raw_record_column_name:
continue
self.assertTrue(record_batch.column(i).equals(
_EXPECTED_COLUMN_VALUES[field.name]),
"Column {} did not match ({} vs {})."
.format(field.name, record_batch.column(i),
_EXPECTED_COLUMN_VALUES[field.name]))
if raw_record_column_name is not None:
self.assertEqual(record_batch.schema.names[-1], raw_record_column_name)
self.assertTrue(record_batch.columns[-1].type.equals(
pa.large_list(pa.large_binary())))
self.assertEqual(record_batch.columns[-1].flatten().to_pylist(),
_SERIALIZED_EXAMPLES)
def _AssertSparseTensorEqual(self, lhs, rhs):
self.assertAllEqual(lhs.values, rhs.values)
self.assertAllEqual(lhs.indices, rhs.indices)
self.assertAllEqual(lhs.dense_shape, rhs.dense_shape)
def testImplicitTensorRepresentations(self):
tfxio = self._MakeTFXIO(_SCHEMA)
self.assertEqual(
{
"int_feature": text_format.Parse(
"""varlen_sparse_tensor { column_name: "int_feature" }""",
schema_pb2.TensorRepresentation()),
"float_feature": text_format.Parse(
"""varlen_sparse_tensor { column_name: "float_feature" }""",
schema_pb2.TensorRepresentation()),
"string_feature": text_format.Parse(
"""varlen_sparse_tensor { column_name: "string_feature" }""",
schema_pb2.TensorRepresentation()),
}, tfxio.TensorRepresentations())
def _AssertFn(record_batch_list):
self.assertLen(record_batch_list, 1)
record_batch = record_batch_list[0]
self._ValidateRecordBatch(tfxio, record_batch)
self.assertTrue(record_batch.schema.equals(tfxio.ArrowSchema()))
tensor_adapter = tfxio.TensorAdapter()
dict_of_tensors = tensor_adapter.ToBatchTensors(record_batch)
self.assertLen(dict_of_tensors, 3)
self.assertIn("int_feature", dict_of_tensors)
self.assertIn("float_feature", dict_of_tensors)
self.assertIn("string_feature", dict_of_tensors)
p = beam.Pipeline()
record_batch_pcoll = p | tfxio.BeamSource(batch_size=1000)
beam_testing_util.assert_that(record_batch_pcoll, _AssertFn)
pipeline_result = p.run()
pipeline_result.wait_until_finish()
telemetry_test_util.ValidateMetrics(
self, pipeline_result, _TELEMETRY_DESCRIPTORS,
"tf_example", "tfrecords_gzip")
def testExplicitTensorRepresentations(self):
schema = schema_pb2.Schema()
schema.CopyFrom(_SCHEMA)
tensor_representations = {
"my_feature":
text_format.Parse("""
dense_tensor {
column_name: "string_feature"
shape { dim { size: 2 } }
default_value { bytes_value: "zzz" }
}""", schema_pb2.TensorRepresentation())
}
schema.tensor_representation_group[""].CopyFrom(
schema_pb2.TensorRepresentationGroup(
tensor_representation=tensor_representations))
tfxio = self._MakeTFXIO(schema)
self.assertEqual(tensor_representations,
tfxio.TensorRepresentations())
def testProjection(self):
schema = schema_pb2.Schema()
schema.CopyFrom(_SCHEMA)
tensor_representations = {
"dense_string":
text_format.Parse(
"""dense_tensor {
column_name: "string_feature"
shape { dim { size: 2 } }
default_value { bytes_value: "zzz" }
}""", schema_pb2.TensorRepresentation()),
"varlen_int":
text_format.Parse(
"""varlen_sparse_tensor {
column_name: "int_feature"
}""", schema_pb2.TensorRepresentation()),
"varlen_float":
text_format.Parse(
"""varlen_sparse_tensor {
column_name: "float_feature"
}""", schema_pb2.TensorRepresentation()),
}
schema.tensor_representation_group[""].CopyFrom(
schema_pb2.TensorRepresentationGroup(
tensor_representation=tensor_representations))
tfxio = self._MakeTFXIO(schema)
self.assertEqual(tensor_representations, tfxio.TensorRepresentations())
projected_tfxio = tfxio.Project(
["dense_string", "varlen_int", "varlen_float"])
self.assertEqual(tensor_representations,
projected_tfxio.TensorRepresentations())
def _AssertFn(record_batch_list):
self.assertLen(record_batch_list, 1)
record_batch = record_batch_list[0]
self._ValidateRecordBatch(tfxio, record_batch)
expected_schema = projected_tfxio.ArrowSchema()
self.assertTrue(
record_batch.schema.equals(expected_schema),
"actual: {}; expected: {}".format(
record_batch.schema, expected_schema))
tensor_adapter = projected_tfxio.TensorAdapter()
dict_of_tensors = tensor_adapter.ToBatchTensors(record_batch)
self.assertLen(dict_of_tensors, 3)
self.assertIn("dense_string", dict_of_tensors)
self.assertIn("varlen_int", dict_of_tensors)
self.assertIn("varlen_float", dict_of_tensors)
with beam.Pipeline() as p:
      # Setting the batch_size to make sure only one batch is generated.
record_batch_pcoll = p | projected_tfxio.BeamSource(
batch_size=len(_EXAMPLES))
beam_testing_util.assert_that(record_batch_pcoll, _AssertFn)
def testAttachRawRecordColumn(self):
raw_example_column_name = "raw_records"
tfxio = self._MakeTFXIO(_SCHEMA, raw_example_column_name)
def _AssertFn(record_batch_list):
self.assertLen(record_batch_list, 1)
record_batch = record_batch_list[0]
self.assertTrue(record_batch.schema.equals(tfxio.ArrowSchema()))
self._ValidateRecordBatch(tfxio, record_batch, raw_example_column_name)
with beam.Pipeline() as p:
# Setting the batch_size to make sure only one batch is generated.
record_batch_pcoll = p | tfxio.BeamSource(batch_size=len(_EXAMPLES))
beam_testing_util.assert_that(record_batch_pcoll, _AssertFn)
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testRecordBatches(self):
tfxio = self._MakeTFXIO(_SCHEMA)
options = dataset_options.RecordBatchesOptions(
batch_size=len(_EXAMPLES), shuffle=False, num_epochs=1)
for record_batch in tfxio.RecordBatches(options):
self._ValidateRecordBatch(tfxio, record_batch)
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testRecordBatchesWithRawRecords(self):
raw_example_column_name = "raw_records"
tfxio = self._MakeTFXIO(_SCHEMA, raw_example_column_name)
options = dataset_options.RecordBatchesOptions(
batch_size=len(_EXAMPLES), shuffle=False, num_epochs=1)
for record_batch in tfxio.RecordBatches(options):
self._ValidateRecordBatch(tfxio, record_batch, raw_example_column_name)
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testRecordBatchesWithProject(self):
tfxio = self._MakeTFXIO(_SCHEMA)
feature_name = "string_feature"
projected_tfxio = tfxio.Project([feature_name])
options = dataset_options.RecordBatchesOptions(
batch_size=len(_EXAMPLES), shuffle=False, num_epochs=1)
for record_batch in projected_tfxio.RecordBatches(options):
self._ValidateRecordBatch(projected_tfxio, record_batch)
self.assertIn(feature_name, record_batch.schema.names)
self.assertLen(record_batch.schema.names, 1)
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testTensorFlowDataset(self):
tfxio = self._MakeTFXIO(_SCHEMA)
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1)
for i, parsed_examples_dict in enumerate(
tfxio.TensorFlowDataset(options=options)):
self.assertLen(parsed_examples_dict, 3)
for tensor_name, tensor in parsed_examples_dict.items():
self._AssertSparseTensorEqual(
tensor, _EXAMPLES_AS_TENSORS[i][tensor_name])
def testTensorFlowDatasetGraphMode(self):
tfxio = self._MakeTFXIO(_SCHEMA)
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1)
with tf.compat.v1.Graph().as_default():
ds = tfxio.TensorFlowDataset(options=options)
iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
next_elem = iterator.get_next()
records = []
with tf.compat.v1.Session() as sess:
while True:
try:
records.append(sess.run(next_elem))
except tf.errors.OutOfRangeError:
break
for i, parsed_examples_dict in enumerate(records):
self.assertLen(parsed_examples_dict, 3)
for tensor_name, tensor in parsed_examples_dict.items():
self._AssertSparseTensorEqual(
tensor, _EXAMPLES_AS_TENSORS[i][tensor_name])
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testTensorFlowDatasetWithTensorRepresentation(self):
schema = text_format.Parse("""
feature {
name: "int_feature"
type: INT
value_count {
min: 1
max: 1
}
}
feature {
name: "float_feature"
type: FLOAT
value_count {
min: 4
max: 4
}
}
feature {
name: "string_feature"
type: BYTES
value_count {
min: 0
max: 2
}
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "var_len_feature"
value {
varlen_sparse_tensor {
column_name: "string_feature"
}
}
}
}
}
""", schema_pb2.Schema())
tfxio = self._MakeTFXIO(schema)
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1)
for i, parsed_examples_dict in enumerate(
tfxio.TensorFlowDataset(options=options)):
self.assertLen(parsed_examples_dict, 1)
for tensor_name, tensor in parsed_examples_dict.items():
self.assertEqual(tensor_name, "var_len_feature")
self._AssertSparseTensorEqual(
tensor, _EXAMPLES_AS_TENSORS[i]["string_feature"])
@unittest.skipIf(tf.__version__ < "2", "Skip for TF2")
def testTensorFlowDatasetWithRaggedTensorRepresentation(self):
schema = text_format.Parse(
"""
feature {
name: "varlen_feature"
type: INT
}
feature {
name: "row_lengths"
type: INT
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "ragged"
value {
ragged_tensor {
feature_path { step: "varlen_feature" }
partition { row_length: "row_lengths" }
}
}
}
}
}
""", schema_pb2.Schema())
tfxio = self._MakeTFXIO(schema)
projected_tfxio = tfxio.Project(["ragged"])
expected_column_values = {
"varlen_feature":
pa.array([[1, 2, 3], [4], [5, 6]], type=pa.large_list(pa.int64())),
"row_lengths":
pa.array([[2, 1], [1], [1, 1]], type=pa.large_list(pa.int64())),
}
def _AssertFn(record_batch_list):
self.assertLen(record_batch_list, 1)
record_batch = record_batch_list[0]
self.assertIsInstance(record_batch, pa.RecordBatch)
self.assertEqual(record_batch.num_rows, 3)
for i, field in enumerate(record_batch.schema):
self.assertTrue(
record_batch.column(i).equals(expected_column_values[field.name]),
"Column {} did not match ({} vs {}).".format(
field.name, record_batch.column(i),
expected_column_values[field.name]))
expected_schema = projected_tfxio.ArrowSchema()
self.assertTrue(
record_batch.schema.equals(expected_schema),
"actual: {}; expected: {}".format(record_batch.schema,
expected_schema))
tensor_adapter = projected_tfxio.TensorAdapter()
dict_of_tensors = tensor_adapter.ToBatchTensors(record_batch)
self.assertLen(dict_of_tensors, 1)
self.assertIn("ragged", dict_of_tensors)
if tf.executing_eagerly():
ragged_factory = tf.RaggedTensor.from_row_splits
else:
ragged_factory = tf.compat.v1.ragged.RaggedTensorValue
expected_tensor = ragged_factory(
values=ragged_factory(
values=[1, 2, 3, 4, 5, 6], row_splits=[0, 2, 3, 4, 5, 6]),
row_splits=[0, 2, 3, 5])
self.assertAllEqual(dict_of_tensors["ragged"], expected_tensor)
with beam.Pipeline() as p:
      # Setting the batch_size to make sure only one batch is generated.
record_batch_pcoll = p | projected_tfxio.BeamSource(
batch_size=len(_EXAMPLES))
beam_testing_util.assert_that(record_batch_pcoll, _AssertFn)
if tf.executing_eagerly():
ragged_factory = tf.RaggedTensor.from_row_splits
else:
ragged_factory = tf.compat.v1.ragged.RaggedTensorValue
expected_tensors = [
ragged_factory(
values=ragged_factory(values=[1, 2, 3], row_splits=[0, 2, 3]),
row_splits=[0, 2]),
ragged_factory(
values=ragged_factory(values=[4], row_splits=[0, 1]),
row_splits=[0, 1]),
ragged_factory(
values=ragged_factory(values=[5, 6], row_splits=[0, 1, 2]),
row_splits=[0, 2]),
]
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1)
for i, parsed_examples_dict in enumerate(
projected_tfxio.TensorFlowDataset(options)):
self.assertLen(parsed_examples_dict, 1)
self.assertIn("ragged", parsed_examples_dict)
self.assertAllEqual(parsed_examples_dict["ragged"], expected_tensors[i])
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testTensorFlowDatasetWithLabelKey(self):
tfxio = self._MakeTFXIO(_SCHEMA)
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1, label_key="string_feature")
for i, (parsed_examples_dict, label_feature) in enumerate(
tfxio.TensorFlowDataset(options=options)):
self._AssertSparseTensorEqual(
label_feature, _EXAMPLES_AS_TENSORS[i]["string_feature"])
self.assertLen(parsed_examples_dict, 2)
for tensor_name, tensor in parsed_examples_dict.items():
self._AssertSparseTensorEqual(
tensor, _EXAMPLES_AS_TENSORS[i][tensor_name])
@unittest.skipIf(not tf.executing_eagerly(), "Skip in non-eager mode.")
def testProjectedTensorFlowDataset(self):
tfxio = self._MakeTFXIO(_SCHEMA)
feature_name = "string_feature"
projected_tfxio = tfxio.Project([feature_name])
options = dataset_options.TensorFlowDatasetOptions(
batch_size=1, shuffle=False, num_epochs=1)
for i, parsed_examples_dict in enumerate(
projected_tfxio.TensorFlowDataset(options=options)):
self.assertIn(feature_name, parsed_examples_dict)
self.assertLen(parsed_examples_dict, 1)
self._AssertSparseTensorEqual(parsed_examples_dict[feature_name],
_EXAMPLES_AS_TENSORS[i][feature_name])
@parameterized.named_parameters(*[
dict(
testcase_name="same_feature_name",
schema_pbtxt="""
feature {
name: "string_feature"
type: BYTES
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "string_feature"
value {
varlen_sparse_tensor {
column_name: "string_feature"
}
}
}
}
}
""",
expected_parsing_config={
"string_feature": tf.io.VarLenFeature(dtype=tf.string)
},
expected_rename_dict={"string_feature": "string_feature"}),
dict(
testcase_name="rename_one_feature",
schema_pbtxt="""
feature {
name: "string_feature"
type: BYTES
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "var_len_feature_1"
value {
varlen_sparse_tensor {
column_name: "string_feature"
}
}
}
}
}
""",
expected_parsing_config={
"string_feature": tf.io.VarLenFeature(dtype=tf.string)
},
expected_rename_dict={"string_feature": "var_len_feature_1"}),
dict(
testcase_name="sparse_feature",
schema_pbtxt="""
feature {
name: "idx"
type: INT
}
feature {
name: "val"
type: FLOAT
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "sparse_feature"
value {
sparse_tensor {
index_column_names: "idx"
value_column_name: "val"
dense_shape {
dim {
size: 1
}
}
}
}
}
}
}
""",
expected_parsing_config={
"_tfx_bsl_sparse_feature_sparse_feature":
tf.io.SparseFeature(
index_key=["idx"],
value_key="val",
size=[1],
dtype=tf.float32)
},
expected_rename_dict={
"_tfx_bsl_sparse_feature_sparse_feature": "sparse_feature"
}),
dict(
testcase_name="sparse_and_varlen_features_shared",
schema_pbtxt="""
feature {
name: "idx"
type: INT
}
feature {
name: "val"
type: FLOAT
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "sparse_feature"
value {
sparse_tensor {
index_column_names: "idx"
value_column_name: "val"
dense_shape {
dim {
size: 1
}
}
}
}
}
tensor_representation {
key: "varlen"
value {
varlen_sparse_tensor {
column_name: "val"
}
}
}
}
}
""",
expected_parsing_config={
"_tfx_bsl_sparse_feature_sparse_feature":
tf.io.SparseFeature(
index_key=["idx"],
value_key="val",
size=[1],
dtype=tf.float32),
"val":
tf.io.VarLenFeature(dtype=tf.float32)
},
expected_rename_dict={
"_tfx_bsl_sparse_feature_sparse_feature": "sparse_feature",
"val": "varlen"
}),
])
def testValidGetTfExampleParserConfig(self, schema_pbtxt,
expected_parsing_config,
expected_rename_dict):
schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema())
tfxio = self._MakeTFXIO(schema)
parser_config, rename_dict = tfxio._GetTfExampleParserConfig()
self.assertAllEqual(expected_parsing_config, parser_config)
self.assertAllEqual(expected_rename_dict, rename_dict)
@absltest.skipIf(tf.__version__ < "2",
"RaggedFeature not supported on TF1.")
def testValidGetTfExampleParserConfigWithRaggedFeature(self):
schema_pbtxt = """
feature {
name: "row_lengths"
type: INT
}
feature {
name: "val"
type: FLOAT
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "ragged_feature"
value {
ragged_tensor {
feature_path { step: "val" }
partition { row_length: "row_lengths" }
partition { uniform_row_length: 2 }
row_partition_dtype: INT32
}
}
}
}
}
"""
schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema())
tfxio = self._MakeTFXIO(schema)
parser_config, rename_dict = tfxio._GetTfExampleParserConfig()
expected_parsing_config = {
"_tfx_bsl_ragged_feature_ragged_feature":
tf.io.RaggedFeature(
value_key="val",
partitions=[
tf.io.RaggedFeature.RowLengths("row_lengths"),
tf.io.RaggedFeature.UniformRowLength(2),
],
dtype=tf.float32),
}
expected_rename_dict = {
"_tfx_bsl_ragged_feature_ragged_feature": "ragged_feature",
}
self.assertAllEqual(expected_parsing_config, parser_config)
self.assertAllEqual(expected_rename_dict, rename_dict)
@parameterized.named_parameters(*[
dict(
testcase_name="invalid_duplicate_feature",
schema_pbtxt="""
feature {
name: "string_feature"
type: BYTES
value_count {
min: 0
max: 2
}
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "string_feature"
value {
varlen_sparse_tensor {
column_name: "string_feature"
}
}
}
tensor_representation {
key: "string_feature_2"
value {
varlen_sparse_tensor {
column_name: "string_feature"
}
}
}
}
}
""",
error=ValueError,
error_string="Unable to create a valid parsing config.*"),
dict(
testcase_name="sparse_and_fixed_feature",
schema_pbtxt="""
feature {
name: "idx"
type: INT
value_count {
min: 1
max: 1
}
}
feature {
name: "val"
type: FLOAT
value_count {
min: 1
max: 1
}
}
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "sparse_feature"
value {
sparse_tensor {
index_column_names: "idx"
value_column_name: "val"
dense_shape {
dim {
size: 1
}
}
}
}
}
tensor_representation {
key: "fixed_feature"
value {
dense_tensor {
column_name: "val"
}
}
}
}
}
""",
error=ValueError,
error_string="Unable to create a valid parsing config.*"),
dict(
testcase_name="no_schema",
schema_pbtxt="",
error=ValueError,
error_string="Unable to create a parsing config because no schema.*"),
])
def testInvalidGetTfExampleParserConfig(self, schema_pbtxt, error,
error_string):
if not schema_pbtxt:
schema = None
else:
schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema())
tfxio = self._MakeTFXIO(schema)
with self.assertRaisesRegex(error, error_string):
tfxio._GetTfExampleParserConfig()
class TFExampleBeamRecordTest(absltest.TestCase):
def testE2E(self):
raw_record_column_name = "raw_record"
tfxio = tf_example_record.TFExampleBeamRecord(
physical_format="inmem",
telemetry_descriptors=["some", "component"],
schema=_SCHEMA,
raw_record_column_name=raw_record_column_name,
)
def _AssertFn(record_batches):
self.assertLen(record_batches, 1)
record_batch = record_batches[0]
self.assertTrue(record_batch.schema.equals(tfxio.ArrowSchema()))
tensor_adapter = tfxio.TensorAdapter()
dict_of_tensors = tensor_adapter.ToBatchTensors(record_batch)
self.assertLen(dict_of_tensors, 3)
self.assertIn("int_feature", dict_of_tensors)
self.assertIn("float_feature", dict_of_tensors)
self.assertIn("string_feature", dict_of_tensors)
with beam.Pipeline() as p:
record_batch_pcoll = (
p
| "CreateInMemRecords" >> beam.Create(_SERIALIZED_EXAMPLES)
| "BeamSource" >>
tfxio.BeamSource(batch_size=len(_SERIALIZED_EXAMPLES)))
beam_testing_util.assert_that(record_batch_pcoll, _AssertFn)
if __name__ == "__main__":
absltest.main()
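# --- Editorial usage sketch (not part of the original test module). Outside a
# test harness, the TFXIO pattern exercised above looks roughly like this;
# the path and telemetry descriptors are placeholders:
#   tfxio = tf_example_record.TFExampleRecord(
#       "/path/to/input.recordio.gz", schema=_SCHEMA,
#       telemetry_descriptors=["my", "component"])
#   with beam.Pipeline() as p:
#     _ = p | tfxio.BeamSource(batch_size=1000) | beam.Map(print)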
|
function msfun_realtime_pacer(block)
% Help for Writing Level-2 M-File S-Functions:
% web([docroot '/toolbox/simulink/sfg/f7-67622.html'])
% http://www.mathworks.com/access/helpdesk/help/toolbox/simulink/sfg/f7-67622.html
% Copyright 2009, The MathWorks, Inc.
% instance variables
mySimTimePerRealTime = 1;
myRealTimeBaseline = 0;
mySimulationTimeBaseline = 0;
myResetBaseline = true;
myTotalBurnedTime = 0;
myNumUpdates = 0;
setup(block);
%% ---------------------------------------------------
function setup(block)
% Register the number of ports.
block.NumInputPorts = 0;
block.NumOutputPorts = 0;
% Set up the states
block.NumContStates = 0;
block.NumDworks = 0;
% Register the parameters.
block.NumDialogPrms = 1; % scale factor
block.DialogPrmsTunable = {'Nontunable'};
% Block is fixed in minor time step, i.e., it is only executed on major
% time steps. With a fixed-step solver, the block runs at the fastest
% discrete rate.
block.SampleTimes = [0 1];
block.SetAccelRunOnTLC(false); % run block in interpreted mode even w/ Acceleration
% methods called during update diagram/compilation.
block.RegBlockMethod('CheckParameters', @CheckPrms);
% methods called at run-time
block.RegBlockMethod('Start', @Start);
block.RegBlockMethod('Update', @Update);
block.RegBlockMethod('SimStatusChange', @SimStatusChange);
block.RegBlockMethod('Terminate', @Terminate);
end
%%
function CheckPrms(block)
try
validateattributes(block.DialogPrm(1).Data, {'double'},{'real', 'scalar', '>', 0});
catch %#ok<CTCH>
throw(MSLException(block.BlockHandle, ...
'Simulink:Parameters:BlkParamUndefined', ...
'Enter a number greater than 0'));
end
end
%%
function Start(block)
mySimTimePerRealTime = block.DialogPrm(1).Data;
myTotalBurnedTime = 0;
myNumUpdates = 0;
myResetBaseline = true;
if strcmp(pause('query'),'off')
fprintf('%s: Enabling MATLAB PAUSE command\n', getfullname(block.BlockHandle));
pause('on');
end
end
%%
function Update(block)
if myResetBaseline
myRealTimeBaseline = tic;
mySimulationTimeBaseline = block.CurrentTime;
myResetBaseline = false;
else
if isinf(mySimTimePerRealTime)
return;
end
elapsedRealTime = toc(myRealTimeBaseline);
differenceInSeconds = ((block.CurrentTime - mySimulationTimeBaseline) / mySimTimePerRealTime) - elapsedRealTime;
if differenceInSeconds >= 0
pause(differenceInSeconds);
myTotalBurnedTime = myTotalBurnedTime + differenceInSeconds;
myNumUpdates = myNumUpdates + 1;
end
end
end
%%
function SimStatusChange(block, status)
    if status == 0
        % Simulation paused
fprintf('%s: Pausing real time execution of the model (simulation time = %g sec)\n', ...
getfullname(block.BlockHandle), block.CurrentTime);
elseif status == 1
% Simulation resumed
fprintf('%s: Continuing real time execution of the model\n', ...
getfullname(block.BlockHandle));
myResetBaseline = true;
end
end
%%
function Terminate(block)
if myNumUpdates > 0
fprintf('%s: Average idle real time per major time step = %g sec\n', ...
getfullname(block.BlockHandle), myTotalBurnedTime / myNumUpdates);
end
end
end
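% --- Editorial usage sketch (not part of the original S-function). The pacing
% rule in Update amounts to sleeping off the wall-clock deficit; a standalone
% illustration with made-up numbers:
%   simTimePerRealTime = 2;              % simulate 2x faster than wall clock
%   t0 = tic; simTimeBase = 0; currentSimTime = 1.0;
%   deficit = (currentSimTime - simTimeBase)/simTimePerRealTime - toc(t0);
%   if deficit > 0, pause(deficit); end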
|
Join us for the Chamber's monthly morning networking session. Free for chamber members and guests!
This session will be at the new fountain in Allegheny Commons and you’ll hear from Jayne Miller, Executive Director of the Pittsburgh Parks Conservancy.
We'll provide coffee and donuts. We'll have a structured networking opportunity, during which each attendee will be able to share information with the group.
Registration is free, but required. Register here. |
!-----------------------------------------------------------------------
! Fortran interface to CUDA FFT Library
! written by Viktor K. Decyk, UCLA
module gpufft2_h
implicit none
!
interface
subroutine gpufft2rrcuinit(nx,ny,ndim)
implicit none
integer :: nx, ny, ndim
end subroutine
end interface
!
interface
subroutine gpufft2cuinit(nx,ny,ndim)
implicit none
integer :: nx, ny, ndim
end subroutine
end interface
!
interface
subroutine gpufft2rrcudel()
implicit none
end subroutine
end interface
!
interface
subroutine gpufft2cudel()
implicit none
end subroutine
end interface
!
interface
subroutine gpufft2rrcu(gp_f,gp_g,isign,indx,indy,nxh1d,nyd)
implicit none
integer :: isign, indx, indy, nxh1d, nyd
integer, dimension(2) :: gp_f, gp_g
end subroutine
end interface
!
interface
subroutine gpufft2rrcun(gp_fn,gp_gn,isign,indx,indy,ndim,nxh1d,&
&nyd)
implicit none
integer :: isign, indx, indy, ndim, nxh1d, nyd
integer, dimension(2) :: gp_fn, gp_gn
end subroutine
end interface
!
end module
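! --- Editorial usage sketch (not part of the original interface file). The
! workflow below is inferred from the subroutine names only; gp_f and gp_g
! must be valid device handles obtained from the library's GPU allocation
! routines, which are not declared in this module.
!      program fft2_sketch
!      use gpufft2_h
!      integer :: indx = 5, indy = 6
!      call gpufft2rrcuinit(2**indx,2**indy,1) ! create real-to-complex plans
!      ! ... fill device array gp_f, then transform:
!      ! call gpufft2rrcu(gp_f,gp_g,-1,indx,indy,2**indx/2+1,2**indy)
!      call gpufft2rrcudel()                   ! destroy plans
!      end program fft2_sketch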
|
module Examples.CSS.Fractals
import Data.Vect
import Examples.CSS.Colors
import public Examples.CSS.Core
import Rhone.JS
import Text.CSS
--------------------------------------------------------------------------------
-- IDs
--------------------------------------------------------------------------------
public export
out : ElemRef HTMLDivElement
out = Id Div "fractals_out"
public export
btnRun : ElemRef HTMLButtonElement
btnRun = Id Button "fractals_run"
public export
txtIter : ElemRef HTMLInputElement
txtIter = Id Input "fractals_iterations"
public export
txtRedraw : ElemRef HTMLInputElement
txtRedraw = Id Input "fractals_redrawdelay"
--------------------------------------------------------------------------------
-- Classes
--------------------------------------------------------------------------------
export
fractalContent : String
fractalContent = "fractals_content"
export
lblIter : String
lblIter = "fractals_lbliter"
export
lblDelay : String
lblDelay = "fractals_lbldelay"
--------------------------------------------------------------------------------
-- Rules
--------------------------------------------------------------------------------
data Tag = LIter | IIter | LDel | IDel | BRun | Fract | Dot
AreaTag Tag where
showTag LIter = "LIter"
showTag IIter = "IIter"
showTag LDel = "LDel"
showTag IDel = "IDel"
showTag BRun = "BRun"
showTag Fract = "Fract"
showTag Dot = "."
export
css : List (Rule 1)
css =
[ Media "min-width: 300px"
[ class fractalContent !!
[ Display .= Area
(replicate 4 MinContent)
[MaxContent, MaxContent]
[ [LIter, IIter]
, [LDel, IDel ]
, [Dot, BRun ]
, [Fract, Fract]
]
, ColumnGap .= px 10
, RowGap .= px 10
, Padding .= VH (px 20) (px 10)
]
]
, Media "min-width: 800px"
[ class fractalContent !!
[ Display .= Area
(replicate 4 MinContent)
[MaxContent, MaxContent, fr 1]
[ [LIter, IIter, Fract]
, [LDel, IDel, Fract]
, [Dot, BRun, Fract]
, [Dot, Dot, Fract]
]
, ColumnGap .= px 10
, RowGap .= px 10
, Padding .= VH (px 20) (px 10)
]
]
, class lblIter !! [ GridArea .= LIter ]
, idRef txtIter !!
[ GridArea .= IIter
, TextAlign .= End
]
, class lblDelay !! [ GridArea .= LDel ]
, idRef txtRedraw !!
[ GridArea .= IDel
, TextAlign .= End
]
, idRef btnRun !! [ GridArea .= BRun ]
, idRef out !!
[ JustifySelf .= Center
, GridArea .= Fract
, BorderStyle .= Left Solid
, BorderWidth .= Left (px 2)
, BorderColor .= Left base80
, MaxWidth .= px 500
, Width .= px 500
]
]
|
module Control.Function
public export
interface Injective (f : a -> b) where
constructor MkInjective
injective : {x, y : a} -> f x = f y -> x = y
public export
inj : (0 f : a -> b) -> {auto 0 _ : Injective f} -> {0 x, y : a} -> (0 _ : f x = f y) -> x = y
inj _ eq = irrelevantEq (injective eq)
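-- Editorial example (not in the original module): the canonical instance for
-- the successor function on natural numbers; matching on the equation forces
-- the arguments to agree.
public export
Injective S where
  injective Refl = Refl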
|
library(knitr)
png("jennyTest.png")
plot(rnorm(100), main = R.version.string)
dev.off()
png("jennyTest2.png")
plot(cars, main = "just a test")
dev.off()
res <- imgur_upload("jennyTest.png")
res # link to original URL of the image
# http://i.imgur.com/XPnYnSl.png
#if (interactive())
# browseURL(res)
res <- imgur_upload("jennyTest2.png")
res # link to original URL of the image
# http://i.imgur.com/FA0rnqY.png
# browseURL(res)
# note: list.files() takes a regular expression; "^jennyTest" matches both PNGs
file.remove(list.files(pattern = "^jennyTest"))
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Subsets of finite sets
------------------------------------------------------------------------
module Data.Fin.Subset where
open import Algebra
import Algebra.Properties.BooleanAlgebra as BoolAlgProp
import Algebra.Properties.BooleanAlgebra.Expression as BAExpr
import Data.Bool.Properties as BoolProp
open import Data.Fin
open import Data.List as List using (List)
open import Data.Nat
open import Data.Product
open import Data.Vec using (Vec; _∷_; _[_]=_)
import Relation.Binary.Vec.Pointwise as Pointwise
open import Relation.Nullary
infix 4 _∈_ _∉_ _⊆_ _⊈_
------------------------------------------------------------------------
-- Definitions
-- Sides.
open import Data.Bool public
using () renaming (Bool to Side; true to inside; false to outside)
-- Partitions a finite set into two parts, the inside and the outside.
Subset : ℕ → Set
Subset = Vec Side
------------------------------------------------------------------------
-- Membership and subset predicates
_∈_ : ∀ {n} → Fin n → Subset n → Set
x ∈ p = p [ x ]= inside
_∉_ : ∀ {n} → Fin n → Subset n → Set
x ∉ p = ¬ (x ∈ p)
_⊆_ : ∀ {n} → Subset n → Subset n → Set
p₁ ⊆ p₂ = ∀ {x} → x ∈ p₁ → x ∈ p₂
_⊈_ : ∀ {n} → Subset n → Subset n → Set
p₁ ⊈ p₂ = ¬ (p₁ ⊆ p₂)
------------------------------------------------------------------------
-- Set operations
-- Pointwise lifting of the usual boolean algebra for booleans gives
-- us a boolean algebra for subsets.
--
-- The underlying equality of the returned boolean algebra is
-- propositional equality.
booleanAlgebra : ℕ → BooleanAlgebra _ _
booleanAlgebra n =
BoolAlgProp.replace-equality
(BAExpr.lift BoolProp.booleanAlgebra n)
Pointwise.Pointwise-≡
private
open module BA {n} = BooleanAlgebra (booleanAlgebra n) public
using
( ⊥ -- The empty subset.
; ⊤ -- The subset containing all elements.
)
renaming
( _∨_ to _∪_ -- Binary union.
; _∧_ to _∩_ -- Binary intersection.
; ¬_ to ∁ -- Complement.
)
-- A singleton subset, containing just the given element.
⁅_⁆ : ∀ {n} → Fin n → Subset n
⁅ zero ⁆ = inside ∷ ⊥
⁅ suc i ⁆ = outside ∷ ⁅ i ⁆
-- N-ary union.
⋃ : ∀ {n} → List (Subset n) → Subset n
⋃ = List.foldr _∪_ ⊥
-- N-ary intersection.
⋂ : ∀ {n} → List (Subset n) → Subset n
⋂ = List.foldr _∩_ ⊤
------------------------------------------------------------------------
-- Properties
Nonempty : ∀ {n} (p : Subset n) → Set
Nonempty p = ∃ λ f → f ∈ p
Empty : ∀ {n} (p : Subset n) → Set
Empty p = ¬ Nonempty p
-- Point-wise lifting of properties.
Lift : ∀ {n} → (Fin n → Set) → (Subset n → Set)
Lift P p = ∀ {x} → x ∈ p → P x
|
From iris.algebra Require Export excl local_updates.
From iris.algebra Require Import proofmode_classes.
From iris.base_logic Require Import base_logic.
Set Default Proof Using "Type".
Record auth (A : Type) := Auth { authoritative : excl' A; auth_own : A }.
Add Printing Constructor auth.
Arguments Auth {_} _ _.
Arguments authoritative {_} _.
Arguments auth_own {_} _.
Instance: Params (@Auth) 1.
Instance: Params (@authoritative) 1.
Instance: Params (@auth_own) 1.
Notation "◯ a" := (Auth None a) (at level 20).
Notation "● a" := (Auth (Excl' a) ε) (at level 20).
(* COFE *)
Section cofe.
Context {A : ofeT}.
Implicit Types a : excl' A.
Implicit Types b : A.
Implicit Types x y : auth A.
Instance auth_equiv : Equiv (auth A) := λ x y,
authoritative x ≡ authoritative y ∧ auth_own x ≡ auth_own y.
Instance auth_dist : Dist (auth A) := λ n x y,
authoritative x ≡{n}≡ authoritative y ∧ auth_own x ≡{n}≡ auth_own y.
Global Instance Auth_ne : NonExpansive2 (@Auth A).
Proof. by split. Qed.
Global Instance Auth_proper : Proper ((≡) ==> (≡) ==> (≡)) (@Auth A).
Proof. by split. Qed.
Global Instance authoritative_ne: NonExpansive (@authoritative A).
Proof. by destruct 1. Qed.
Global Instance authoritative_proper : Proper ((≡) ==> (≡)) (@authoritative A).
Proof. by destruct 1. Qed.
Global Instance own_ne : NonExpansive (@auth_own A).
Proof. by destruct 1. Qed.
Global Instance own_proper : Proper ((≡) ==> (≡)) (@auth_own A).
Proof. by destruct 1. Qed.
Definition auth_ofe_mixin : OfeMixin (auth A).
Proof. by apply (iso_ofe_mixin (λ x, (authoritative x, auth_own x))). Qed.
Canonical Structure authC := OfeT (auth A) auth_ofe_mixin.
Global Instance auth_cofe `{Cofe A} : Cofe authC.
Proof.
apply (iso_cofe (λ y : _ * _, Auth (y.1) (y.2))
(λ x, (authoritative x, auth_own x))); by repeat intro.
Qed.
Global Instance Auth_discrete a b :
Discrete a → Discrete b → Discrete (Auth a b).
Proof. by intros ?? [??] [??]; split; apply: discrete. Qed.
Global Instance auth_ofe_discrete : OfeDiscrete A → OfeDiscrete authC.
Proof. intros ? [??]; apply _. Qed.
Global Instance auth_leibniz : LeibnizEquiv A → LeibnizEquiv (auth A).
Proof. by intros ? [??] [??] [??]; f_equal/=; apply leibniz_equiv. Qed.
End cofe.
Arguments authC : clear implicits.
(* CMRA *)
Section cmra.
Context {A : ucmraT}.
Implicit Types a b : A.
Implicit Types x y : auth A.
Instance auth_valid : Valid (auth A) := λ x,
match authoritative x with
| Excl' a => (∀ n, auth_own x ≼{n} a) ∧ ✓ a
| None => ✓ auth_own x
| ExclBot' => False
end.
Global Arguments auth_valid !_ /.
Instance auth_validN : ValidN (auth A) := λ n x,
match authoritative x with
| Excl' a => auth_own x ≼{n} a ∧ ✓{n} a
| None => ✓{n} auth_own x
| ExclBot' => False
end.
Global Arguments auth_validN _ !_ /.
Instance auth_pcore : PCore (auth A) := λ x,
Some (Auth (core (authoritative x)) (core (auth_own x))).
Instance auth_op : Op (auth A) := λ x y,
Auth (authoritative x ⋅ authoritative y) (auth_own x ⋅ auth_own y).
Definition auth_valid_eq :
valid = λ x, match authoritative x with
| Excl' a => (∀ n, auth_own x ≼{n} a) ∧ ✓ a
| None => ✓ auth_own x
| ExclBot' => False
end := eq_refl _.
Definition auth_validN_eq :
validN = λ n x, match authoritative x with
| Excl' a => auth_own x ≼{n} a ∧ ✓{n} a
| None => ✓{n} auth_own x
| ExclBot' => False
end := eq_refl _.
Lemma auth_included (x y : auth A) :
x ≼ y ↔ authoritative x ≼ authoritative y ∧ auth_own x ≼ auth_own y.
Proof.
split; [intros [[z1 z2] Hz]; split; [exists z1|exists z2]; apply Hz|].
intros [[z1 Hz1] [z2 Hz2]]; exists (Auth z1 z2); split; auto.
Qed.
Lemma authoritative_validN n x : ✓{n} x → ✓{n} authoritative x.
Proof. by destruct x as [[[]|]]. Qed.
Lemma auth_own_validN n x : ✓{n} x → ✓{n} auth_own x.
Proof.
rewrite auth_validN_eq.
destruct x as [[[]|]]; naive_solver eauto using cmra_validN_includedN.
Qed.
Lemma auth_valid_discrete `{CmraDiscrete A} x :
✓ x ↔ match authoritative x with
| Excl' a => auth_own x ≼ a ∧ ✓ a
| None => ✓ auth_own x
| ExclBot' => False
end.
Proof.
rewrite auth_valid_eq. destruct x as [[[?|]|] ?]; simpl; try done.
setoid_rewrite <-cmra_discrete_included_iff; naive_solver eauto using 0.
Qed.
Lemma auth_validN_2 n a b : ✓{n} (● a ⋅ ◯ b) ↔ b ≼{n} a ∧ ✓{n} a.
Proof. by rewrite auth_validN_eq /= left_id. Qed.
Lemma auth_valid_discrete_2 `{CmraDiscrete A} a b : ✓ (● a ⋅ ◯ b) ↔ b ≼ a ∧ ✓ a.
Proof. by rewrite auth_valid_discrete /= left_id. Qed.
Lemma authoritative_valid x : ✓ x → ✓ authoritative x.
Proof. by destruct x as [[[]|]]. Qed.
Lemma auth_own_valid `{CmraDiscrete A} x : ✓ x → ✓ auth_own x.
Proof.
rewrite auth_valid_discrete.
destruct x as [[[]|]]; naive_solver eauto using cmra_valid_included.
Qed.
Lemma auth_cmra_mixin : CmraMixin (auth A).
Proof.
apply cmra_total_mixin.
- eauto.
- by intros n x y1 y2 [Hy Hy']; split; simpl; rewrite ?Hy ?Hy'.
- by intros n y1 y2 [Hy Hy']; split; simpl; rewrite ?Hy ?Hy'.
- intros n [x a] [y b] [Hx Ha]; simpl in *. rewrite !auth_validN_eq.
destruct Hx as [?? Hx|]; first destruct Hx; intros ?; ofe_subst; auto.
- intros [[[?|]|] ?]; rewrite /= ?auth_valid_eq
?auth_validN_eq /= ?cmra_included_includedN ?cmra_valid_validN;
naive_solver eauto using O.
- intros n [[[]|] ?]; rewrite !auth_validN_eq /=;
naive_solver eauto using cmra_includedN_S, cmra_validN_S.
- by split; simpl; rewrite assoc.
- by split; simpl; rewrite comm.
- by split; simpl; rewrite ?cmra_core_l.
- by split; simpl; rewrite ?cmra_core_idemp.
- intros ??; rewrite! auth_included; intros [??].
by split; simpl; apply: cmra_core_mono. (* FIXME: apply cmra_core_mono. fails *)
- assert (∀ n (a b1 b2 : A), b1 ⋅ b2 ≼{n} a → b1 ≼{n} a).
{ intros n a b1 b2 <-; apply cmra_includedN_l. }
intros n [[[a1|]|] b1] [[[a2|]|] b2]; rewrite auth_validN_eq;
naive_solver eauto using cmra_validN_op_l, cmra_validN_includedN.
- intros n x y1 y2 ? [??]; simpl in *.
destruct (cmra_extend n (authoritative x) (authoritative y1)
(authoritative y2)) as (ea1&ea2&?&?&?); auto using authoritative_validN.
destruct (cmra_extend n (auth_own x) (auth_own y1) (auth_own y2))
as (b1&b2&?&?&?); auto using auth_own_validN.
by exists (Auth ea1 b1), (Auth ea2 b2).
Qed.
Canonical Structure authR := CmraT (auth A) auth_cmra_mixin.
Global Instance auth_cmra_discrete : CmraDiscrete A → CmraDiscrete authR.
Proof.
split; first apply _.
intros [[[?|]|] ?]; rewrite auth_valid_eq auth_validN_eq /=; auto.
- setoid_rewrite <-cmra_discrete_included_iff.
rewrite -cmra_discrete_valid_iff. tauto.
- by rewrite -cmra_discrete_valid_iff.
Qed.
Instance auth_empty : Unit (auth A) := Auth ε ε.
Lemma auth_ucmra_mixin : UcmraMixin (auth A).
Proof.
split; simpl.
- rewrite auth_valid_eq /=. apply ucmra_unit_valid.
- by intros x; constructor; rewrite /= left_id.
- do 2 constructor; simpl; apply (core_id_core _).
Qed.
Canonical Structure authUR := UcmraT (auth A) auth_ucmra_mixin.
Global Instance auth_frag_core_id a : CoreId a → CoreId (◯ a).
Proof. do 2 constructor; simpl; auto. by apply core_id_core. Qed.
(** Internalized properties *)
Lemma auth_equivI {M} (x y : auth A) :
x ≡ y ⊣⊢ (authoritative x ≡ authoritative y ∧ auth_own x ≡ auth_own y : uPred M).
Proof. by uPred.unseal. Qed.
Lemma auth_validI {M} (x : auth A) :
✓ x ⊣⊢ (match authoritative x with
| Excl' a => (∃ b, a ≡ auth_own x ⋅ b) ∧ ✓ a
| None => ✓ auth_own x
| ExclBot' => False
end : uPred M).
Proof. uPred.unseal. by destruct x as [[[]|]]. Qed.
Lemma auth_frag_op a b : ◯ (a ⋅ b) = ◯ a ⋅ ◯ b.
Proof. done. Qed.
Lemma auth_frag_mono a b : a ≼ b → ◯ a ≼ ◯ b.
Proof. intros [c ->]. rewrite auth_frag_op. apply cmra_included_l. Qed.
Global Instance auth_frag_sep_homomorphism :
MonoidHomomorphism op op (≡) (@Auth A None).
Proof. by split; [split; try apply _|]. Qed.
Lemma auth_both_op a b : Auth (Excl' a) b ≡ ● a ⋅ ◯ b.
Proof. by rewrite /op /auth_op /= left_id. Qed.
Lemma auth_auth_valid a : ✓ a → ✓ (● a).
Proof. intros; split; simpl; auto using ucmra_unit_leastN. Qed.
Lemma auth_update a b a' b' :
(a,b) ~l~> (a',b') → ● a ⋅ ◯ b ~~> ● a' ⋅ ◯ b'.
Proof.
intros Hup; apply cmra_total_update.
move=> n [[[?|]|] bf1] // [[bf2 Ha] ?]; do 2 red; simpl in *.
move: Ha; rewrite !left_id -assoc=> Ha.
destruct (Hup n (Some (bf1 ⋅ bf2))); auto.
split; last done. exists bf2. by rewrite -assoc.
Qed.
Lemma auth_update_alloc a a' b' : (a,ε) ~l~> (a',b') → ● a ~~> ● a' ⋅ ◯ b'.
Proof. intros. rewrite -(right_id _ _ (● a)). by apply auth_update. Qed.
Lemma auth_update_dealloc a b a' : (a,b) ~l~> (a',ε) → ● a ⋅ ◯ b ~~> ● a'.
Proof. intros. rewrite -(right_id _ _ (● a')). by apply auth_update. Qed.
Lemma auth_local_update (a b0 b1 a' b0' b1': A) :
(b0, b1) ~l~> (b0', b1') → b0' ≼ a' → ✓ a' →
(● a ⋅ ◯ b0, ● a ⋅ ◯ b1) ~l~> (● a' ⋅ ◯ b0', ● a' ⋅ ◯ b1').
Proof.
rewrite !local_update_unital=> Hup ? ? n /=.
move=> [[[ac|]|] bc] /auth_validN_2 [Le Val] [] /=;
inversion_clear 1 as [?? Ha|]; inversion_clear Ha. (* need setoid_discriminate! *)
rewrite !left_id=> ?.
destruct (Hup n bc) as [Hval' Heq]; eauto using cmra_validN_includedN.
rewrite -!auth_both_op auth_validN_eq /=.
split_and!; [by apply cmra_included_includedN|by apply cmra_valid_validN|done].
Qed.
End cmra.
Arguments authR : clear implicits.
Arguments authUR : clear implicits.
(* Proof mode class instances *)
Instance is_op_auth_frag {A : ucmraT} (a b1 b2 : A) :
IsOp a b1 b2 → IsOp' (◯ a) (◯ b1) (◯ b2).
Proof. done. Qed.
(* Functor *)
Definition auth_map {A B} (f : A → B) (x : auth A) : auth B :=
Auth (excl_map f <$> authoritative x) (f (auth_own x)).
Lemma auth_map_id {A} (x : auth A) : auth_map id x = x.
Proof. by destruct x as [[[]|]]. Qed.
Lemma auth_map_compose {A B C} (f : A → B) (g : B → C) (x : auth A) :
auth_map (g ∘ f) x = auth_map g (auth_map f x).
Proof. by destruct x as [[[]|]]. Qed.
Lemma auth_map_ext {A B : ofeT} (f g : A → B) x :
(∀ x, f x ≡ g x) → auth_map f x ≡ auth_map g x.
Proof.
constructor; simpl; auto.
apply option_fmap_equiv_ext=> a; by apply excl_map_ext.
Qed.
Instance auth_map_ne {A B : ofeT} n :
Proper ((dist n ==> dist n) ==> dist n ==> dist n) (@auth_map A B).
Proof.
intros f g Hf [??] [??] [??]; split; simpl in *; [|by apply Hf].
apply option_fmap_ne; [|done]=> x y ?; by apply excl_map_ne.
Qed.
Instance auth_map_cmra_morphism {A B : ucmraT} (f : A → B) :
CmraMorphism f → CmraMorphism (auth_map f).
Proof.
split; try apply _.
- intros n [[[a|]|] b]; rewrite !auth_validN_eq; try
naive_solver eauto using cmra_morphism_monotoneN, cmra_morphism_validN.
- intros [??]. apply Some_proper; rewrite /auth_map /=.
by f_equiv; rewrite /= cmra_morphism_core.
- intros [[?|]?] [[?|]?]; try apply Auth_proper=>//=; by rewrite cmra_morphism_op.
Qed.
Definition authC_map {A B} (f : A -n> B) : authC A -n> authC B :=
CofeMor (auth_map f).
Lemma authC_map_ne A B : NonExpansive (@authC_map A B).
Proof. intros n f f' Hf [[[a|]|] b]; repeat constructor; apply Hf. Qed.
Program Definition authRF (F : urFunctor) : rFunctor := {|
rFunctor_car A B := authR (urFunctor_car F A B);
rFunctor_map A1 A2 B1 B2 fg := authC_map (urFunctor_map F fg)
|}.
Next Obligation.
by intros F A1 A2 B1 B2 n f g Hfg; apply authC_map_ne, urFunctor_ne.
Qed.
Next Obligation.
intros F A B x. rewrite /= -{2}(auth_map_id x).
apply auth_map_ext=>y; apply urFunctor_id.
Qed.
Next Obligation.
intros F A1 A2 A3 B1 B2 B3 f g f' g' x. rewrite /= -auth_map_compose.
apply auth_map_ext=>y; apply urFunctor_compose.
Qed.
Instance authRF_contractive F :
urFunctorContractive F → rFunctorContractive (authRF F).
Proof.
by intros ? A1 A2 B1 B2 n f g Hfg; apply authC_map_ne, urFunctor_contractive.
Qed.
Program Definition authURF (F : urFunctor) : urFunctor := {|
urFunctor_car A B := authUR (urFunctor_car F A B);
urFunctor_map A1 A2 B1 B2 fg := authC_map (urFunctor_map F fg)
|}.
Next Obligation.
by intros F A1 A2 B1 B2 n f g Hfg; apply authC_map_ne, urFunctor_ne.
Qed.
Next Obligation.
intros F A B x. rewrite /= -{2}(auth_map_id x).
apply auth_map_ext=>y; apply urFunctor_id.
Qed.
Next Obligation.
intros F A1 A2 A3 B1 B2 B3 f g f' g' x. rewrite /= -auth_map_compose.
apply auth_map_ext=>y; apply urFunctor_compose.
Qed.
Instance authURF_contractive F :
urFunctorContractive F → urFunctorContractive (authURF F).
Proof.
by intros ? A1 A2 B1 B2 n f g Hfg; apply authC_map_ne, urFunctor_contractive.
Qed.
|
program dc
double complex a, b, c, d, e, f, g, h
double precision x
complex w, z
a = (1.0,1.0)
b = 1
c = 1.0e0
d = 1.0d0
e = a + b
f = COS(e)
x = ABS(f)
f = DCMPLX(x)
      g = d + e   ! initialize g; it was previously used uninitialized
      h = LOG(g) + SQRT(f) + SIN(e) + EXP(a)
print *, h
w = (1.0, 1.0)
z = w + 1
w = CMPLX(1.0, 1.0)
z = w + 1
end
|
import .util
namespace inverse
open function
open classical (renaming some → unexists) (renaming some_spec → unexists_prop)
open classical (choice prop_decidable)
local attribute [instance] prop_decidable
open util
-- one-sided inverses:
class invertible.one_sided {T T': Sort _} (f: T → T') :=
(g: T' → T)
(left_inv:
∀ x: T,
g (f x) = x)
@[reducible, inline] def inv {T T': Sort _} (f: T → T') [hf: invertible.one_sided f]:
T' → T := hf.g
@[simp] theorem inv.elim {T T': Sort _} (f: T → T') [hf: invertible.one_sided f]:
∀ x: T,
inv f (f x) = x := by apply hf.left_inv
instance inv.surjective {T T': Sort _} (f: T → T') [hf: invertible.one_sided f]:
surjective (inv f) := begin
intro x,
apply exists.intro (f x),
simp,
end
instance invertible.one_sided.injective {T T': Sort _} (f: T → T') [hf: invertible.one_sided f]:
injective f := begin
intros x y hxy,
have hinvxy: inv f (f x) = inv f (f y) := congr rfl hxy,
simp at hinvxy,
assumption,
end
-- noncomputable inverse:
noncomputable def inj_inv {T T': Sort _} [hT: nonempty T] (f: T → T') (y: T'): T :=
if h: ∃ x: T, f x = y then
unexists h
else
choice hT
noncomputable def inj_inv.is_inverse {T T': Sort _} [hT: nonempty T] (f: T → T') [hf: injective f]:
invertible.one_sided f := {
g := inj_inv f,
left_inv := begin
intros,
rw [inj_inv],
cases em (∃ (x' : T), f x' = f x),
case or.inl {
rw [dif_pos h],
apply hf,
exact unexists_prop h,
},
case or.inr {
rw [dif_neg h],
apply false.elim,
apply h,
apply exists.intro x,
refl,
},
end,
}
-- two-sided inverses:
class invertible {T T': Sort _} (f: T → T') extends invertible.one_sided f :=
(right_inv:
∀ y: T',
f (g y) = y)
instance invertible.surjective {T T': Sort _} (f: T → T') [hf: invertible f]:
surjective f := begin
intro y,
apply exists.intro ((inv f) y),
rw [hf.right_inv],
end
instance invertible.bijective {T T': Sort _} (f: T → T') [hf: invertible f]:
bijective f := {
-- injective:
left := by apply invertible.one_sided.injective,
-- surjective:
right := by apply invertible.surjective,
}
instance invertible.of_surjective {T T': Sort _} (f: T → T') [hfinv: invertible.one_sided f] [hfsur: surjective f]:
invertible f := begin
split,
intros,
have hex := hfsur y,
apply exists.elim hex,
intros x hx,
rw [←hx],
exact congr rfl (invertible.one_sided.left_inv f x),
end
instance inv.invertible {T T': Sort _} (f: T → T') [hf: invertible f]:
invertible (inv f) := {
g := f,
left_inv := hf.right_inv,
right_inv := hf.left_inv,
}
@[simp] theorem inv.elim_of_inv {T T': Sort _} (f: T → T') [hf: invertible f]:
inv (inv f) = f := by rw [inv.invertible]
theorem inv.uniq {T T': Sort _} (f: T → T') [hf: invertible f]:
∀ {g: T' → T},
(∀ x: T, g (f x) = x) →
g = inv f := begin
intros g hg,
funext,
have hfsurx := invertible.surjective f x,
apply exists.elim hfsurx,
intros y hy,
rw [←hy, hg y],
rw [inv, hf.left_inv],
end
instance inv.injective {T T': Sort _} (f: T → T') [hf: invertible f]:
injective (inv f) := begin
intros x y hxy,
have hfx := invertible.surjective f x,
have hfy := invertible.surjective f y,
apply exists.elim hfx,
intros a ha,
apply exists.elim hfy,
intros b hb,
rw [←ha, ←hb] at *,
simp at hxy,
rw [hxy],
end
-- id:
instance id.invertible {T: Sort _}:
invertible (@id T) := {
g := id,
left_inv := begin
intros,
refl,
end,
right_inv := begin
intros,
refl,
end,
}
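-- Editorial example (not in the original file): the one-sided inverse of `id`
-- cancels by `inv.elim`; instance resolution goes through `id.invertible`.
example {T: Sort _} (x: T): inv (@id T) (id x) = x := inv.elim id x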
end inverse
|
# include("../src/plotSol.jl")
using LinearAlgebra
using DifferentialEquations
using Revise
# clearconsole()
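# Editorial usage sketch (arguments are illustrative; the function is defined
# below): simulate a rod pendulum of length 1 m released from 30 degrees,
# sampled every 0.01 s for 5 s. Note the mass m is unused by the dynamics.
#   rSol, vSol, ωSol = PendulumMinReal(1.0, 1.0, pi/6, 0.01, 5.0)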
function PendulumMinReal(m,l,theta,tInt,tEnd)
function thdot(th)
thDot = zeros(size(th));
thDot[1] = th[2]
thDot[2] = -3*9.806/2/l*sin(th[1])
return thDot
end
function mainDynMinReal(x,p,t)
dx = thdot(x);
return dx
end
x0 = [theta; 0.0];
prob = ODEProblem(mainDynMinReal,x0,(0.0,tEnd))
sol = solve(prob,Tsit5(),saveat = tInt,reltol=1e-10,abstol=1e-10);
tSim = sol.t;
thSol = sol[1,:];
thDotSol = sol[2,:];
xSol = zeros(length(tSim))
zSol = zeros(length(tSim))
ySol = zeros(length(tSim))
vSol = zeros(length(tSim),3)
for i=1:length(tSim)
xSol[i] = l/2*sin(thSol[i])
zSol[i] = -l/2*cos(thSol[i])
vSol[i,:] = [l/2*cos(thSol[i])*thDotSol[i] 0.0 l/2*sin(thSol[i])*thDotSol[i]]
end
rSol = [xSol ySol zSol];
ωSol = [ySol -thDotSol ySol];
return rSol, vSol, ωSol
end
|
Formal statement is: lemma openI [intro?]: "(\<And>x. x\<in>S \<Longrightarrow> \<exists>e>0. ball x e \<subseteq> S) \<Longrightarrow> open S" Informal statement is: If for every $x \in S$, there exists an open ball around $x$ that is contained in $S$, then $S$ is open.
|
function ns3de_test ( )
%*****************************************************************************80
%
%% NS3DE_TEST tests the NS3DE library.
%
% Location:
%
% http://people.sc.fsu.edu/~jburkardt/m_src/navier_stokes_3d_exact/ns3de_test.m
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 13 January 2015
%
% Author:
%
% John Burkardt
%
timestamp ( )
fprintf ( 1, '\n' );
fprintf ( 1, 'NS3DE_TEST\n' );
fprintf ( 1, ' MATLAB version\n' );
fprintf ( 1, ' Test the NS3DE library.\n' );
ns3de_test01 ( );
ns3de_test02 ( );
%
% Terminate.
%
fprintf ( 1, '\n' );
fprintf ( 1, 'NS3DE_TEST\n' );
fprintf ( 1, ' Normal end of execution.\n' );
timestamp ( )
return
end
|
program Fibonacci
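    ! Prints the Fibonacci sequence using implicitly typed (default INTEGER)
    ! variables. The printed values fib(1)..fib(46) all fit in 32 bits; the
    ! final loop iteration computes fib(47), which would overflow a default
    ! 4-byte integer, though that value is never printed.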
Number1 = 0
Number2 = 1
buffer = 0
do x = 2, 47, 1
print *, Number2
Number1 = Number2
Number2 = Number2 + buffer
buffer = Number1
end do
end program Fibonacci
|
> module InfiniteHorizonSequentialDecisionProblems.Theory
> import Data.Vect
> import Finite.Predicates
> import Finite.Operations
> import Finite.Properties
> import Vect.Operations
> %default total
> %access public export
> %auto_implicits off
* Sequential decision processes
> State : Type
> Ctrl : (x : State) -> Type
> M : Type -> Type
> nexts : (x : State) -> (y : Ctrl x) -> M State
> fmap : {A, B : Type} -> (A -> B) -> M A -> M B
* Sequential decision problems
> Val : Type
> reward : (x : State) -> (y : Ctrl x) -> (x' : State) -> Val
> plus : Val -> Val -> Val
> LTE : Val -> Val -> Type
> meas : M Val -> Val
* Policies
> Policy : Type
> Policy = (x : State) -> Ctrl x
* The value of policies
> ||| A value function ...
> val : State -> Policy -> Val
> ||| ... that fulfils the specification
> valSpec : Type
> valSpec = (x : State) -> (p : Policy) ->
> val x p
> =
> meas (fmap (\ x' => reward x (p x) x' `plus` val x' p) (nexts x (p x)))
* Optimality of policies
> |||
> Opt : Policy -> Type
> Opt p = (x : State) -> (p' : Policy) -> val x p' `LTE` val x p
* Optimal policies
> |||
> cval : (x : State) -> Ctrl x -> (p : Policy) -> Val
> cval x y p = meas (fmap (\ x' => reward x y x' `plus` val x' p) (nexts x y))
> cvalargmax : (x : State) -> (p : Policy) -> Ctrl x
> ||| A policy ...
> opt : Policy
> ||| ... that fulfils Bellman's equation
> optSpec : Type
> optSpec = (x : State) -> opt x = cvalargmax x opt
* Optimality of |opt|
** Additional assumptions
> reflexiveLTE : (a : Val) -> a `LTE` a
> transitiveLTE : {a, b, c : Val} -> a `LTE` b -> b `LTE` c -> a `LTE` c
> monotonePlusLTE : {a, b, c, d : Val} -> a `LTE` b -> c `LTE` d -> (a `plus` c) `LTE` (b `plus` d)
> measMon : {A : Type} ->
> (f : A -> Val) -> (g : A -> Val) ->
> ((a : A) -> (f a) `LTE` (g a)) ->
> (ma : M A) -> meas (fmap f ma) `LTE` meas (fmap g ma)
> cvalmax : (x : State) -> (p : Policy) -> Val
> cvalargmaxSpec : (x : State) -> (p : Policy) -> cvalmax x p = cval x (cvalargmax x p) p
> cvalmaxSpec : (x : State) -> (y : Ctrl x) -> (p : Policy) -> (cval x y p) `LTE` (cvalmax x p)
** |opt| is optimal
> mutual
> ||| ... is an optimal policy
> optLemma1 : (vs : valSpec) -> (os : optSpec) ->
> (x : State) -> (y : Ctrl x) -> (p : Policy) ->
> cval x y p `LTE` cval x y opt
> optLemma1 vs os x y p = s9 where
> f' : State -> Val
> f' = \ x' => reward x y x' `plus` val x' p
> f : State -> Val
> f = \ x' => reward x y x' `plus` val x' opt
> s1 : (x' : State) -> val x' p `LTE` val x' opt
> s1 x' = assert_total (optLemma2 vs os) x' p
> s2 : (x' : State) -> (f' x') `LTE` (f x')
> s2 x' = monotonePlusLTE (reflexiveLTE (reward x y x')) (s1 x')
> s3 : meas (fmap f' (nexts x y)) `LTE` meas (fmap f (nexts x y))
> s3 = measMon f' f s2 (nexts x y)
> s9 : cval x y p `LTE` cval x y opt
> s9 = s3
> ||| ... is an optimal policy
> optLemma2 : (vs : valSpec) -> (os : optSpec) -> Opt opt
> optLemma2 vs os x p' = s9 where
> s1 : val x p' = cval x (p' x) p'
> s1 = vs x p'
> s2 : cval x (p' x) p' `LTE` cval x (p' x) opt
> s2 = assert_total optLemma1 vs os x (p' x) p'
> s3 : cval x (p' x) opt `LTE` cvalmax x opt
> s3 = cvalmaxSpec x (p' x) opt
> s4 : cvalmax x opt = cval x (cvalargmax x opt) opt
> s4 = cvalargmaxSpec x opt
> s5 : cval x (cvalargmax x opt) opt = cval x (opt x) opt
> s5 = replace {P = \ U => cval x U opt = cval x (opt x) opt} (os x) Refl
> s6 : cval x (opt x) opt = val x opt
> s6 = sym (vs x opt)
> s7 : cval x (p' x) p' `LTE` cvalmax x opt
> s7 = transitiveLTE s2 s3
> s8 : val x p' `LTE` cvalmax x opt
> s8 = replace {P = \ W => W `LTE` cvalmax x opt} (sym s1) s7
> s9 : val x p' `LTE` val x opt
> s9 = replace {P = \ W => val x p' `LTE` W} (trans (trans s4 s5) s6) s8
* Can one compute optimal policies?
If |State| is finite
> finiteState : Finite State
we can compute the number of values of type |State| and collect them in
a vector
> cardState : Nat
> cardState = card finiteState
> vectState : Vect cardState State
> vectState = toVect finiteState
For a fixed policy |p|, we can represent the value of |p| by a value table
> vt : Policy -> Vect cardState Val
> val x p = index k (vt p) where
> k : Fin cardState
> k = lookup x vectState (toVectComplete finiteState x)
In this case, the specification of |val|
< valSpec = (x : State) -> (p : Policy) ->
< val x p
< =
< meas (fmap (\ x' => reward x (p x) x' `plus` val x' p) (nexts x (p x)))
defines a linear, implicit problem for the components |vt p|. Let
> {-
> ---}
|
theory Mk_Record_Simp
imports Refine_Util Mpat_Antiquot
begin
(*
mk_record_simp: Converts a lemma of the form
"f s = x" to the form "r \<equiv> s \<Longrightarrow> f r = x"
  This is used to fold the x.simps lemmas of a record x with a definition
of the form "r \<equiv> \<lparr> ... \<rparr>".
Usage example:
record foo = ...
definition c :: foo where "c \<equiv> \<lparr> ... \<rparr>"
lemmas c_simps[simp] = foo.simps[mk_record_simp, OF c_def]
*)
lemma mk_record_simp_thm:
fixes f :: "'a \<Rightarrow> 'b"
assumes "f s = x"
assumes "r \<equiv> s"
shows "f r = x"
using assms by simp
ML \<open>
fun mk_record_simp context thm = let
val ctxt = Context.proof_of context
val cert = Thm.cterm_of ctxt
in
case Thm.concl_of thm of
@{mpat "Trueprop (?f _=_)"} =>
let
val cf = cert f
val r = infer_instantiate ctxt [(("f", 0), cf)] @{thm mk_record_simp_thm}
val r = r OF [thm]
in r end
| _ => raise THM("",~1,[thm])
end
\<close>
attribute_setup mk_record_simp =
\<open>Scan.succeed (Thm.rule_attribute [] (mk_record_simp))\<close>
"Make simplification rule for record definition"
end
|
The gradual development of blue to violet pigmentation as one progresses from species to species is an interesting phenomenon deserving further study. The climax is reached in L. indigo, which is blue throughout. L. <unk> and its variety <unk>, L. paradoxus, and L. <unk> may be considered as mileposts along the road to L. indigo.
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Results concerning function extensionality for propositional equality
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Axiom.Extensionality.Propositional where
open import Function
open import Level using (Level; _⊔_; suc; lift)
open import Relation.Binary.Core
open import Relation.Binary.PropositionalEquality.Core
------------------------------------------------------------------------
-- Function extensionality states that if two functions are
-- propositionally equal for every input, then the functions themselves
-- must be propositionally equal.
Extensionality : (a b : Level) → Set _
Extensionality a b =
{A : Set a} {B : A → Set b} {f g : (x : A) → B x} →
(∀ x → f x ≡ g x) → f ≡ g
------------------------------------------------------------------------
-- Properties
-- If extensionality holds for a given universe level, then it also
-- holds for lower ones.
lower-extensionality : ∀ {a₁ b₁} a₂ b₂ →
Extensionality (a₁ ⊔ a₂) (b₁ ⊔ b₂) →
Extensionality a₁ b₁
lower-extensionality a₂ b₂ ext f≡g = cong (λ h → Level.lower ∘ h ∘ lift) $
ext (cong (lift {ℓ = b₂}) ∘ f≡g ∘ Level.lower {ℓ = a₂})
-- Functional extensionality implies a form of extensionality for
-- Π-types.
∀-extensionality : ∀ {a b} → Extensionality a (suc b) →
{A : Set a} (B₁ B₂ : A → Set b) →
(∀ x → B₁ x ≡ B₂ x) →
(∀ x → B₁ x) ≡ (∀ x → B₂ x)
∀-extensionality ext B₁ B₂ B₁≡B₂ with ext B₁≡B₂
∀-extensionality ext B .B B₁≡B₂ | refl = refl
|
PROGRAM my_prog
i = 1
END PROGRAM my_prog
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE Arrows, FlexibleContexts #-}
module Examples.MultiTargetTracking where
import Control.Arrow (returnA)
import Control.Monad (forM)
import Control.Monad.Bayes.Class (MonadSample, MonadInfer)
import qualified Data.Map as M
import Data.Maybe (fromJust)
import Numeric.LinearAlgebra.Static hiding ((<>))
import Inference (zdsparticles)
import DelayedSampling (DelayedSample, DelayedInfer, Result (..))
import qualified SymbolicDistr as DS
import DSProg (DeepForce (..), Expr' (..), Expr, marginal, zdeepForce, deepForce', forgetE)
import Distributions
import Util.ZStream (ZStream)
import qualified Util.ZStream as ZS
import Metaprob ((~~), Gen, lift, (|->), obs)
import qualified Metaprob as MP
import Examples.Demo (Sampler, Delayed, Weighted)
data ObsType = Clutter | NewTrack | Track Int
deriving (Eq, Show, Ord)
filterM :: Monad m => (k -> v -> m Bool) -> M.Map k v -> m (M.Map k v)
filterM f = M.traverseMaybeWithKey $ \k v -> do
b <- f k v
return (if b then Just v else Nothing)
type TrackMap = M.Map Int (Expr (R 4))
tdiff, birthRate, deathRate, clutterLambda, newTrackLambda, pd, survivalProb :: Double
tdiff = 1
birthRate = 0.1
deathRate = 0.02
clutterLambda = 3
newTrackLambda = birthRate * tdiff
pd = 0.8
survivalProb = exp (- tdiff * deathRate)
clutterDistr :: DS.Distr (Expr (R 2))
clutterDistr = DS.mvNormal (Const (0 :: R 2)) (10 * sym eye)
newTrackD :: DelayedSample m => m (Expr (R 4))
newTrackD = DS.sample (DS.mvNormal (Const mu) cov)
where
mu = 0 :: R 4
cov = sym (((5 * eye :: Sq 2) ||| (0 :: Sq 2))
===
((0 :: Sq 2) ||| (0.1 * eye :: Sq 2)))
trackMotion :: DelayedSample m => Double -> Expr (R 4) -> m (Expr (R 4))
trackMotion tdiff track = DS.sample (DS.mvNormal (MVMul (Const motionMatrix) track) motionCov)
where
motionMatrix :: Sq 4
motionMatrix = eye + konst tdiff *
(((0 :: Sq 2) ||| (eye :: Sq 2))
===
((- 1 / 100 * eye :: Sq 2) ||| (-0.1 * eye :: Sq 2)))
motionCov :: Sym 4
motionCov = sym $ konst tdiff * (((0.01 * eye :: Sq 2) ||| (0 :: Sq 2))
===
((0 :: Sq 2) ||| (0.1 * eye :: Sq 2)))
trackMeasurement :: Expr (R 4) -> DS.Distr (Expr (R 2))
trackMeasurement posvel = DS.mvNormal (MVMul (Const posFromPosVel) posvel) (sym eye)
where
posFromPosVel :: L 2 4
posFromPosVel = (eye :: Sq 2) ||| (0 :: Sq 2)
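-- Given the data associations for one frame, return the updated track map and
-- the next fresh track ID, plus one measurement distribution per association.
-- Tracks with no association are either kept ("coasted") or dropped.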
updateWithAssocs :: DelayedSample m => Int -> TrackMap -> [ObsType] -> m ((TrackMap, Int), [DS.Distr (Expr (R 2))])
updateWithAssocs nextTrackID updatedOldTracks assocs = do
(obsDists, newTrackPVs) <- fmap mconcat . forM assocs $ \k -> case k of
Clutter -> return ([clutterDistr], [])
NewTrack -> do
newTrackPV <- newTrackD
return ([trackMeasurement newTrackPV], [newTrackPV])
Track i -> return ([trackMeasurement (updatedOldTracks M.! i)], [])
coasted <- filterM (\_ _ -> sample (bernoulli ((1 - pd) / (1 - survivalProb * pd)))) notObserved
return ((mconcat (observed : coasted : zipWith M.singleton [nextTrackID..] newTrackPVs),
nextTrackID + length newTrackPVs), obsDists)
where
(observed, notObserved) = M.partitionWithKey (\i _ -> Track i `elem` assocs) updatedOldTracks
sampleStep :: DelayedSample m => (TrackMap, Int) -> Gen m ((TrackMap, Int), [Expr (R 2)])
sampleStep (allOldTracks, nextTrackID) = do
updatedOldTracks <- lift (mapM (trackMotion tdiff) allOldTracks)
assocs <- "assocs" ~~ MP.prim (shuffleWithRepeats' countDistrs)
(newTrackInfo, obsDists) <- lift (updateWithAssocs nextTrackID updatedOldTracks assocs)
observations <- "observations" ~~ MP.isequence (map MP.dsPrim obsDists)
return (newTrackInfo, observations)
where
countDistrs = M.singleton Clutter (poisson clutterLambda)
<> M.singleton NewTrack (poisson newTrackLambda)
<> M.mapKeys Track (fmap (\_ -> bernoulli01 (survivalProb * pd)) allOldTracks)
observeStep :: DelayedInfer m => (TrackMap, Int) -> [R 2] -> m ((TrackMap, Int), [Expr (R 2)])
observeStep (allOldTracks, nextTrackID) observations = do
updatedOldTracks <- mapM (trackMotion tdiff) allOldTracks
-- assocs <- proposeAssocs observations updatedOldTracks
MP.observingWithProposal
("observations" |-> MP.trList observations) (sampleStep (allOldTracks, nextTrackID))
"assocs" (proposeAssocs 0 observations updatedOldTracks)
proposeAssocs :: DelayedInfer m => Int -> [R 2] -> TrackMap -> Gen m [ObsType]
proposeAssocs j (obs : observations) remainingTracks = do
newTrack <- lift $ newTrackD
let distrs = M.singleton Clutter (clutterLambda, clutterDistr)
<> M.singleton NewTrack (newTrackLambda, trackMeasurement newTrack)
<> M.mapKeys Track (fmap (\t -> (survivalProb * pd, trackMeasurement t)) remainingTracks)
likes <- lift $ mapM (\(intensity, d) -> (\ll -> intensity * exp ll) <$> DS.score d obs) distrs
let assocDistr = let probs = fmap (/ sum likes) likes in categoricalM probs
i <- ("assoc" ++ show j) ~~ MP.prim assocDistr
let remainingTracks' = case i of
Track k -> M.delete k remainingTracks
_ -> remainingTracks
(i :) <$> proposeAssocs (j + 1) observations remainingTracks'
proposeAssocs _ [] remainingTracks = return []
generateGroundTruth :: DelayedSample m => ZStream m () (TrackMap, [Expr (R 2)])
generateGroundTruth = ZS.fromStep stepf initState where
initState = (mempty, 0)
stepf state () = do
(newState@(tracks', _), observations) <- MP.sim (sampleStep state)
return (newState, (tracks', observations))
processObservations :: DelayedInfer m => ZStream m [R 2] (M.Map Int (Result (R 4)))
processObservations = ZS.fromStep stepf initState where
initState = (mempty, 0)
stepf state obs = do
(newState@(tracks', _), _) <- observeStep state obs
marginalTracks <- mapM (fmap fromJust . marginal) tracks'
return (newState, marginalTracks)
runMTTPF :: Int -> ZStream Sampler () ([(Int, R 4)], [[(Int, Result (R 4))]], [R 2])
runMTTPF numParticles = proc () -> do
(groundTruth, obs) <- zdeepForce generateGroundTruth -< ()
particles <- zdsparticles numParticles processObservations -< obs
returnA -< (M.assocs groundTruth, map M.assocs particles, obs) |
// Copyright (c) 2019 The Bigbang developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "netchn.h"
#include <boost/bind.hpp>
#include "schedule.h"
using namespace std;
using namespace xengine;
using boost::asio::ip::tcp;
#define PUSHTX_TIMEOUT (1000)
namespace bigbang
{
//////////////////////////////
// CNetChannelPeer
void CNetChannelPeer::CNetChannelPeerFork::AddKnownTx(const vector<uint256>& vTxHash)
{
ClearExpiredTx(vTxHash.size());
for (const uint256& txid : vTxHash)
{
setKnownTx.insert(CPeerKnownTx(txid));
}
}
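// Evict known-tx entries that have expired, or that would push the set past
// its size cap once nReserved new entries are inserted.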
void CNetChannelPeer::CNetChannelPeerFork::ClearExpiredTx(size_t nReserved)
{
CPeerKnownTxSetByTime& index = setKnownTx.get<1>();
int64 nExpiredAt = GetTime() - NETCHANNEL_KNOWNINV_EXPIREDTIME;
size_t nMaxSize = NETCHANNEL_KNOWNINV_MAXCOUNT - nReserved;
CPeerKnownTxSetByTime::iterator it = index.begin();
while (it != index.end() && ((*it).nTime <= nExpiredAt || index.size() > nMaxSize))
{
index.erase(it++);
}
}
bool CNetChannelPeer::IsSynchronized(const uint256& hashFork) const
{
map<uint256, CNetChannelPeerFork>::const_iterator it = mapSubscribedFork.find(hashFork);
if (it != mapSubscribedFork.end())
{
return (*it).second.fSynchronized;
}
return false;
}
bool CNetChannelPeer::SetSyncStatus(const uint256& hashFork, bool fSync, bool& fInverted)
{
map<uint256, CNetChannelPeerFork>::iterator it = mapSubscribedFork.find(hashFork);
if (it != mapSubscribedFork.end())
{
fInverted = ((*it).second.fSynchronized != fSync);
(*it).second.fSynchronized = fSync;
return true;
}
return false;
}
void CNetChannelPeer::AddKnownTx(const uint256& hashFork, const vector<uint256>& vTxHash)
{
map<uint256, CNetChannelPeerFork>::iterator it = mapSubscribedFork.find(hashFork);
if (it != mapSubscribedFork.end())
{
(*it).second.AddKnownTx(vTxHash);
}
}
void CNetChannelPeer::MakeTxInv(const uint256& hashFork, const vector<uint256>& vTxPool,
vector<network::CInv>& vInv, size_t nMaxCount)
{
map<uint256, CNetChannelPeerFork>::iterator it = mapSubscribedFork.find(hashFork);
if (it != mapSubscribedFork.end())
{
vector<uint256> vTxHash;
CNetChannelPeerFork& peerFork = (*it).second;
for (const uint256& txid : vTxPool)
{
if (vInv.size() >= nMaxCount)
{
break;
}
else if (!peerFork.IsKnownTx(txid))
{
vInv.push_back(network::CInv(network::CInv::MSG_TX, txid));
vTxHash.push_back(txid);
}
}
peerFork.AddKnownTx(vTxHash);
}
}
//////////////////////////////
// CNetChannel
CNetChannel::CNetChannel()
{
pPeerNet = nullptr;
pCoreProtocol = nullptr;
pBlockChain = nullptr;
pTxPool = nullptr;
pService = nullptr;
pDispatcher = nullptr;
}
CNetChannel::~CNetChannel()
{
}
bool CNetChannel::HandleInitialize()
{
if (!GetObject("peernet", pPeerNet))
{
Error("Failed to request peer net\n");
return false;
}
if (!GetObject("coreprotocol", pCoreProtocol))
{
Error("Failed to request coreprotocol\n");
return false;
}
if (!GetObject("blockchain", pBlockChain))
{
Error("Failed to request blockchain\n");
return false;
}
if (!GetObject("txpool", pTxPool))
{
Error("Failed to request txpool\n");
return false;
}
if (!GetObject("service", pService))
{
Error("Failed to request service\n");
return false;
}
if (!GetObject("dispatcher", pDispatcher))
{
Error("Failed to request dispatcher\n");
return false;
}
return true;
}
void CNetChannel::HandleDeinitialize()
{
pPeerNet = nullptr;
pCoreProtocol = nullptr;
pBlockChain = nullptr;
pTxPool = nullptr;
pService = nullptr;
pDispatcher = nullptr;
}
bool CNetChannel::HandleInvoke()
{
{
boost::unique_lock<boost::mutex> lock(mtxPushTx);
nTimerPushTx = 0;
}
return network::INetChannel::HandleInvoke();
}
void CNetChannel::HandleHalt()
{
{
boost::unique_lock<boost::mutex> lock(mtxPushTx);
if (nTimerPushTx != 0)
{
CancelTimer(nTimerPushTx);
nTimerPushTx = 0;
}
setPushTxFork.clear();
}
network::INetChannel::HandleHalt();
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
mapSched.clear();
}
}
int CNetChannel::GetPrimaryChainHeight()
{
uint256 hashBlock = uint64(0);
int nHeight = 0;
int64 nTime = 0;
if (pBlockChain->GetLastBlock(pCoreProtocol->GetGenesisBlockHash(), hashBlock, nHeight, nTime))
{
return nHeight;
}
return 0;
}
bool CNetChannel::IsForkSynchronized(const uint256& hashFork) const
{
boost::shared_lock<boost::shared_mutex> rlock(rwNetPeer);
map<uint256, set<uint64>>::const_iterator it = mapUnsync.find(hashFork);
return (it == mapUnsync.end() || (*it).second.empty());
}
void CNetChannel::BroadcastBlockInv(const uint256& hashFork, const uint256& hashBlock)
{
set<uint64> setKnownPeer;
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
CSchedule& sched = GetSchedule(hashFork);
sched.GetKnownPeer(network::CInv(network::CInv::MSG_BLOCK, hashBlock), setKnownPeer);
}
network::CEventPeerInv eventInv(0, hashFork);
eventInv.data.push_back(network::CInv(network::CInv::MSG_BLOCK, hashBlock));
{
boost::shared_lock<boost::shared_mutex> rlock(rwNetPeer);
for (map<uint64, CNetChannelPeer>::iterator it = mapPeer.begin(); it != mapPeer.end(); ++it)
{
uint64 nNonce = (*it).first;
if (!setKnownPeer.count(nNonce) && (*it).second.IsSubscribed(hashFork))
{
eventInv.nNonce = nNonce;
pPeerNet->DispatchEvent(&eventInv);
}
}
}
}
void CNetChannel::BroadcastTxInv(const uint256& hashFork)
{
boost::unique_lock<boost::mutex> lock(mtxPushTx);
if (nTimerPushTx == 0)
{
if (!PushTxInv(hashFork))
{
setPushTxFork.insert(hashFork);
}
nTimerPushTx = SetTimer(PUSHTX_TIMEOUT, boost::bind(&CNetChannel::PushTxTimerFunc, this, _1));
}
else
{
setPushTxFork.insert(hashFork);
}
}
void CNetChannel::SubscribeFork(const uint256& hashFork, const uint64& nNonce)
{
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
if (!mapSched.insert(make_pair(hashFork, CSchedule())).second)
{
return;
}
}
network::CEventPeerSubscribe eventSubscribe(0ULL, pCoreProtocol->GetGenesisBlockHash());
eventSubscribe.data.push_back(hashFork);
{
boost::shared_lock<boost::shared_mutex> rlock(rwNetPeer);
for (map<uint64, CNetChannelPeer>::iterator it = mapPeer.begin(); it != mapPeer.end(); ++it)
{
eventSubscribe.nNonce = (*it).first;
pPeerNet->DispatchEvent(&eventSubscribe);
DispatchGetBlocksEvent(it->first, hashFork);
}
}
}
void CNetChannel::UnsubscribeFork(const uint256& hashFork)
{
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
if (!mapSched.erase(hashFork))
{
return;
}
}
network::CEventPeerUnsubscribe eventUnsubscribe(0ULL, pCoreProtocol->GetGenesisBlockHash());
eventUnsubscribe.data.push_back(hashFork);
{
boost::shared_lock<boost::shared_mutex> rlock(rwNetPeer);
for (map<uint64, CNetChannelPeer>::iterator it = mapPeer.begin(); it != mapPeer.end(); ++it)
{
eventUnsubscribe.nNonce = (*it).first;
pPeerNet->DispatchEvent(&eventUnsubscribe);
}
}
}
bool CNetChannel::HandleEvent(network::CEventPeerActive& eventActive)
{
uint64 nNonce = eventActive.nNonce;
if ((eventActive.data.nService & network::NODE_NETWORK))
{
DispatchGetBlocksEvent(nNonce, pCoreProtocol->GetGenesisBlockHash());
network::CEventPeerSubscribe eventSubscribe(nNonce, pCoreProtocol->GetGenesisBlockHash());
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
for (map<uint256, CSchedule>::iterator it = mapSched.begin(); it != mapSched.end(); ++it)
{
if ((*it).first != pCoreProtocol->GetGenesisBlockHash())
{
eventSubscribe.data.push_back((*it).first);
}
}
}
if (!eventSubscribe.data.empty())
{
pPeerNet->DispatchEvent(&eventSubscribe);
}
}
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
mapPeer[nNonce] = CNetChannelPeer(eventActive.data.nService, pCoreProtocol->GetGenesisBlockHash());
mapUnsync[pCoreProtocol->GetGenesisBlockHash()].insert(nNonce);
}
NotifyPeerUpdate(nNonce, true, eventActive.data);
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerDeactive& eventDeactive)
{
uint64 nNonce = eventDeactive.nNonce;
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
for (map<uint256, CSchedule>::iterator it = mapSched.begin(); it != mapSched.end(); ++it)
{
CSchedule& sched = (*it).second;
set<uint64> setSchedPeer;
sched.RemovePeer(nNonce, setSchedPeer);
for (const uint64 nNonceSched : setSchedPeer)
{
SchedulePeerInv(nNonceSched, (*it).first, sched);
}
}
}
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
map<uint64, CNetChannelPeer>::iterator it = mapPeer.find(nNonce);
if (it != mapPeer.end())
{
for (auto& subFork : (*it).second.mapSubscribedFork)
{
mapUnsync[subFork.first].erase(nNonce);
}
mapPeer.erase(nNonce);
}
}
NotifyPeerUpdate(nNonce, false, eventDeactive.data);
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerSubscribe& eventSubscribe)
{
uint64 nNonce = eventSubscribe.nNonce;
uint256& hashFork = eventSubscribe.hashFork;
if (hashFork == pCoreProtocol->GetGenesisBlockHash())
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
map<uint64, CNetChannelPeer>::iterator it = mapPeer.find(nNonce);
if (it != mapPeer.end())
{
for (const uint256& hash : eventSubscribe.data)
{
(*it).second.Subscribe(hash);
mapUnsync[hash].insert(nNonce);
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
if (mapSched.count(hash))
{
DispatchGetBlocksEvent(nNonce, hash);
}
}
}
}
}
else
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "eventSubscribe");
}
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerUnsubscribe& eventUnsubscribe)
{
uint64 nNonce = eventUnsubscribe.nNonce;
uint256& hashFork = eventUnsubscribe.hashFork;
if (hashFork == pCoreProtocol->GetGenesisBlockHash())
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
map<uint64, CNetChannelPeer>::iterator it = mapPeer.find(nNonce);
if (it != mapPeer.end())
{
for (const uint256& hash : eventUnsubscribe.data)
{
(*it).second.Unsubscribe(hash);
mapUnsync[hash].erase(nNonce);
}
}
}
else
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "eventUnsubscribe");
}
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerInv& eventInv)
{
uint64 nNonce = eventInv.nNonce;
uint256& hashFork = eventInv.hashFork;
try
{
if (eventInv.data.size() > network::CInv::MAX_INV_COUNT)
{
throw runtime_error("Inv count overflow.");
}
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
CSchedule& sched = GetSchedule(hashFork);
vector<uint256> vTxHash;
for (const network::CInv& inv : eventInv.data)
{
if ((inv.nType == network::CInv::MSG_TX && !pTxPool->Exists(inv.nHash))
|| (inv.nType == network::CInv::MSG_BLOCK && !pBlockChain->Exists(inv.nHash)))
{
sched.AddNewInv(inv, nNonce);
if (inv.nType == network::CInv::MSG_TX)
{
vTxHash.push_back(inv.nHash);
}
}
}
if (!vTxHash.empty())
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
mapPeer[nNonce].AddKnownTx(hashFork, vTxHash);
}
SchedulePeerInv(nNonce, hashFork, sched);
}
}
catch (...)
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "eventInv");
}
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerGetData& eventGetData)
{
uint64 nNonce = eventGetData.nNonce;
uint256& hashFork = eventGetData.hashFork;
for (const network::CInv& inv : eventGetData.data)
{
if (inv.nType == network::CInv::MSG_TX)
{
network::CEventPeerTx eventTx(nNonce, hashFork);
if (pTxPool->Get(inv.nHash, eventTx.data))
{
pPeerNet->DispatchEvent(&eventTx);
}
else if (pBlockChain->GetTransaction(inv.nHash, eventTx.data))
{
pPeerNet->DispatchEvent(&eventTx);
}
else
{
// TODO: Penalize
}
}
else if (inv.nType == network::CInv::MSG_BLOCK)
{
network::CEventPeerBlock eventBlock(nNonce, hashFork);
if (pBlockChain->GetBlock(inv.nHash, eventBlock.data))
{
pPeerNet->DispatchEvent(&eventBlock);
}
else
{
// TODO: Penalize
}
}
}
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerGetBlocks& eventGetBlocks)
{
uint64 nNonce = eventGetBlocks.nNonce;
uint256& hashFork = eventGetBlocks.hashFork;
vector<uint256> vBlockHash;
if (!pBlockChain->GetBlockInv(hashFork, eventGetBlocks.data, vBlockHash, MAX_GETBLOCKS_COUNT))
{
CBlock block;
if (!pBlockChain->GetBlock(hashFork, block))
{
//DispatchMisbehaveEvent(nNonce,CEndpointManager::DDOS_ATTACK,"eventGetBlocks");
//return true;
}
}
network::CEventPeerInv eventInv(nNonce, hashFork);
for (const uint256& hash : vBlockHash)
{
eventInv.data.push_back(network::CInv(network::CInv::MSG_BLOCK, hash));
}
pPeerNet->DispatchEvent(&eventInv);
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerTx& eventTx)
{
uint64 nNonce = eventTx.nNonce;
uint256& hashFork = eventTx.hashFork;
CTransaction& tx = eventTx.data;
uint256 txid = tx.GetHash();
try
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
set<uint64> setSchedPeer, setMisbehavePeer;
CSchedule& sched = GetSchedule(hashFork);
if (!sched.ReceiveTx(nNonce, txid, tx, setSchedPeer))
{
throw runtime_error("Failed to receive tx");
}
uint256 hashForkAnchor;
int nHeightAnchor;
if (pBlockChain->GetBlockLocation(tx.hashAnchor, hashForkAnchor, nHeightAnchor)
&& hashForkAnchor == hashFork)
{
set<uint256> setMissingPrevTx;
if (!GetMissingPrevTx(tx, setMissingPrevTx))
{
AddNewTx(hashFork, txid, sched, setSchedPeer, setMisbehavePeer);
}
else
{
for (const uint256& prev : setMissingPrevTx)
{
sched.AddOrphanTxPrev(txid, prev);
network::CInv inv(network::CInv::MSG_TX, prev);
if (!sched.Exists(inv))
{
for (const uint64 nNonceSched : setSchedPeer)
{
sched.AddNewInv(inv, nNonceSched);
}
}
}
}
}
else
{
sched.InvalidateTx(txid, setMisbehavePeer);
setMisbehavePeer.clear();
}
PostAddNew(hashFork, sched, setSchedPeer, setMisbehavePeer);
}
catch (...)
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "eventTx");
}
return true;
}
bool CNetChannel::HandleEvent(network::CEventPeerBlock& eventBlock)
{
uint64 nNonce = eventBlock.nNonce;
uint256& hashFork = eventBlock.hashFork;
CBlock& block = eventBlock.data;
uint256 hash = block.GetHash();
try
{
boost::recursive_mutex::scoped_lock scoped_lock(mtxSched);
set<uint64> setSchedPeer, setMisbehavePeer;
CSchedule& sched = GetSchedule(hashFork);
if (!sched.ReceiveBlock(nNonce, hash, block, setSchedPeer))
{
throw runtime_error("Failed to receive block");
}
uint256 hashForkPrev;
int nHeightPrev;
if (pBlockChain->GetBlockLocation(block.hashPrev, hashForkPrev, nHeightPrev))
{
if (hashForkPrev == hashFork)
{
AddNewBlock(hashFork, hash, sched, setSchedPeer, setMisbehavePeer);
}
else
{
sched.InvalidateBlock(hash, setMisbehavePeer);
}
}
else
{
sched.AddOrphanBlockPrev(hash, block.hashPrev);
}
PostAddNew(hashFork, sched, setSchedPeer, setMisbehavePeer);
}
catch (...)
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "eventBlock");
}
return true;
}
CSchedule& CNetChannel::GetSchedule(const uint256& hashFork)
{
map<uint256, CSchedule>::iterator it = mapSched.find(hashFork);
if (it == mapSched.end())
{
throw runtime_error("Unknown fork for scheduling.");
}
return ((*it).second);
}
void CNetChannel::NotifyPeerUpdate(uint64 nNonce, bool fActive, const network::CAddress& addrPeer)
{
CNetworkPeerUpdate update;
update.nPeerNonce = nNonce;
update.fActive = fActive;
update.addrPeer = addrPeer;
pService->NotifyNetworkPeerUpdate(update);
}
void CNetChannel::DispatchGetBlocksEvent(uint64 nNonce, const uint256& hashFork)
{
network::CEventPeerGetBlocks eventGetBlocks(nNonce, hashFork);
if (pBlockChain->GetBlockLocator(hashFork, eventGetBlocks.data))
{
pPeerNet->DispatchEvent(&eventGetBlocks);
}
}
void CNetChannel::DispatchAwardEvent(uint64 nNonce, CEndpointManager::Bonus bonus)
{
CEventPeerNetReward eventReward(nNonce);
eventReward.data = bonus;
pPeerNet->DispatchEvent(&eventReward);
}
void CNetChannel::DispatchMisbehaveEvent(uint64 nNonce, CEndpointManager::CloseReason reason, const std::string& strCaller)
{
if (!strCaller.empty())
{
Log("DispatchMisbehaveEvent : %s\n", strCaller.c_str());
}
CEventPeerNetClose eventClose(nNonce);
eventClose.data = reason;
pPeerNet->DispatchEvent(&eventClose);
}
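// Drive a peer's download schedule: request pending block inventory first;
// if the next block's ancestors are unknown, send getblocks instead, and only
// when no block data is pending fall back to transaction inventory.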
void CNetChannel::SchedulePeerInv(uint64 nNonce, const uint256& hashFork, CSchedule& sched)
{
network::CEventPeerGetData eventGetData(nNonce, hashFork);
bool fMissingPrev = false;
bool fEmpty = true;
if (sched.ScheduleBlockInv(nNonce, eventGetData.data, MAX_PEER_SCHED_COUNT, fMissingPrev, fEmpty))
{
if (fMissingPrev)
{
DispatchGetBlocksEvent(nNonce, hashFork);
}
else if (eventGetData.data.empty())
{
if (!sched.ScheduleTxInv(nNonce, eventGetData.data, MAX_PEER_SCHED_COUNT))
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "SchedulePeerInv1");
}
}
SetPeerSyncStatus(nNonce, hashFork, fEmpty);
}
else
{
DispatchMisbehaveEvent(nNonce, CEndpointManager::DDOS_ATTACK, "SchedulePeerInv2");
}
if (!eventGetData.data.empty())
{
pPeerNet->DispatchEvent(&eventGetData);
}
}
bool CNetChannel::GetMissingPrevTx(CTransaction& tx, set<uint256>& setMissingPrevTx)
{
setMissingPrevTx.clear();
for (const CTxIn& txin : tx.vInput)
{
const uint256& prev = txin.prevout.hash;
if (!setMissingPrevTx.count(prev))
{
if (!pTxPool->Exists(prev) && !pBlockChain->ExistsTx(prev))
{
setMissingPrevTx.insert(prev);
}
}
}
return (!setMissingPrevTx.empty());
}
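// Commit a received block, then walk forward through any orphan blocks that
// were waiting on it: GetNextBlock appends successors to vBlockHash, so the
// loop naturally processes the whole orphan chain.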
void CNetChannel::AddNewBlock(const uint256& hashFork, const uint256& hash, CSchedule& sched,
set<uint64>& setSchedPeer, set<uint64>& setMisbehavePeer)
{
vector<uint256> vBlockHash;
vBlockHash.push_back(hash);
for (size_t i = 0; i < vBlockHash.size(); i++)
{
uint256 hashBlock = vBlockHash[i];
uint64 nNonceSender = 0;
CBlock* pBlock = sched.GetBlock(hashBlock, nNonceSender);
if (pBlock != nullptr)
{
Errno err = pDispatcher->AddNewBlock(*pBlock, nNonceSender);
if (err == OK)
{
for (const CTransaction& tx : pBlock->vtx)
{
uint256 txid = tx.GetHash();
sched.RemoveInv(network::CInv(network::CInv::MSG_TX, txid), setSchedPeer);
}
set<uint64> setKnownPeer;
sched.GetNextBlock(hashBlock, vBlockHash);
sched.RemoveInv(network::CInv(network::CInv::MSG_BLOCK, hashBlock), setKnownPeer);
DispatchAwardEvent(nNonceSender, CEndpointManager::VITAL_DATA);
setSchedPeer.insert(setKnownPeer.begin(), setKnownPeer.end());
}
else if (err == ERR_ALREADY_HAVE && pBlock->IsVacant())
{
set<uint64> setKnownPeer;
sched.GetNextBlock(hashBlock, vBlockHash);
sched.RemoveInv(network::CInv(network::CInv::MSG_BLOCK, hashBlock), setKnownPeer);
setSchedPeer.insert(setKnownPeer.begin(), setKnownPeer.end());
}
else
{
sched.InvalidateBlock(hashBlock, setMisbehavePeer);
}
}
}
}
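// Commit a received transaction, then process any orphan transactions that
// listed it as a missing input (GetNextTx appends dependents to vtx).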
void CNetChannel::AddNewTx(const uint256& hashFork, const uint256& txid, CSchedule& sched,
set<uint64>& setSchedPeer, set<uint64>& setMisbehavePeer)
{
set<uint256> setTx;
vector<uint256> vtx;
vtx.push_back(txid);
int nAddNewTx = 0;
for (size_t i = 0; i < vtx.size(); i++)
{
uint256 hashTx = vtx[i];
uint64 nNonceSender = 0;
CTransaction* pTx = sched.GetTransaction(hashTx, nNonceSender);
if (pTx != nullptr)
{
if (pBlockChain->ExistsTx(txid))
{
return;
}
Errno err = pDispatcher->AddNewTx(*pTx, nNonceSender);
if (err == OK)
{
sched.GetNextTx(hashTx, vtx, setTx);
sched.RemoveInv(network::CInv(network::CInv::MSG_TX, hashTx), setSchedPeer);
DispatchAwardEvent(nNonceSender, CEndpointManager::MAJOR_DATA);
nAddNewTx++;
}
else if (err != ERR_MISSING_PREV)
{
sched.InvalidateTx(hashTx, setMisbehavePeer);
}
}
}
if (nAddNewTx)
{
BroadcastTxInv(hashFork);
}
}
void CNetChannel::PostAddNew(const uint256& hashFork, CSchedule& sched,
set<uint64>& setSchedPeer, set<uint64>& setMisbehavePeer)
{
for (const uint64 nNonceSched : setSchedPeer)
{
if (!setMisbehavePeer.count(nNonceSched))
{
SchedulePeerInv(nNonceSched, hashFork, sched);
}
}
for (const uint64 nNonceMisbehave : setMisbehavePeer)
{
DispatchMisbehaveEvent(nNonceMisbehave, CEndpointManager::DDOS_ATTACK, "PostAddNew");
}
}
void CNetChannel::SetPeerSyncStatus(uint64 nNonce, const uint256& hashFork, bool fSync)
{
bool fInverted = false;
{
boost::unique_lock<boost::shared_mutex> wlock(rwNetPeer);
CNetChannelPeer& peer = mapPeer[nNonce];
if (!peer.SetSyncStatus(hashFork, fSync, fInverted))
{
return;
}
}
if (fInverted)
{
if (fSync)
{
mapUnsync[hashFork].erase(nNonce);
BroadcastTxInv(hashFork);
}
else
{
mapUnsync[hashFork].insert(nNonce);
}
}
}
void CNetChannel::PushTxTimerFunc(uint32 nTimerId)
{
boost::unique_lock<boost::mutex> lock(mtxPushTx);
if (nTimerPushTx == nTimerId)
{
if (!setPushTxFork.empty())
{
set<uint256>::iterator it = setPushTxFork.begin();
while (it != setPushTxFork.end())
{
if (PushTxInv(*it))
{
setPushTxFork.erase(it++);
}
else
{
++it;
}
}
nTimerPushTx = SetTimer(PUSHTX_TIMEOUT, boost::bind(&CNetChannel::PushTxTimerFunc, this, _1));
}
else
{
nTimerPushTx = 0;
}
}
}
bool CNetChannel::PushTxInv(const uint256& hashFork)
{
// if (!IsForkSynchronized(hashFork))
// {
// return false;
// }
bool fCompleted = true;
vector<uint256> vTxPool;
pTxPool->ListTx(hashFork, vTxPool);
if (!vTxPool.empty() && !mapPeer.empty())
{
boost::shared_lock<boost::shared_mutex> rlock(rwNetPeer);
for (map<uint64, CNetChannelPeer>::iterator it = mapPeer.begin(); it != mapPeer.end(); ++it)
{
CNetChannelPeer& peer = it->second;
if (peer.IsSubscribed(hashFork))
{
network::CEventPeerInv eventInv(it->first, hashFork);
peer.MakeTxInv(hashFork, vTxPool, eventInv.data, network::CInv::MAX_INV_COUNT);
if (!eventInv.data.empty())
{
pPeerNet->DispatchEvent(&eventInv);
if (fCompleted && eventInv.data.size() == network::CInv::MAX_INV_COUNT)
{
fCompleted = false;
}
}
}
}
}
return fCompleted;
}
} // namespace bigbang
|
lemma seq_compactI: assumes "\<And>f. \<forall>n. f n \<in> S \<Longrightarrow> \<exists>l\<in>S. \<exists>r::nat\<Rightarrow>nat. strict_mono r \<and> ((f \<circ> r) \<longlongrightarrow> l) sequentially" shows "seq_compact S" |
#' Recode MTUS data into human-readable labels
#'
#' Most IPUMS data uses numeric levels to indicate different values for variables (e.g. the number `8` in the `ELOC` column means an activity was performed at home). This function enables a user to recode numeric levels into human-readable labels. Please note that the user-inputted CSV file must have a specific format described below.
#'
#' @param input_data The by-activity data object that the user has already loaded. This argument may be entered either as a string in quotes or as a data object without quotes.
#' @param which_column String denoting the column that the user wishes to recode.
#' @param codes_csv Either a data object or a string with path to a csv file. The file/table must contain two columns: the first column header must match the name of the MTUS variable to be renamed (e.g. MAIN, SEC, ELOC), and the second column header must be named LABEL, and the column must contain the list of corresponding human-readable labels.
#'
#' @return Returns a version of input_data with user-designated labels replacing original IPUMS MTUS codes.
mtus_recode <- function(input_data, which_column, codes_csv) {
# Readers of the code, beware!
# As elsewhere in the package, this function does a lot of jumping through hoops
# to take a user-inputted value (specifically `which_column`) and make R treat
# it as an object name. Functions in the dplyr package don't appear to be very
# amenable to eval(call(…)) and as.name(…), which is why I went the route of
# renaming columns and restoring the original column names afterwards.
# Initialize a blank tibble.
# Fill in the tibble with the vector of ELOCs from the by-activity data.
# Rename that column to ELOC so we can join it with the table of labels
# Replace those with labels from the code table.
# Use the mutate function to replace the numeric codes with human-readable categories
# input_data is a data object; enter either with or without quotes
# which_column is the column to recode; must enter with quotes
# codes_csv is a path to CSV file where the first column is numeric codes
# (the column name MUST equal that of the variable to be replaced),
# and the second column is a verbal label (header MUST be LABEL);
# codes_csv must be entered as a path and/or a file name, and with quotes.
# this if-else statement allows user to enter the data object name either
# in quotes or without quotes
if (typeof(input_data) == "character"){
# if type is character
varname = input_data
eval(call("assign", "input_data", as.name(input_data)))
} else {
# if type is list, transform the input_data argument into a string
varname = deparse(substitute(input_data))
}
# load a csv file with equivalences
# check if codes_csv is a path to a CSV file or a data object
if (typeof(codes_csv) == "character"){
# if type is character, read from the CSV file
codes_list <- read.csv(codes_csv)
} else {
# if type is data object/list, simply assign that to codes_list
codes_list <- codes_csv
}
if (which_column %in% names(input_data)) {
# replace numeric codes with categorical labels for MAIN
temp <- NULL
temp <- as_tibble(input_data[which_column])
temp <- inner_join(temp, codes_list, by = which_column) %>%
rename(RECODED = all_of(colnames(temp)[1])) %>%
mutate(RECODED = LABEL)
# save column location; this is so we can restore the original name
# of the column after copying in the recoded values.
# We use a regular expression because otherwise R will consider
# any two variables with similar starting letters. eg SEC and SECTOR
# will be both picked out, but we want an exact match. ^ means beginning
# and $ means end.
regex_which_column = paste0("^", which_column, "$")
column_index = grep(regex_which_column, colnames(input_data))
print(c("column index", column_index))
# Rename the column of interest to RECODEE to make it easier to run the mutate
input_data <- input_data %>%
rename(RECODEE = all_of(which_column)) %>%
mutate(RECODEE = temp$RECODED)
# Reset the name of the column
colnames(input_data)[column_index] <- which_column
}
else {
print(paste("Column", which_column, "not found in", input_data,"."))
}
return(input_data)
}
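# A hypothetical usage sketch (the file name and data object are illustrative,
# not shipped with the package): recode the ELOC column of a by-activity table
# using a two-column CSV whose headers are ELOC and LABEL.
#   acts <- mtus_recode(acts, "ELOC", "eloc_labels.csv")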
|
classdef PTKAirwayForContext < PTKPlugin
% PTKAirwayForContext. Plugin for returning the main bronchi serving the lung regions for a given context
%
% This is a plugin for the Pulmonary Toolkit. Plugins can be run using
% the gui, or through the interfaces provided by the Pulmonary Toolkit.
% See PTKPlugin.m for more information on how to run plugins.
%
% Plugins should not be run directly from your code.
%
% PTKAirwayForContext returns a set of bronchi which correspond to the
% specified context
%
%
% Licence
% -------
% Part of the TD Pulmonary Toolkit. https://github.com/tomdoel/pulmonarytoolkit
% Author: Tom Doel, 2013. www.tomdoel.com
% Distributed under the GNU GPL v3 licence. Please see website for details.
%
properties
ButtonText = 'Bronchus for<br>context'
ToolTip = 'Plugin for returning the main bronchi serving the lung regions for a given context'
Category = 'Analysis'
Context = PTKContextSet.Any
AllowResultsToBeCached = true
AlwaysRunPlugin = false
PluginType = 'DoNothing'
HidePluginInDisplay = true
FlattenPreviewImage = false
PTKVersion = '2'
ButtonWidth = 6
ButtonHeight = 1
GeneratePreview = false
Visibility = 'Developer'
end
methods (Static)
function results = RunPlugin(dataset, context, reporting)
bronchi = [];
switch context
case {PTKContext.Lungs, PTKContext.LungROI, PTKContext.OriginalImage}
airways = dataset.GetResult('PTKAirwayCentreline');
bronchi = airways.AirwayCentrelineTree;
case {PTKContext.LeftLung}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.Left;
case {PTKContext.RightLung}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.Right;
case {PTKContext.LeftUpperLobe}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.LeftUpper;
case {PTKContext.LeftLowerLobe}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.LeftLower;
case {PTKContext.RightUpperLobe}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.RightUpper;
case {PTKContext.RightLowerLobe}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.RightLower;
case {PTKContext.RightMiddleLobe}
airways = dataset.GetResult('PTKAirwaysLabelledByLobe');
bronchi = airways.StartBranches.RightMid;
case {PTKContext.R_AP, PTKContext.R_P, PTKContext.R_AN, PTKContext.R_L, ...
PTKContext.R_M, PTKContext.R_S, PTKContext.R_MB, PTKContext.R_AB, PTKContext.R_LB, ...
PTKContext.R_PB, PTKContext.L_APP, PTKContext.L_APP2, PTKContext.L_AN, PTKContext.L_SL, ...
PTKContext.L_IL, PTKContext.L_S, PTKContext.L_AMB, PTKContext.L_LB, PTKContext.L_PB}
airways = dataset.GetResult('PTKSegmentsByNearestBronchus');
bronchi = PTKAirwayForContext.FindSegmentalBronchus(airways.AirwaysBySegment.Trachea, context);
end
for branch = bronchi
branch.GenerateBranchParameters;
end
results = [];
results.AirwayForContext = bronchi;
end
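% Stack-based search of the airway tree: pop branches until one matches the
% requested segment label, pushing children as we go; returns [] if no
% branch matches the context.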
function bronchus = FindSegmentalBronchus(airways, context)
segment_label = uint8(PTKPulmonarySegmentLabels.(char(context)));
airways_to_do = CoreStack(airways);
while ~airways_to_do.IsEmpty
next_airways = airways_to_do.Pop;
if next_airways.SegmentIndex == segment_label
bronchus = next_airways;
return;
end
airways_to_do.Push(next_airways.Children);
end
bronchus = [];
end
end
end |
[STATEMENT]
lemma pprod_tau_nu: "x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
have "x \<parallel> y = \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<parallel> y = \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y)
[PROOF STEP]
by (simp add: nu_def tau_def)
[PROOF STATE]
proof (state)
this:
x \<parallel> y = \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y)
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
x \<parallel> y = \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y)
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
have "... = (d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<nu> x \<parallel> \<nu> y) + \<tau> x \<parallel> \<tau> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y) = d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<nu> x \<parallel> \<nu> y + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
by (simp add: nu_par)
[PROOF STATE]
proof (state)
this:
\<nu> (x \<parallel> y) + \<tau> (x \<parallel> y) = d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<nu> x \<parallel> \<nu> y + \<tau> x \<parallel> \<tau> y
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<nu> (x \<parallel> y) + \<tau> (x \<parallel> y) = d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<nu> x \<parallel> \<nu> y + \<tau> x \<parallel> \<tau> y
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
using add_assoc add_commute calculation
[PROOF STATE]
proof (prove)
using this:
\<nu> (x \<parallel> y) + \<tau> (x \<parallel> y) = d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<nu> x \<parallel> \<nu> y + \<tau> x \<parallel> \<tau> y
?a + ?b + ?c = ?a + (?b + ?c)
?a + ?b = ?b + ?a
x \<parallel> y = \<nu> (x \<parallel> y) + \<tau> (x \<parallel> y)
goal (1 subgoal):
1. x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
x \<parallel> y = \<nu> x \<parallel> \<nu> y + d (\<tau> x) \<cdot> \<nu> y + d (\<tau> y) \<cdot> \<nu> x + \<tau> x \<parallel> \<tau> y
goal:
No subgoals!
[PROOF STEP]
qed |
[STATEMENT]
lemma rbd_rbb_demorgan: "\<partial> \<circ> bd\<^sub>\<R> R = bb\<^sub>\<R> R \<circ> \<partial>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<partial> \<circ> bd\<^sub>\<R> R = bb\<^sub>\<R> R \<circ> \<partial>
[PROOF STEP]
by (simp add: rbb_def rbd_def fbd_fbb_demorgan) |
% Build a distribution of RTB
!make clean
!make
matlab.addons.toolbox.packageToolbox('RTB.prj', '~/Desktop/RTB') |
[GOAL]
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
A : SpecialLinearGroup n R
⊢ det (adjugate ↑A) = 1
[PROOFSTEP]
rw [det_adjugate, A.prop, one_pow]
[GOAL]
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
A B : SpecialLinearGroup n R
⊢ det (↑A * ↑B) = 1
[PROOFSTEP]
rw [det_mul, A.prop, B.prop, one_mul]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
A B : SpecialLinearGroup n R
inst✝ : Nontrivial R
g : SpecialLinearGroup n R
⊢ det ↑g ≠ 0
[PROOFSTEP]
rw [g.det_coe]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
A B : SpecialLinearGroup n R
inst✝ : Nontrivial R
g : SpecialLinearGroup n R
⊢ 1 ≠ 0
[PROOFSTEP]
norm_num
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
A B : SpecialLinearGroup n R
inst✝ : Nontrivial R
g : SpecialLinearGroup n R
i : n
h : ↑g i = 0
⊢ ∀ (j : n), ↑g i j = 0
[PROOFSTEP]
simp [h]
[GOAL]
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
src✝¹ : Monoid (SpecialLinearGroup n R) := monoid
src✝ : Inv (SpecialLinearGroup n R) := hasInv
A : SpecialLinearGroup n R
⊢ A⁻¹ * A = 1
[PROOFSTEP]
ext1
[GOAL]
case a
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
src✝¹ : Monoid (SpecialLinearGroup n R) := monoid
src✝ : Inv (SpecialLinearGroup n R) := hasInv
A : SpecialLinearGroup n R
i✝ j✝ : n
⊢ ↑(A⁻¹ * A) i✝ j✝ = ↑1 i✝ j✝
[PROOFSTEP]
simp [adjugate_mul]
[GOAL]
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
A : SpecialLinearGroup n R
⊢ comp (↑Matrix.toLin' ↑A) (↑Matrix.toLin' ↑A⁻¹) = LinearMap.id
[PROOFSTEP]
rw [← toLin'_mul, ← coe_mul, mul_right_inv, coe_one, toLin'_one]
[GOAL]
n : Type u
inst✝² : DecidableEq n
inst✝¹ : Fintype n
R : Type v
inst✝ : CommRing R
A : SpecialLinearGroup n R
⊢ comp (↑Matrix.toLin' ↑A⁻¹) (↑Matrix.toLin' ↑A) = LinearMap.id
[PROOFSTEP]
rw [← toLin'_mul, ← coe_mul, mul_left_inv, coe_one, toLin'_one]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
f : R →+* S
g : SpecialLinearGroup n R
⊢ det (↑(RingHom.mapMatrix f) ↑g) = 1
[PROOFSTEP]
rw [← f.map_det]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
f : R →+* S
g : SpecialLinearGroup n R
⊢ ↑f (det ↑g) = 1
[PROOFSTEP]
simp [g.prop]
[GOAL]
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R : Type v
inst✝² : CommRing R
S : Type u_1
inst✝¹ : CommRing S
inst✝ : Fact (Even (Fintype.card n))
g : SpecialLinearGroup n R
⊢ det (-↑g) = 1
[PROOFSTEP]
simpa [(@Fact.out <| Even <| Fintype.card n).neg_one_pow, g.det_coe] using det_smul (↑ₘg) (-1)
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
⊢ det ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] = 1
[PROOFSTEP]
rw [Matrix.det_fin_two, mul_comm]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
⊢ vecCons ![↑A 1 1, -↑A 0 1] ![![-↑A 1 0, ↑A 0 0]] 1 1 * vecCons ![↑A 1 1, -↑A 0 1] ![![-↑A 1 0, ↑A 0 0]] 0 0 -
vecCons ![↑A 1 1, -↑A 0 1] ![![-↑A 1 0, ↑A 0 0]] 0 1 * vecCons ![↑A 1 1, -↑A 0 1] ![![-↑A 1 0, ↑A 0 0]] 1 0 =
1
[PROOFSTEP]
simp only [cons_val_zero, cons_val_one, head_cons, mul_neg, neg_mul, neg_neg]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
⊢ ↑A 0 0 * ↑A 1 1 - ↑A 0 1 * ↑A 1 0 = 1
[PROOFSTEP]
have := A.2
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
this : det ↑A = 1
⊢ ↑A 0 0 * ↑A 1 1 - ↑A 0 1 * ↑A 1 0 = 1
[PROOFSTEP]
rw [Matrix.det_fin_two] at this
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
this : ↑A 0 0 * ↑A 1 1 - ↑A 0 1 * ↑A 1 0 = 1
⊢ ↑A 0 0 * ↑A 1 1 - ↑A 0 1 * ↑A 1 0 = 1
[PROOFSTEP]
convert this
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
⊢ A⁻¹ =
{ val := ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]],
property := (_ : det ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] = 1) }
[PROOFSTEP]
ext
[GOAL]
case a
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
i✝ j✝ : Fin 2
⊢ ↑A⁻¹ i✝ j✝ =
↑{ val := ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]],
property := (_ : det ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] = 1) }
i✝ j✝
[PROOFSTEP]
have := Matrix.adjugate_fin_two A.1
[GOAL]
case a
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
i✝ j✝ : Fin 2
this : adjugate ↑A = ↑of ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]]
⊢ ↑A⁻¹ i✝ j✝ =
↑{ val := ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]],
property := (_ : det ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] = 1) }
i✝ j✝
[PROOFSTEP]
rw [coe_inv, this]
[GOAL]
case a
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
A : SL(2, R)
i✝ j✝ : Fin 2
this : adjugate ↑A = ↑of ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]]
⊢ ↑of ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] i✝ j✝ =
↑{ val := ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]],
property := (_ : det ![![↑A 1 1, -↑A 0 1], ![-↑A 1 0, ↑A 0 0]] = 1) }
i✝ j✝
[PROOFSTEP]
rfl
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
a b c d : R
hdet : a * d - b * c = 1
⊢ det (↑of ![![a, b], ![c, d]]) = 1
[PROOFSTEP]
rwa [det_fin_two_of]
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
g : SL(2, R)
⊢ P g
[PROOFSTEP]
obtain ⟨m, hm⟩ := g
[GOAL]
case mk
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ P { val := m, property := hm }
[PROOFSTEP]
convert h (m 0 0) (m 0 1) (m 1 0) (m 1 1) (by rwa [det_fin_two] at hm )
[GOAL]
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m 0 0 * m 1 1 - m 0 1 * m 1 0 = 1
[PROOFSTEP]
rwa [det_fin_two] at hm
[GOAL]
case h.e'_1.h.e'_3
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m = ↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]]
[PROOFSTEP]
ext i j
[GOAL]
case h.e'_1.h.e'_3.a.h
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
i j : Fin 2
⊢ m i j = ↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] i j
[PROOFSTEP]
fin_cases i
[GOAL]
case h.e'_1.h.e'_3.a.h.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
j : Fin 2
⊢ m { val := 0, isLt := (_ : 0 < 2) } j = ↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 0, isLt := (_ : 0 < 2) } j
[PROOFSTEP]
fin_cases j
[GOAL]
case h.e'_1.h.e'_3.a.h.tail.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
j : Fin 2
⊢ m { val := 1, isLt := (_ : (fun a => a < 2) 1) } j =
↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 1, isLt := (_ : (fun a => a < 2) 1) } j
[PROOFSTEP]
fin_cases j
[GOAL]
case h.e'_1.h.e'_3.a.h.head.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m { val := 0, isLt := (_ : 0 < 2) } { val := 0, isLt := (_ : 0 < 2) } =
↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 0, isLt := (_ : 0 < 2) } { val := 0, isLt := (_ : 0 < 2) }
[PROOFSTEP]
rfl
[GOAL]
case h.e'_1.h.e'_3.a.h.head.tail.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m { val := 0, isLt := (_ : 0 < 2) } { val := 1, isLt := (_ : (fun a => a < 2) 1) } =
↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 0, isLt := (_ : 0 < 2) }
{ val := 1, isLt := (_ : (fun a => a < 2) 1) }
[PROOFSTEP]
rfl
[GOAL]
case h.e'_1.h.e'_3.a.h.tail.head.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m { val := 1, isLt := (_ : (fun a => a < 2) 1) } { val := 0, isLt := (_ : 0 < 2) } =
↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 1, isLt := (_ : (fun a => a < 2) 1) }
{ val := 0, isLt := (_ : 0 < 2) }
[PROOFSTEP]
rfl
[GOAL]
case h.e'_1.h.e'_3.a.h.tail.head.tail.head
n : Type u
inst✝³ : DecidableEq n
inst✝² : Fintype n
R : Type v
inst✝¹ : CommRing R
S : Type u_1
inst✝ : CommRing S
P : SL(2, R) → Prop
h :
∀ (a b c d : R) (hdet : a * d - b * c = 1),
P { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) }
m : Matrix (Fin 2) (Fin 2) R
hm : det m = 1
⊢ m { val := 1, isLt := (_ : (fun a => a < 2) 1) } { val := 1, isLt := (_ : (fun a => a < 2) 1) } =
↑of ![![m 0 0, m 0 1], ![m 1 0, m 1 1]] { val := 1, isLt := (_ : (fun a => a < 2) 1) }
{ val := 1, isLt := (_ : (fun a => a < 2) 1) }
[PROOFSTEP]
rfl
[GOAL]
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg : ↑g 1 0 = 0
a b : R
h : a ≠ 0
⊢ det (↑of ![![a, b], ![0, a⁻¹]]) = 1
[PROOFSTEP]
simp [h]
[GOAL]
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg : ↑g 1 0 = 0
⊢ ∃ a b h, g = { val := ↑of ![![a, b], ![0, a⁻¹]], property := (_ : det (↑of ![![a, b], ![0, a⁻¹]]) = 1) }
[PROOFSTEP]
induction' g using Matrix.SpecialLinearGroup.fin_two_induction with a b c d h_det
[GOAL]
case h
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : ↑{ val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } 1 0 = 0
⊢ ∃ a_1 b_1 h,
{ val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } =
{ val := ↑of ![![a_1, b_1], ![0, a_1⁻¹]], property := (_ : det (↑of ![![a_1, b_1], ![0, a_1⁻¹]]) = 1) }
[PROOFSTEP]
replace hg : c = 0 := by simpa using hg
[GOAL]
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : ↑{ val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } 1 0 = 0
⊢ c = 0
[PROOFSTEP]
simpa using hg
[GOAL]
case h
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : c = 0
⊢ ∃ a_1 b_1 h,
{ val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } =
{ val := ↑of ![![a_1, b_1], ![0, a_1⁻¹]], property := (_ : det (↑of ![![a_1, b_1], ![0, a_1⁻¹]]) = 1) }
[PROOFSTEP]
have had : a * d = 1 := by rwa [hg, mul_zero, sub_zero] at h_det
[GOAL]
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : c = 0
⊢ a * d = 1
[PROOFSTEP]
rwa [hg, mul_zero, sub_zero] at h_det
[GOAL]
case h
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : c = 0
had : a * d = 1
⊢ ∃ a_1 b_1 h,
{ val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } =
{ val := ↑of ![![a_1, b_1], ![0, a_1⁻¹]], property := (_ : det (↑of ![![a_1, b_1], ![0, a_1⁻¹]]) = 1) }
[PROOFSTEP]
refine' ⟨a, b, left_ne_zero_of_mul_eq_one had, _⟩
[GOAL]
case h
n : Type u
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
R✝ : Type v
inst✝² : CommRing R✝
S : Type u_1
inst✝¹ : CommRing S
R : Type u_2
inst✝ : Field R
g : SL(2, R)
hg✝ : ↑g 1 0 = 0
a b c d : R
h_det : a * d - b * c = 1
hg : c = 0
had : a * d = 1
⊢ { val := ↑of ![![a, b], ![c, d]], property := (_ : det (↑of ![![a, b], ![c, d]]) = 1) } =
{ val := ↑of ![![a, b], ![0, a⁻¹]], property := (_ : det (↑of ![![a, b], ![0, a⁻¹]]) = 1) }
[PROOFSTEP]
simp_rw [eq_inv_of_mul_eq_one_right had, hg]
[GOAL]
⊢ det (↑of ![![0, -1], ![1, 0]]) = 1
[PROOFSTEP]
norm_num [Matrix.det_fin_two_of]
[GOAL]
⊢ det (↑of ![![1, 1], ![0, 1]]) = 1
[PROOFSTEP]
norm_num [Matrix.det_fin_two_of]
[GOAL]
⊢ ↑T⁻¹ = ↑of ![![1, -1], ![0, 1]]
[PROOFSTEP]
simp [coe_inv, coe_T, adjugate_fin_two]
[GOAL]
n : ℤ
⊢ ↑(T ^ n) = ↑of ![![1, n], ![0, 1]]
[PROOFSTEP]
induction' n using Int.induction_on with n h n h
[GOAL]
case hz
⊢ ↑(T ^ 0) = ↑of ![![1, 0], ![0, 1]]
[PROOFSTEP]
rw [zpow_zero, coe_one, Matrix.one_fin_two]
[GOAL]
case hp
n : ℕ
h : ↑(T ^ ↑n) = ↑of ![![1, ↑n], ![0, 1]]
⊢ ↑(T ^ (↑n + 1)) = ↑of ![![1, ↑n + 1], ![0, 1]]
[PROOFSTEP]
simp_rw [zpow_add, zpow_one, coe_mul, h, coe_T, Matrix.mul_fin_two]
-- Porting note: was congrm !![_, _; _, _]
[GOAL]
case hp
n : ℕ
h : ↑(T ^ ↑n) = ↑of ![![1, ↑n], ![0, 1]]
⊢ ↑of ![![1 * 1 + ↑n * 0, 1 * 1 + ↑n * 1], ![0 * 1 + 1 * 0, 0 * 1 + 1 * 1]] = ↑of ![![1, ↑n + 1], ![0, 1]]
[PROOFSTEP]
ring_nf
[GOAL]
case hn
n : ℕ
h : ↑(T ^ (-↑n)) = ↑of ![![1, -↑n], ![0, 1]]
⊢ ↑(T ^ (-↑n - 1)) = ↑of ![![1, -↑n - 1], ![0, 1]]
[PROOFSTEP]
simp_rw [zpow_sub, zpow_one, coe_mul, h, coe_T_inv, Matrix.mul_fin_two]
-- Porting note: was congrm !![_, _; _, _]
[GOAL]
case hn
n : ℕ
h : ↑(T ^ (-↑n)) = ↑of ![![1, -↑n], ![0, 1]]
⊢ ↑of ![![1 * 1 + -↑n * 0, 1 * -1 + -↑n * 1], ![0 * 1 + 1 * 0, 0 * -1 + 1 * 1]] = ↑of ![![1, -↑n - 1], ![0, 1]]
[PROOFSTEP]
ring_nf
[GOAL]
n : ℤ
g : SL(2, ℤ)
⊢ ↑(T ^ n * g) 1 = ↑g 1
[PROOFSTEP]
ext j
[GOAL]
case h
n : ℤ
g : SL(2, ℤ)
j : Fin 2
⊢ ↑(T ^ n * g) 1 j = ↑g 1 j
[PROOFSTEP]
simp [coe_T_zpow, Matrix.vecMul, Matrix.dotProduct, Fin.sum_univ_succ, vecTail]
[GOAL]
g : SL(2, ℤ)
⊢ ↑(T * g) 1 = ↑g 1
[PROOFSTEP]
simpa using T_pow_mul_apply_one 1 g
[GOAL]
g : SL(2, ℤ)
⊢ ↑(T⁻¹ * g) 1 = ↑g 1
[PROOFSTEP]
simpa using T_pow_mul_apply_one (-1) g
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
在版本3的基础上,根据pandas的join方法来求交集
根据从量表中筛选的样本,来获得符合要求的原始数据的路径
数据结构neuroimageDataPath//subject00001//files
也可以是任何的数据结构,只要给定subjName在哪里就行
总之,最后把file复制到其他地方(可以限定某个file)
input:
# 1 referencePath:需要复制的被试名字所在text文件(大表中的folder)
# 2 regularExpressionOfsubjName_forReference:如提取量表中subjName的正则表达式
# 3 folderNameContainingFile_forSelect:想把被试的哪个模态/或那个文件夹下的文件复制出来(如同时有'resting'和'dti'时,选择那个模态)
# 4 num_countBackwards:subjName在倒数第几个block内(第一个计数为1)
# 如'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
# 的subjName在倒数第3个中
# 5 regularExpressionOfSubjName_forNeuroimageDataFiles:用来筛选mri数据中subject name字符串的正则表达式
# 6 keywordThatFileContain:用来筛选file的正则表达式或keyword
# 7 neuroimageDataPath:原始数据的根目录
# 8 savePath: 将原始数据copy到哪个大路径
# n_processess=5几个线程
# 9 ifSaveLog:是否保存复制log
# 10 ifCopy:是否执行复制功能
# 11 ifMove:是否移动(0)
# 12 saveInToOneOrMoreFolder:保存到每个被试文件夹下,还是保存到一个文件夹下
# 13 saveNameSuffix:文件保存的尾缀('.nii')
# 14 ifRun:是否真正对文件执行移动或复制(0)
# 总体来说被复制的文件放在如下的路径:savePath/saveFolderName/subjName/files
@author: LI Chao
"""
# =========================================================================
# import
import sys
import shutil
import os
import time
# from lc_selectFile_ import selectFile
import pandas as pd
import numpy as np
from joblib import Parallel, delayed  # sklearn.externals.joblib is deprecated; use joblib directly
# =========================================================================
# def
class copy_fmri():
def __init__(self,
referencePath=r'E:\wangfeidata\folder.txt',
regularExpressionOfsubjName_forReference='([1-9]\d*)',
folderNameContainingFile_forSelect='',
num_countBackwards=2,
regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
keywordThatFileContain='nii',
neuroimageDataPath=r'E:\wangfeidata\FunImgARWD',
savePath=r'E:\wangfeidata',
n_processess=5,
ifSaveLog=1,
ifCopy=0,
ifMove=0,
saveInToOneOrMoreFolder='saveToEachSubjFolder',
saveNameSuffix='.nii',
ifRun=0):
# sanity-check the parameters
if ifCopy == 1 and ifMove == 1:
print('### Cannot copy and move at the same time! ###\n')
print('### please press Ctrl+C to close the progress ###\n')
time.sleep(5)
# print('==========================================================')
# print('\nThe num_countBackwards that to screen subject name is {} !'.format(num_countBackwards))
# print('\nKeyword of folder name that containing the files is {} !'.format(folderNameContainingFile_forSelect))
# print('regularExpressionOfSubjName_forNeuroimageDataFiles is {}'.format(regularExpressionOfSubjName_forNeuroimageDataFiles))
# print('ifCopy is {}'.format(ifCopy))
# print('saveInToOneOrMoreFolder is {}'.format(saveInToOneOrMoreFolder))
# print('==========================================================')
# input("***请核对以上信息是否准确,否则复制出错!***")
# =========================================================================
# accept excel or csv
self.referencePath = referencePath
try:
self.subjName_forSelect = pd.read_excel(
referencePath, dtype='str', header=None)
except Exception:
self.subjName_forSelect = pd.read_csv(
referencePath, dtype='str', header=None)
#
        print('### Extracting the matching part of subjName_forSelect (digits by default) ###\n'
              '### When there are several matches, the 1st one is used ###\n')
ith = 0
        if regularExpressionOfsubjName_forReference:
            self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]\
                .str.findall(regularExpressionOfsubjName_forReference)
self.subjName_forSelect = [self.subjName_forSelect_[ith]
for self.subjName_forSelect_ in
self.subjName_forSelect
if len(self.subjName_forSelect_)]
        # done extracting subjName_forSelect
self.folderNameContainingFile_forSelect = folderNameContainingFile_forSelect
self.num_countBackwards = num_countBackwards
self.regularExpressionOfSubjName_forNeuroimageDataFiles = regularExpressionOfSubjName_forNeuroimageDataFiles
self.keywordThatFileContain = keywordThatFileContain
self.neuroimageDataPath = neuroimageDataPath
self.savePath = savePath
self.n_processess = n_processess
self.ifSaveLog = ifSaveLog
self.ifCopy = ifCopy
self.ifMove = ifMove
self.saveInToOneOrMoreFolder = saveInToOneOrMoreFolder
self.saveNameSuffix = saveNameSuffix
self.ifRun = ifRun
# ===================================================================
def walkAllPath(self):
allWalkPath = os.walk(self.neuroimageDataPath)
# allWalkPath=[allWalkPath_ for allWalkPath_ in allWalkPath]
return allWalkPath
def fetch_allFilePath(self, allWalkPath):
allFilePath = []
for onePath in allWalkPath:
for oneFile in onePath[2]:
path = os.path.join(onePath[0], oneFile)
allFilePath.append(path)
return allFilePath
def fetch_allSubjName(self, allFilePath):
        '''
        num_countBackwards: counting backwards, the path block that contains
        subjName (the last block counts as 1);
        e.g. in 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
        the subjName sits in the 3rd block from the end
        '''
# allWalkPath=sel.walkAllPath()
# allFilePath=sel.fetch_allFilePath(allWalkPath)
allSubjName = allFilePath
for i in range(self.num_countBackwards - 1):
allSubjName = [os.path.dirname(allFilePath_)
for allFilePath_ in allSubjName]
allSubjName = [os.path.basename(allFilePath_)
for allFilePath_ in allSubjName]
allSubjName = pd.DataFrame(allSubjName)
# allSubjName=allSubjName.iloc[:,0].where(allSubjName.iloc[:,0]!='').dropna()
# allSubjName=pd.DataFrame(allSubjName)
return allSubjName
def fetch_folerNameContainingFile(self, allFilePath):
        '''
        If the folder directly above a file is not the subject name, we must
        choose which folder's files to select. First determine the folder name
        above each file (usually the modality), then filter by your keyword.
        '''
folerNameContainingFile = [os.path.dirname(
allFilePath_) for allFilePath_ in allFilePath]
folerNameContainingFile = [os.path.basename(
folderName) for folderName in folerNameContainingFile]
return folerNameContainingFile
def fetch_allFileName(self, allFilePath):
        '''
        Fetch all file names for later filtering.
        Use case: unwanted files may sit alongside the ones we need,
        e.g. text files mixed in with the dicom files.
        '''
allFileName = [os.path.basename(allFilePath_)
for allFilePath_ in allFilePath]
return allFileName
# ===================================================================
def screen_pathLogicalLocation_accordingTo_yourSubjName(self, allSubjName):
        # Match subject names: exact matching is used here; only a complete match counts
        # make sure subjName_forSelect is a pd.Series whose content is string
if type(self.subjName_forSelect) is type(pd.DataFrame([1])):
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
if type(self.subjName_forSelect[0]) is not str:
self.subjName_forSelect = pd.Series(
self.subjName_forSelect, dtype='str')
        # Note: both sides of a match must have the same data type!
# allSubjName=sel.fetch_allSubjName(allFilePath)
try:
allSubjName = allSubjName.iloc[:, 0].str.findall(
self.regularExpressionOfSubjName_forNeuroimageDataFiles)
            # entries the regex failed to match are empty lists; treat them as non-matches and drop them
allSubjName_temp = []
for name in allSubjName.values:
if name:
allSubjName_temp.append(name[0])
else:
allSubjName_temp.append(None)
allSubjName = allSubjName_temp
allSubjName = pd.DataFrame(allSubjName)
self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
# self.subjName_forSelect
intersect = allSubjName.set_index(0).join(
self.subjName_forSelect.set_index(0), how='right')
intersect = pd.Series(intersect.index)
            # names present in allSubjName but missing from subjName_forSelect
# self.difName=allSubjName.join(self.subjName_forSelect)
# self.difName=self.difName.where(self.difName!='').dropna()
        except Exception:
            print('subjName mismatch subjName_forSelected!\nplease check their type')
sys.exit(0)
if any(intersect):
            # convert allSubjName to a DataFrame for the logical comparison
allSubjName = pd.DataFrame(allSubjName)
logic_loc = [allSubjName == intersect_ for intersect_ in intersect]
if len(logic_loc) > 1:
logic_loc = pd.concat(logic_loc, axis=1)
logic_loc = np.sum(logic_loc, axis=1)
logic_loc = logic_loc == 1
else:
logic_loc = logic_loc
logic_loc = pd.DataFrame(logic_loc)
else:
logic_loc = np.zeros([len(allSubjName), 1]) == 1
logic_loc = pd.DataFrame(logic_loc)
return logic_loc
def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(
self, folerNameContainingFile):
        # Match folerNameContainingFile: fuzzy substring matching; any name containing the keyword matches
if self.folderNameContainingFile_forSelect:
logic_loc = [
self.folderNameContainingFile_forSelect in oneName_ for oneName_ in folerNameContainingFile]
logic_loc = pd.DataFrame(logic_loc)
else:
logic_loc = np.ones([len(folerNameContainingFile), 1]) == 1
logic_loc = pd.DataFrame(logic_loc)
return logic_loc
def screen_pathLogicalLocation_accordingTo_fileName(self, allFileName):
        # Match file names: fuzzy substring matching; any name containing the keyword matches
if self.keywordThatFileContain:
logic_loc = [
self.keywordThatFileContain in oneName_ for oneName_ in allFileName]
logic_loc = pd.DataFrame(logic_loc)
else:
logic_loc = np.ones([len(allFileName), 1]) == 1
logic_loc = pd.DataFrame(logic_loc)
return logic_loc
def fetch_totalLogicalLocation(self,
logicLoc_subjName, logicLoc_folderNameContaningFile, logicLoc_fileName):
logic_loc = pd.concat([logicLoc_subjName,
logicLoc_folderNameContaningFile,
logicLoc_fileName],
axis=1)
logic_loc = np.sum(logic_loc, axis=1) == np.shape(logic_loc)[1]
return logic_loc
def fetch_selectedFilePath_accordingPathLogicalLocation(self,
allFilePath, allSubjName, logic_loc):
#
allFilePath = pd.DataFrame(allFilePath)
allSelectedFilePath = allFilePath[logic_loc]
allSelectedFilePath = allSelectedFilePath.dropna()
# name
allSubjName = pd.DataFrame(allSubjName)
allSelectedSubjName = allSubjName[logic_loc]
allSelectedSubjName = allSelectedSubjName.dropna()
return allSelectedFilePath, allSelectedSubjName
# ===================================================================
def copy_allDicomsOfOneSubj(
self,
i,
subjName,
allSelectedSubjName,
allSelectedFilePath):
n_allSelectedSubj = len(allSelectedSubjName)
print('Copying the {}/{}th subject: {}...'.format(i +
1, n_allSelectedSubj, subjName))
        # save each file under its own subjxxx folder
if self.saveInToOneOrMoreFolder == 'saveToEachSubjFolder':
output_folder = os.path.join(self.savePath, subjName)
            # create the subjxxx folder if needed
if not os.path.exists(output_folder):
os.makedirs(output_folder)
        # save all files into a single folder (files named after subjxxx)
if self.saveInToOneOrMoreFolder == 'saveToOneFolder':
output_folder = os.path.join(self.savePath,
subjName + self.saveNameSuffix)
# copying OR moving OR do nothing
fileIndex = allSelectedSubjName[(
allSelectedSubjName.values == subjName)].index.tolist()
if self.ifCopy == 1 and self.ifMove == 0:
[shutil.copy(allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.ifCopy == 0 and self.ifMove == 1:
[shutil.move(allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.ifCopy == 0 and self.ifMove == 0:
print('### No copy and No move ###\n')
else:
print('### Cannot copy and move at the same time! ###\n')
print('OK!\n')
#
def copy_allDicomsOfAllSubj_multiprocess(self, allSelectedSubjName,
allSelectedFilePath):
        # create the save folder if needed
if not os.path.exists(self.savePath):
os.makedirs(self.savePath)
        # parallel execution
s = time.time()
        # unique subject names
uniSubjName = allSelectedSubjName.iloc[:, 0].unique()
        # do not spawn multiple workers when only a few subjects are copied
if len(uniSubjName) <= 500:
self.n_processess = 1
print('Copying...\n')
Parallel(n_jobs=self.n_processess, backend='threading')(delayed(self.copy_allDicomsOfOneSubj)(i, subjName, allSelectedSubjName, allSelectedFilePath)
for i, subjName in enumerate(uniSubjName))
e = time.time()
print('Done!\nRunning time is {:.1f}'.format(e - s))
# ===================================================================
def main_run(self):
# all path and name
allWalkPath = self.walkAllPath()
allFilePath = self.fetch_allFilePath(allWalkPath)
allSubjName = self.fetch_allSubjName(allFilePath)
allFileName = self.fetch_allFileName(allFilePath)
# select
folderNameContainingFile = self.fetch_folerNameContainingFile(
allFilePath)
        # logicLoc_subjName: logical indices obtained by matching subject names; likewise for the rest.
        # fileName != subjName: e.g. fileName may be xxx.nii while subjName is subjxxx
logicLoc_subjName = self.screen_pathLogicalLocation_accordingTo_yourSubjName(
allSubjName)
logicLoc_folderNameContaningFile = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile(
folderNameContainingFile)
logicLoc_fileName = self.screen_pathLogicalLocation_accordingTo_fileName(
allFileName)
logic_loc = self.fetch_totalLogicalLocation(
logicLoc_subjName, logicLoc_folderNameContaningFile, logicLoc_fileName)
allSelectedFilePath, allSelectedSubjName = self.fetch_selectedFilePath_accordingPathLogicalLocation(
allFilePath, allSubjName, logic_loc)
# save for checking
if self.ifSaveLog:
now = time.localtime()
now = time.strftime("%Y-%m-%d %H:%M:%S", now)
#
uniSubjName = allSelectedSubjName.iloc[:, 0].unique()
uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
uniSubjName = pd.DataFrame(uniSubjName)
allSelectedFilePath.to_csv(
os.path.join(
self.savePath,
'log_allSelectedFilePath.txt'),
index=False,
header=False)
allSelectedSubjPath = [os.path.dirname(
allSelectedFilePath_) for allSelectedFilePath_ in allSelectedFilePath.iloc[:, 0]]
allSelectedSubjPath = pd.DataFrame(
allSelectedSubjPath).drop_duplicates()
allSelectedSubjPath.to_csv(
os.path.join(
self.savePath,
'log_allSelectedSubjPath.txt'),
index=False,
header=False)
uniSubjName.to_csv(
os.path.join(
self.savePath,
'log_allSelectedSubjName.txt'),
index=False,
header=False)
            # self.difName is only computed when the difference-set code above
            # is enabled; guard so that saving the log cannot crash
            if hasattr(self, 'difName'):
                self.difName.to_csv(
                    os.path.join(
                        self.savePath,
                        'log_difSubjName.txt'),
                    index=False,
                    header=False)
allSubjName.to_csv(
os.path.join(
self.savePath,
'log_allSubjName.txt'),
index=False,
header=False)
#
if len(uniSubjName) <= 100:
self.n_processess = 1
f = open(os.path.join(self.savePath, "copy_inputs.txt"), 'a')
f.write("\n\n")
f.write('====================' + now + '====================')
f.write("\n\n")
f.write("referencePath is: " + self.referencePath)
f.write("\n\n")
f.write(
"folderNameContainingFile_forSelect are: " +
self.folderNameContainingFile_forSelect)
f.write("\n\n")
f.write("num_countBackwards is: " + str(self.num_countBackwards))
f.write("\n\n")
f.write("regularExpressionOfSubjName_forNeuroimageDataFiles is: " +
str(self.regularExpressionOfSubjName_forNeuroimageDataFiles))
f.write("\n\n")
f.write("keywordThatFileContain is: " +
str(self.keywordThatFileContain))
f.write("\n\n")
f.write("neuroimageDataPath is: " + self.neuroimageDataPath)
f.write("\n\n")
f.write("savePath is: " + self.savePath)
f.write("\n\n")
f.write("n_processess is: " + str(self.n_processess))
f.write("\n\n")
f.close()
# copy
if self.ifRun:
self.copy_allDicomsOfAllSubj_multiprocess(
allSelectedSubjName, allSelectedFilePath)
return allFilePath, allSubjName, logic_loc, allSelectedFilePath, allSelectedSubjName
if __name__ == '__main__':
import lc_copy_selected_file_V4 as copy
# basic['folder'].to_csv(r'I:\dynamicALFF\folder.txt',header=False,index=False)
path=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\FunImgARWS'
folder=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\folder.txt'
sel=copy.copy_fmri(referencePath=folder,
                       regularExpressionOfsubjName_forReference=r'([1-9]\d*)',
folderNameContainingFile_forSelect='',
num_countBackwards=2,
                       regularExpressionOfSubjName_forNeuroimageDataFiles=r'([1-9]\d*)',
keywordThatFileContain='nii',
neuroimageDataPath=path,
savePath=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\test',
n_processess=5,
ifSaveLog=1,
ifCopy=1,
ifMove=0,
saveInToOneOrMoreFolder='saveToEachSubjFolder',
saveNameSuffix='',
ifRun=0)
allFilePath,allSubjName,\
logic_loc,allSelectedFilePath,allSelectedSubjName=\
sel.main_run()
print('Done!')
|
!
! GrELS_Binary_IO
!
! Module containing routines to inquire, read, and write Binary
! GrELS object datafiles.
!
!
! CREATION HISTORY:
! Written by: Paul van Delst, 14-Feb-2010
! [email protected]
!
MODULE GrELS_Binary_IO
! ------------------
! Environment set up
! ------------------
! Module use
USE File_Utility , ONLY: File_Open, File_Exists
USE Message_Handler , ONLY: SUCCESS, FAILURE, INFORMATION, Display_Message
USE Binary_File_Utility, ONLY: Open_Binary_File
USE GrELS_Define , ONLY: GrELS_type , &
GrELS_Associated , &
GrELS_Destroy , &
GrELS_Create , &
GrELS_ValidRelease, &
GrELS_Info
! Disable implicit typing
IMPLICIT NONE
! ------------
! Visibilities
! ------------
PRIVATE
PUBLIC :: GrELS_Binary_InquireFile
PUBLIC :: GrELS_Binary_ReadFile
PUBLIC :: GrELS_Binary_WriteFile
PUBLIC :: GrELS_Binary_IOVersion
! -----------------
! Module parameters
! -----------------
  CHARACTER(*), PRIVATE, PARAMETER :: MODULE_VERSION_ID = &
    '$Id$'  ! placeholder: the original revision-id continuation line was lost
  CHARACTER(*), PARAMETER :: WRITE_ERROR_STATUS = 'DELETE'
! Default message length
INTEGER, PARAMETER :: ML = 256
! Old integer flag setting
INTEGER, PARAMETER :: SET = 1
CONTAINS
!################################################################################
!################################################################################
!## ##
!## ## PUBLIC MODULE ROUTINES ## ##
!## ##
!################################################################################
!################################################################################
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! GrELS_Binary_InquireFile
!
! PURPOSE:
! Function to inquire GrELS object Binary format files.
!
! CALLING SEQUENCE:
! Error_Status = GrELS_Binary_InquireFile( &
! Filename, &
! n_Wavelengths = n_Wavelengths , &
! n_Surface_Types = n_Surface_Types , &
! n_Weeks = n_Weeks , &
! n_Latitude_Zones = n_Latitude_Zones, &
! Release = Release , &
! Version = Version )
!
! INPUTS:
! Filename: Character string specifying the name of a
! GrELS data file to read.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL OUTPUTS:
! n_Wavelengths: Number of wavelengths for which the reflectance
! LUT data is specified.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! n_Surface_Types: Number of surface types for which the reflectance
! LUT data is specified.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
!       n_Weeks:            Number of weeks for which the green vegetation
!                           fraction LUT data is specified.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! n_Latitude_Zones: Number of latitude zones for which the green
! vegetation fraction LUT data is specified.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Release: The release number of the GrELS file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Version: The version number of the GrELS file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS, the file inquire was successful
! == FAILURE, an unrecoverable error occurred.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION GrELS_Binary_InquireFile( &
Filename , & ! Input
n_Wavelengths , & ! Optional output
n_Surface_Types , & ! Optional output
n_Weeks , & ! Optional output
n_Latitude_Zones, & ! Optional output
Release , & ! Optional output
Version ) & ! Optional output
RESULT( err_stat )
! Arguments
CHARACTER(*), INTENT(IN) :: Filename
INTEGER, OPTIONAL, INTENT(OUT) :: n_Wavelengths
INTEGER, OPTIONAL, INTENT(OUT) :: n_Surface_Types
INTEGER, OPTIONAL, INTENT(OUT) :: n_Weeks
INTEGER, OPTIONAL, INTENT(OUT) :: n_Latitude_Zones
INTEGER, OPTIONAL, INTENT(OUT) :: Release
INTEGER, OPTIONAL, INTENT(OUT) :: Version
! Function result
INTEGER :: err_stat
! Function parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'GrELS_InquireFile(Binary)'
! Function variables
CHARACTER(ML) :: msg
INTEGER :: io_stat
INTEGER :: fid
TYPE(GrELS_type) :: GrELS
! Setup
err_stat = SUCCESS
fid = -100
! Check that the file exists
IF ( .NOT. File_Exists( Filename ) ) THEN
msg = 'File '//TRIM(Filename)//' not found.'
CALL Inquire_Cleanup(); RETURN
END IF
! Open the GrELS data file
err_stat = Open_Binary_File( Filename, fid )
IF ( err_stat /= SUCCESS ) THEN
msg = 'Error opening '//TRIM(Filename)
CALL Inquire_Cleanup(); RETURN
END IF
! Read the release and version
READ( fid,IOSTAT=io_stat ) GrELS%Release, GrELS%Version
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading Release/Version. IOSTAT = ",i0)' ) io_stat
CALL Inquire_Cleanup(); RETURN
END IF
! Read the dimensions
READ( fid,IOSTAT=io_stat ) GrELS%n_Wavelengths , &
GrELS%n_Surface_Types , &
GrELS%n_Weeks , &
GrELS%n_Latitude_Zones
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading dimensions from ",a,". IOSTAT = ",i0)' ) &
TRIM(Filename), io_stat
CALL Inquire_Cleanup(); RETURN
END IF
! Close the file
CLOSE( fid, IOSTAT=io_stat )
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error closing ",a,". IOSTAT = ",i0)' ) TRIM(Filename), io_stat
CALL Inquire_Cleanup(); RETURN
END IF
! Set the return arguments
IF ( PRESENT(n_Wavelengths ) ) n_Wavelengths = GrELS%n_Wavelengths
IF ( PRESENT(n_Surface_Types ) ) n_Surface_Types = GrELS%n_Surface_Types
IF ( PRESENT(n_Weeks ) ) n_Weeks = GrELS%n_Weeks
IF ( PRESENT(n_Latitude_Zones) ) n_Latitude_Zones = GrELS%n_Latitude_Zones
IF ( PRESENT(Release ) ) Release = GrELS%Release
IF ( PRESENT(Version ) ) Version = GrELS%Version
CONTAINS
SUBROUTINE Inquire_CleanUp()
IF ( File_Open(fid) ) THEN
CLOSE( fid,IOSTAT=io_stat )
IF ( io_stat /= 0 ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup'
END IF
err_stat = FAILURE
CALL Display_Message( ROUTINE_NAME, msg, err_stat )
END SUBROUTINE Inquire_CleanUp
END FUNCTION GrELS_Binary_InquireFile
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! GrELS_Binary_ReadFile
!
! PURPOSE:
! Function to read GrELS object files in Binary format.
!
! CALLING SEQUENCE:
! Error_Status = GrELS_Binary_ReadFile( &
! Filename , &
! GrELS , &
! Quiet = Quiet )
!
! INPUTS:
! Filename: Character string specifying the name of a
! GrELS format data file to read.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OUTPUTS:
! GrELS: Object containing the reflectance spectra and
! greenness vegetation fraction data.
! UNITS: N/A
! TYPE: TYPE(GrELS_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT)
!
! OPTIONAL INPUTS:
! Quiet: Set this logical argument to suppress INFORMATION
! messages being printed to stdout
! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT].
! == .TRUE., INFORMATION messages are SUPPRESSED.
! If not specified, default is .FALSE.
! UNITS: N/A
! TYPE: LOGICAL
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS, the file read was successful
! == FAILURE, an unrecoverable error occurred.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION GrELS_Binary_ReadFile( &
Filename, & ! Input
GrELS , & ! Output
Quiet , & ! Optional input
Debug ) & ! Optional input (Debug output control)
RESULT( err_stat )
! Arguments
CHARACTER(*), INTENT(IN) :: Filename
TYPE(GrELS_type), INTENT(OUT) :: GrELS
LOGICAL, OPTIONAL, INTENT(IN) :: Quiet
LOGICAL, OPTIONAL, INTENT(IN) :: Debug
! Function result
INTEGER :: err_stat
! Function parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'GrELS_ReadFile(Binary)'
! Function variables
CHARACTER(ML) :: msg
LOGICAL :: noisy
INTEGER :: io_stat
INTEGER :: fid
TYPE(GrELS_type) :: dummy
! Setup
err_stat = SUCCESS
! ...Check Quiet argument
noisy = .TRUE.
IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet
! ...Override Quiet settings if debug set.
IF ( PRESENT(Debug) ) THEN
IF ( Debug ) noisy = .TRUE.
END IF
! Open the file
err_stat = Open_Binary_File( Filename, fid )
IF ( err_stat /= SUCCESS ) THEN
msg = 'Error opening '//TRIM(Filename)
CALL Read_Cleanup(); RETURN
END IF
! Read and check the release and version
READ( fid,IOSTAT=io_stat ) dummy%Release, dummy%Version
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading Release/Version. IOSTAT = ",i0)' ) io_stat
CALL Read_Cleanup(); RETURN
END IF
IF ( .NOT. GrELS_ValidRelease( dummy ) ) THEN
msg = 'GrELS Release check failed.'
CALL Read_Cleanup(); RETURN
END IF
    ! Read the GrELS data
! ...Read the dimensions
READ( fid,IOSTAT=io_stat ) dummy%n_Wavelengths , &
dummy%n_Surface_Types , &
dummy%n_Weeks , &
dummy%n_Latitude_Zones
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading data dimensions. IOSTAT = ",i0)' ) io_stat
CALL Read_Cleanup(); RETURN
END IF
! ...Allocate the object
CALL GrELS_Create( GrELS, &
dummy%n_Wavelengths , &
dummy%n_Surface_Types , &
dummy%n_Weeks , &
dummy%n_Latitude_Zones )
IF ( .NOT. GrELS_Associated( GrELS ) ) THEN
msg = 'GrELS object allocation failed.'
CALL Read_Cleanup(); RETURN
END IF
! ...Read the GrELS dimension vectors
READ( fid,IOSTAT=io_stat ) GrELS%Wavelength , &
GrELS%Surface_Type_Name, &
GrELS%Week , &
GrELS%Latitude_Zone
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading dimension vectors. IOSTAT = ",i0)' ) io_stat
CALL Read_Cleanup(); RETURN
END IF
! ...Read the reflectance LUT
READ( fid,IOSTAT=io_stat ) GrELS%Reflectance
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading reflectance data. IOSTAT = ",i0)' ) io_stat
CALL Read_Cleanup(); RETURN
END IF
! ...Read the greenness vegetation fraction LUT
READ( fid,IOSTAT=io_stat ) GrELS%GVF
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading greenness vegetation fraction data. IOSTAT = ",i0)' ) io_stat
CALL Read_Cleanup(); RETURN
END IF
! ...Assign the version number read in
GrELS%Version = dummy%Version
! Close the file
CLOSE( fid,IOSTAT=io_stat )
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error closing ",a,". IOSTAT = ",i0)' ) TRIM(Filename), io_stat
CALL Read_Cleanup(); RETURN
END IF
! Output an info message
IF ( noisy ) THEN
CALL GrELS_Info( GrELS, msg )
CALL Display_Message( ROUTINE_NAME, 'FILE: '//TRIM(Filename)//'; '//TRIM(msg), INFORMATION )
END IF
CONTAINS
SUBROUTINE Read_CleanUp()
IF ( File_Open(Filename) ) THEN
CLOSE( fid,IOSTAT=io_stat )
IF ( io_stat /= 0 ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup.'
END IF
CALL GrELS_Destroy( GrELS )
err_stat = FAILURE
CALL Display_Message( ROUTINE_NAME, msg, err_stat )
END SUBROUTINE Read_CleanUp
END FUNCTION GrELS_Binary_ReadFile
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! GrELS_Binary_WriteFile
!
! PURPOSE:
! Function to write GrELS object files in Binary format.
!
! CALLING SEQUENCE:
! Error_Status = GrELS_Binary_WriteFile( &
! Filename, &
! GrELS , &
! Quiet = Quiet )
!
! INPUTS:
! Filename: Character string specifying the name of the
! GrELS format data file to write.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! GrELS: Object containing the reflectance spectra and
! greenness vegetation fraction data.
! UNITS: N/A
! TYPE: TYPE(GrELS_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUTS:
! Quiet: Set this logical argument to suppress INFORMATION
! messages being printed to stdout
! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT].
! == .TRUE., INFORMATION messages are SUPPRESSED.
! If not specified, default is .FALSE.
! UNITS: N/A
! TYPE: LOGICAL
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS, the file write was successful
! == FAILURE, an unrecoverable error occurred.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
! SIDE EFFECTS:
! - If the output file already exists, it is overwritten.
! - If an error occurs, the output file is deleted before
! returning to the calling routine.
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION GrELS_Binary_WriteFile( &
Filename, & ! Input
GrELS , & ! Input
Quiet , & ! Optional input
Debug ) & ! Optional input (Debug output control)
RESULT( err_stat )
! Arguments
CHARACTER(*), INTENT(IN) :: Filename
TYPE(GrELS_type), INTENT(IN) :: GrELS
LOGICAL, OPTIONAL, INTENT(IN) :: Quiet
LOGICAL, OPTIONAL, INTENT(IN) :: Debug
! Function result
INTEGER :: err_stat
! Function parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'GrELS_WriteFile(Binary)'
CHARACTER(*), PARAMETER :: FILE_STATUS_ON_ERROR = 'DELETE'
! Function variables
CHARACTER(ML) :: msg
LOGICAL :: noisy
INTEGER :: io_stat
INTEGER :: fid
! Setup
err_stat = SUCCESS
! ...Check Quiet argument
noisy = .TRUE.
IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet
! ...Override Quiet settings if debug set.
IF ( PRESENT(Debug) ) THEN
IF ( Debug ) noisy = .TRUE.
END IF
! Check the GrELS object
! ...Is there any data?
IF ( .NOT. GrELS_Associated( GrELS ) ) THEN
msg = 'Input GrELS object is not allocated.'
CALL Write_Cleanup(); RETURN
END IF
! ...Check if release is valid
IF ( .NOT. GrELS_ValidRelease( GrELS ) ) THEN
msg = 'GrELS Release check failed.'
CALL Write_Cleanup(); RETURN
END IF
! Open the file for writing
err_stat = Open_Binary_File( Filename, fid, For_Output=SET )
IF ( err_stat /= SUCCESS ) THEN
msg = 'Error opening '//TRIM(Filename)
CALL Write_Cleanup(); RETURN
END IF
! Write the release and version
WRITE( fid,IOSTAT=io_stat ) GrELS%Release, GrELS%Version
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error reading Release/Version. IOSTAT = ",i0)' ) io_stat
CALL Write_Cleanup(); RETURN
END IF
    ! Write the GrELS data
! ...Write the dimensions
WRITE( fid,IOSTAT=io_stat ) GrELS%n_Wavelengths , &
GrELS%n_Surface_Types , &
GrELS%n_Weeks , &
GrELS%n_Latitude_Zones
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error writing data dimensions. IOSTAT = ",i0)' ) io_stat
CALL Write_Cleanup(); RETURN
END IF
! ...Write the GrELS dimension vectors
WRITE( fid,IOSTAT=io_stat ) GrELS%Wavelength , &
GrELS%Surface_Type_Name, &
GrELS%Week , &
GrELS%Latitude_Zone
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error writing dimension vectors. IOSTAT = ",i0)' ) io_stat
CALL Write_Cleanup(); RETURN
END IF
! ...Write the reflectance LUT
WRITE( fid,IOSTAT=io_stat ) GrELS%Reflectance
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error writing reflectance data. IOSTAT = ",i0)' ) io_stat
CALL Write_Cleanup(); RETURN
END IF
! ...Write the greenness vegetation fraction LUT
WRITE( fid,IOSTAT=io_stat ) GrELS%GVF
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error writing greenness vegetation fraction data. IOSTAT = ",i0)' ) io_stat
CALL Write_Cleanup(); RETURN
END IF
! Close the file
CLOSE( fid,STATUS='KEEP',IOSTAT=io_stat )
IF ( io_stat /= 0 ) THEN
WRITE( msg,'("Error closing ",a,". IOSTAT = ",i0)' ) TRIM(Filename), io_stat
CALL Write_Cleanup(); RETURN
END IF
! Output an info message
IF ( noisy ) THEN
CALL GrELS_Info( GrELS, msg )
CALL Display_Message( ROUTINE_NAME, 'FILE: '//TRIM(Filename)//'; '//TRIM(msg), INFORMATION )
END IF
CONTAINS
SUBROUTINE Write_CleanUp()
IF ( File_Open(Filename) ) THEN
CLOSE( fid,STATUS=WRITE_ERROR_STATUS,IOSTAT=io_stat )
IF ( io_stat /= 0 ) &
msg = TRIM(msg)//'; Error deleting output file during error cleanup.'
END IF
err_stat = FAILURE
CALL Display_Message( ROUTINE_NAME, msg, err_stat )
END SUBROUTINE Write_CleanUp
END FUNCTION GrELS_Binary_WriteFile
!--------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! GrELS_Binary_IOVersion
!
! PURPOSE:
! Subroutine to return the module version information.
!
! CALLING SEQUENCE:
! CALL GrELS_Binary_IOVersion( Id )
!
! OUTPUTS:
! Id: Character string containing the version Id information
! for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT)
!
!:sdoc-:
!--------------------------------------------------------------------------------
SUBROUTINE GrELS_Binary_IOVersion( Id )
CHARACTER(*), INTENT(OUT) :: Id
Id = MODULE_VERSION_ID
END SUBROUTINE GrELS_Binary_IOVersion
END MODULE GrELS_Binary_IO
|
import doubleround
import littleendian
import category_theory.category.basic
import category_theory.core
open doubleround
open littleendian
open operations
open params
open utils
open category_theory
namespace core
variable [category (bitvec word_len)]
/-!
# Core
- The `doubleround_10` function and its inverse.
- The `hash` and `core` functions, which have no inverse.
-/
/-- Apply double round 10 times to a reduced input. -/
@[simp] def doubleround_10 (X : matrixType): matrixType :=
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
doubleround_salsa20 $
X
/-- Inverse of `doubleround_10`. -/
@[simp] def doubleround_10_inv (X : matrixType): matrixType :=
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
doubleround_salsa20_inv $
X
/- Just some notation for inverses. -/
local notation `doubleround_10⁻¹` := doubleround_10_inv
/-- The `doubleround_10` function is invertible. -/
lemma doubleround_is_inv (I : doubleround_10 ≅ doubleround_10⁻¹) : I.hom ≫ I.inv = 𝟙 doubleround_10 :=
by rw [iso.hom_inv_id]
/-!
## Core and hash definitions
- There is no isomorphism (≅) between `core` and any `core⁻¹`.
- There is no isomorphism (≅) between `hash` and any `hash⁻¹`, because of the use of `core` and `core⁻¹`.
-/
/-- Do addition modulo 2^32 of the reduced input and the doubleround of the reduced input. -/
@[simp] def core (X : matrixType) : matrixType := mod_matrix (doubleround_10 X) X
/-- Do the hash. -/
def hash (X : matrix64Type) : matrix64Type := aument (core (reduce X))
end core
|
subpopulations_auto <- c("Lymphocytes", "CD3", "CD19", "CD20", "IgD+CD27+", "IgD+CD27-",
"IgD-CD27+", "IgD-CD27-", "Plasmablasts", "Transitional")
subpopulations_manual <- c("LYM", "CD3", "CD19", "CD20", "27+ IgD+", "27- IgD+",
"27+ IgD-", "27- IgD-", "27hi 38hi", "24hi 38hi")
subpopulations_common <- c("Lymphocytes", "CD3", "CD19", "CD20", "Memory IgD+", "Naive",
"Memory IgD-", "IgD-CD27-", "Plasmablasts", "Transitional")
|
#if _MSC_VER <= 1200
#include "python_class.h"
#include <boost/version.hpp>
#if BOOST_VERSION != 102800
#include <boost/python.hpp>
typedef boost::python::dict BoostPythonDictionary;
#else
#include <boost/python/objects.hpp>
typedef boost::python::dictionary BoostPythonDictionary;
#endif
#include "cmd/container.h"
#include <string>
#include "init.h"
#include "gfx/vec.h"
#include "cmd/unit_generic.h"
#include "universe_util.h"
#include "cmd/unit_util.h"
#include "faction_generic.h"
#include "cmd/ai/fire.h"
#include "unit_wrapper_class.h"
#include "unit_from_to_python.h"
#include "define_odd_unit_functions.h"
#endif
|
(*<*)
theory AOT_model
imports Main "HOL-Cardinals.Cardinals"
begin
declare[[typedef_overloaded]]
(*>*)
section\<open>References\<close>
text\<open>
A full description of this formalization including references can be found
at @{url \<open>http://dx.doi.org/10.17169/refubium-35141\<close>}.
The version of Principia Logico-Metaphysica (PLM) implemented in this formalization
can be found at @{url \<open>http://mally.stanford.edu/principia-2021-10-13.pdf\<close>}, while
the latest version of PLM is available at @{url \<open>http://mally.stanford.edu/principia.pdf\<close>}.
\<close>
section\<open>Model for the Logic of AOT\<close>
text\<open>We introduce a primitive type for hyperintensional propositions.\<close>
typedecl \<o>
text\<open>To be able to model modal operators following Kripke semantics,
we introduce a primitive type for possible worlds and assert, by axiom,
that there is a surjective function mapping propositions to the
boolean-valued functions acting on possible worlds. We call the result
of applying this function to a proposition the Montague intension
of the proposition.\<close>
typedecl w \<comment>\<open>The primitive type of possible worlds.\<close>
axiomatization AOT_model_d\<o> :: \<open>\<o>\<Rightarrow>(w\<Rightarrow>bool)\<close> where
d\<o>_surj: \<open>surj AOT_model_d\<o>\<close>
text\<open>The axioms of PLM require the existence of a non-actual world.\<close>
consts w\<^sub>0 :: w \<comment>\<open>The designated actual world.\<close>
axiomatization where AOT_model_nonactual_world: \<open>\<exists>w . w \<noteq> w\<^sub>0\<close>
text\<open>Validity of a proposition in a given world can now be modelled as the result
of applying that world to the Montague intension of the proposition.\<close>
definition AOT_model_valid_in :: \<open>w\<Rightarrow>\<o>\<Rightarrow>bool\<close> where
\<open>AOT_model_valid_in w \<phi> \<equiv> AOT_model_d\<o> \<phi> w\<close>
text\<open>By construction, we can choose a proposition for any given Montague intension,
s.t. the proposition is valid in a possible world iff the Montague intension
evaluates to true at that world.\<close>
definition AOT_model_proposition_choice :: \<open>(w\<Rightarrow>bool) \<Rightarrow> \<o>\<close> (binder \<open>\<epsilon>\<^sub>\<o> \<close> 8)
where \<open>\<epsilon>\<^sub>\<o> w. \<phi> w \<equiv> (inv AOT_model_d\<o>) \<phi>\<close>
lemma AOT_model_proposition_choice_simp: \<open>AOT_model_valid_in w (\<epsilon>\<^sub>\<o> w. \<phi> w) = \<phi> w\<close>
by (simp add: surj_f_inv_f[OF d\<o>_surj] AOT_model_valid_in_def
AOT_model_proposition_choice_def)
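
text\<open>As a quick sanity check (an illustrative example of our own, not part
  of PLM): a proposition stipulated to hold exactly at the actual world is
  valid precisely there.\<close>
lemma \<open>AOT_model_valid_in w (\<epsilon>\<^sub>\<o> v . v = w\<^sub>0) \<longleftrightarrow> w = w\<^sub>0\<close>
  by (simp add: AOT_model_proposition_choice_simp)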
text\<open>Nitpick can trivially show that there are models for the axioms above.\<close>
lemma \<open>True\<close> nitpick[satisfy, user_axioms, expect = genuine] ..
typedecl \<omega> \<comment>\<open>The primitive type of ordinary objects/urelements.\<close>
text\<open>Validating extended relation comprehension requires a large set of
special urelements. For simple models that do not validate extended
relation comprehension (and consequently the predecessor axiom in the
theory of natural numbers), it suffices to use a primitive type as @{text \<sigma>},
i.e. @{theory_text \<open>typedecl \<sigma>\<close>}.\<close>
typedecl \<sigma>'
typedef \<sigma> = \<open>UNIV::((\<omega> \<Rightarrow> w \<Rightarrow> bool) set \<times> (\<omega> \<Rightarrow> w \<Rightarrow> bool) set \<times> \<sigma>') set\<close> ..
typedecl null \<comment> \<open>Null-urelements representing non-denoting terms.\<close>
datatype \<upsilon> = \<omega>\<upsilon> \<omega> | \<sigma>\<upsilon> \<sigma> | is_null\<upsilon>: null\<upsilon> null \<comment> \<open>Type of urelements\<close>
text\<open>Urrelations are proposition-valued functions on urelements.
Urrelations are required to evaluate to necessarily false propositions for
null-urelements (note that there may be several distinct necessarily false
propositions).\<close>
typedef urrel = \<open>{ \<phi> . \<forall> x w . \<not>AOT_model_valid_in w (\<phi> (null\<upsilon> x)) }\<close>
by (rule exI[where x=\<open>\<lambda> x . (\<epsilon>\<^sub>\<o> w . \<not>is_null\<upsilon> x)\<close>])
(auto simp: AOT_model_proposition_choice_simp)
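
text\<open>For illustration (our own example, not part of PLM), the function that
  assigns a necessarily true proposition to every ordinary urelement and a
  necessarily false one to all other urelements is such an urrelation:\<close>
lemma \<open>(\<lambda> u . \<epsilon>\<^sub>\<o> w . case u of \<omega>\<upsilon> x \<Rightarrow> True | _ \<Rightarrow> False)
         \<in> { \<phi> . \<forall> x w . \<not>AOT_model_valid_in w (\<phi> (null\<upsilon> x)) }\<close>
  by (simp add: AOT_model_proposition_choice_simp)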
text\<open>Abstract objects will be modelled as sets of urrelations and will
have to be mapped surjectively into the set of special urelements.
We show that any mapping from abstract objects to special urelements
has to involve at least one large set of collapsed abstract objects.
We will use this fact to extend arbitrary mappings from abstract objects
to special urelements to surjective mappings.\<close>
lemma \<alpha>\<sigma>_pigeonhole:
\<comment> \<open>For any arbitrary mapping @{term \<alpha>\<sigma>} from sets of urrelations to special
urelements, there exists an abstract object x, s.t. the cardinal of the set
of special urelements is strictly smaller than the cardinal of the set of
abstract objects that are mapped to the same urelement as x under @{term \<alpha>\<sigma>}.\<close>
\<open>\<exists>x . |UNIV::\<sigma> set| <o |{y . \<alpha>\<sigma> x = \<alpha>\<sigma> y}|\<close>
for \<alpha>\<sigma> :: \<open>urrel set \<Rightarrow> \<sigma>\<close>
proof(rule ccontr)
have card_\<sigma>_set_set_bound: \<open>|UNIV::\<sigma> set set| \<le>o |UNIV::urrel set|\<close>
proof -
let ?pick = \<open>\<lambda>u s . \<epsilon>\<^sub>\<o> w . case u of (\<sigma>\<upsilon> s') \<Rightarrow> s' \<in> s | _ \<Rightarrow> False\<close>
have \<open>\<exists>f :: \<sigma> set \<Rightarrow> urrel . inj f\<close>
proof
show \<open>inj (\<lambda>s . Abs_urrel (\<lambda>u . ?pick u s))\<close>
proof(rule injI)
fix x y
assume \<open>Abs_urrel (\<lambda>u. ?pick u x) = Abs_urrel (\<lambda>u. ?pick u y)\<close>
hence \<open>(\<lambda>u. ?pick u x) = (\<lambda>u. ?pick u y)\<close>
by (auto intro!: Abs_urrel_inject[THEN iffD1]
simp: AOT_model_proposition_choice_simp)
hence \<open>AOT_model_valid_in w\<^sub>0 (?pick (\<sigma>\<upsilon> s) x) =
AOT_model_valid_in w\<^sub>0 (?pick (\<sigma>\<upsilon> s) y)\<close>
for s by metis
hence \<open>(s \<in> x) = (s \<in> y)\<close> for s
by (auto simp: AOT_model_proposition_choice_simp)
thus \<open>x = y\<close>
by blast
qed
qed
thus ?thesis
by (metis card_of_image inj_imp_surj_inv)
qed
text\<open>Assume, for a proof by contradiction, that there is no large collapsed set.\<close>
assume \<open>\<nexists>x . |UNIV::\<sigma> set| <o |{y . \<alpha>\<sigma> x = \<alpha>\<sigma> y}|\<close>
hence A: \<open>\<forall>x . |{y . \<alpha>\<sigma> x = \<alpha>\<sigma> y}| \<le>o |UNIV::\<sigma> set|\<close>
by auto
have union_univ: \<open>(\<Union>x \<in> range(inv \<alpha>\<sigma>) . {y . \<alpha>\<sigma> x = \<alpha>\<sigma> y}) = UNIV\<close>
by auto (meson f_inv_into_f range_eqI)
text\<open>We refute by case distinction: there is either finitely many or
infinitely many special urelements and in both cases we can derive
a contradiction from the assumption above.\<close>
{
text\<open>Finite case.\<close>
assume finite_\<sigma>_set: \<open>finite (UNIV::\<sigma> set)\<close>
hence finite_collapsed: \<open>finite {y . \<alpha>\<sigma> x = \<alpha>\<sigma> y}\<close> for x
using A card_of_ordLeq_infinite by blast
hence 0: \<open>\<forall>x . card {y . \<alpha>\<sigma> x = \<alpha>\<sigma> y} \<le> card (UNIV::\<sigma> set)\<close>
by (metis A finite_\<sigma>_set card_of_ordLeq inj_on_iff_card_le)
have 1: \<open>card (range (inv \<alpha>\<sigma>)) \<le> card (UNIV::\<sigma> set)\<close>
using finite_\<sigma>_set card_image_le by blast
hence 2: \<open>finite (range (inv \<alpha>\<sigma>))\<close>
using finite_\<sigma>_set by blast
define n where \<open>n = card (UNIV::urrel set set)\<close>
define m where \<open>m = card (UNIV::\<sigma> set)\<close>
have \<open>n = card (\<Union>x \<in> range(inv \<alpha>\<sigma>) . {y . \<alpha>\<sigma> x = \<alpha>\<sigma> y})\<close>
unfolding n_def using union_univ by argo
also have \<open>\<dots> \<le> (\<Sum>i\<in>range (inv \<alpha>\<sigma>). card {y. \<alpha>\<sigma> i = \<alpha>\<sigma> y})\<close>
using card_UN_le 2 by blast
also have \<open>\<dots> \<le> (\<Sum>i\<in>range (inv \<alpha>\<sigma>). card (UNIV::\<sigma> set))\<close>
by (metis (no_types, lifting) 0 sum_mono)
also have \<open>\<dots> \<le> card (range (inv \<alpha>\<sigma>)) * card (UNIV::\<sigma> set)\<close>
using sum_bounded_above by auto
also have \<open>\<dots> \<le> card (UNIV::\<sigma> set) * card (UNIV::\<sigma> set)\<close>
using 1 by force
also have \<open>\<dots> = m*m\<close>
unfolding m_def by blast
finally have n_upper: \<open>n \<le> m*m\<close>.
have \<open>finite (\<Union>x \<in> range(inv \<alpha>\<sigma>) . {y . \<alpha>\<sigma> x = \<alpha>\<sigma> y})\<close>
using 2 finite_collapsed by blast
hence finite_\<alpha>set: \<open>finite (UNIV::urrel set set)\<close>
using union_univ by argo
have \<open>2^2^m = (2::nat)^(card (UNIV::\<sigma> set set))\<close>
by (metis Pow_UNIV card_Pow finite_\<sigma>_set m_def)
moreover have \<open>card (UNIV::\<sigma> set set) \<le> (card (UNIV::urrel set))\<close>
using card_\<sigma>_set_set_bound
by (meson Finite_Set.finite_set card_of_ordLeq finite_\<alpha>set
finite_\<sigma>_set inj_on_iff_card_le)
ultimately have \<open>2^2^m \<le> (2::nat)^(card (UNIV:: urrel set))\<close>
by simp
also have \<open>\<dots> = n\<close>
unfolding n_def
by (metis Finite_Set.finite_set Pow_UNIV card_Pow finite_\<alpha>set)
finally have \<open>2^2^m \<le> n\<close> by blast
hence \<open>2^2^m \<le> m*m\<close> using n_upper by linarith
moreover {
have \<open>(2::nat)^(2^m) \<ge> (2^(m + 1))\<close>
by (metis Suc_eq_plus1 Suc_leI less_exp one_le_numeral power_increasing)
also have \<open>(2^(m + 1)) = (2::nat) * 2^m\<close>
by auto
have \<open>m < 2^m\<close>
by (simp add: less_exp)
hence \<open>m*m < (2^m)*(2^m)\<close>
by (simp add: mult_strict_mono)
moreover have \<open>\<dots> = 2^(m+m)\<close>
by (simp add: power_add)
ultimately have \<open>m*m < 2 ^ (m + m)\<close> by presburger
moreover have \<open>m+m \<le> 2^m\<close>
proof (induct m)
case 0
thus ?case by auto
next
case (Suc m)
thus ?case
by (metis Suc_leI less_exp mult_2 mult_le_mono2 power_Suc)
qed
ultimately have \<open>m*m < 2^2^m\<close>
by (meson less_le_trans one_le_numeral power_increasing)
}
ultimately have False by auto
}
moreover {
text\<open>Infinite case.\<close>
assume \<open>infinite (UNIV::\<sigma> set)\<close>
hence Cinf\<sigma>: \<open>Cinfinite |UNIV::\<sigma> set|\<close>
by (simp add: cinfinite_def)
have 1: \<open>|range (inv \<alpha>\<sigma>)| \<le>o |UNIV::\<sigma> set|\<close>
by auto
have 2: \<open>\<forall>i\<in>range (inv \<alpha>\<sigma>). |{y . \<alpha>\<sigma> i = \<alpha>\<sigma> y}| \<le>o |UNIV::\<sigma> set|\<close>
proof
fix i :: \<open>urrel set\<close>
assume \<open>i \<in> range (inv \<alpha>\<sigma>)\<close>
show \<open>|{y . \<alpha>\<sigma> i = \<alpha>\<sigma> y}| \<le>o |UNIV::\<sigma> set|\<close>
using A by blast
qed
have \<open>|\<Union> ((\<lambda>i. {y. \<alpha>\<sigma> i = \<alpha>\<sigma> y}) ` (range (inv \<alpha>\<sigma>)))| \<le>o
|Sigma (range (inv \<alpha>\<sigma>)) (\<lambda>i. {y. \<alpha>\<sigma> i = \<alpha>\<sigma> y})|\<close>
using card_of_UNION_Sigma by blast
hence \<open>|UNIV::urrel set set| \<le>o
|Sigma (range (inv \<alpha>\<sigma>)) (\<lambda>i. {y. \<alpha>\<sigma> i = \<alpha>\<sigma> y})|\<close>
using union_univ by argo
moreover have \<open>|Sigma (range (inv \<alpha>\<sigma>)) (\<lambda>i. {y. \<alpha>\<sigma> i = \<alpha>\<sigma> y})| \<le>o |UNIV::\<sigma> set|\<close>
using card_of_Sigma_ordLeq_Cinfinite[OF Cinf\<sigma>, OF 1, OF 2] by blast
ultimately have \<open>|UNIV::urrel set set| \<le>o |UNIV::\<sigma> set|\<close>
using ordLeq_transitive by blast
moreover {
have \<open>|UNIV::\<sigma> set| <o |UNIV::\<sigma> set set|\<close>
by auto
moreover have \<open>|UNIV::\<sigma> set set| \<le>o |UNIV::urrel set|\<close>
using card_\<sigma>_set_set_bound by blast
moreover have \<open>|UNIV::urrel set| <o |UNIV::urrel set set|\<close>
by auto
ultimately have \<open>|UNIV::\<sigma> set| <o |UNIV::urrel set set|\<close>
by (metis ordLess_imp_ordLeq ordLess_ordLeq_trans)
}
ultimately have False
using not_ordLeq_ordLess by blast
}
ultimately show False by blast
qed
text\<open>We introduce a mapping from abstract objects (i.e. sets of urrelations) to
special urelements @{text \<open>\<alpha>\<sigma>\<close>} that is surjective and distinguishes all
abstract objects that are distinguished by a (not necessarily surjective)
mapping @{text \<open>\<alpha>\<sigma>'\<close>}. @{text \<open>\<alpha>\<sigma>'\<close>} will be used to model extended relation
comprehension.\<close>
consts \<alpha>\<sigma>' :: \<open>urrel set \<Rightarrow> \<sigma>\<close>
consts \<alpha>\<sigma> :: \<open>urrel set \<Rightarrow> \<sigma>\<close>
specification(\<alpha>\<sigma>)
\<alpha>\<sigma>_surj: \<open>surj \<alpha>\<sigma>\<close>
\<alpha>\<sigma>_\<alpha>\<sigma>': \<open>\<alpha>\<sigma> x = \<alpha>\<sigma> y \<Longrightarrow> \<alpha>\<sigma>' x = \<alpha>\<sigma>' y\<close>
proof -
obtain x where x_prop: \<open>|UNIV::\<sigma> set| <o |{y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y}|\<close>
using \<alpha>\<sigma>_pigeonhole by blast
have \<open>\<exists>f :: urrel set \<Rightarrow> \<sigma> . f ` {y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y} = UNIV \<and> f x = \<alpha>\<sigma>' x\<close>
proof -
have \<open>\<exists>f :: urrel set \<Rightarrow> \<sigma> . f ` {y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y} = UNIV\<close>
by (simp add: x_prop card_of_ordLeq2 ordLess_imp_ordLeq)
then obtain f :: \<open>urrel set \<Rightarrow> \<sigma>\<close> where \<open>f ` {y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y} = UNIV\<close>
by presburger
moreover obtain a where \<open>f a = \<alpha>\<sigma>' x\<close> and \<open>\<alpha>\<sigma>' a = \<alpha>\<sigma>' x\<close>
by (smt (verit, best) calculation UNIV_I image_iff mem_Collect_eq)
ultimately have \<open>(f (a := f x, x := f a)) ` {y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y} = UNIV \<and>
(f (a := f x, x := f a)) x = \<alpha>\<sigma>' x\<close>
by (auto simp: image_def)
thus ?thesis by blast
qed
then obtain f where fimage: \<open>f ` {y. \<alpha>\<sigma>' x = \<alpha>\<sigma>' y} = UNIV\<close>
and fx: \<open>f x = \<alpha>\<sigma>' x\<close>
by blast
define \<alpha>\<sigma> :: \<open>urrel set \<Rightarrow> \<sigma>\<close> where
\<open>\<alpha>\<sigma> \<equiv> \<lambda> urrels . if \<alpha>\<sigma>' urrels = \<alpha>\<sigma>' x \<and> f urrels \<notin> range \<alpha>\<sigma>'
then f urrels
else \<alpha>\<sigma>' urrels\<close>
have \<open>surj \<alpha>\<sigma>\<close>
proof -
{
fix s :: \<sigma>
{
assume \<open>s \<in> range \<alpha>\<sigma>'\<close>
hence 0: \<open>\<alpha>\<sigma>' (inv \<alpha>\<sigma>' s) = s\<close>
by (meson f_inv_into_f)
{
assume \<open>s = \<alpha>\<sigma>' x\<close>
hence \<open>\<alpha>\<sigma> x = s\<close>
using \<alpha>\<sigma>_def fx by presburger
hence \<open>\<exists>f . \<alpha>\<sigma> (f s) = s\<close>
by auto
}
moreover {
assume \<open>s \<noteq> \<alpha>\<sigma>' x\<close>
hence \<open>\<alpha>\<sigma> (inv \<alpha>\<sigma>' s) = s\<close>
unfolding \<alpha>\<sigma>_def 0 by presburger
hence \<open>\<exists>f . \<alpha>\<sigma> (f s) = s\<close>
by blast
}
ultimately have \<open>\<exists>f . \<alpha>\<sigma> (f s) = s\<close>
by blast
}
moreover {
assume \<open>s \<notin> range \<alpha>\<sigma>'\<close>
moreover obtain urrels where \<open>f urrels = s\<close> and \<open>\<alpha>\<sigma>' x = \<alpha>\<sigma>' urrels\<close>
by (smt (verit, best) UNIV_I fimage image_iff mem_Collect_eq)
ultimately have \<open>\<alpha>\<sigma> urrels = s\<close>
using \<alpha>\<sigma>_def by presburger
hence \<open>\<exists>f . \<alpha>\<sigma> (f s) = s\<close>
by (meson f_inv_into_f range_eqI)
}
ultimately have \<open>\<exists>f . \<alpha>\<sigma> (f s) = s\<close>
by blast
}
thus ?thesis
by (metis surj_def)
qed
moreover have \<open>\<forall>x y. \<alpha>\<sigma> x = \<alpha>\<sigma> y \<longrightarrow> \<alpha>\<sigma>' x = \<alpha>\<sigma>' y\<close>
by (metis \<alpha>\<sigma>_def rangeI)
ultimately show ?thesis
by blast
qed
text\<open>For extended models that validate extended relation comprehension
(and consequently the predecessor axiom), we specify which
abstract objects are distinguished by @{const \<alpha>\<sigma>'}.\<close>
definition urrel_to_\<omega>rel :: \<open>urrel \<Rightarrow> (\<omega> \<Rightarrow> w \<Rightarrow> bool)\<close> where
\<open>urrel_to_\<omega>rel \<equiv> \<lambda> r u w . AOT_model_valid_in w (Rep_urrel r (\<omega>\<upsilon> u))\<close>
definition \<omega>rel_to_urrel :: \<open>(\<omega> \<Rightarrow> w \<Rightarrow> bool) \<Rightarrow> urrel\<close> where
\<open>\<omega>rel_to_urrel \<equiv> \<lambda> \<phi> . Abs_urrel
(\<lambda> u . \<epsilon>\<^sub>\<o> w . case u of \<omega>\<upsilon> x \<Rightarrow> \<phi> x w | _ \<Rightarrow> False)\<close>
definition AOT_urrel_\<omega>equiv :: \<open>urrel \<Rightarrow> urrel \<Rightarrow> bool\<close> where
\<open>AOT_urrel_\<omega>equiv \<equiv> \<lambda> r s . \<forall> u v . AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> u)) =
AOT_model_valid_in v (Rep_urrel s (\<omega>\<upsilon> u))\<close>
lemma urrel_\<omega>rel_quot: \<open>Quotient3 AOT_urrel_\<omega>equiv urrel_to_\<omega>rel \<omega>rel_to_urrel\<close>
proof(rule Quotient3I)
show \<open>urrel_to_\<omega>rel (\<omega>rel_to_urrel a) = a\<close> for a
unfolding \<omega>rel_to_urrel_def urrel_to_\<omega>rel_def
apply (rule ext)
apply (subst Abs_urrel_inverse)
by (auto simp: AOT_model_proposition_choice_simp)
next
show \<open>AOT_urrel_\<omega>equiv (\<omega>rel_to_urrel a) (\<omega>rel_to_urrel a)\<close> for a
unfolding \<omega>rel_to_urrel_def AOT_urrel_\<omega>equiv_def
apply (subst (1 2) Abs_urrel_inverse)
by (auto simp: AOT_model_proposition_choice_simp)
next
show \<open>AOT_urrel_\<omega>equiv r s = (AOT_urrel_\<omega>equiv r r \<and> AOT_urrel_\<omega>equiv s s \<and>
urrel_to_\<omega>rel r = urrel_to_\<omega>rel s)\<close> for r s
proof
assume \<open>AOT_urrel_\<omega>equiv r s\<close>
hence \<open>AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> u)) =
AOT_model_valid_in v (Rep_urrel s (\<omega>\<upsilon> u))\<close> for u v
using AOT_urrel_\<omega>equiv_def by metis
hence \<open>urrel_to_\<omega>rel r = urrel_to_\<omega>rel s\<close>
unfolding urrel_to_\<omega>rel_def
by simp
thus \<open>AOT_urrel_\<omega>equiv r r \<and> AOT_urrel_\<omega>equiv s s \<and>
urrel_to_\<omega>rel r = urrel_to_\<omega>rel s\<close>
unfolding AOT_urrel_\<omega>equiv_def
by auto
next
assume \<open>AOT_urrel_\<omega>equiv r r \<and> AOT_urrel_\<omega>equiv s s \<and>
urrel_to_\<omega>rel r = urrel_to_\<omega>rel s\<close>
hence \<open>AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> u)) =
AOT_model_valid_in v (Rep_urrel s (\<omega>\<upsilon> u))\<close> for u v
by (metis urrel_to_\<omega>rel_def)
thus \<open>AOT_urrel_\<omega>equiv r s\<close>
using AOT_urrel_\<omega>equiv_def by presburger
qed
qed
specification (\<alpha>\<sigma>')
\<alpha>\<sigma>_eq_ord_exts_all:
\<open>\<alpha>\<sigma>' a = \<alpha>\<sigma>' b \<Longrightarrow> (\<And>s . urrel_to_\<omega>rel s = urrel_to_\<omega>rel r \<Longrightarrow> s \<in> a)
\<Longrightarrow> (\<And> s . urrel_to_\<omega>rel s = urrel_to_\<omega>rel r \<Longrightarrow> s \<in> b)\<close>
\<alpha>\<sigma>_eq_ord_exts_ex:
\<open>\<alpha>\<sigma>' a = \<alpha>\<sigma>' b \<Longrightarrow> (\<exists> s . s \<in> a \<and> urrel_to_\<omega>rel s = urrel_to_\<omega>rel r)
\<Longrightarrow> (\<exists>s . s \<in> b \<and> urrel_to_\<omega>rel s = urrel_to_\<omega>rel r)\<close>
proof -
define \<alpha>\<sigma>_wit_intersection where
\<open>\<alpha>\<sigma>_wit_intersection \<equiv> \<lambda> urrels .
{ordext . \<forall>urrel . urrel_to_\<omega>rel urrel = ordext \<longrightarrow> urrel \<in> urrels}\<close>
define \<alpha>\<sigma>_wit_union where
\<open>\<alpha>\<sigma>_wit_union \<equiv> \<lambda> urrels .
{ordext . \<exists>urrel\<in>urrels . urrel_to_\<omega>rel urrel = ordext}\<close>
let ?\<alpha>\<sigma>_wit = \<open>\<lambda> urrels .
let ordexts = \<alpha>\<sigma>_wit_intersection urrels in
let ordexts' = \<alpha>\<sigma>_wit_union urrels in
(ordexts, ordexts', undefined)\<close>
define \<alpha>\<sigma>_wit :: \<open>urrel set \<Rightarrow> \<sigma>\<close> where
\<open>\<alpha>\<sigma>_wit \<equiv> \<lambda> urrels . Abs_\<sigma> (?\<alpha>\<sigma>_wit urrels)\<close>
{
fix a b :: \<open>urrel set\<close> and r s
assume \<open>\<alpha>\<sigma>_wit a = \<alpha>\<sigma>_wit b\<close>
hence 0: \<open>{ordext. \<forall>urrel. urrel_to_\<omega>rel urrel = ordext \<longrightarrow> urrel \<in> a} =
{ordext. \<forall>urrel. urrel_to_\<omega>rel urrel = ordext \<longrightarrow> urrel \<in> b}\<close>
unfolding \<alpha>\<sigma>_wit_def Let_def
apply (subst (asm) Abs_\<sigma>_inject)
by (auto simp: \<alpha>\<sigma>_wit_intersection_def \<alpha>\<sigma>_wit_union_def)
assume \<open>urrel_to_\<omega>rel s = urrel_to_\<omega>rel r \<Longrightarrow> s \<in> a\<close> for s
hence \<open>urrel_to_\<omega>rel r \<in>
{ordext. \<forall>urrel. urrel_to_\<omega>rel urrel = ordext \<longrightarrow> urrel \<in> a}\<close>
by auto
hence \<open>urrel_to_\<omega>rel r \<in>
{ordext. \<forall>urrel. urrel_to_\<omega>rel urrel = ordext \<longrightarrow> urrel \<in> b}\<close>
using 0 by blast
moreover assume \<open>urrel_to_\<omega>rel s = urrel_to_\<omega>rel r\<close>
ultimately have \<open>s \<in> b\<close>
by blast
}
moreover {
fix a b :: \<open>urrel set\<close> and s r
assume \<open>\<alpha>\<sigma>_wit a = \<alpha>\<sigma>_wit b\<close>
hence 0: \<open>{ordext. \<exists>urrel \<in> a. urrel_to_\<omega>rel urrel = ordext} =
{ordext. \<exists>urrel \<in> b. urrel_to_\<omega>rel urrel = ordext}\<close>
unfolding \<alpha>\<sigma>_wit_def
apply (subst (asm) Abs_\<sigma>_inject)
by (auto simp: Let_def \<alpha>\<sigma>_wit_intersection_def \<alpha>\<sigma>_wit_union_def)
assume \<open>s \<in> a\<close>
hence \<open>urrel_to_\<omega>rel s \<in> {ordext. \<exists>urrel \<in> a. urrel_to_\<omega>rel urrel = ordext}\<close>
by blast
moreover assume \<open>urrel_to_\<omega>rel s = urrel_to_\<omega>rel r\<close>
ultimately have \<open>urrel_to_\<omega>rel r \<in>
{ordext. \<exists>urrel \<in> b. urrel_to_\<omega>rel urrel = ordext}\<close>
using "0" by argo
hence \<open>\<exists>s. s \<in> b \<and> urrel_to_\<omega>rel s = urrel_to_\<omega>rel r\<close>
by blast
}
ultimately show ?thesis
by (safe intro!: exI[where x=\<alpha>\<sigma>_wit]; metis)
qed
text\<open>We enable the extended model version.\<close>
abbreviation (input) AOT_ExtendedModel where \<open>AOT_ExtendedModel \<equiv> True\<close>
text\<open>Individual terms are either ordinary objects, represented by ordinary urelements,
abstract objects, modelled as sets of urrelations, or null objects, used to
represent non-denoting definite descriptions.\<close>
datatype \<kappa> = \<omega>\<kappa> \<omega> | \<alpha>\<kappa> \<open>urrel set\<close> | is_null\<kappa>: null\<kappa> null
text\<open>The mapping from abstract objects to urelements can be naturally
lifted to a surjective mapping from individual terms to urelements.\<close>
primrec \<kappa>\<upsilon> :: \<open>\<kappa>\<Rightarrow>\<upsilon>\<close> where
\<open>\<kappa>\<upsilon> (\<omega>\<kappa> x) = \<omega>\<upsilon> x\<close>
| \<open>\<kappa>\<upsilon> (\<alpha>\<kappa> x) = \<sigma>\<upsilon> (\<alpha>\<sigma> x)\<close>
| \<open>\<kappa>\<upsilon> (null\<kappa> x) = null\<upsilon> x\<close>
lemma \<kappa>\<upsilon>_surj: \<open>surj \<kappa>\<upsilon>\<close>
using \<alpha>\<sigma>_surj by (metis \<kappa>\<upsilon>.simps(1) \<kappa>\<upsilon>.simps(2) \<kappa>\<upsilon>.simps(3) \<upsilon>.exhaust surj_def)
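
text\<open>In contrast (a simple consistency check of our own), @{term \<kappa>\<upsilon>} is
  injective on ordinary objects, so only abstract objects can collapse to a
  shared urelement:\<close>
lemma \<open>\<kappa>\<upsilon> (\<omega>\<kappa> x) = \<kappa>\<upsilon> (\<omega>\<kappa> y) \<Longrightarrow> x = y\<close>
  by simp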
text\<open>By construction if the urelement of an individual term is exemplified by
an urrelation, it cannot be a null-object.\<close>
lemma urrel_null_false:
assumes \<open>AOT_model_valid_in w (Rep_urrel f (\<kappa>\<upsilon> x))\<close>
shows \<open>\<not>is_null\<kappa> x\<close>
by (metis (mono_tags, lifting) assms Rep_urrel \<kappa>.collapse(3) \<kappa>\<upsilon>.simps(3)
mem_Collect_eq)
text\<open>AOT requires any ordinary object to be @{emph \<open>possibly concrete\<close>} and that
there is an object that is not actually, but possibly concrete.\<close>
consts AOT_model_concrete\<omega> :: \<open>\<omega> \<Rightarrow> w \<Rightarrow> bool\<close>
specification (AOT_model_concrete\<omega>)
AOT_model_\<omega>_concrete_in_some_world:
\<open>\<exists> w . AOT_model_concrete\<omega> x w\<close>
AOT_model_contingent_object:
\<open>\<exists> x w . AOT_model_concrete\<omega> x w \<and> \<not>AOT_model_concrete\<omega> x w\<^sub>0\<close>
by (rule exI[where x=\<open>\<lambda>_ w. w \<noteq> w\<^sub>0\<close>]) (auto simp: AOT_model_nonactual_world)
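
text\<open>In particular (noted merely for illustration), the specification
  immediately yields an ordinary object that is not concrete at the
  actual world:\<close>
lemma \<open>\<exists> x . \<not>AOT_model_concrete\<omega> x w\<^sub>0\<close>
  using AOT_model_contingent_object by blast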
text\<open>We define a type class for AOT's terms specifying the conditions under which
objects of that type denote and require the set of denoting terms to be
non-empty.\<close>
class AOT_Term =
fixes AOT_model_denotes :: \<open>'a \<Rightarrow> bool\<close>
assumes AOT_model_denoting_ex: \<open>\<exists> x . AOT_model_denotes x\<close>
text\<open>All types except the type of propositions involve non-denoting terms. We
define a refined type class for those.\<close>
class AOT_IncompleteTerm = AOT_Term +
assumes AOT_model_nondenoting_ex: \<open>\<exists> x . \<not>AOT_model_denotes x\<close>
text\<open>Generic non-denoting term.\<close>
definition AOT_model_nondenoting :: \<open>'a::AOT_IncompleteTerm\<close> where
\<open>AOT_model_nondenoting \<equiv> SOME \<tau> . \<not>AOT_model_denotes \<tau>\<close>
lemma AOT_model_nondenoing: \<open>\<not>AOT_model_denotes (AOT_model_nondenoting)\<close>
using someI_ex[OF AOT_model_nondenoting_ex]
unfolding AOT_model_nondenoting_def by blast
text\<open>@{const AOT_model_denotes} can trivially be extended to products of types.\<close>
instantiation prod :: (AOT_Term, AOT_Term) AOT_Term
begin
definition AOT_model_denotes_prod :: \<open>'a\<times>'b \<Rightarrow> bool\<close> where
\<open>AOT_model_denotes_prod \<equiv> \<lambda>(x,y) . AOT_model_denotes x \<and> AOT_model_denotes y\<close>
instance proof
show \<open>\<exists>x::'a\<times>'b. AOT_model_denotes x\<close>
by (simp add: AOT_model_denotes_prod_def AOT_model_denoting_ex)
qed
end
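text\<open>As a sanity check: a pair denotes just in case both of its components denote.\<close>
lemma \<open>AOT_model_denotes (x, y) = (AOT_model_denotes x \<and> AOT_model_denotes y)\<close>
  by (simp add: AOT_model_denotes_prod_def)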
text\<open>We specify a transformation of proposition-valued functions on terms, such that
the result is fully determined by @{emph \<open>regular\<close>} terms. This will be required
for modelling n-ary relations as functions on tuples while preserving AOT's
definition of n-ary relation identity.\<close>
locale AOT_model_irregular_spec =
fixes AOT_model_irregular :: \<open>('a \<Rightarrow> \<o>) \<Rightarrow> 'a \<Rightarrow> \<o>\<close>
and AOT_model_regular :: \<open>'a \<Rightarrow> bool\<close>
and AOT_model_term_equiv :: \<open>'a \<Rightarrow> 'a \<Rightarrow> bool\<close>
assumes AOT_model_irregular_false:
\<open>\<not>AOT_model_valid_in w (AOT_model_irregular \<phi> x)\<close>
assumes AOT_model_irregular_equiv:
\<open>AOT_model_term_equiv x y \<Longrightarrow>
AOT_model_irregular \<phi> x = AOT_model_irregular \<phi> y\<close>
assumes AOT_model_irregular_eqI:
\<open>(\<And> x . AOT_model_regular x \<Longrightarrow> \<phi> x = \<psi> x) \<Longrightarrow>
AOT_model_irregular \<phi> x = AOT_model_irregular \<psi> x\<close>
text\<open>We introduce a type class for individual terms that specifies being regular,
being equivalent (i.e. conceptually @{emph \<open>sharing urelements\<close>}) and the
transformation on proposition-valued functions as specified above.\<close>
class AOT_IndividualTerm = AOT_IncompleteTerm +
fixes AOT_model_regular :: \<open>'a \<Rightarrow> bool\<close>
fixes AOT_model_term_equiv :: \<open>'a \<Rightarrow> 'a \<Rightarrow> bool\<close>
fixes AOT_model_irregular :: \<open>('a \<Rightarrow> \<o>) \<Rightarrow> 'a \<Rightarrow> \<o>\<close>
assumes AOT_model_irregular_nondenoting:
\<open>\<not>AOT_model_regular x \<Longrightarrow> \<not>AOT_model_denotes x\<close>
assumes AOT_model_term_equiv_part_equivp:
\<open>equivp AOT_model_term_equiv\<close>
assumes AOT_model_term_equiv_denotes:
\<open>AOT_model_term_equiv x y \<Longrightarrow> (AOT_model_denotes x = AOT_model_denotes y)\<close>
assumes AOT_model_term_equiv_regular:
\<open>AOT_model_term_equiv x y \<Longrightarrow> (AOT_model_regular x = AOT_model_regular y)\<close>
assumes AOT_model_irregular:
\<open>AOT_model_irregular_spec AOT_model_irregular AOT_model_regular
AOT_model_term_equiv\<close>
interpretation AOT_model_irregular_spec AOT_model_irregular AOT_model_regular
AOT_model_term_equiv
using AOT_model_irregular .
text\<open>Our concrete type for individual terms satisfies the type class of
individual terms.
Note that all unary individuals are regular. In general, an individual term
may be a tuple and is regular if at most one tuple element does not denote.\<close>
instantiation \<kappa> :: AOT_IndividualTerm
begin
definition AOT_model_term_equiv_\<kappa> :: \<open>\<kappa> \<Rightarrow> \<kappa> \<Rightarrow> bool\<close> where
\<open>AOT_model_term_equiv_\<kappa> \<equiv> \<lambda> x y . \<kappa>\<upsilon> x = \<kappa>\<upsilon> y\<close>
definition AOT_model_denotes_\<kappa> :: \<open>\<kappa> \<Rightarrow> bool\<close> where
\<open>AOT_model_denotes_\<kappa> \<equiv> \<lambda> x . \<not>is_null\<kappa> x\<close>
definition AOT_model_regular_\<kappa> :: \<open>\<kappa> \<Rightarrow> bool\<close> where
\<open>AOT_model_regular_\<kappa> \<equiv> \<lambda> x . True\<close>
definition AOT_model_irregular_\<kappa> :: \<open>(\<kappa> \<Rightarrow> \<o>) \<Rightarrow> \<kappa> \<Rightarrow> \<o>\<close> where
\<open>AOT_model_irregular_\<kappa> \<equiv> SOME \<phi> . AOT_model_irregular_spec \<phi>
AOT_model_regular AOT_model_term_equiv\<close>
instance proof
show \<open>\<exists>x :: \<kappa>. AOT_model_denotes x\<close>
by (rule exI[where x=\<open>\<omega>\<kappa> undefined\<close>])
(simp add: AOT_model_denotes_\<kappa>_def)
next
show \<open>\<exists>x :: \<kappa>. \<not>AOT_model_denotes x\<close>
by (rule exI[where x=\<open>null\<kappa> undefined\<close>])
(simp add: AOT_model_denotes_\<kappa>_def AOT_model_regular_\<kappa>_def)
next
show "\<not>AOT_model_regular x \<Longrightarrow> \<not> AOT_model_denotes x" for x :: \<kappa>
by (simp add: AOT_model_regular_\<kappa>_def)
next
show \<open>equivp (AOT_model_term_equiv :: \<kappa> \<Rightarrow> \<kappa> \<Rightarrow> bool)\<close>
by (rule equivpI; rule reflpI exI sympI transpI)
(simp_all add: AOT_model_term_equiv_\<kappa>_def)
next
fix x y :: \<kappa>
show \<open>AOT_model_term_equiv x y \<Longrightarrow> AOT_model_denotes x = AOT_model_denotes y\<close>
by (metis AOT_model_denotes_\<kappa>_def AOT_model_term_equiv_\<kappa>_def \<kappa>.exhaust_disc
\<kappa>\<upsilon>.simps \<upsilon>.disc(1,3,5,6) is_\<alpha>\<kappa>_def is_\<omega>\<kappa>_def is_null\<kappa>_def)
next
fix x y :: \<kappa>
show \<open>AOT_model_term_equiv x y \<Longrightarrow> AOT_model_regular x = AOT_model_regular y\<close>
by (simp add: AOT_model_regular_\<kappa>_def)
next
have "AOT_model_irregular_spec (\<lambda> \<phi> (x::\<kappa>) . \<epsilon>\<^sub>\<o> w . False)
AOT_model_regular AOT_model_term_equiv"
by standard (auto simp: AOT_model_proposition_choice_simp)
thus \<open>AOT_model_irregular_spec (AOT_model_irregular::(\<kappa>\<Rightarrow>\<o>) \<Rightarrow> \<kappa> \<Rightarrow> \<o>)
AOT_model_regular AOT_model_term_equiv\<close>
unfolding AOT_model_irregular_\<kappa>_def by (metis (no_types, lifting) someI_ex)
qed
end
text\<open>We define relations among individuals as proposition-valued functions.
@{emph \<open>Denoting\<close>} unary relations (among @{typ \<kappa>}) will match the
urrelations introduced above.\<close>
typedef 'a rel (\<open><_>\<close>) = \<open>UNIV::('a::AOT_IndividualTerm \<Rightarrow> \<o>) set\<close> ..
setup_lifting type_definition_rel
text\<open>We will use the transformation specified above to "fix" the behaviour of
functions on irregular terms when defining @{text \<open>\<lambda>\<close>}-expressions.\<close>
definition fix_irregular :: \<open>('a::AOT_IndividualTerm \<Rightarrow> \<o>) \<Rightarrow> ('a \<Rightarrow> \<o>)\<close> where
\<open>fix_irregular \<equiv> \<lambda> \<phi> x . if AOT_model_regular x
then \<phi> x else AOT_model_irregular \<phi> x\<close>
lemma fix_irregular_denoting:
\<open>AOT_model_denotes x \<Longrightarrow> fix_irregular \<phi> x = \<phi> x\<close>
by (meson AOT_model_irregular_nondenoting fix_irregular_def)
lemma fix_irregular_regular:
\<open>AOT_model_regular x \<Longrightarrow> fix_irregular \<phi> x = \<phi> x\<close>
by (meson AOT_model_irregular_nondenoting fix_irregular_def)
lemma fix_irregular_irregular:
\<open>\<not>AOT_model_regular x \<Longrightarrow> fix_irregular \<phi> x = AOT_model_irregular \<phi> x\<close>
by (simp add: fix_irregular_def)
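text\<open>As a sanity check: all terms of type @{typ \<kappa>} are regular, so
@{const fix_irregular} acts as the identity on them.\<close>
lemma \<open>fix_irregular \<phi> (x::\<kappa>) = \<phi> x\<close>
  by (simp add: fix_irregular_def AOT_model_regular_\<kappa>_def)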
text\<open>Relations among individual terms are (potentially non-denoting) terms.
A relation denotes, if it agrees on all equivalent terms (i.e. terms sharing
urelements), is necessarily false on all non-denoting terms and is
well-behaved on irregular terms.\<close>
instantiation rel :: (AOT_IndividualTerm) AOT_IncompleteTerm
begin
text\<open>\linelabel{AOT_model_denotes_rel}\<close>
lift_definition AOT_model_denotes_rel :: \<open><'a> \<Rightarrow> bool\<close> is
\<open>\<lambda> \<phi> . (\<forall> x y . AOT_model_term_equiv x y \<longrightarrow> \<phi> x = \<phi> y) \<and>
(\<forall> w x . AOT_model_valid_in w (\<phi> x) \<longrightarrow> AOT_model_denotes x) \<and>
(\<forall> x . \<not>AOT_model_regular x \<longrightarrow> \<phi> x = AOT_model_irregular \<phi> x)\<close> .
instance proof
have \<open>AOT_model_irregular (fix_irregular \<phi>) x = AOT_model_irregular \<phi> x\<close>
for \<phi> and x :: 'a
by (rule AOT_model_irregular_eqI) (simp add: fix_irregular_def)
thus \<open>\<exists> x :: <'a> . AOT_model_denotes x\<close>
by (safe intro!: exI[where x=\<open>Abs_rel (fix_irregular (\<lambda>x. \<epsilon>\<^sub>\<o> w . False))\<close>])
(transfer; auto simp: AOT_model_proposition_choice_simp fix_irregular_def
AOT_model_irregular_equiv AOT_model_term_equiv_regular
AOT_model_irregular_false)
next
show \<open>\<exists>f :: <'a> . \<not>AOT_model_denotes f\<close>
by (rule exI[where x=\<open>Abs_rel (\<lambda>x. \<epsilon>\<^sub>\<o> w . True)\<close>];
auto simp: AOT_model_denotes_rel.abs_eq AOT_model_nondenoting_ex
AOT_model_proposition_choice_simp)
qed
end
text\<open>Auxiliary lemmata.\<close>
lemma AOT_model_term_equiv_eps:
shows \<open>AOT_model_term_equiv (Eps (AOT_model_term_equiv \<kappa>)) \<kappa>\<close>
and \<open>AOT_model_term_equiv \<kappa> (Eps (AOT_model_term_equiv \<kappa>))\<close>
and \<open>AOT_model_term_equiv \<kappa> \<kappa>' \<Longrightarrow>
(Eps (AOT_model_term_equiv \<kappa>)) = (Eps (AOT_model_term_equiv \<kappa>'))\<close>
apply (metis AOT_model_term_equiv_part_equivp equivp_def someI_ex)
apply (metis AOT_model_term_equiv_part_equivp equivp_def someI_ex)
by (metis AOT_model_term_equiv_part_equivp equivp_def)
lemma AOT_model_denotes_Abs_rel_fix_irregularI:
assumes \<open>\<And> x y . AOT_model_term_equiv x y \<Longrightarrow> \<phi> x = \<phi> y\<close>
and \<open>\<And> w x . AOT_model_valid_in w (\<phi> x) \<Longrightarrow> AOT_model_denotes x\<close>
shows \<open>AOT_model_denotes (Abs_rel (fix_irregular \<phi>))\<close>
proof -
have \<open>AOT_model_irregular \<phi> x = AOT_model_irregular
(\<lambda>x. if AOT_model_regular x then \<phi> x else AOT_model_irregular \<phi> x) x\<close>
if \<open>\<not> AOT_model_regular x\<close>
for x
by (rule AOT_model_irregular_eqI) auto
thus ?thesis
unfolding AOT_model_denotes_rel.rep_eq
using assms by (auto simp: AOT_model_irregular_false Abs_rel_inverse
AOT_model_irregular_equiv fix_irregular_def
AOT_model_term_equiv_regular)
qed
lemma AOT_model_term_equiv_rel_equiv:
assumes \<open>AOT_model_denotes x\<close>
and \<open>AOT_model_denotes y\<close>
shows \<open>AOT_model_term_equiv x y = (\<forall> \<Pi> w . AOT_model_denotes \<Pi> \<longrightarrow>
AOT_model_valid_in w (Rep_rel \<Pi> x) = AOT_model_valid_in w (Rep_rel \<Pi> y))\<close>
proof
assume \<open>AOT_model_term_equiv x y\<close>
thus \<open>\<forall> \<Pi> w . AOT_model_denotes \<Pi> \<longrightarrow> AOT_model_valid_in w (Rep_rel \<Pi> x) =
AOT_model_valid_in w (Rep_rel \<Pi> y)\<close>
by (simp add: AOT_model_denotes_rel.rep_eq)
next
have 0: \<open>(AOT_model_denotes x' \<and> AOT_model_term_equiv x' y) =
(AOT_model_denotes y' \<and> AOT_model_term_equiv y' y)\<close>
if \<open>AOT_model_term_equiv x' y'\<close> for x' y'
by (metis that AOT_model_term_equiv_denotes AOT_model_term_equiv_part_equivp
equivp_def)
assume \<open>\<forall> \<Pi> w . AOT_model_denotes \<Pi> \<longrightarrow> AOT_model_valid_in w (Rep_rel \<Pi> x) =
AOT_model_valid_in w (Rep_rel \<Pi> y)\<close>
moreover have \<open>AOT_model_denotes (Abs_rel (fix_irregular
(\<lambda> x . \<epsilon>\<^sub>\<o> w . AOT_model_denotes x \<and> AOT_model_term_equiv x y)))\<close>
(is "AOT_model_denotes ?r")
by (rule AOT_model_denotes_Abs_rel_fix_irregularI)
(auto simp: 0 AOT_model_denotes_rel.rep_eq Abs_rel_inverse fix_irregular_def
AOT_model_proposition_choice_simp AOT_model_irregular_false)
ultimately have \<open>AOT_model_valid_in w (Rep_rel ?r x) =
AOT_model_valid_in w (Rep_rel ?r y)\<close> for w
by blast
thus \<open>AOT_model_term_equiv x y\<close>
by (simp add: Abs_rel_inverse AOT_model_proposition_choice_simp
fix_irregular_denoting[OF assms(1)] AOT_model_term_equiv_part_equivp
fix_irregular_denoting[OF assms(2)] assms equivp_reflp)
qed
text\<open>Denoting relations among terms of type @{typ \<kappa>} correspond to urrelations.\<close>
definition rel_to_urrel :: \<open><\<kappa>> \<Rightarrow> urrel\<close> where
\<open>rel_to_urrel \<equiv> \<lambda> \<Pi> . Abs_urrel (\<lambda> u . Rep_rel \<Pi> (SOME x . \<kappa>\<upsilon> x = u))\<close>
definition urrel_to_rel :: \<open>urrel \<Rightarrow> <\<kappa>>\<close> where
\<open>urrel_to_rel \<equiv> \<lambda> \<phi> . Abs_rel (\<lambda> x . Rep_urrel \<phi> (\<kappa>\<upsilon> x))\<close>
definition AOT_rel_equiv :: \<open><'a::AOT_IndividualTerm> \<Rightarrow> <'a> \<Rightarrow> bool\<close> where
\<open>AOT_rel_equiv \<equiv> \<lambda> f g . AOT_model_denotes f \<and> AOT_model_denotes g \<and> f = g\<close>
lemma urrel_quotient3: \<open>Quotient3 AOT_rel_equiv rel_to_urrel urrel_to_rel\<close>
proof (rule Quotient3I)
have \<open>(\<lambda>u. Rep_urrel a (\<kappa>\<upsilon> (SOME x. \<kappa>\<upsilon> x = u))) = (\<lambda>u. Rep_urrel a u)\<close> for a
by (rule ext) (metis (mono_tags, lifting) \<kappa>\<upsilon>_surj surj_f_inv_f verit_sko_ex')
thus \<open>rel_to_urrel (urrel_to_rel a) = a\<close> for a
by (simp add: Abs_rel_inverse rel_to_urrel_def urrel_to_rel_def
Rep_urrel_inverse)
next
show \<open>AOT_rel_equiv (urrel_to_rel a) (urrel_to_rel a)\<close> for a
unfolding AOT_rel_equiv_def urrel_to_rel_def
by transfer (simp add: AOT_model_regular_\<kappa>_def AOT_model_denotes_\<kappa>_def
AOT_model_term_equiv_\<kappa>_def urrel_null_false)
next
{
fix a
assume \<open>\<forall>w x. AOT_model_valid_in w (a x) \<longrightarrow> \<not> is_null\<kappa> x\<close>
hence \<open>(\<lambda>u. a (SOME x. \<kappa>\<upsilon> x = u)) \<in>
{\<phi>. \<forall>x w. \<not> AOT_model_valid_in w (\<phi> (null\<upsilon> x))}\<close>
by (simp; metis (mono_tags, lifting) \<kappa>.exhaust_disc \<kappa>\<upsilon>.simps \<upsilon>.disc(1,3,5)
\<upsilon>.disc(6) is_\<alpha>\<kappa>_def is_\<omega>\<kappa>_def someI_ex)
} note 1 = this
{
fix r s :: \<open>\<kappa> \<Rightarrow> \<o>\<close>
assume A: \<open>\<forall>x y. AOT_model_term_equiv x y \<longrightarrow> r x = r y\<close>
assume \<open>\<forall>w x. AOT_model_valid_in w (r x) \<longrightarrow> AOT_model_denotes x\<close>
hence 2: \<open>(\<lambda>u. r (SOME x. \<kappa>\<upsilon> x = u)) \<in>
{\<phi>. \<forall>x w. \<not> AOT_model_valid_in w (\<phi> (null\<upsilon> x))}\<close>
using 1 AOT_model_denotes_\<kappa>_def by meson
assume B: \<open>\<forall>x y. AOT_model_term_equiv x y \<longrightarrow> s x = s y\<close>
assume \<open>\<forall>w x. AOT_model_valid_in w (s x) \<longrightarrow> AOT_model_denotes x\<close>
hence 3: \<open>(\<lambda>u. s (SOME x. \<kappa>\<upsilon> x = u)) \<in>
{\<phi>. \<forall>x w. \<not> AOT_model_valid_in w (\<phi> (null\<upsilon> x))}\<close>
using 1 AOT_model_denotes_\<kappa>_def by meson
assume \<open>Abs_urrel (\<lambda>u. r (SOME x. \<kappa>\<upsilon> x = u)) =
Abs_urrel (\<lambda>u. s (SOME x. \<kappa>\<upsilon> x = u))\<close>
hence 4: \<open>r (SOME x. \<kappa>\<upsilon> x = u) = s (SOME x::\<kappa>. \<kappa>\<upsilon> x = u)\<close> for u
unfolding Abs_urrel_inject[OF 2 3] by metis
have \<open>r x = s x\<close> for x
using 4[of \<open>\<kappa>\<upsilon> x\<close>]
by (metis (mono_tags, lifting) A B AOT_model_term_equiv_\<kappa>_def someI_ex)
hence \<open>r = s\<close> by auto
}
thus \<open>AOT_rel_equiv r s = (AOT_rel_equiv r r \<and> AOT_rel_equiv s s \<and>
rel_to_urrel r = rel_to_urrel s)\<close> for r s
unfolding AOT_rel_equiv_def rel_to_urrel_def
by transfer auto
qed
lemma urrel_quotient:
\<open>Quotient AOT_rel_equiv rel_to_urrel urrel_to_rel
(\<lambda>x y. AOT_rel_equiv x x \<and> rel_to_urrel x = y)\<close>
using Quotient3_to_Quotient[OF urrel_quotient3] by auto
text\<open>Unary individual terms are always regular and equipped with encoding and
concreteness. The specification of the type class anticipates the required
properties for deriving the axiom system.\<close>
class AOT_UnaryIndividualTerm =
fixes AOT_model_enc :: \<open>'a \<Rightarrow> <'a::AOT_IndividualTerm> \<Rightarrow> bool\<close>
and AOT_model_concrete :: \<open>w \<Rightarrow> 'a \<Rightarrow> bool\<close>
assumes AOT_model_unary_regular:
\<open>AOT_model_regular x\<close> \<comment> \<open>All unary individual terms are regular.\<close>
and AOT_model_enc_relid:
\<open>AOT_model_denotes F \<Longrightarrow>
AOT_model_denotes G \<Longrightarrow>
(\<And> x . AOT_model_enc x F \<longleftrightarrow> AOT_model_enc x G)
\<Longrightarrow> F = G\<close>
and AOT_model_A_objects:
\<open>\<exists>x . AOT_model_denotes x \<and>
(\<forall>w. \<not> AOT_model_concrete w x) \<and>
(\<forall>F. AOT_model_denotes F \<longrightarrow> AOT_model_enc x F = \<phi> F)\<close>
and AOT_model_contingent:
\<open>\<exists> x w. AOT_model_concrete w x \<and> \<not> AOT_model_concrete w\<^sub>0 x\<close>
and AOT_model_nocoder:
\<open>AOT_model_concrete w x \<Longrightarrow> \<not>AOT_model_enc x F\<close>
and AOT_model_concrete_equiv:
\<open>AOT_model_term_equiv x y \<Longrightarrow>
AOT_model_concrete w x = AOT_model_concrete w y\<close>
and AOT_model_concrete_denotes:
\<open>AOT_model_concrete w x \<Longrightarrow> AOT_model_denotes x\<close>
\<comment> \<open>The following are properties that will only hold in the extended models.\<close>
and AOT_model_enc_indistinguishable_all:
\<open>AOT_ExtendedModel \<Longrightarrow>
AOT_model_denotes a \<Longrightarrow> \<not>(\<exists> w . AOT_model_concrete w a) \<Longrightarrow>
AOT_model_denotes b \<Longrightarrow> \<not>(\<exists> w . AOT_model_concrete w b) \<Longrightarrow>
AOT_model_denotes \<Pi> \<Longrightarrow>
(\<And> \<Pi>' . AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And> v . AOT_model_valid_in v (Rep_rel \<Pi>' a) =
AOT_model_valid_in v (Rep_rel \<Pi>' b))) \<Longrightarrow>
(\<And> \<Pi>' . AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And> v x . \<exists> w . AOT_model_concrete w x \<Longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)) \<Longrightarrow>
AOT_model_enc a \<Pi>') \<Longrightarrow>
(\<And> \<Pi>' . AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And> v x . \<exists> w . AOT_model_concrete w x \<Longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)) \<Longrightarrow>
AOT_model_enc b \<Pi>')\<close>
and AOT_model_enc_indistinguishable_ex:
\<open>AOT_ExtendedModel \<Longrightarrow>
AOT_model_denotes a \<Longrightarrow> \<not>(\<exists> w . AOT_model_concrete w a) \<Longrightarrow>
AOT_model_denotes b \<Longrightarrow> \<not>(\<exists> w . AOT_model_concrete w b) \<Longrightarrow>
AOT_model_denotes \<Pi> \<Longrightarrow>
(\<And> \<Pi>' . AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And> v . AOT_model_valid_in v (Rep_rel \<Pi>' a) =
AOT_model_valid_in v (Rep_rel \<Pi>' b))) \<Longrightarrow>
(\<exists> \<Pi>' . AOT_model_denotes \<Pi>' \<and> AOT_model_enc a \<Pi>' \<and>
(\<forall> v x . (\<exists> w . AOT_model_concrete w x) \<longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x))) \<Longrightarrow>
(\<exists> \<Pi>' . AOT_model_denotes \<Pi>' \<and> AOT_model_enc b \<Pi>' \<and>
(\<forall> v x . (\<exists> w . AOT_model_concrete w x) \<longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)))\<close>
text\<open>Instantiate the class of unary individual terms for our concrete type of
individual terms @{typ \<kappa>}.\<close>
instantiation \<kappa> :: AOT_UnaryIndividualTerm
begin
definition AOT_model_enc_\<kappa> :: \<open>\<kappa> \<Rightarrow> <\<kappa>> \<Rightarrow> bool\<close> where
\<open>AOT_model_enc_\<kappa> \<equiv> \<lambda> x F .
case x of \<alpha>\<kappa> a \<Rightarrow> AOT_model_denotes F \<and> rel_to_urrel F \<in> a
| _ \<Rightarrow> False\<close>
primrec AOT_model_concrete_\<kappa> :: \<open>w \<Rightarrow> \<kappa> \<Rightarrow> bool\<close> where
\<open>AOT_model_concrete_\<kappa> w (\<omega>\<kappa> x) = AOT_model_concrete\<omega> x w\<close>
| \<open>AOT_model_concrete_\<kappa> w (\<alpha>\<kappa> x) = False\<close>
| \<open>AOT_model_concrete_\<kappa> w (null\<kappa> x) = False\<close>
lemma AOT_meta_A_objects_\<kappa>:
\<open>\<exists>x :: \<kappa>. AOT_model_denotes x \<and>
(\<forall>w. \<not> AOT_model_concrete w x) \<and>
(\<forall>F. AOT_model_denotes F \<longrightarrow> AOT_model_enc x F = \<phi> F)\<close> for \<phi>
apply (rule exI[where x=\<open>\<alpha>\<kappa> {f . \<phi> (urrel_to_rel f)}\<close>])
apply (simp add: AOT_model_enc_\<kappa>_def AOT_model_denotes_\<kappa>_def)
by (metis (no_types, lifting) AOT_rel_equiv_def urrel_quotient
Quotient_rep_abs_fold_unmap)
instance proof
show \<open>AOT_model_regular x\<close> for x :: \<kappa>
by (simp add: AOT_model_regular_\<kappa>_def)
next
fix F G :: \<open><\<kappa>>\<close>
assume \<open>AOT_model_denotes F\<close>
moreover assume \<open>AOT_model_denotes G\<close>
moreover assume \<open>\<And>x. AOT_model_enc x F = AOT_model_enc x G\<close>
moreover obtain x where \<open>\<forall>G. AOT_model_denotes G \<longrightarrow> AOT_model_enc x G = (F = G)\<close>
using AOT_meta_A_objects_\<kappa> by blast
ultimately show \<open>F = G\<close> by blast
next
show \<open>\<exists>x :: \<kappa>. AOT_model_denotes x \<and>
(\<forall>w. \<not> AOT_model_concrete w x) \<and>
(\<forall>F. AOT_model_denotes F \<longrightarrow> AOT_model_enc x F = \<phi> F)\<close> for \<phi>
using AOT_meta_A_objects_\<kappa> .
next
show \<open>\<exists> (x::\<kappa>) w. AOT_model_concrete w x \<and> \<not> AOT_model_concrete w\<^sub>0 x\<close>
using AOT_model_concrete_\<kappa>.simps(1) AOT_model_contingent_object by blast
next
show \<open>AOT_model_concrete w x \<Longrightarrow> \<not> AOT_model_enc x F\<close> for w and x :: \<kappa> and F
by (metis AOT_model_concrete_\<kappa>.simps(2) AOT_model_enc_\<kappa>_def \<kappa>.case_eq_if
\<kappa>.collapse(2))
next
show \<open>AOT_model_concrete w x = AOT_model_concrete w y\<close>
if \<open>AOT_model_term_equiv x y\<close>
for x y :: \<kappa> and w
using that by (induct x; induct y; auto simp: AOT_model_term_equiv_\<kappa>_def)
next
show \<open>AOT_model_concrete w x \<Longrightarrow> AOT_model_denotes x\<close> for w and x :: \<kappa>
by (metis AOT_model_concrete_\<kappa>.simps(3) AOT_model_denotes_\<kappa>_def \<kappa>.collapse(3))
(* Extended models only *)
next
fix \<kappa> \<kappa>' :: \<kappa> and \<Pi> \<Pi>' :: \<open><\<kappa>>\<close> and w :: w
assume ext: \<open>AOT_ExtendedModel\<close>
assume \<open>AOT_model_denotes \<kappa>\<close>
moreover assume \<open>\<nexists>w. AOT_model_concrete w \<kappa>\<close>
ultimately obtain a where a_def: \<open>\<alpha>\<kappa> a = \<kappa>\<close>
by (metis AOT_model_\<omega>_concrete_in_some_world AOT_model_concrete_\<kappa>.simps(1)
AOT_model_denotes_\<kappa>_def \<kappa>.discI(3) \<kappa>.exhaust_sel)
assume \<open>AOT_model_denotes \<kappa>'\<close>
moreover assume \<open>\<nexists>w. AOT_model_concrete w \<kappa>'\<close>
ultimately obtain b where b_def: \<open>\<alpha>\<kappa> b = \<kappa>'\<close>
by (metis AOT_model_\<omega>_concrete_in_some_world AOT_model_concrete_\<kappa>.simps(1)
AOT_model_denotes_\<kappa>_def \<kappa>.discI(3) \<kappa>.exhaust_sel)
assume \<open>AOT_model_denotes \<Pi>' \<Longrightarrow> AOT_model_valid_in w (Rep_rel \<Pi>' \<kappa>) =
AOT_model_valid_in w (Rep_rel \<Pi>' \<kappa>')\<close> for \<Pi>' w
hence \<open>AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>)) =
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>'))\<close> for r
by (metis AOT_rel_equiv_def Abs_rel_inverse Quotient3_rel_rep
iso_tuple_UNIV_I urrel_quotient3 urrel_to_rel_def)
hence \<open>let r = (Abs_urrel (\<lambda> u . \<epsilon>\<^sub>\<o> w . u = \<kappa>\<upsilon> \<kappa>)) in
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>)) =
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>'))\<close>
by presburger
hence \<alpha>\<sigma>_eq: \<open>\<alpha>\<sigma> a = \<alpha>\<sigma> b\<close>
unfolding Let_def
apply (subst (asm) (1 2) Abs_urrel_inverse)
using AOT_model_proposition_choice_simp a_def b_def by force+
assume \<Pi>_den: \<open>AOT_model_denotes \<Pi>\<close>
have \<open>\<not>AOT_model_valid_in w (Rep_rel \<Pi> (SOME xa. \<kappa>\<upsilon> xa = null\<upsilon> x))\<close> for x w
by (metis (mono_tags, lifting) AOT_model_denotes_\<kappa>_def
AOT_model_denotes_rel.rep_eq \<kappa>.exhaust_disc \<kappa>\<upsilon>.simps(1,2,3)
\<open>AOT_model_denotes \<Pi>\<close> \<upsilon>.disc(8,9) \<upsilon>.distinct(3)
is_\<alpha>\<kappa>_def is_\<omega>\<kappa>_def verit_sko_ex')
moreover have \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_rel \<Pi> (SOME y. \<kappa>\<upsilon> y = \<omega>\<upsilon> x)\<close> for x
by (metis (mono_tags, lifting) AOT_model_denotes_rel.rep_eq
AOT_model_term_equiv_\<kappa>_def \<kappa>\<upsilon>.simps(1) \<Pi>_den verit_sko_ex')
ultimately have \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel (rel_to_urrel \<Pi>) (\<omega>\<upsilon> x)\<close> for x
unfolding rel_to_urrel_def
by (subst Abs_urrel_inverse) auto
hence \<open>\<exists>r . \<forall> x . Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel r (\<omega>\<upsilon> x)\<close>
by (auto intro!: exI[where x=\<open>rel_to_urrel \<Pi>\<close>])
then obtain r where r_prop: \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel r (\<omega>\<upsilon> x)\<close> for x
by blast
assume \<open>AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And>v x. \<exists>w. AOT_model_concrete w x \<Longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)) \<Longrightarrow> AOT_model_enc \<kappa> \<Pi>'\<close> for \<Pi>'
hence \<open>AOT_model_denotes \<Pi>' \<Longrightarrow>
(\<And>v x. AOT_model_valid_in v (Rep_rel \<Pi>' (\<omega>\<kappa> x)) =
AOT_model_valid_in v (Rep_rel \<Pi> (\<omega>\<kappa> x))) \<Longrightarrow> AOT_model_enc \<kappa> \<Pi>'\<close> for \<Pi>'
by (metis AOT_model_concrete_\<kappa>.simps(2) AOT_model_concrete_\<kappa>.simps(3)
\<kappa>.exhaust_disc is_\<alpha>\<kappa>_def is_\<omega>\<kappa>_def is_null\<kappa>_def)
hence \<open>(\<And>v x. AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> x)) =
AOT_model_valid_in v (Rep_rel \<Pi> (\<omega>\<kappa> x))) \<Longrightarrow> r \<in> a\<close> for r
unfolding a_def[symmetric] AOT_model_enc_\<kappa>_def apply simp
by (smt (verit, best) AOT_rel_equiv_def Abs_rel_inverse Quotient3_def
\<kappa>\<upsilon>.simps(1) iso_tuple_UNIV_I urrel_quotient3 urrel_to_rel_def)
hence \<open>(\<And>v x. AOT_model_valid_in v (Rep_urrel r' (\<omega>\<upsilon> x)) =
AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> x))) \<Longrightarrow> r' \<in> a\<close> for r'
unfolding r_prop.
hence \<open>\<And>s. urrel_to_\<omega>rel s = urrel_to_\<omega>rel r \<Longrightarrow> s \<in> a\<close>
by (metis urrel_to_\<omega>rel_def)
hence 0: \<open>\<And>s. urrel_to_\<omega>rel s = urrel_to_\<omega>rel r \<Longrightarrow> s \<in> b\<close>
using \<alpha>\<sigma>_eq_ord_exts_all \<alpha>\<sigma>_eq ext \<alpha>\<sigma>_\<alpha>\<sigma>' by blast
assume \<Pi>'_den: \<open>AOT_model_denotes \<Pi>'\<close>
assume \<open>\<exists>w. AOT_model_concrete w x \<Longrightarrow> AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)\<close> for v x
hence \<open>AOT_model_valid_in v (Rep_rel \<Pi>' (\<omega>\<kappa> x)) =
AOT_model_valid_in v (Rep_rel \<Pi> (\<omega>\<kappa> x))\<close> for v x
using AOT_model_\<omega>_concrete_in_some_world AOT_model_concrete_\<kappa>.simps(1)
by presburger
hence \<open>AOT_model_valid_in v (Rep_urrel (rel_to_urrel \<Pi>') (\<omega>\<upsilon> x)) =
AOT_model_valid_in v (Rep_urrel r (\<omega>\<upsilon> x))\<close> for v x
by (smt (verit, best) AOT_rel_equiv_def Abs_rel_inverse Quotient3_def
\<kappa>\<upsilon>.simps(1) iso_tuple_UNIV_I r_prop urrel_quotient3 urrel_to_rel_def \<Pi>'_den)
hence \<open>urrel_to_\<omega>rel (rel_to_urrel \<Pi>') = urrel_to_\<omega>rel r\<close>
by (metis (full_types) AOT_urrel_\<omega>equiv_def Quotient3_def urrel_\<omega>rel_quot)
hence \<open>rel_to_urrel \<Pi>' \<in> b\<close> using 0 by blast
thus \<open>AOT_model_enc \<kappa>' \<Pi>'\<close>
unfolding b_def[symmetric] AOT_model_enc_\<kappa>_def by (auto simp: \<Pi>'_den)
next
fix \<kappa> \<kappa>' :: \<kappa> and \<Pi> \<Pi>' :: \<open><\<kappa>>\<close> and w :: w
assume ext: \<open>AOT_ExtendedModel\<close>
assume \<open>AOT_model_denotes \<kappa>\<close>
moreover assume \<open>\<nexists>w. AOT_model_concrete w \<kappa>\<close>
ultimately obtain a where a_def: \<open>\<alpha>\<kappa> a = \<kappa>\<close>
by (metis AOT_model_\<omega>_concrete_in_some_world AOT_model_concrete_\<kappa>.simps(1)
AOT_model_denotes_\<kappa>_def \<kappa>.discI(3) \<kappa>.exhaust_sel)
assume \<open>AOT_model_denotes \<kappa>'\<close>
moreover assume \<open>\<nexists>w. AOT_model_concrete w \<kappa>'\<close>
ultimately obtain b where b_def: \<open>\<alpha>\<kappa> b = \<kappa>'\<close>
by (metis AOT_model_\<omega>_concrete_in_some_world AOT_model_concrete_\<kappa>.simps(1)
AOT_model_denotes_\<kappa>_def \<kappa>.discI(3) \<kappa>.exhaust_sel)
assume \<open>AOT_model_denotes \<Pi>' \<Longrightarrow> AOT_model_valid_in w (Rep_rel \<Pi>' \<kappa>) =
AOT_model_valid_in w (Rep_rel \<Pi>' \<kappa>')\<close> for \<Pi>' w
hence \<open>AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>)) =
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>'))\<close> for r
by (metis AOT_rel_equiv_def Abs_rel_inverse Quotient3_rel_rep
iso_tuple_UNIV_I urrel_quotient3 urrel_to_rel_def)
hence \<open>let r = (Abs_urrel (\<lambda> u . \<epsilon>\<^sub>\<o> w . u = \<kappa>\<upsilon> \<kappa>)) in
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>)) =
AOT_model_valid_in w (Rep_urrel r (\<kappa>\<upsilon> \<kappa>'))\<close>
by presburger
hence \<alpha>\<sigma>_eq: \<open>\<alpha>\<sigma> a = \<alpha>\<sigma> b\<close>
unfolding Let_def
apply (subst (asm) (1 2) Abs_urrel_inverse)
using AOT_model_proposition_choice_simp a_def b_def by force+
assume \<Pi>_den: \<open>AOT_model_denotes \<Pi>\<close>
have \<open>\<not>AOT_model_valid_in w (Rep_rel \<Pi> (SOME xa. \<kappa>\<upsilon> xa = null\<upsilon> x))\<close> for x w
by (metis (mono_tags, lifting) AOT_model_denotes_\<kappa>_def
AOT_model_denotes_rel.rep_eq \<kappa>.exhaust_disc \<kappa>\<upsilon>.simps(1,2,3)
\<open>AOT_model_denotes \<Pi>\<close> \<upsilon>.disc(8) \<upsilon>.disc(9) \<upsilon>.distinct(3)
is_\<alpha>\<kappa>_def is_\<omega>\<kappa>_def verit_sko_ex')
moreover have \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_rel \<Pi> (SOME xa. \<kappa>\<upsilon> xa = \<omega>\<upsilon> x)\<close> for x
by (metis (mono_tags, lifting) AOT_model_denotes_rel.rep_eq
AOT_model_term_equiv_\<kappa>_def \<kappa>\<upsilon>.simps(1) \<Pi>_den verit_sko_ex')
ultimately have \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel (rel_to_urrel \<Pi>) (\<omega>\<upsilon> x)\<close> for x
unfolding rel_to_urrel_def
by (subst Abs_urrel_inverse) auto
hence \<open>\<exists>r . \<forall> x . Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel r (\<omega>\<upsilon> x)\<close>
by (auto intro!: exI[where x=\<open>rel_to_urrel \<Pi>\<close>])
then obtain r where r_prop: \<open>Rep_rel \<Pi> (\<omega>\<kappa> x) = Rep_urrel r (\<omega>\<upsilon> x)\<close> for x
by blast
assume \<open>\<exists>\<Pi>'. AOT_model_denotes \<Pi>' \<and>
AOT_model_enc \<kappa> \<Pi>' \<and>
(\<forall>v x. (\<exists>w. AOT_model_concrete w x) \<longrightarrow> AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x))\<close>
then obtain \<Pi>' where
\<Pi>'_den: \<open>AOT_model_denotes \<Pi>'\<close> and
\<kappa>_enc_\<Pi>': \<open>AOT_model_enc \<kappa> \<Pi>'\<close> and
\<Pi>'_prop: \<open>\<exists>w. AOT_model_concrete w x \<Longrightarrow>
AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)\<close> for v x
by blast
have \<open>AOT_model_valid_in v (Rep_rel \<Pi>' (\<omega>\<kappa> x)) =
AOT_model_valid_in v (Rep_rel \<Pi> (\<omega>\<kappa> x))\<close> for x v
by (simp add: AOT_model_\<omega>_concrete_in_some_world \<Pi>'_prop)
hence 0: \<open>AOT_urrel_\<omega>equiv (rel_to_urrel \<Pi>') (rel_to_urrel \<Pi>)\<close>
unfolding AOT_urrel_\<omega>equiv_def
by (smt (verit) AOT_rel_equiv_def Abs_rel_inverse Quotient3_def
\<kappa>\<upsilon>.simps(1) iso_tuple_UNIV_I urrel_quotient3 urrel_to_rel_def
\<Pi>_den \<Pi>'_den)
have \<open>rel_to_urrel \<Pi>' \<in> a\<close>
and \<open>urrel_to_\<omega>rel (rel_to_urrel \<Pi>') = urrel_to_\<omega>rel (rel_to_urrel \<Pi>)\<close>
apply (metis AOT_model_enc_\<kappa>_def \<kappa>.simps(11) \<kappa>_enc_\<Pi>' a_def)
by (metis Quotient3_rel 0 urrel_\<omega>rel_quot)
hence \<open>\<exists>s. s \<in> b \<and> urrel_to_\<omega>rel s = urrel_to_\<omega>rel (rel_to_urrel \<Pi>)\<close>
using \<alpha>\<sigma>_eq_ord_exts_ex \<alpha>\<sigma>_eq ext \<alpha>\<sigma>_\<alpha>\<sigma>' by blast
then obtain s where
s_prop: \<open>s \<in> b \<and> urrel_to_\<omega>rel s = urrel_to_\<omega>rel (rel_to_urrel \<Pi>)\<close>
by blast
then obtain \<Pi>'' where
\<Pi>''_prop: \<open>rel_to_urrel \<Pi>'' = s\<close> and \<Pi>''_den: \<open>AOT_model_denotes \<Pi>''\<close>
by (metis AOT_rel_equiv_def Quotient3_def urrel_quotient3)
moreover have \<open>AOT_model_enc \<kappa>' \<Pi>''\<close>
by (metis AOT_model_enc_\<kappa>_def \<Pi>''_den \<Pi>''_prop \<kappa>.simps(11) b_def s_prop)
moreover have \<open>AOT_model_valid_in v (Rep_rel \<Pi>'' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)\<close>
if \<open>\<exists>w. AOT_model_concrete w x\<close> for v x
proof(insert that)
assume \<open>\<exists>w. AOT_model_concrete w x\<close>
then obtain u where x_def: \<open>x = \<omega>\<kappa> u\<close>
by (metis AOT_model_concrete_\<kappa>.simps(2,3) \<kappa>.exhaust)
show \<open>AOT_model_valid_in v (Rep_rel \<Pi>'' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x)\<close>
unfolding x_def
by (smt (verit, best) AOT_rel_equiv_def Abs_rel_inverse Quotient3_def
\<Pi>''_den \<Pi>''_prop \<Pi>_den \<kappa>\<upsilon>.simps(1) iso_tuple_UNIV_I s_prop
urrel_quotient3 urrel_to_\<omega>rel_def urrel_to_rel_def)
qed
ultimately show \<open>\<exists>\<Pi>'. AOT_model_denotes \<Pi>' \<and> AOT_model_enc \<kappa>' \<Pi>' \<and>
(\<forall>v x. (\<exists>w. AOT_model_concrete w x) \<longrightarrow> AOT_model_valid_in v (Rep_rel \<Pi>' x) =
AOT_model_valid_in v (Rep_rel \<Pi> x))\<close>
apply (safe intro!: exI[where x=\<Pi>''])
by auto
qed
end
text\<open>Products of unary individual terms and individual terms are individual terms.
A tuple is regular if at most one element does not denote, i.e. a pair is
regular if either the first (unary) element denotes and the second is regular
(i.e. at most one of its recursive tuple elements does not denote), or the first
does not denote but the second denotes (i.e. all its recursive tuple elements
denote).\<close>
instantiation prod :: (AOT_UnaryIndividualTerm, AOT_IndividualTerm) AOT_IndividualTerm
begin
definition AOT_model_regular_prod :: \<open>'a\<times>'b \<Rightarrow> bool\<close> where
\<open>AOT_model_regular_prod \<equiv> \<lambda> (x,y) . AOT_model_denotes x \<and> AOT_model_regular y \<or>
\<not>AOT_model_denotes x \<and> AOT_model_denotes y\<close>
definition AOT_model_term_equiv_prod :: \<open>'a\<times>'b \<Rightarrow> 'a\<times>'b \<Rightarrow> bool\<close> where
\<open>AOT_model_term_equiv_prod \<equiv> \<lambda> (x\<^sub>1,y\<^sub>1) (x\<^sub>2,y\<^sub>2) .
AOT_model_term_equiv x\<^sub>1 x\<^sub>2 \<and> AOT_model_term_equiv y\<^sub>1 y\<^sub>2\<close>
function AOT_model_irregular_prod :: \<open>('a\<times>'b \<Rightarrow> \<o>) \<Rightarrow> 'a\<times>'b \<Rightarrow> \<o>\<close> where
AOT_model_irregular_proj2: \<open>AOT_model_denotes x \<Longrightarrow>
AOT_model_irregular \<phi> (x,y) =
AOT_model_irregular (\<lambda>y. \<phi> (SOME x' . AOT_model_term_equiv x x', y)) y\<close>
| AOT_model_irregular_proj1: \<open>\<not>AOT_model_denotes x \<and> AOT_model_denotes y \<Longrightarrow>
AOT_model_irregular \<phi> (x,y) =
AOT_model_irregular (\<lambda>x. \<phi> (x, SOME y' . AOT_model_term_equiv y y')) x\<close>
| AOT_model_irregular_prod_generic: \<open>\<not>AOT_model_denotes x \<and> \<not>AOT_model_denotes y \<Longrightarrow>
AOT_model_irregular \<phi> (x,y) =
(SOME \<Phi> . AOT_model_irregular_spec \<Phi> AOT_model_regular AOT_model_term_equiv)
\<phi> (x,y)\<close>
by auto blast
termination using "termination" by blast
instance proof
obtain x :: 'a and y :: 'b where
\<open>\<not>AOT_model_denotes x\<close> and \<open>\<not>AOT_model_denotes y\<close>
by (meson AOT_model_nondenoting_ex AOT_model_denoting_ex)
thus \<open>\<exists>x::'a\<times>'b. \<not>AOT_model_denotes x\<close>
by (auto simp: AOT_model_denotes_prod_def AOT_model_regular_prod_def)
next
show \<open>equivp (AOT_model_term_equiv :: 'a\<times>'b \<Rightarrow> 'a\<times>'b \<Rightarrow> bool)\<close>
by (rule equivpI; rule reflpI sympI transpI;
simp add: AOT_model_term_equiv_prod_def AOT_model_term_equiv_part_equivp
equivp_reflp prod.case_eq_if case_prod_unfold equivp_symp)
(metis equivp_transp[OF AOT_model_term_equiv_part_equivp])
next
show \<open>\<not>AOT_model_regular x \<Longrightarrow> \<not> AOT_model_denotes x\<close> for x :: \<open>'a\<times>'b\<close>
by (metis (mono_tags, lifting) AOT_model_denotes_prod_def case_prod_unfold
AOT_model_irregular_nondenoting AOT_model_regular_prod_def)
next
fix x y :: \<open>'a\<times>'b\<close>
show \<open>AOT_model_term_equiv x y \<Longrightarrow> AOT_model_denotes x = AOT_model_denotes y\<close>
by (metis (mono_tags, lifting) AOT_model_denotes_prod_def case_prod_beta
AOT_model_term_equiv_denotes AOT_model_term_equiv_prod_def )
next
fix x y :: \<open>'a\<times>'b\<close>
show \<open>AOT_model_term_equiv x y \<Longrightarrow> AOT_model_regular x = AOT_model_regular y\<close>
by (induct x; induct y;
simp add: AOT_model_term_equiv_prod_def AOT_model_regular_prod_def)
(meson AOT_model_term_equiv_denotes AOT_model_term_equiv_regular)
next
interpret sp: AOT_model_irregular_spec \<open>\<lambda>\<phi> (x::'a\<times>'b) . \<epsilon>\<^sub>\<o> w . False\<close>
AOT_model_regular AOT_model_term_equiv
by (simp add: AOT_model_irregular_spec_def AOT_model_proposition_choice_simp)
have ex_spec: \<open>\<exists> \<phi> :: ('a\<times>'b \<Rightarrow> \<o>) \<Rightarrow> 'a\<times>'b \<Rightarrow> \<o> .
AOT_model_irregular_spec \<phi> AOT_model_regular AOT_model_term_equiv\<close>
using sp.AOT_model_irregular_spec_axioms by blast
have some_spec: \<open>AOT_model_irregular_spec
(SOME \<phi> :: ('a\<times>'b \<Rightarrow> \<o>) \<Rightarrow> 'a\<times>'b \<Rightarrow> \<o> .
AOT_model_irregular_spec \<phi> AOT_model_regular AOT_model_term_equiv)
AOT_model_regular AOT_model_term_equiv\<close>
using someI_ex[OF ex_spec] by argo
interpret sp_some: AOT_model_irregular_spec
\<open>SOME \<phi> :: ('a\<times>'b \<Rightarrow> \<o>) \<Rightarrow> 'a\<times>'b \<Rightarrow> \<o> .
AOT_model_irregular_spec \<phi> AOT_model_regular AOT_model_term_equiv\<close>
AOT_model_regular AOT_model_term_equiv
using some_spec by blast
show \<open>AOT_model_irregular_spec (AOT_model_irregular :: ('a\<times>'b \<Rightarrow> \<o>) \<Rightarrow> 'a\<times>'b \<Rightarrow> \<o>)
AOT_model_regular AOT_model_term_equiv\<close>
proof
have \<open>\<not>AOT_model_valid_in w (AOT_model_irregular \<phi> (a, b))\<close>
for w \<phi> and a :: 'a and b :: 'b
by (induct arbitrary: \<phi> rule: AOT_model_irregular_prod.induct)
(auto simp: AOT_model_irregular_false sp_some.AOT_model_irregular_false)
thus "\<not>AOT_model_valid_in w (AOT_model_irregular \<phi> x)" for w \<phi> and x :: \<open>'a\<times>'b\<close>
by (induct x)
next
{
fix x\<^sub>1 y\<^sub>1 :: 'a and x\<^sub>2 y\<^sub>2 :: 'b and \<phi> :: \<open>'a\<times>'b\<Rightarrow>\<o>\<close>
assume x\<^sub>1y\<^sub>1_equiv: \<open>AOT_model_term_equiv x\<^sub>1 y\<^sub>1\<close>
moreover assume x\<^sub>2y\<^sub>2_equiv: \<open>AOT_model_term_equiv x\<^sub>2 y\<^sub>2\<close>
ultimately have xy_equiv: \<open>AOT_model_term_equiv (x\<^sub>1,x\<^sub>2) (y\<^sub>1,y\<^sub>2)\<close>
by (simp add: AOT_model_term_equiv_prod_def)
{
assume \<open>AOT_model_denotes x\<^sub>1\<close>
moreover hence \<open>AOT_model_denotes y\<^sub>1\<close>
using AOT_model_term_equiv_denotes AOT_model_term_equiv_regular
x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv by blast
ultimately have \<open>AOT_model_irregular \<phi> (x\<^sub>1,x\<^sub>2) =
AOT_model_irregular \<phi> (y\<^sub>1,y\<^sub>2)\<close>
using AOT_model_irregular_equiv AOT_model_term_equiv_eps(3)
x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv by fastforce
}
moreover {
assume \<open>\<not>AOT_model_denotes x\<^sub>1 \<and> AOT_model_denotes x\<^sub>2\<close>
moreover hence \<open>\<not>AOT_model_denotes y\<^sub>1 \<and> AOT_model_denotes y\<^sub>2\<close>
by (meson AOT_model_term_equiv_denotes x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv)
ultimately have \<open>AOT_model_irregular \<phi> (x\<^sub>1,x\<^sub>2) =
AOT_model_irregular \<phi> (y\<^sub>1,y\<^sub>2)\<close>
using AOT_model_irregular_equiv AOT_model_term_equiv_eps(3)
x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv by fastforce
}
moreover {
assume denotes_x: \<open>(\<not>AOT_model_denotes x\<^sub>1 \<and> \<not>AOT_model_denotes x\<^sub>2)\<close>
hence denotes_y: \<open>(\<not>AOT_model_denotes y\<^sub>1 \<and> \<not>AOT_model_denotes y\<^sub>2)\<close>
by (meson AOT_model_term_equiv_denotes AOT_model_term_equiv_regular
x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv)
have eps_eq: \<open>Eps (AOT_model_term_equiv x\<^sub>1) = Eps (AOT_model_term_equiv y\<^sub>1)\<close>
by (simp add: AOT_model_term_equiv_eps(3) x\<^sub>1y\<^sub>1_equiv)
have \<open>AOT_model_irregular \<phi> (x\<^sub>1,x\<^sub>2) = AOT_model_irregular \<phi> (y\<^sub>1,y\<^sub>2)\<close>
using denotes_x denotes_y
using sp_some.AOT_model_irregular_equiv xy_equiv by auto
}
moreover {
assume denotes_x: \<open>\<not>AOT_model_denotes x\<^sub>1 \<and> AOT_model_denotes x\<^sub>2\<close>
hence denotes_y: \<open>\<not>AOT_model_denotes y\<^sub>1 \<and> AOT_model_denotes y\<^sub>2\<close>
by (meson AOT_model_term_equiv_denotes x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv)
have eps_eq: \<open>Eps (AOT_model_term_equiv x\<^sub>2) = Eps (AOT_model_term_equiv y\<^sub>2)\<close>
by (simp add: AOT_model_term_equiv_eps(3) x\<^sub>2y\<^sub>2_equiv)
have \<open>AOT_model_irregular \<phi> (x\<^sub>1,x\<^sub>2) = AOT_model_irregular \<phi> (y\<^sub>1,y\<^sub>2)\<close>
using denotes_x denotes_y
using AOT_model_irregular_nondenoting calculation(2) by blast
}
ultimately have \<open>AOT_model_irregular \<phi> (x\<^sub>1,x\<^sub>2) = AOT_model_irregular \<phi> (y\<^sub>1,y\<^sub>2)\<close>
using AOT_model_term_equiv_denotes AOT_model_term_equiv_regular
sp_some.AOT_model_irregular_equiv x\<^sub>1y\<^sub>1_equiv x\<^sub>2y\<^sub>2_equiv xy_equiv
by blast
} note 0 = this
show \<open>AOT_model_term_equiv x y \<Longrightarrow>
AOT_model_irregular \<phi> x = AOT_model_irregular \<phi> y\<close>
for x y :: \<open>'a\<times>'b\<close> and \<phi>
by (induct x; induct y; simp add: AOT_model_term_equiv_prod_def 0)
next
fix \<phi> \<psi> :: \<open>'a\<times>'b \<Rightarrow> \<o>\<close>
assume \<open>AOT_model_regular x \<Longrightarrow> \<phi> x = \<psi> x\<close> for x
hence \<open>\<phi> (x, y) = \<psi> (x, y)\<close>
if \<open>AOT_model_denotes x \<and> AOT_model_regular y \<or>
\<not>AOT_model_denotes x \<and> AOT_model_denotes y\<close> for x y
using that unfolding AOT_model_regular_prod_def by simp
hence \<open>AOT_model_irregular \<phi> (x,y) = AOT_model_irregular \<psi> (x,y)\<close>
for x :: 'a and y :: 'b
proof (induct arbitrary: \<psi> \<phi> rule: AOT_model_irregular_prod.induct)
case (1 x y \<phi>)
thus ?case
apply simp
by (meson AOT_model_irregular_eqI AOT_model_irregular_nondenoting
AOT_model_term_equiv_denotes AOT_model_term_equiv_eps(1))
next
case (2 x y \<phi>)
thus ?case
apply simp
by (meson AOT_model_irregular_nondenoting AOT_model_term_equiv_denotes
AOT_model_term_equiv_eps(1))
next
case (3 x y \<phi>)
thus ?case
apply simp
by (metis (mono_tags, lifting) AOT_model_regular_prod_def case_prod_conv
sp_some.AOT_model_irregular_eqI surj_pair)
qed
thus \<open>AOT_model_irregular \<phi> x = AOT_model_irregular \<psi> x\<close> for x :: \<open>'a\<times>'b\<close>
by (metis surjective_pairing)
qed
qed
end
text\<open>Introduction rules for term equivalence on tuple terms.\<close>
lemma AOT_meta_prod_equivI:
shows "\<And> (a::'a::AOT_UnaryIndividualTerm) x (y :: 'b::AOT_IndividualTerm) .
AOT_model_term_equiv x y \<Longrightarrow> AOT_model_term_equiv (a,x) (a,y)"
and "\<And> (x::'a::AOT_UnaryIndividualTerm) y (b :: 'b::AOT_IndividualTerm) .
AOT_model_term_equiv x y \<Longrightarrow> AOT_model_term_equiv (x,b) (y,b)"
unfolding AOT_model_term_equiv_prod_def
by (simp add: AOT_model_term_equiv_part_equivp equivp_reflp)+
text\<open>The type of propositions is a trivial instance of terms.\<close>
instantiation \<o> :: AOT_Term
begin
definition AOT_model_denotes_\<o> :: \<open>\<o> \<Rightarrow> bool\<close> where
\<open>AOT_model_denotes_\<o> \<equiv> \<lambda>_. True\<close>
instance proof
show \<open>\<exists>x::\<o>. AOT_model_denotes x\<close>
by (simp add: AOT_model_denotes_\<o>_def)
qed
end
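text\<open>As a sanity check: every proposition denotes.\<close>
lemma \<open>AOT_model_denotes (\<phi>::\<o>)\<close>
  by (simp add: AOT_model_denotes_\<o>_def)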
text\<open>AOT's variables are modelled by restricting the type of terms to those terms
that denote.\<close>
typedef 'a AOT_var = \<open>{ x :: 'a::AOT_Term . AOT_model_denotes x }\<close>
morphisms AOT_term_of_var AOT_var_of_term
by (simp add: AOT_model_denoting_ex)
text\<open>Simplify automatically generated theorems and rules.\<close>
declare AOT_var_of_term_induct[induct del]
AOT_var_of_term_cases[cases del]
AOT_term_of_var_induct[induct del]
AOT_term_of_var_cases[cases del]
lemmas AOT_var_of_term_inverse = AOT_var_of_term_inverse[simplified]
and AOT_var_of_term_inject = AOT_var_of_term_inject[simplified]
and AOT_var_of_term_induct =
AOT_var_of_term_induct[simplified, induct type: AOT_var]
and AOT_var_of_term_cases =
AOT_var_of_term_cases[simplified, cases type: AOT_var]
and AOT_term_of_var = AOT_term_of_var[simplified]
and AOT_term_of_var_cases =
AOT_term_of_var_cases[simplified, induct pred: AOT_term_of_var]
and AOT_term_of_var_induct =
AOT_term_of_var_induct[simplified, induct pred: AOT_term_of_var]
and AOT_term_of_var_inverse = AOT_term_of_var_inverse[simplified]
and AOT_term_of_var_inject = AOT_term_of_var_inject[simplified]
text\<open>Equivalence by definition is modelled as necessary equivalence.\<close>
consts AOT_model_equiv_def :: \<open>\<o> \<Rightarrow> \<o> \<Rightarrow> bool\<close>
specification(AOT_model_equiv_def)
AOT_model_equiv_def: \<open>AOT_model_equiv_def \<phi> \<psi> = (\<forall> v . AOT_model_valid_in v \<phi> =
AOT_model_valid_in v \<psi>)\<close>
by (rule exI[where x=\<open>\<lambda> \<phi> \<psi> . \<forall> v . AOT_model_valid_in v \<phi> =
AOT_model_valid_in v \<psi>\<close>]) simp
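text\<open>As a simple consequence of the specification, equivalence by definition
is reflexive.\<close>
lemma \<open>AOT_model_equiv_def \<phi> \<phi>\<close>
  by (simp add: AOT_model_equiv_def)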
text\<open>Identity by definition is modelled as identity whenever the defining term
denotes; otherwise the defined term must fail to denote as well.\<close>
consts AOT_model_id_def :: \<open>('b \<Rightarrow> 'a::AOT_Term) \<Rightarrow> ('b \<Rightarrow> 'a) \<Rightarrow> bool\<close>
specification(AOT_model_id_def)
AOT_model_id_def: \<open>(AOT_model_id_def \<tau> \<sigma>) = (\<forall> \<alpha> . if AOT_model_denotes (\<sigma> \<alpha>)
then \<tau> \<alpha> = \<sigma> \<alpha>
else \<not>AOT_model_denotes (\<tau> \<alpha>))\<close>
by (rule exI[where x="\<lambda> \<tau> \<sigma> . \<forall> \<alpha> . if AOT_model_denotes (\<sigma> \<alpha>)
then \<tau> \<alpha> = \<sigma> \<alpha>
else \<not>AOT_model_denotes (\<tau> \<alpha>)"])
blast
text\<open>To reduce definitions by identity without free variables to definitions
by identity with free variables acting on the unit type, we give the unit type
a trivial instantiation to @{class AOT_Term}.\<close>
instantiation unit :: AOT_Term
begin
definition AOT_model_denotes_unit :: \<open>unit \<Rightarrow> bool\<close> where
\<open>AOT_model_denotes_unit \<equiv> \<lambda>_. True\<close>
instance proof qed(simp add: AOT_model_denotes_unit_def)
end
text\<open>Modally-strict and modally-fragile axioms are modelled as necessarily,
resp. actually, valid propositions.\<close>
definition AOT_model_axiom where
\<open>AOT_model_axiom \<equiv> \<lambda> \<phi> . \<forall> v . AOT_model_valid_in v \<phi>\<close>
definition AOT_model_act_axiom where
\<open>AOT_model_act_axiom \<equiv> \<lambda> \<phi> . AOT_model_valid_in w\<^sub>0 \<phi>\<close>
lemma AOT_model_axiomI:
assumes \<open>\<And>v . AOT_model_valid_in v \<phi>\<close>
shows \<open>AOT_model_axiom \<phi>\<close>
unfolding AOT_model_axiom_def using assms ..
lemma AOT_model_act_axiomI:
assumes \<open>AOT_model_valid_in w\<^sub>0 \<phi>\<close>
shows \<open>AOT_model_act_axiom \<phi>\<close>
unfolding AOT_model_act_axiom_def using assms .
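text\<open>As a simple consequence of the definitions, every modally-strict axiom is
also a modally-fragile axiom.\<close>
lemma AOT_model_axiom_act_axiom:
  assumes \<open>AOT_model_axiom \<phi>\<close>
  shows \<open>AOT_model_act_axiom \<phi>\<close>
  using assms unfolding AOT_model_axiom_def AOT_model_act_axiom_def by blast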
(*<*)
end
(*>*) |
# 5. Write a function which takes in N (the number of users) and s_N (the state
# probability for the last user in the chain) and returns the maximum likelihood estimate
# of s_0 (rounded to 2 decimal places).
# NOTE: the per-step transition model is defined earlier in the assignment and is
# not shown here; as a sketch, assume a symmetric two-state chain in which each
# user flips the state with an assumed probability q (default 0.2), so that
# s_N = 0.5 + (1 - 2q)^N * (s_0 - 0.5); e.g. estimate_s_0(2, 0.55) gives 0.64.
estimate_s_0 <- function(N, s_N, q = 0.2){
  s_0_mle <- 0.5 + (s_N - 0.5) / (1 - 2 * q)^N  # invert the N-step recursion
  s_0_mle <- min(max(s_0_mle, 0), 1)            # clamp to a valid probability
  return(round(s_0_mle, 2))
} |
\section{Businesses}
\subsection{Registration}
If you want to create a business account for \textit{Soldino}, visit the
homepage, make sure you are logged in to your MetaMask\glosp account and
then make sure that the switch button is set to \texttt{Business}.\\
\begin{figure}[H]
\includegraphics[width=5cm]{res/images/user_business.png}
\centering
\caption{Select \texttt{Business} from the switch button}
\end{figure}
\noindent Then, insert your business's data in the form.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/business_signup.png}
\centering
\caption{Business sign up form}
\end{figure}
\noindent After you have filled in all the fields with your data,
press the \texttt{Sign up} button. If an entry is not valid (e.g. the email
address does not contain a valid domain), the system will ask you
to correct that field before continuing. If all fields are valid, a
pop-up window will open asking you to allow \textit{Soldino} to access your
information.\\
\begin{figure}[H]
\includegraphics[width=12cm]{res/images/metamask_connect.png}
\centering
\caption{Connecting MetaMask to \textit{Soldino}}
\end{figure}
\noindent Press \texttt{Connect} and you will be redirected to a page
congratulating you on your registration on the platform.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/registration_complete.png}
\centering
\caption{Registration is completed message}
\end{figure}
\subsubsection{Business is already registered}
If your Ethereum\glosp address is already registered on the platform, you will
see an error message.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/user_already_registered.png}
\centering
\caption{Error shown if you are already registered}
\end{figure}
\noindent Just press \texttt{Login} to log in to your business account.
\subsection{Login}
If you already have a business account, press the \texttt{Login} button at the
top right of the homepage and you will automatically be logged in to your account
(there is no need for a username or password; authentication is handled via MetaMask).
\\To be able to log in, you have to be logged in to your MetaMask\glosp account.
\subsubsection{Account is disabled}
If your account was disabled by the Government, you will see an error
message.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/user_disabled.png}
\centering
\caption{Error shown if your account has been disabled by the Government}
\end{figure}
\noindent If your account is disabled, you cannot access it any more until it
is enabled again by the Government.
\subsection{Logout}
To log out of \textit{Soldino} you just have to press the \texttt{Logout} button
at the top right of the page, but you should also log out of MetaMask\glosp{}
for better security. To do this, press MetaMask's icon at the top
right of the browser, press your account's icon and then press \texttt{Log out}
at the top right.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/logout_metamask.png}
\centering
\caption{Logging out}
\end{figure}
\subsection{Buying}
After completing the login, you will be redirected to the store's main page.
Here you will find some of the products and the search bar.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/store_main_page.png}
\centering
\caption{Store front page}
\end{figure}
\noindent Note that all prices shown on the platform are in Cubits\glosp{}
(CC\glo), \textit{Soldino}'s own special token. Also, note that 1 Cubit
equals 1 Euro.
\subsubsection{Checking your balance}
Before making a purchase you should check whether you have enough Cubits\glo.
This can be done by clicking the user icon on the navigation bar at the top
of the page.
\begin{figure}[H]
\includegraphics[width=3cm]{res/images/user_icon.png}
\centering
\caption{User's icon}
\end{figure}
\noindent This will open a little window where you will be able to see
your personal information as well as your Cubits balance.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/user_info.png}
\centering
\caption{User's information}
\end{figure}
\subsubsection{Searching}
You can search for products by name using the search bar that can be
found at the top of the page. Note that the search bar only matches
words contained in product titles. \\
After you start typing you will see
all results that match your search. If no matching products are
found, a message will be shown.
%
\subsubsection{Cart}
You can add products to your cart, after selecting the quantity you need,
by pressing the \texttt{Add to cart} button below them.
\begin{figure}[H]
\includegraphics[width=7cm]{res/images/add_to_cart.png}
\centering
\caption{Adding a product to the cart}
\end{figure}
\noindent The number on the cart shows you how many unique products are
in it. You can access your cart by pressing the shopping cart icon in
the navigation bar at the top of the page.
\begin{figure}[H]
\includegraphics[width=3cm]{res/images/cart_icon.png}
\centering
\caption{Shopping cart icon}
\end{figure}
\noindent Here you will see the total for your order and you will find
every item you previously selected together with its quantity. If you
need to modify a quantity, press the \texttt{+} or \texttt{-} buttons.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/cart_example.png}
\centering
\caption{Example of a cart's content}
\end{figure}
\noindent When you want to proceed with the order, press the \texttt{Checkout} button.
You will be redirected to the checkout page.
\subsubsection{Checkout}
On the checkout page, you will be able to choose where your products
will be delivered to by using the radio buttons: you can either select
the address you gave during registration or enter a new one.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/checkout.png}
\centering
\caption{Example of a checkout page}
\end{figure}
\noindent Press the \texttt{Confirm and pay} button to proceed. In a new
MetaMask\glo{} pop-up window, press \texttt{Confirm} to allow paying all
the vendors of the products that you are buying in a single transaction.
After this, a new MetaMask\glo{} window will ask you to confirm the
transaction again. Press \texttt{Confirm} to pay, or press the \texttt{Cancel}
button if you do not want to continue.
\subsubsection{Past orders}
You can visit the page containing all past orders by pressing the
\texttt{Orders} button in the bar at the top of the page.
Here you will find an order history listing every purchase that you have made
with \textit{Soldino}. Each order shows when it was placed, who the
seller was, which items were bought, where it was shipped to and how
much you paid for it.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/past_orders.png}
\centering
\caption{Example of past orders}
\end{figure}
\subsection{Selling}
If you want to manage the products that you are selling on \textit{Soldino}
or add a new one, press the \texttt{Products Manager} button on the bar at the
top of the page. Here you will find every product that you are currently selling.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/products_manager.png}
\centering
\caption{Products Manager page}
\end{figure}
\subsubsection{Selling a new product}
If you want to sell a new product, you have to press \texttt{Add a
product}, which will redirect you to a page where you have to insert
information about your new product.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/add_new_product.png}
\centering
\caption{Adding a new product}
\end{figure}
\noindent After you have completed every field and selected a photo
(adding a photo is not required, but it is recommended),
press the \texttt{Confirm} button and
the product will be added to those that users can buy.
\\Press the \texttt{Cancel} button if you do not wish to continue.
\\Note that the photo must be a .png, .jpg or .gif file and that the
chosen picture is for illustrative purposes only.
\subsubsection{Editing a product}
If you want to change the information of a product, press
the \texttt{Edit} button; you will be redirected to a
page where you will be able to change the product's information.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/edit_product.png}
\centering
\caption{Editing the product's information}
\end{figure}
\noindent After you are done with the changes, press the \texttt{Confirm}
button to apply them.\\
Note that you only have to fill in the fields you wish to change, leaving
the others blank.
\\If you do not wish to apply the changes, press the \texttt{Cancel} button.
\subsubsection{Removing a product}
If you want to remove a product that you are selling on the platform, press
the \texttt{Remove} button under it.
\\Note that removing a product will not delete it from orders previously made.
\subsection{VAT management}
If you are a business, \textit{Soldino} will automatically manage VAT for
the products you buy and sell on the platform. You can access the page
containing all invoices by pressing the \texttt{Transaction Manager} button
in the navigation bar at the top of the page.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/past_invoices.png}
\centering
\caption{Example of past invoices}
\end{figure}
\noindent Here you will also see the VAT status of the current quarter.
\\A red value at the end of a quarter means that you will have to pay
that amount to the Government.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/negative_vat_status.png}
\centering
\caption{Example of VAT input status}
\end{figure}
\noindent A green one, instead, means that you will
soon be reimbursed for that amount.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/positive_vat_status.png}
\centering
\caption{Example of VAT output status}
\end{figure}
\subsubsection{Invoices}
Each invoice represents an order placed for one of your products.
You can see more information about an invoice by pressing the
\texttt{More details} button. This will open a pop-up window containing all
the details of that order.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/invoice_details.png}
\centering
\caption{Example of an invoice}
\end{figure}
\noindent You can also download a PDF file containing all the invoices
of the current quarter by pressing \texttt{Download PDF}.
\subsubsection{Paying VAT}
At the end of a quarter, if you owe VAT to the Government, you will
see two buttons next to the amount you have to pay.
\begin{figure}[H]
\includegraphics[width=15cm]{res/images/paying_vat.png}
\centering
\caption{Paying off VAT input status}
\end{figure}
\paragraph{Instant payment} \mbox{}\\
If you choose to instantly pay off what you owe, press the \texttt{Instant
payment} button. MetaMask\glosp will then open a new window asking
you to allow the transaction; accept it to continue.
\paragraph{Deferred payment} \mbox{}\\
Otherwise, you can choose to defer the payment\glosp for a quarter
by pressing the \texttt{Deferred payment} button. A new window will open
where you will need to confirm the deferment.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/deferred_payment.png}
\centering
\caption{Deferring payment}
\end{figure}
\noindent If you confirm, you will see a message showing the date the
payment was deferred to.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/deferred_message.png}
\centering
\caption{Deferred message}
\end{figure}
%
\subsubsection{Checking past quarters' VAT}
You can see invoices of past quarters by using the drop-down
menu at the top of the page.
\begin{figure}[H]
\includegraphics[width=10cm]{res/images/past_trimesters.png}
\centering
\caption{Past quarters}
\end{figure}
\noindent From this page you will also be able to see the specific details
of an invoice or download them all as a PDF, as explained before. |
{-# OPTIONS --without-K --rewriting #-}
open import lib.Basics
open import lib.Equivalence2
open import lib.Function2
open import lib.NType2
open import lib.types.Coproduct
open import lib.types.Fin
open import lib.types.Group
open import lib.types.Int
open import lib.types.Nat
open import lib.types.Pi
open import lib.types.Subtype
open import lib.types.Truncation
open import lib.groups.SubgroupProp
module lib.groups.Homomorphism where
{-
Group homomorphisms.
-}
preserves-comp : ∀ {i j} {A : Type i} {B : Type j}
(A-comp : A → A → A) (B-comp : B → B → B) (f : A → B)
→ Type (lmax i j)
preserves-comp Ac Bc f = ∀ a₁ a₂ → f (Ac a₁ a₂) == Bc (f a₁) (f a₂)
preserves-comp-prop : ∀ {i j} {A : Type i} {B : Type j}
{{_ : is-set B}} (A-comp : A → A → A) (B-comp : B → B → B)
→ SubtypeProp (A → B) (lmax i j)
preserves-comp-prop Ac Bc =
preserves-comp Ac Bc , ⟨⟩
abstract
∼-preserves-preserves-comp : ∀ {i j} {A : Type i} {B : Type j}
(A-comp : A → A → A) (B-comp : B → B → B) {f₀ f₁ : A → B} → f₀ ∼ f₁
→ preserves-comp A-comp B-comp f₀
→ preserves-comp A-comp B-comp f₁
∼-preserves-preserves-comp Ac Bc {f₀ = f₀} {f₁} f₀∼f₁ f₀-pc a₁ a₂ =
! (f₀∼f₁ (Ac a₁ a₂)) ∙ f₀-pc a₁ a₂ ∙ ap2 Bc (f₀∼f₁ a₁) (f₀∼f₁ a₂)
record GroupStructureHom {i j} {GEl : Type i} {HEl : Type j}
(GS : GroupStructure GEl) (HS : GroupStructure HEl) : Type (lmax i j) where
constructor group-structure-hom
private
module G = GroupStructure GS
module H = GroupStructure HS
field
f : GEl → HEl
pres-comp : preserves-comp G.comp H.comp f
abstract
pres-ident : f G.ident == H.ident
pres-ident = H.cancel-l (f G.ident) $
H.comp (f G.ident) (f G.ident)
=⟨ ! (pres-comp G.ident G.ident) ⟩
f (G.comp G.ident G.ident)
=⟨ ap f (G.unit-l G.ident) ⟩
f G.ident
=⟨ ! (H.unit-r (f G.ident)) ⟩
H.comp (f G.ident) H.ident =∎
pres-inv : ∀ g → f (G.inv g) == H.inv (f g)
pres-inv g = ! $ H.inv-unique-l _ _ $
H.comp (f (G.inv g)) (f g)
=⟨ ! (pres-comp (G.inv g) g) ⟩
f (G.comp (G.inv g) g)
=⟨ ap f (G.inv-l g) ⟩
f G.ident
=⟨ pres-ident ⟩
H.ident
=∎
pres-exp : ∀ g i → f (G.exp g i) == H.exp (f g) i
pres-exp g (pos O) = pres-ident
pres-exp g (pos (S O)) = idp
pres-exp g (pos (S (S n))) = pres-comp g (G.exp g (pos (S n))) ∙ ap (H.comp (f g)) (pres-exp g (pos (S n)))
pres-exp g (negsucc O) = pres-inv g
pres-exp g (negsucc (S n)) = pres-comp (G.inv g) (G.exp g (negsucc n)) ∙ ap2 H.comp (pres-inv g) (pres-exp g (negsucc n))
pres-conj : ∀ g h → f (G.conj g h) == H.conj (f g) (f h)
pres-conj g h = pres-comp (G.comp g h) (G.inv g) ∙ ap2 H.comp (pres-comp g h) (pres-inv g)
pres-diff : ∀ g h → f (G.diff g h) == H.diff (f g) (f h)
pres-diff g h = pres-comp g (G.inv h) ∙ ap (H.comp (f g)) (pres-inv h)
pres-sum : ∀ {I : ℕ} (g : Fin I → GEl) → f (G.sum g) == H.sum (f ∘ g)
pres-sum {I = O} _ = pres-ident
pres-sum {I = S I} g = pres-comp (G.sum (g ∘ Fin-S)) (g (_ , ltS))
∙ ap (λ h → H.comp h (f (g (_ , ltS)))) (pres-sum (g ∘ Fin-S))
pres-subsum-r : ∀ {k l} {I : ℕ} {A : Type k} {B : Type l}
→ (p : Fin I → Coprod A B) (g : B → GEl)
→ f (G.subsum-r p g) == H.subsum-r p (f ∘ g)
pres-subsum-r p g = pres-sum (Coprod-rec (λ _ → G.ident) g ∘ p)
∙ ap H.sum (λ= λ x →
Coprod-rec-post∘ f (λ _ → G.ident) g (p x)
∙ ap (λ h → Coprod-rec h (f ∘ g) (p x)) (λ= λ _ → pres-ident))
⊙f : ⊙[ GEl , G.ident ] ⊙→ ⊙[ HEl , H.ident ]
⊙f = f , pres-ident
infix 0 _→ᴳˢ_ -- [ˢ] for structures
_→ᴳˢ_ = GroupStructureHom
record GroupHom {i j} (G : Group i) (H : Group j) : Type (lmax i j) where
constructor group-hom
private
module G = Group G
module H = Group H
field
f : G.El → H.El
pres-comp : ∀ g₁ g₂ → f (G.comp g₁ g₂) == H.comp (f g₁) (f g₂)
open GroupStructureHom {GS = G.group-struct} {HS = H.group-struct}
record {f = f ; pres-comp = pres-comp} hiding (f ; pres-comp) public
infix 0 _→ᴳ_
_→ᴳ_ = GroupHom
→ᴳˢ-to-→ᴳ : ∀ {i j} {G : Group i} {H : Group j}
→ (Group.group-struct G →ᴳˢ Group.group-struct H) → (G →ᴳ H)
→ᴳˢ-to-→ᴳ (group-structure-hom f pres-comp) = group-hom f pres-comp
idhom : ∀ {i} (G : Group i) → (G →ᴳ G)
idhom G = group-hom (idf _) (λ _ _ → idp)
idshom : ∀ {i} {GEl : Type i} (GS : GroupStructure GEl) → (GS →ᴳˢ GS)
idshom GS = group-structure-hom (idf _) (λ _ _ → idp)
{- constant (zero) homomorphism -}
module _ where
cst-hom : ∀ {i j} {G : Group i} {H : Group j} → (G →ᴳ H)
cst-hom {H = H} = group-hom (cst (Group.ident H)) (λ _ _ → ! (Group.unit-l H _))
{- negation is a homomorphism in an abelian group -}
inv-hom : ∀ {i} (G : AbGroup i) → GroupHom (AbGroup.grp G) (AbGroup.grp G)
inv-hom G = group-hom G.inv inv-pres-comp where
module G = AbGroup G
abstract
inv-pres-comp : (g₁ g₂ : G.El) → G.inv (G.comp g₁ g₂) == G.comp (G.inv g₁) (G.inv g₂)
inv-pres-comp g₁ g₂ = G.inv-comp g₁ g₂ ∙ G.comm (G.inv g₂) (G.inv g₁)
{- equality of homomorphisms -}
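-- (Since the codomain is a set, preserving composition is a proposition, so two
-- homomorphisms are equal as soon as their underlying functions are.)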
abstract
group-hom= : ∀ {i j} {G : Group i} {H : Group j} {φ ψ : G →ᴳ H}
→ GroupHom.f φ == GroupHom.f ψ → φ == ψ
group-hom= {G = G} {H = H} p = ap (uncurry group-hom) $
Subtype=-out (preserves-comp-prop (Group.comp G) (Group.comp H)) p
group-hom=-↓ : ∀ {i j k} {A : Type i} {G : A → Group j} {H : A → Group k} {x y : A}
{p : x == y} {φ : G x →ᴳ H x} {ψ : G y →ᴳ H y}
→ GroupHom.f φ == GroupHom.f ψ
[ (λ a → Group.El (G a) → Group.El (H a)) ↓ p ]
→ φ == ψ [ (λ a → G a →ᴳ H a) ↓ p ]
group-hom=-↓ {p = idp} = group-hom=
abstract
instance
GroupHom-level : ∀ {i j} {G : Group i} {H : Group j} → is-set (G →ᴳ H)
GroupHom-level {G = G} {H = H} = equiv-preserves-level
(equiv (uncurry group-hom) (λ x → GroupHom.f x , GroupHom.pres-comp x)
(λ _ → idp) (λ _ → idp))
{{Subtype-level
(preserves-comp-prop (Group.comp G) (Group.comp H))}}
infixr 80 _∘ᴳˢ_ _∘ᴳ_
abstract
∘ᴳˢ-pres-comp : ∀ {i j k} {GEl : Type i} {HEl : Type j} {KEl : Type k}
{GS : GroupStructure GEl} {HS : GroupStructure HEl} {KS : GroupStructure KEl}
(ψ : HS →ᴳˢ KS) (φ : GS →ᴳˢ HS)
→ preserves-comp (GroupStructure.comp GS) (GroupStructure.comp KS) (GroupStructureHom.f ψ ∘ GroupStructureHom.f φ)
∘ᴳˢ-pres-comp ψ φ g₁ g₂ = ap (GroupStructureHom.f ψ) (GroupStructureHom.pres-comp φ g₁ g₂)
∙ GroupStructureHom.pres-comp ψ (GroupStructureHom.f φ g₁) (GroupStructureHom.f φ g₂)
∘ᴳ-pres-comp : ∀ {i j k} {G : Group i} {H : Group j} {K : Group k} (ψ : H →ᴳ K) (φ : G →ᴳ H)
→ preserves-comp (Group.comp G) (Group.comp K) (GroupHom.f ψ ∘ GroupHom.f φ)
∘ᴳ-pres-comp ψ φ g₁ g₂ = ap (GroupHom.f ψ) (GroupHom.pres-comp φ g₁ g₂)
∙ GroupHom.pres-comp ψ (GroupHom.f φ g₁) (GroupHom.f φ g₂)
_∘ᴳˢ_ : ∀ {i j k} {GEl : Type i} {HEl : Type j} {KEl : Type k}
{GS : GroupStructure GEl} {HS : GroupStructure HEl} {KS : GroupStructure KEl}
→ (HS →ᴳˢ KS) → (GS →ᴳˢ HS) → (GS →ᴳˢ KS)
ψ ∘ᴳˢ φ = group-structure-hom (GroupStructureHom.f ψ ∘ GroupStructureHom.f φ) (∘ᴳˢ-pres-comp ψ φ)
_∘ᴳ_ : ∀ {i j k} {G : Group i} {H : Group j} {K : Group k}
→ (H →ᴳ K) → (G →ᴳ H) → (G →ᴳ K)
ψ ∘ᴳ φ = group-hom (GroupHom.f ψ ∘ GroupHom.f φ) (∘ᴳ-pres-comp ψ φ)
{- algebraic properties -}
∘ᴳ-unit-r : ∀ {i j} {G : Group i} {H : Group j} (φ : G →ᴳ H)
→ φ ∘ᴳ idhom G == φ
∘ᴳ-unit-r φ = group-hom= idp
∘ᴳ-unit-l : ∀ {i j} {G : Group i} {H : Group j} (φ : G →ᴳ H)
→ idhom H ∘ᴳ φ == φ
∘ᴳ-unit-l φ = group-hom= idp
∘ᴳ-assoc : ∀ {i j k l} {G : Group i} {H : Group j} {K : Group k} {L : Group l}
(χ : K →ᴳ L) (ψ : H →ᴳ K) (φ : G →ᴳ H)
→ (χ ∘ᴳ ψ) ∘ᴳ φ == χ ∘ᴳ ψ ∘ᴳ φ
∘ᴳ-assoc χ ψ φ = group-hom= idp
is-injᴳ : ∀ {i j} {G : Group i} {H : Group j}
→ (G →ᴳ H) → Type (lmax i j)
is-injᴳ hom = is-inj (GroupHom.f hom)
is-surjᴳ : ∀ {i j} {G : Group i} {H : Group j}
→ (G →ᴳ H) → Type (lmax i j)
is-surjᴳ hom = is-surj (GroupHom.f hom)
{- subgroups -}
infix 80 _∘subᴳ_
_∘subᴳ_ : ∀ {i j k} {G : Group i} {H : Group j}
→ SubgroupProp H k → (G →ᴳ H) → SubgroupProp G k
_∘subᴳ_ {k = k} {G = G} P φ = record {M} where
module G = Group G
module P = SubgroupProp P
module φ = GroupHom φ
module M where
prop : G.El → Type k
prop = P.prop ∘ φ.f
abstract
ident : prop G.ident
ident = transport! P.prop φ.pres-ident P.ident
diff : {g₁ g₂ : G.El} → prop g₁ → prop g₂ → prop (G.diff g₁ g₂)
diff {g₁} {g₂} pφg₁ pφg₂ = transport! P.prop
(φ.pres-diff g₁ g₂)
(P.diff pφg₁ pφg₂)
infix 80 _∘nsubᴳ_
_∘nsubᴳ_ : ∀ {i j k} {G : Group i} {H : Group j}
→ NormalSubgroupProp H k → (G →ᴳ H) → NormalSubgroupProp G k
_∘nsubᴳ_ {G = G} {H} P φ = P.propᴳ ∘subᴳ φ , P-φ-is-normal
where module P = NormalSubgroupProp P
module φ = GroupHom φ
abstract
P-φ-is-normal : is-normal (P.propᴳ ∘subᴳ φ)
P-φ-is-normal g₁ {g₂} pφg₂ = transport! P.prop
(φ.pres-conj g₁ g₂)
(P.conj (φ.f g₁) pφg₂)
{- kernels and images -}
module _ {i j} {G : Group i} {H : Group j} (φ : G →ᴳ H) where
private
module G = Group G
module H = Group H
module φ = GroupHom φ
ker-propᴳ : SubgroupProp G j
ker-propᴳ = record {M} where
module M where
prop : G.El → Type j
prop g = φ.f g == H.ident
abstract
ident : prop G.ident
ident = φ.pres-ident
diff : {g₁ g₂ : G.El} → prop g₁ → prop g₂ → prop (G.diff g₁ g₂)
diff {g₁} {g₂} p₁ p₂ = φ.pres-diff g₁ g₂ ∙ ap2 H.diff p₁ p₂ ∙ H.inv-r H.ident
-- 'n' for 'normal'
ker-npropᴳ : NormalSubgroupProp G j
ker-npropᴳ = ker-propᴳ , ker-is-normal where
abstract
ker-is-normal : is-normal ker-propᴳ
ker-is-normal g₁ {g₂} pg₂ =
φ.pres-conj g₁ g₂
∙ ap (H.conj (φ.f g₁)) pg₂
∙ H.conj-ident-r (φ.f g₁)
im-propᴳ : SubgroupProp H (lmax i j)
im-propᴳ = record {M} where
module M where
prop : H.El → Type (lmax i j)
prop h = Trunc -1 (hfiber φ.f h)
level : (h : H.El) → is-prop (prop h)
level h = Trunc-level
abstract
ident : prop H.ident
ident = [ G.ident , φ.pres-ident ]
diff : {h₁ h₂ : H.El} → prop h₁ → prop h₂ → prop (H.diff h₁ h₂)
diff = Trunc-fmap2 λ {(g₁ , p₁) (g₂ , p₂)
→ G.diff g₁ g₂ , φ.pres-diff g₁ g₂ ∙ ap2 H.diff p₁ p₂}
im-npropᴳ : is-abelian H → NormalSubgroupProp H (lmax i j)
im-npropᴳ H-is-abelian = sub-abelian-normal H-is-abelian im-propᴳ
has-trivial-kerᴳ : Type (lmax i j)
has-trivial-kerᴳ = is-trivial-propᴳ ker-propᴳ
abstract
-- any homomorphism with trivial kernel is injective
has-trivial-ker-is-injᴳ : has-trivial-kerᴳ → is-injᴳ φ
has-trivial-ker-is-injᴳ tk g₁ g₂ p =
G.zero-diff-same g₁ g₂ $ tk (G.diff g₁ g₂) $
φ.pres-diff g₁ g₂ ∙ ap (λ h → H.diff h (φ.f g₂)) p ∙ H.inv-r (φ.f g₂)
ker-cst-hom-is-full : ∀ {i j} (G : Group i) (H : Group j)
→ is-fullᴳ (ker-propᴳ (cst-hom {G = G} {H}))
ker-cst-hom-is-full G H g = idp
{- exactness -}
module _ {i j k} {G : Group i} {H : Group j} {K : Group k}
(φ : G →ᴳ H) (ψ : H →ᴳ K) where
private
module G = Group G
module H = Group H
module K = Group K
module φ = GroupHom φ
module ψ = GroupHom ψ
record is-exact : Type (lmax k (lmax j i)) where
field
im-sub-ker : im-propᴳ φ ⊆ᴳ ker-propᴳ ψ
ker-sub-im : ker-propᴳ ψ ⊆ᴳ im-propᴳ φ
open is-exact public
abstract
{- an equivalent version of is-exact-ktoi -}
im-sub-ker-in : is-fullᴳ (ker-propᴳ (ψ ∘ᴳ φ)) → im-propᴳ φ ⊆ᴳ ker-propᴳ ψ
im-sub-ker-in r h = Trunc-rec (λ {(g , p) → ap ψ.f (! p) ∙ r g})
im-sub-ker-out : im-propᴳ φ ⊆ᴳ ker-propᴳ ψ → is-fullᴳ (ker-propᴳ (ψ ∘ᴳ φ))
im-sub-ker-out s g = s (φ.f g) [ g , idp ]
{- homomorphisms into an abelian group can be combined pointwise using
 - the group operation and form a group -}
module _ {i j} (G : Group i) (H : AbGroup j)
where
private
module G = Group G
module H = AbGroup H
hom-comp : (G →ᴳ H.grp) → (G →ᴳ H.grp) → (G →ᴳ H.grp)
hom-comp φ ψ = group-hom (λ g → H.comp (φ.f g) (ψ.f g)) hom-comp-pres-comp where
module φ = GroupHom φ
module ψ = GroupHom ψ
abstract
hom-comp-pres-comp : ∀ g₁ g₂
→ H.comp (φ.f (G.comp g₁ g₂)) (ψ.f (G.comp g₁ g₂))
== H.comp (H.comp (φ.f g₁) (ψ.f g₁)) (H.comp (φ.f g₂) (ψ.f g₂))
hom-comp-pres-comp g₁ g₂ =
H.comp (φ.f (G.comp g₁ g₂)) (ψ.f (G.comp g₁ g₂))
=⟨ ap2 H.comp (φ.pres-comp g₁ g₂) (ψ.pres-comp g₁ g₂) ⟩
H.comp (H.comp (φ.f g₁) (φ.f g₂)) (H.comp (ψ.f g₁) (ψ.f g₂))
=⟨ H.interchange (φ.f g₁) (φ.f g₂) (ψ.f g₁) (ψ.f g₂) ⟩
H.comp (H.comp (φ.f g₁) (ψ.f g₁)) (H.comp (φ.f g₂) (ψ.f g₂)) =∎
hom-group-structure : GroupStructure (G →ᴳ H.grp)
hom-group-structure = record {M} where
module M where
ident : G →ᴳ H.grp
ident = cst-hom
comp : (G →ᴳ H.grp) → (G →ᴳ H.grp) → (G →ᴳ H.grp)
comp = hom-comp
inv : (G →ᴳ H.grp) → (G →ᴳ H.grp)
inv φ = inv-hom H ∘ᴳ φ
abstract
unit-l : ∀ φ → comp ident φ == φ
unit-l φ = group-hom= $ λ= λ _ → H.unit-l _
assoc : ∀ φ ψ ξ → comp (comp φ ψ) ξ == comp φ (comp ψ ξ)
assoc φ ψ ξ = group-hom= $ λ= λ _ → H.assoc _ _ _
inv-l : ∀ φ → comp (inv φ) φ == ident
inv-l φ = group-hom= $ λ= λ _ → H.inv-l _
hom-group : Group (lmax i j)
hom-group = group (G →ᴳ H.grp) hom-group-structure
abstract
hom-group-is-abelian : is-abelian hom-group
hom-group-is-abelian φ ψ = group-hom= $ λ= λ g → H.comm _ _
hom-abgroup : AbGroup (lmax i j)
hom-abgroup = hom-group , hom-group-is-abelian
module _ {i j} {G : Group i} {H : AbGroup j} where
app-hom : Group.El G → hom-group G H →ᴳ AbGroup.grp H
app-hom g = group-hom (λ φ → GroupHom.f φ g) lemma
where abstract lemma = λ φ ψ → idp
appᴳ = app-hom
pre∘ᴳ-hom : ∀ {i j k} {G : Group i} {H : Group j} (K : AbGroup k)
→ (G →ᴳ H) → (hom-group H K →ᴳ hom-group G K)
pre∘ᴳ-hom K φ = record { f = _∘ᴳ φ ; pres-comp = lemma}
where abstract lemma = λ _ _ → group-hom= idp
post∘ᴳ-hom : ∀ {i j k} (G : Group i) (H : AbGroup j) (K : AbGroup k)
→ (AbGroup.grp H →ᴳ AbGroup.grp K) → (hom-group G H →ᴳ hom-group G K)
post∘ᴳ-hom G H K φ = record { f = φ ∘ᴳ_ ; pres-comp = lemma}
where abstract lemma = λ _ _ → group-hom= $ λ= λ _ → GroupHom.pres-comp φ _ _
|
Formal statement is: lemma pred_intros_countable[measurable (raw)]: fixes P :: "'a \<Rightarrow> 'i :: countable \<Rightarrow> bool" shows "(\<And>i. pred M (\<lambda>x. P x i)) \<Longrightarrow> pred M (\<lambda>x. \<forall>i. P x i)" "(\<And>i. pred M (\<lambda>x. P x i)) \<Longrightarrow> pred M (\<lambda>x. \<exists>i. P x i)" Informal statement is: If $P(x,i)$ is a predicate on $x$ that depends on a countable index $i$, then the predicates $\forall i. P(x,i)$ and $\exists i. P(x,i)$ are measurable. This holds because $\{x \mid \forall i.\, P(x,i)\} = \bigcap_i \{x \mid P(x,i)\}$ and $\{x \mid \exists i.\, P(x,i)\} = \bigcup_i \{x \mid P(x,i)\}$, and measurable sets are closed under countable intersections and unions. |
open import Functors
open import Categories
open import RMonads
module RMonads.REM.Functors {a b c d}{C : Cat {a}{b}}{D : Cat {c}{d}}
(J : Fun C D)(M : RMonad J) where
open import Library
open import RMonads.REM M
open Cat
open Fun
open RAlg
open RAlgMorph
open RMonad M
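-- REML is the free-algebra functor into the Eilenberg-Moore category: it sends
-- an object X to the algebra (T X , bind), and a map f to bind (comp D η (HMap J f)).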
REML : Fun C EM
REML = record {
OMap = λ X → record {
acar = T X;
astr = bind;
alaw1 = sym law2;
alaw2 = law3};
HMap = λ f → record {
amor = bind (comp D η (HMap J f));
ahom = sym law3};
fid = RAlgMorphEq (
proof
bind (comp D η (HMap J (iden C)))
≅⟨ cong (bind ∘ comp D η) (fid J) ⟩
bind (comp D η (iden D))
≅⟨ cong bind (idr D) ⟩
bind η
≅⟨ law1 ⟩
iden D ∎);
fcomp = λ{_ _ _ f g} → RAlgMorphEq (
proof
bind (comp D η (HMap J (comp C f g)))
≅⟨ cong (bind ∘ comp D η) (fcomp J) ⟩
bind (comp D η (comp D (HMap J f) (HMap J g)))
≅⟨ cong bind (sym (ass D)) ⟩
bind (comp D (comp D η (HMap J f)) (HMap J g))
≅⟨ cong (λ f₁ → bind (comp D f₁ (HMap J g))) (sym law2) ⟩
bind (comp D (comp D (bind (comp D η (HMap J f))) η) (HMap J g))
≅⟨ cong bind (ass D) ⟩
bind (comp D (bind (comp D η (HMap J f))) (comp D η (HMap J g)))
≅⟨ law3 ⟩
comp D (bind (comp D η (HMap J f))) (bind (comp D η (HMap J g)))
∎)}
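-- REMR is the forgetful functor: it returns the carrier of an algebra and the
-- underlying morphism of an algebra map, so both functor laws hold by refl.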
REMR : Fun EM D
REMR = record {
OMap = acar;
HMap = amor;
fid = refl;
fcomp = refl}
|
Formal statement is: lemma fold_coeffs_monom [simp]: "a \<noteq> 0 \<Longrightarrow> fold_coeffs f (monom a n) = f 0 ^^ n \<circ> f a" Informal statement is: If $a \neq 0$, then the fold of the coefficients of the monomial $a x^n$ is the composition of $f(0)$ with itself $n$ times, followed by $f(a)$. Indeed, for $a \neq 0$ the coefficient list of $a x^n$ is $n$ zeros followed by the leading coefficient $a$, so folding $f$ over this list composes $n$ copies of $f(0)$ with $f(a)$. |
open import Mockingbird.Forest using (Forest)
-- Mockingbirds, Warblers, and Starlings
module Mockingbird.Problems.Chapter12 {b ℓ} (forest : Forest {b} {ℓ}) where
open import Data.Product using (_×_; _,_; proj₁; ∃-syntax)
open import Function using (_$_)
open import Mockingbird.Forest.Birds forest
import Mockingbird.Problems.Chapter11 forest as Chapter₁₁
open Forest forest
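-- For reference, the defining laws of the birds used below (Smullyan's names),
-- written in the forest's notation:
--   B ∙ x ∙ y ∙ z ≈ x ∙ (y ∙ z)        (bluebird)
--   M ∙ x ≈ x ∙ x                      (mockingbird)
--   W ∙ x ∙ y ≈ x ∙ y ∙ y              (warbler)
--   L ∙ x ∙ y ≈ x ∙ (y ∙ y)            (lark)
--   T ∙ x ∙ y ≈ y ∙ x                  (thrush)
--   C ∙ x ∙ y ∙ z ≈ x ∙ z ∙ y          (cardinal)
--   R ∙ x ∙ y ∙ z ≈ y ∙ z ∙ x          (robin)
--   S ∙ x ∙ y ∙ z ≈ x ∙ z ∙ (y ∙ z)    (starling)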
problem₁ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasMockingbird ⦄ → HasDoubleMockingbird
problem₁ = record
{ M₂ = B ∙ M
; isDoubleMockingbird = λ x y → begin
B ∙ M ∙ x ∙ y ≈⟨ isBluebird M x y ⟩
M ∙ (x ∙ y) ≈⟨ isMockingbird (x ∙ y) ⟩
x ∙ y ∙ (x ∙ y) ∎
}
problem₂-BCM : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasLark
problem₂-BCM = record
{ L = C ∙ B ∙ M
; isLark = λ x y → begin
C ∙ B ∙ M ∙ x ∙ y ≈⟨ congʳ $ isCardinal B M x ⟩
B ∙ x ∙ M ∙ y ≈⟨ isBluebird x M y ⟩
x ∙ (M ∙ y) ≈⟨ congˡ $ isMockingbird y ⟩
x ∙ (y ∙ y) ∎
} where instance hasCardinal = Chapter₁₁.problem₂₁′
problem₂-BRM : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasLark
problem₂-BRM = record
{ L = R ∙ M ∙ B
; isLark = λ x y → begin
R ∙ M ∙ B ∙ x ∙ y ≈⟨ congʳ $ isRobin M B x ⟩
B ∙ x ∙ M ∙ y ≈⟨ isBluebird x M y ⟩
x ∙ (M ∙ y) ≈⟨ congˡ $ isMockingbird y ⟩
x ∙ (y ∙ y) ∎
} where instance hasRobin = Chapter₁₁.problem₂₀
problem₃ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasWarbler ⦄ → HasLark
problem₃ = record
{ L = B ∙ W ∙ B
; isLark = λ x y → begin
B ∙ W ∙ B ∙ x ∙ y ≈⟨ congʳ $ isBluebird W B x ⟩
W ∙ (B ∙ x) ∙ y ≈⟨ isWarbler (B ∙ x) y ⟩
B ∙ x ∙ y ∙ y ≈⟨ isBluebird x y y ⟩
x ∙ (y ∙ y) ∎
}
problem₄ : ⦃ _ : HasMockingbird ⦄ ⦃ _ : HasQueerBird ⦄ → HasLark
problem₄ = record
{ L = Q ∙ M
; isLark = λ x y → begin
Q ∙ M ∙ x ∙ y ≈⟨ isQueerBird M x y ⟩
x ∙ (M ∙ y) ≈⟨ congˡ $ isMockingbird y ⟩
x ∙ (y ∙ y) ∎
}
problem₅ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasRobin ⦄ ⦃ _ : HasMockingbird ⦄ → HasConverseWarbler
problem₅ = record
{ W′ = M₂ ∙ R
; isConverseWarbler = λ x y → begin
M₂ ∙ R ∙ x ∙ y ≈⟨ congʳ $ isDoubleMockingbird R x ⟩
R ∙ x ∙ (R ∙ x) ∙ y ≈⟨ isRobin x (R ∙ x) y ⟩
R ∙ x ∙ y ∙ x ≈⟨ isRobin x y x ⟩
y ∙ x ∙ x ∎
} where instance hasDoubleMockingbird = problem₁
problem₆′ : ⦃ _ : HasCardinal ⦄ ⦃ _ : HasConverseWarbler ⦄ → HasWarbler
problem₆′ = record
{ W = C ∙ W′
; isWarbler = λ x y → begin
C ∙ W′ ∙ x ∙ y ≈⟨ isCardinal W′ x y ⟩
W′ ∙ y ∙ x ≈⟨ isConverseWarbler y x ⟩
x ∙ y ∙ y ∎
}
problem₆ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasRobin ⦄ ⦃ _ : HasCardinal ⦄ ⦃ _ : HasMockingbird ⦄ → HasWarbler
problem₆ = record
{ W = C ∙ (B ∙ M ∙ R)
; isWarbler = λ x y → begin
C ∙ (B ∙ M ∙ R) ∙ x ∙ y ≈⟨⟩
C ∙ (M₂ ∙ R) ∙ x ∙ y ≈⟨⟩
C ∙ W′ ∙ x ∙ y ≈⟨⟩
W ∙ x ∙ y ≈⟨ isWarbler x y ⟩
x ∙ y ∙ y ∎
} where
instance
hasDoubleMockingbird = problem₁
hasConverseWarbler = problem₅
hasWarbler = problem₆′
problem₇ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasWarbler
problem₇ = record
{ W = B ∙ (T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ∙ (B ∙ B ∙ T)
; isWarbler = λ x y → begin
B ∙ (T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ∙ (B ∙ B ∙ T) ∙ x ∙ y ≈˘⟨ congʳ $ congʳ $ isWarbler′ ⟩
W ∙ x ∙ y ≈⟨ isWarbler x y ⟩
x ∙ y ∙ y ∎
} where
instance
hasRobin = Chapter₁₁.problem₂₀
hasCardinal = Chapter₁₁.problem₂₁-bonus
hasWarbler = problem₆
isWarbler′ : W ≈ B ∙ (T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ∙ (B ∙ B ∙ T)
isWarbler′ = begin
W ≈⟨⟩
C ∙ (B ∙ M ∙ R) ≈⟨⟩
C ∙ (B ∙ M ∙ (B ∙ B ∙ T)) ≈⟨⟩
B ∙ (T ∙ (B ∙ B ∙ T)) ∙ (B ∙ B ∙ T) ∙ (B ∙ M ∙ (B ∙ B ∙ T)) ≈⟨ isBluebird (T ∙ (B ∙ B ∙ T)) (B ∙ B ∙ T) (B ∙ M ∙ (B ∙ B ∙ T)) ⟩
T ∙ (B ∙ B ∙ T) ∙ (B ∙ B ∙ T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ≈⟨ isThrush (B ∙ B ∙ T) (B ∙ B ∙ T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ⟩
B ∙ B ∙ T ∙ (B ∙ M ∙ (B ∙ B ∙ T)) ∙ (B ∙ B ∙ T) ≈⟨ congʳ $ isBluebird B T (B ∙ M ∙ (B ∙ B ∙ T)) ⟩
B ∙ (T ∙ (B ∙ M ∙ (B ∙ B ∙ T))) ∙ (B ∙ B ∙ T) ∎
-- TODO: other expression in problem 7.
-- NOTE: the bluebird B is not necessary.
problem₈ : ⦃ _ : HasThrush ⦄ ⦃ _ : HasWarbler ⦄ → HasMockingbird
problem₈ = record
{ M = W ∙ T
; isMockingbird = λ x → begin
W ∙ T ∙ x ≈⟨ isWarbler T x ⟩
T ∙ x ∙ x ≈⟨ isThrush x x ⟩
x ∙ x ∎
}
problem₉-W* : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasWarblerOnceRemoved
problem₉-W* = record
{ W* = B ∙ W
; isWarblerOnceRemoved = λ x y z → begin
B ∙ W ∙ x ∙ y ∙ z ≈⟨ congʳ $ isBluebird W x y ⟩
W ∙ (x ∙ y) ∙ z ≈⟨ isWarbler (x ∙ y) z ⟩
x ∙ y ∙ z ∙ z ∎
} where instance hasWarbler = problem₇
problem₉-W** : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasWarblerTwiceRemoved
problem₉-W** = record
{ W** = B ∙ W*
; isWarblerTwiceRemoved = λ x y z w → begin
B ∙ W* ∙ x ∙ y ∙ z ∙ w ≈⟨ congʳ $ congʳ $ isBluebird W* x y ⟩
W* ∙ (x ∙ y) ∙ z ∙ w ≈⟨ isWarblerOnceRemoved (x ∙ y) z w ⟩
x ∙ y ∙ z ∙ w ∙ w ∎
} where
instance
hasWarbler = problem₇
hasWarblerOnceRemoved = problem₉-W*
problem₁₀ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasCardinal ⦄ ⦃ _ : HasWarbler ⦄ → HasHummingbird
problem₁₀ = record
{ H = B ∙ W ∙ (B ∙ C)
; isHummingbird = λ x y z → begin
B ∙ W ∙ (B ∙ C) ∙ x ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isBluebird W (B ∙ C) x ⟩
W ∙ (B ∙ C ∙ x) ∙ y ∙ z ≈⟨ congʳ $ isWarbler (B ∙ C ∙ x) y ⟩
B ∙ C ∙ x ∙ y ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isBluebird C x y ⟩
C ∙ (x ∙ y) ∙ y ∙ z ≈⟨ isCardinal (x ∙ y) y z ⟩
x ∙ y ∙ z ∙ y ∎
}
problem₁₁ : ⦃ _ : HasRobin ⦄ ⦃ _ : HasHummingbird ⦄ → HasWarbler
problem₁₁ = problem₆′
where
instance
hasCardinal = Chapter₁₁.problem₂₁
hasConverseWarbler : HasConverseWarbler
hasConverseWarbler = record
{ W′ = H ∙ R
; isConverseWarbler = λ x y → begin
H ∙ R ∙ x ∙ y ≈⟨ isHummingbird R x y ⟩
R ∙ x ∙ y ∙ x ≈⟨ isRobin x y x ⟩
y ∙ x ∙ x ∎
}
problem₁₂ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasMockingbird ⦄ → HasStarling
problem₁₂ = record
{ S = W** ∙ G
; isStarling = λ x y z → begin
W** ∙ G ∙ x ∙ y ∙ z ≈⟨ isWarblerTwiceRemoved G x y z ⟩
G ∙ x ∙ y ∙ z ∙ z ≈⟨ isGoldfinch x y z z ⟩
x ∙ z ∙ (y ∙ z) ∎
} where
instance
hasWarblerTwiceRemoved = problem₉-W**
hasGoldfinch = Chapter₁₁.problem₄₇
problem₁₃ : ⦃ _ : HasStarling ⦄ ⦃ _ : HasRobin ⦄ → HasHummingbird
problem₁₃ = record
{ H = S ∙ R
; isHummingbird = λ x y z → begin
S ∙ R ∙ x ∙ y ∙ z ≈⟨ congʳ $ isStarling R x y ⟩
R ∙ y ∙ (x ∙ y) ∙ z ≈⟨ isRobin y (x ∙ y) z ⟩
x ∙ y ∙ z ∙ y ∎
}
problem₁₄-SR : ⦃ _ : HasStarling ⦄ ⦃ _ : HasRobin ⦄ → HasWarbler
problem₁₄-SR = record
{ W = R ∙ (S ∙ R ∙ R) ∙ R
; isWarbler = λ x y → begin
R ∙ (S ∙ R ∙ R) ∙ R ∙ x ∙ y ≈˘⟨ congʳ $ congʳ $ isRobin R R (S ∙ R ∙ R) ⟩
R ∙ R ∙ R ∙ (S ∙ R ∙ R) ∙ x ∙ y ≈⟨⟩
C ∙ (S ∙ R ∙ R) ∙ x ∙ y ≈⟨⟩
C ∙ (H ∙ R) ∙ x ∙ y ≈⟨⟩
W ∙ x ∙ y ≈⟨ isWarbler x y ⟩
x ∙ y ∙ y ∎
} where
instance
hasCardinal = Chapter₁₁.problem₂₁
hasHummingbird = problem₁₃
hasWarbler = problem₁₁
problem₁₄-SC : ⦃ _ : HasStarling ⦄ ⦃ _ : HasCardinal ⦄ → HasWarbler
problem₁₄-SC = record
{ W = C ∙ (S ∙ (C ∙ C) ∙ (C ∙ C))
; isWarbler = λ x y → begin
C ∙ (S ∙ (C ∙ C) ∙ (C ∙ C)) ∙ x ∙ y ≈⟨⟩
C ∙ (S ∙ R ∙ R) ∙ x ∙ y ≈⟨ isCardinal (S ∙ R ∙ R) x y ⟩
S ∙ R ∙ R ∙ y ∙ x ≈⟨ congʳ $ isStarling R R y ⟩
R ∙ y ∙ (R ∙ y) ∙ x ≈⟨ isRobin y (R ∙ y) x ⟩
R ∙ y ∙ x ∙ y ≈⟨ isRobin y x y ⟩
x ∙ y ∙ y ∎
} where instance hasRobin = Chapter₁₁.problem₂₃
problem₁₅ : ⦃ _ : HasThrush ⦄ ⦃ _ : HasStarling ⦄ → HasWarbler
problem₁₅ = record
{ W = S ∙ T
; isWarbler = λ x y → begin
S ∙ T ∙ x ∙ y ≈⟨ isStarling T x y ⟩
T ∙ y ∙ (x ∙ y) ≈⟨ isThrush y (x ∙ y) ⟩
x ∙ y ∙ y ∎
}
problem₁₆ : ⦃ _ : HasThrush ⦄ ⦃ _ : HasStarling ⦄ → HasMockingbird
problem₁₆ = record
{ M = S ∙ T ∙ T
; isMockingbird = λ x → begin
S ∙ T ∙ T ∙ x ≈⟨ isStarling T T x ⟩
T ∙ x ∙ (T ∙ x) ≈⟨ isThrush x (T ∙ x) ⟩
T ∙ x ∙ x ≈⟨ isThrush x x ⟩
x ∙ x ∎
}
module Exercises where
exercise₁-a : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄
→ ∃[ G₁ ] (∀ x y z w v → G₁ ∙ x ∙ y ∙ z ∙ w ∙ v ≈ x ∙ y ∙ v ∙ (z ∙ w))
exercise₁-a =
( D₁ ∙ C*
, λ x y z w v → begin
D₁ ∙ C* ∙ x ∙ y ∙ z ∙ w ∙ v ≈⟨ congʳ $ isDickcissel C* x y z w ⟩
C* ∙ x ∙ y ∙ (z ∙ w) ∙ v ≈⟨ isCardinalOnceRemoved x y (z ∙ w) v ⟩
x ∙ y ∙ v ∙ (z ∙ w) ∎
)
where
instance
hasCardinal = Chapter₁₁.problem₂₁′
hasCardinalOnceRemoved = Chapter₁₁.problem₃₁
hasDickcissel = Chapter₁₁.problem₉
-- Note: in the solutions, the bluebird is used, but the exercise does not
-- state that the bluebird may be used.
exercise₁-b : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasMockingbird ⦄
→ ∃[ G₁ ] (∀ x y z w v → G₁ ∙ x ∙ y ∙ z ∙ w ∙ v ≈ x ∙ y ∙ v ∙ (z ∙ w))
→ ∃[ G₂ ] (∀ x y z w → G₂ ∙ x ∙ y ∙ z ∙ w ≈ x ∙ w ∙ (x ∙ w) ∙ (y ∙ z))
exercise₁-b (G₁ , isG₁) =
( G₁ ∙ (B ∙ M)
, λ x y z w → begin
G₁ ∙ (B ∙ M) ∙ x ∙ y ∙ z ∙ w ≈⟨ isG₁ (B ∙ M) x y z w ⟩
B ∙ M ∙ x ∙ w ∙ (y ∙ z) ≈⟨ congʳ $ isBluebird M x w ⟩
M ∙ (x ∙ w) ∙ (y ∙ z) ≈⟨ congʳ $ isMockingbird (x ∙ w) ⟩
x ∙ w ∙ (x ∙ w) ∙ (y ∙ z) ∎
)
exercise₁-c : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄ ⦃ _ : HasIdentity ⦄
→ ∃[ I₂ ] (∀ x → I₂ ∙ x ≈ x ∙ I ∙ I)
exercise₁-c =
( B ∙ (T ∙ I) ∙ (T ∙ I)
, λ x → begin
B ∙ (T ∙ I) ∙ (T ∙ I) ∙ x ≈⟨ isBluebird (T ∙ I) (T ∙ I) x ⟩
T ∙ I ∙ (T ∙ I ∙ x) ≈⟨ isThrush I (T ∙ I ∙ x) ⟩
T ∙ I ∙ x ∙ I ≈⟨ congʳ $ isThrush I x ⟩
x ∙ I ∙ I ∎
)
exercise₁-d : ⦃ _ : HasIdentity ⦄ ⦃ _ : HasFinch ⦄
→ (hasI₂ : ∃[ I₂ ] (∀ x → I₂ ∙ x ≈ x ∙ I ∙ I))
→ ∀ x → proj₁ hasI₂ ∙ (F ∙ x) ≈ x
exercise₁-d (I₂ , isI₂) x = begin
I₂ ∙ (F ∙ x) ≈⟨ isI₂ (F ∙ x) ⟩
F ∙ x ∙ I ∙ I ≈⟨ isFinch x I I ⟩
I ∙ I ∙ x ≈⟨ congʳ $ isIdentity I ⟩
I ∙ x ≈⟨ isIdentity x ⟩
x ∎
exercise₁-e : ⦃ _ : HasFinch ⦄ ⦃ _ : HasQueerBird ⦄ ⦃ _ : HasIdentity ⦄
→ (hasG₂ : ∃[ G₂ ] (∀ x y z w → G₂ ∙ x ∙ y ∙ z ∙ w ≈ x ∙ w ∙ (x ∙ w) ∙ (y ∙ z)))
→ (hasI₂ : ∃[ I₂ ] (∀ x → I₂ ∙ x ≈ x ∙ I ∙ I))
→ IsWarbler (proj₁ hasG₂ ∙ F ∙ (Q ∙ proj₁ hasI₂))
exercise₁-e (G₂ , isG₂) (I₂ , isI₂) x y = begin
G₂ ∙ F ∙ (Q ∙ I₂) ∙ x ∙ y ≈⟨ isG₂ F (Q ∙ I₂) x y ⟩
F ∙ y ∙ (F ∙ y) ∙ (Q ∙ I₂ ∙ x) ≈⟨ isFinch y (F ∙ y) (Q ∙ I₂ ∙ x) ⟩
Q ∙ I₂ ∙ x ∙ (F ∙ y) ∙ y ≈⟨ congʳ $ isQueerBird I₂ x (F ∙ y) ⟩
x ∙ (I₂ ∙ (F ∙ y)) ∙ y ≈⟨ congʳ $ congˡ $ exercise₁-d (I₂ , isI₂) y ⟩
x ∙ y ∙ y ∎
exercise₂ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasCardinal ⦄ ⦃ _ : HasWarbler ⦄
→ IsStarling (B ∙ (B ∙ (B ∙ W) ∙ C) ∙ (B ∙ B))
exercise₂ x y z = begin
B ∙ (B ∙ (B ∙ W) ∙ C) ∙ (B ∙ B) ∙ x ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isBluebird (B ∙ (B ∙ W) ∙ C) (B ∙ B) x ⟩
B ∙ (B ∙ W) ∙ C ∙ (B ∙ B ∙ x) ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isBluebird (B ∙ W) C (B ∙ B ∙ x) ⟩
B ∙ W ∙ (C ∙ (B ∙ B ∙ x)) ∙ y ∙ z ≈⟨ congʳ $ isBluebird W (C ∙ (B ∙ B ∙ x)) y ⟩
W ∙ (C ∙ (B ∙ B ∙ x) ∙ y) ∙ z ≈⟨ isWarbler (C ∙ (B ∙ B ∙ x) ∙ y) z ⟩
C ∙ (B ∙ B ∙ x) ∙ y ∙ z ∙ z ≈⟨ congʳ $ isCardinal (B ∙ B ∙ x) y z ⟩
B ∙ B ∙ x ∙ z ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isBluebird B x z ⟩
B ∙ (x ∙ z) ∙ y ∙ z ≈⟨ isBluebird (x ∙ z) y z ⟩
x ∙ z ∙ (y ∙ z) ∎
exercise₃ : ⦃ _ : HasStarling ⦄ ⦃ _ : HasBluebird ⦄ → HasPhoenix
exercise₃ = record
{ Φ = B ∙ (B ∙ S) ∙ B
; isPhoenix = λ x y z w → begin
B ∙ (B ∙ S) ∙ B ∙ x ∙ y ∙ z ∙ w ≈⟨ congʳ $ congʳ $ congʳ $ isBluebird (B ∙ S) B x ⟩
B ∙ S ∙ (B ∙ x) ∙ y ∙ z ∙ w ≈⟨ congʳ $ congʳ $ isBluebird S (B ∙ x) y ⟩
S ∙ (B ∙ x ∙ y) ∙ z ∙ w ≈⟨ isStarling (B ∙ x ∙ y) z w ⟩
B ∙ x ∙ y ∙ w ∙ (z ∙ w) ≈⟨ congʳ $ isBluebird x y w ⟩
x ∙ (y ∙ w) ∙ (z ∙ w) ∎
}
exercise₄ : ⦃ _ : HasBluebird ⦄ ⦃ _ : HasCardinal ⦄ ⦃ _ : HasWarbler ⦄ → HasPsiBird
exercise₄ = record
{ Ψ = H* ∙ D₂
; isPsiBird = λ x y z w → begin
H* ∙ D₂ ∙ x ∙ y ∙ z ∙ w ≈⟨⟩
B ∙ H ∙ D₂ ∙ x ∙ y ∙ z ∙ w ≈⟨ congʳ $ congʳ $ congʳ $ isBluebird H D₂ x ⟩
H ∙ (D₂ ∙ x) ∙ y ∙ z ∙ w ≈⟨ congʳ $ isHummingbird (D₂ ∙ x) y z ⟩
D₂ ∙ x ∙ y ∙ z ∙ y ∙ w ≈⟨ isDovekie x y z y w ⟩
x ∙ (y ∙ z) ∙ (y ∙ w) ∎
} where
instance
hasHummingbird = problem₁₀
hasDovekie = Chapter₁₁.problem₁₁
H* = B ∙ H
-- NOTE: my copy of the book contains a mistake (looking at the given
-- solutions): it says Γxyzwv = y(zw)(xywv) instead of Γxyzwv = y(zw)(xyzwv).
exercise₅-a : ⦃ _ : HasPhoenix ⦄ ⦃ _ : HasBluebird ⦄
→ ∃[ Γ ] (∀ x y z w v → Γ ∙ x ∙ y ∙ z ∙ w ∙ v ≈ y ∙ (z ∙ w) ∙ (x ∙ y ∙ z ∙ w ∙ v))
exercise₅-a =
( Φ ∙ (Φ ∙ (Φ ∙ B)) ∙ B
, λ x y z w v → begin
Φ ∙ (Φ ∙ (Φ ∙ B)) ∙ B ∙ x ∙ y ∙ z ∙ w ∙ v ≈⟨ congʳ $ congʳ $ congʳ $ isPhoenix (Φ ∙ (Φ ∙ B)) B x y ⟩
Φ ∙ (Φ ∙ B) ∙ (B ∙ y) ∙ (x ∙ y) ∙ z ∙ w ∙ v ≈⟨ congʳ $ congʳ $ isPhoenix (Φ ∙ B) (B ∙ y) (x ∙ y) z ⟩
Φ ∙ B ∙ (B ∙ y ∙ z) ∙ (x ∙ y ∙ z) ∙ w ∙ v ≈⟨ congʳ $ isPhoenix B (B ∙ y ∙ z) (x ∙ y ∙ z) w ⟩
B ∙ (B ∙ y ∙ z ∙ w) ∙ (x ∙ y ∙ z ∙ w) ∙ v ≈⟨ isBluebird (B ∙ y ∙ z ∙ w) (x ∙ y ∙ z ∙ w) v ⟩
B ∙ y ∙ z ∙ w ∙ (x ∙ y ∙ z ∙ w ∙ v) ≈⟨ congʳ $ isBluebird y z w ⟩
y ∙ (z ∙ w) ∙ (x ∙ y ∙ z ∙ w ∙ v) ∎
)
exercise₅-b : ⦃ _ : HasKestrel ⦄
→ ∃[ Γ ] (∀ x y z w v → Γ ∙ x ∙ y ∙ z ∙ w ∙ v ≈ y ∙ (z ∙ w) ∙ (x ∙ y ∙ z ∙ w ∙ v))
→ HasPsiBird
exercise₅-b (Γ , isΓ) = record
{ Ψ = Γ ∙ (K ∙ K)
; isPsiBird = λ x y z w → begin
Γ ∙ (K ∙ K) ∙ x ∙ y ∙ z ∙ w ≈⟨ isΓ (K ∙ K) x y z w ⟩
x ∙ (y ∙ z) ∙ (K ∙ K ∙ x ∙ y ∙ z ∙ w) ≈⟨ congˡ $ congʳ $ congʳ $ congʳ $ isKestrel K x ⟩
x ∙ (y ∙ z) ∙ (K ∙ y ∙ z ∙ w) ≈⟨ congˡ $ congʳ $ isKestrel y z ⟩
x ∙ (y ∙ z) ∙ (y ∙ w) ∎
}
exercise₅ : ⦃ _ : HasPhoenix ⦄ ⦃ _ : HasBluebird ⦄ ⦃ _ : HasKestrel ⦄ → HasPsiBird
exercise₅ = exercise₅-b exercise₅-a
exercise₆-a : ⦃ _ : HasStarling ⦄ ⦃ _ : HasBluebird ⦄ ⦃ _ : HasThrush ⦄
→ ∃[ S′ ] (∀ x y z → S′ ∙ x ∙ y ∙ z ≈ y ∙ z ∙ (x ∙ z))
exercise₆-a =
( C ∙ S
, λ x y z → begin
C ∙ S ∙ x ∙ y ∙ z ≈⟨ congʳ $ isCardinal S x y ⟩
S ∙ y ∙ x ∙ z ≈⟨ isStarling y x z ⟩
y ∙ z ∙ (x ∙ z) ∎
)
where instance hasCardinal = Chapter₁₁.problem₂₁′
exercise₆-b : ⦃ _ : HasIdentity ⦄
→ ∃[ S′ ] (∀ x y z → S′ ∙ x ∙ y ∙ z ≈ y ∙ z ∙ (x ∙ z))
→ HasWarbler
exercise₆-b (S′ , isS′) = record
{ W = S′ ∙ I
; isWarbler = λ x y → begin
S′ ∙ I ∙ x ∙ y ≈⟨ isS′ I x y ⟩
x ∙ y ∙ (I ∙ y) ≈⟨ congˡ $ isIdentity y ⟩
x ∙ y ∙ y ∎
}
exercise₇ : ⦃ _ : HasQueerBird ⦄ ⦃ _ : HasCardinal ⦄ ⦃ _ : HasWarbler ⦄
→ ∃[ Q̂ ] IsStarling (C ∙ Q̂ ∙ W)
exercise₇ = let Q̂ = Q ∙ (Q ∙ Q ∙ (Q ∙ Q)) ∙ Q in
( Q̂
, λ x y z → begin
C ∙ Q̂ ∙ W ∙ x ∙ y ∙ z ≈⟨ congʳ $ congʳ $ isCardinal Q̂ W x ⟩
Q̂ ∙ x ∙ W ∙ y ∙ z ≈⟨⟩
Q ∙ (Q ∙ Q ∙ (Q ∙ Q)) ∙ Q ∙ x ∙ W ∙ y ∙ z ≈⟨ congʳ $ congʳ $ congʳ $ isQueerBird (Q ∙ Q ∙ (Q ∙ Q)) Q x ⟩
Q ∙ (Q ∙ Q ∙ (Q ∙ Q) ∙ x) ∙ W ∙ y ∙ z ≈⟨ congʳ $ isQueerBird (Q ∙ Q ∙ (Q ∙ Q) ∙ x) W y ⟩
W ∙ (Q ∙ Q ∙ (Q ∙ Q) ∙ x ∙ y) ∙ z ≈⟨ isWarbler (Q ∙ Q ∙ (Q ∙ Q) ∙ x ∙ y) z ⟩
Q ∙ Q ∙ (Q ∙ Q) ∙ x ∙ y ∙ z ∙ z ≈⟨ congʳ $ congʳ $ congʳ $ isQueerBird Q (Q ∙ Q) x ⟩
Q ∙ Q ∙ (Q ∙ x) ∙ y ∙ z ∙ z ≈⟨ congʳ $ congʳ $ isQueerBird Q (Q ∙ x) y ⟩
Q ∙ x ∙ (Q ∙ y) ∙ z ∙ z ≈⟨ congʳ $ isQueerBird x (Q ∙ y) z ⟩
Q ∙ y ∙ (x ∙ z) ∙ z ≈⟨ isQueerBird y (x ∙ z) z ⟩
x ∙ z ∙ (y ∙ z) ∎
)
|
/-
Copyright (c) 2019 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
! This file was ported from Lean 3 source module ring_theory.integral_closure
! leanprover-community/mathlib commit 641b6a82006416ec431b2987b354af9311fed4f2
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Polynomial.Expand
import Mathbin.LinearAlgebra.FiniteDimensional
import Mathbin.LinearAlgebra.Matrix.Charpoly.LinearMap
import Mathbin.RingTheory.Adjoin.Fg
import Mathbin.RingTheory.FiniteType
import Mathbin.RingTheory.Polynomial.ScaleRoots
import Mathbin.RingTheory.Polynomial.Tower
import Mathbin.RingTheory.TensorProduct
/-!
# Integral closure of a subring.
If `A` is an `R`-algebra then `a : A` is integral over `R` if it is a root of a monic polynomial
with coefficients in `R`. Enough theory is developed to prove that integral elements
form a sub-`R`-algebra of `A`.
## Main definitions
Let `R` be a `comm_ring` and let `A` be an R-algebra.
* `ring_hom.is_integral_elem (f : R →+* A) (x : A)` : `x` is integral with respect to the map `f`,
* `is_integral (x : A)` : `x` is integral over `R`, i.e., is a root of a monic polynomial with
coefficients in `R`.
* `integral_closure R A` : the integral closure of `R` in `A`, regarded as a sub-`R`-algebra of `A`.
-/
open Classical
open BigOperators Polynomial
open Polynomial Submodule
section Ring
variable {R S A : Type _}
variable [CommRing R] [Ring A] [Ring S] (f : R →+* S)
/-- An element `x` of `A` is said to be integral over `R` with respect to `f`
if it is a root of a monic polynomial `p : R[X]` evaluated under `f` -/
def RingHom.IsIntegralElem (f : R →+* A) (x : A) :=
∃ p : R[X], Monic p ∧ eval₂ f x p = 0
#align ring_hom.is_integral_elem RingHom.IsIntegralElem
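-- For instance, `(√2 : ℝ)` is integral over `ℤ`: it is a root of the monic
-- polynomial `X ^ 2 - C 2`. By contrast, `(1/2 : ℚ)` is not, since the rational
-- root theorem forces any rational root of a monic integer polynomial to be an
-- integer.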
/-- A ring homomorphism `f : R →+* A` is said to be integral
if every element of `A` is integral with respect to the map `f` -/
def RingHom.IsIntegral (f : R →+* A) :=
∀ x : A, f.IsIntegralElem x
#align ring_hom.is_integral RingHom.IsIntegral
variable [Algebra R A] (R)
/-- An element `x` of an algebra `A` over a commutative ring `R` is said to be *integral*,
if it is a root of some monic polynomial `p : R[X]`.
Equivalently, the element is integral over `R` with respect to the induced `algebra_map` -/
def IsIntegral (x : A) : Prop :=
(algebraMap R A).IsIntegralElem x
#align is_integral IsIntegral
variable (A)
/-- An algebra is integral if every element of the extension is integral over the base ring -/
protected def Algebra.IsIntegral : Prop :=
(algebraMap R A).IsIntegral
#align algebra.is_integral Algebra.IsIntegral
variable {R A}
theorem RingHom.is_integral_map {x : R} : f.IsIntegralElem (f x) :=
⟨X - C x, monic_X_sub_C _, by simp⟩
#align ring_hom.is_integral_map RingHom.is_integral_map
theorem isIntegral_algebraMap {x : R} : IsIntegral R (algebraMap R A x) :=
(algebraMap R A).is_integral_map
#align is_integral_algebra_map isIntegral_algebraMap
theorem isIntegral_of_noetherian (H : IsNoetherian R A) (x : A) : IsIntegral R x :=
by
let leval : R[X] →ₗ[R] A := (aeval x).toLinearMap
let D : ℕ → Submodule R A := fun n => (degree_le R n).map leval
let M := WellFounded.min (isNoetherian_iff_wellFounded.1 H) (Set.range D) ⟨_, ⟨0, rfl⟩⟩
have HM : M ∈ Set.range D := WellFounded.min_mem _ _ _
cases' HM with N HN
have HM : ¬M < D (N + 1) :=
WellFounded.not_lt_min (isNoetherian_iff_wellFounded.1 H) (Set.range D) _ ⟨N + 1, rfl⟩
rw [← HN] at HM
have HN2 : D (N + 1) ≤ D N :=
by_contradiction fun H =>
HM (lt_of_le_not_le (map_mono (degree_le_mono (WithBot.coe_le_coe.2 (Nat.le_succ N)))) H)
have HN3 : leval (X ^ (N + 1)) ∈ D N := HN2 (mem_map_of_mem (mem_degree_le.2 (degree_X_pow_le _)))
rcases HN3 with ⟨p, hdp, hpe⟩
refine' ⟨X ^ (N + 1) - p, monic_X_pow_sub (mem_degree_le.1 hdp), _⟩
show leval (X ^ (N + 1) - p) = 0
rw [LinearMap.map_sub, hpe, sub_self]
#align is_integral_of_noetherian isIntegral_of_noetherian
theorem isIntegral_of_submodule_noetherian (S : Subalgebra R A) (H : IsNoetherian R S.toSubmodule)
(x : A) (hx : x ∈ S) : IsIntegral R x :=
by
suffices IsIntegral R (show S from ⟨x, hx⟩)
by
rcases this with ⟨p, hpm, hpx⟩
replace hpx := congr_arg S.val hpx
refine' ⟨p, hpm, Eq.trans _ hpx⟩
simp only [aeval_def, eval₂, sum_def]
rw [S.val.map_sum]
refine' Finset.sum_congr rfl fun n hn => _
rw [S.val.map_mul, S.val.map_pow, S.val.commutes, S.val_apply, Subtype.coe_mk]
refine' isIntegral_of_noetherian H ⟨x, hx⟩
#align is_integral_of_submodule_noetherian isIntegral_of_submodule_noetherian
end Ring
section
variable {R A B S : Type _}
variable [CommRing R] [CommRing A] [CommRing B] [CommRing S]
variable [Algebra R A] [Algebra R B] (f : R →+* S)
theorem map_isIntegral {B C F : Type _} [Ring B] [Ring C] [Algebra R B] [Algebra A B] [Algebra R C]
[IsScalarTower R A B] [Algebra A C] [IsScalarTower R A C] {b : B} [AlgHomClass F A B C] (f : F)
(hb : IsIntegral R b) : IsIntegral R (f b) :=
by
obtain ⟨P, hP⟩ := hb
refine' ⟨P, hP.1, _⟩
rw [← aeval_def, show (aeval (f b)) P = (aeval (f b)) (P.map (algebraMap R A)) by simp,
aeval_alg_hom_apply, aeval_map_algebra_map, aeval_def, hP.2, _root_.map_zero]
#align map_is_integral map_isIntegral
theorem isIntegral_map_of_comp_eq_of_isIntegral {R S T U : Type _} [CommRing R] [CommRing S]
[CommRing T] [CommRing U] [Algebra R S] [Algebra T U] (φ : R →+* T) (ψ : S →+* U)
(h : (algebraMap T U).comp φ = ψ.comp (algebraMap R S)) {a : S} (ha : IsIntegral R a) :
IsIntegral T (ψ a) := by
rw [IsIntegral, RingHom.IsIntegralElem] at ha⊢
obtain ⟨p, hp⟩ := ha
refine' ⟨p.map φ, hp.left.map _, _⟩
rw [← eval_map, map_map, h, ← map_map, eval_map, eval₂_at_apply, eval_map, hp.right,
RingHom.map_zero]
#align is_integral_map_of_comp_eq_of_is_integral isIntegral_map_of_comp_eq_of_isIntegral
theorem isIntegral_algHom_iff {A B : Type _} [Ring A] [Ring B] [Algebra R A] [Algebra R B]
(f : A →ₐ[R] B) (hf : Function.Injective f) {x : A} : IsIntegral R (f x) ↔ IsIntegral R x :=
by
refine' ⟨_, map_isIntegral f⟩
rintro ⟨p, hp, hx⟩
use p, hp
rwa [← f.comp_algebra_map, ← AlgHom.coe_toRingHom, ← Polynomial.hom_eval₂, AlgHom.coe_toRingHom,
map_eq_zero_iff f hf] at hx
#align is_integral_alg_hom_iff isIntegral_algHom_iff
@[simp]
theorem isIntegral_algEquiv {A B : Type _} [Ring A] [Ring B] [Algebra R A] [Algebra R B]
(f : A ≃ₐ[R] B) {x : A} : IsIntegral R (f x) ↔ IsIntegral R x :=
⟨fun h => by simpa using map_isIntegral f.symm.to_alg_hom h, map_isIntegral f.toAlgHom⟩
#align is_integral_alg_equiv isIntegral_algEquiv
theorem isIntegral_of_isScalarTower [Algebra A B] [IsScalarTower R A B] {x : B}
(hx : IsIntegral R x) : IsIntegral A x :=
let ⟨p, hp, hpx⟩ := hx
⟨p.map <| algebraMap R A, hp.map _, by rw [← aeval_def, aeval_map_algebra_map, aeval_def, hpx]⟩
#align is_integral_of_is_scalar_tower isIntegral_of_isScalarTower
theorem map_isIntegral_int {B C F : Type _} [Ring B] [Ring C] {b : B} [RingHomClass F B C] (f : F)
(hb : IsIntegral ℤ b) : IsIntegral ℤ (f b) :=
map_isIntegral (f : B →+* C).toIntAlgHom hb
#align map_is_integral_int map_isIntegral_int
theorem isIntegral_ofSubring {x : A} (T : Subring R) (hx : IsIntegral T x) : IsIntegral R x :=
isIntegral_of_isScalarTower hx
#align is_integral_of_subring isIntegral_ofSubring
theorem IsIntegral.algebraMap [Algebra A B] [IsScalarTower R A B] {x : A} (h : IsIntegral R x) :
IsIntegral R (algebraMap A B x) :=
by
rcases h with ⟨f, hf, hx⟩
use f, hf
rw [IsScalarTower.algebraMap_eq R A B, ← hom_eval₂, hx, RingHom.map_zero]
#align is_integral.algebra_map IsIntegral.algebraMap
theorem isIntegral_algebraMap_iff [Algebra A B] [IsScalarTower R A B] {x : A}
(hAB : Function.Injective (algebraMap A B)) :
IsIntegral R (algebraMap A B x) ↔ IsIntegral R x :=
isIntegral_algHom_iff (IsScalarTower.toAlgHom R A B) hAB
#align is_integral_algebra_map_iff isIntegral_algebraMap_iff
theorem isIntegral_iff_isIntegral_closure_finite {r : A} :
IsIntegral R r ↔ ∃ s : Set R, s.Finite ∧ IsIntegral (Subring.closure s) r :=
by
constructor <;> intro hr
· rcases hr with ⟨p, hmp, hpr⟩
refine' ⟨_, Finset.finite_toSet _, p.restriction, monic_restriction.2 hmp, _⟩
rw [← aeval_def, ← aeval_map_algebra_map R r p.restriction, map_restriction, aeval_def, hpr]
rcases hr with ⟨s, hs, hsr⟩
exact isIntegral_ofSubring _ hsr
#align is_integral_iff_is_integral_closure_finite isIntegral_iff_isIntegral_closure_finite
theorem fg_adjoin_singleton_of_integral (x : A) (hx : IsIntegral R x) :
(Algebra.adjoin R ({x} : Set A)).toSubmodule.Fg :=
by
rcases hx with ⟨f, hfm, hfx⟩
exists Finset.image ((· ^ ·) x) (Finset.range (nat_degree f + 1))
apply le_antisymm
· rw [span_le]
intro s hs
rw [Finset.mem_coe] at hs
rcases Finset.mem_image.1 hs with ⟨k, hk, rfl⟩
clear hk
exact (Algebra.adjoin R {x}).pow_mem (Algebra.subset_adjoin (Set.mem_singleton _)) k
intro r hr; change r ∈ Algebra.adjoin R ({x} : Set A) at hr
rw [Algebra.adjoin_singleton_eq_range_aeval] at hr
rcases(aeval x).mem_range.mp hr with ⟨p, rfl⟩
rw [← mod_by_monic_add_div p hfm]
rw [← aeval_def] at hfx
rw [AlgHom.map_add, AlgHom.map_mul, hfx, MulZeroClass.zero_mul, add_zero]
have : degree (p %ₘ f) ≤ degree f := degree_mod_by_monic_le p hfm
generalize p %ₘ f = q at this⊢
rw [← sum_C_mul_X_pow_eq q, aeval_def, eval₂_sum, sum_def]
refine' sum_mem fun k hkq => _
rw [eval₂_mul, eval₂_C, eval₂_pow, eval₂_X, ← Algebra.smul_def]
refine' smul_mem _ _ (subset_span _)
rw [Finset.mem_coe]; refine' Finset.mem_image.2 ⟨_, _, rfl⟩
rw [Finset.mem_range, Nat.lt_succ_iff]; refine' le_of_not_lt fun hk => _
rw [degree_le_iff_coeff_zero] at this
rw [mem_support_iff] at hkq; apply hkq; apply this
exact lt_of_le_of_lt degree_le_nat_degree (WithBot.coe_lt_coe.2 hk)
#align fg_adjoin_singleton_of_integral fg_adjoin_singleton_of_integral
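-- Concretely: if `x` is a root of a monic `f` of degree `n`, the powers
-- `1, x, …, x ^ n` generate `adjoin R {x}` as an `R`-module, because any `p(x)`
-- equals `(p %ₘ f)(x)` and `p %ₘ f` has degree at most `n`.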
theorem fg_adjoin_of_finite {s : Set A} (hfs : s.Finite) (his : ∀ x ∈ s, IsIntegral R x) :
(Algebra.adjoin R s).toSubmodule.Fg :=
Set.Finite.induction_on hfs
(fun _ =>
⟨{1},
Submodule.ext fun x =>
by
erw [Algebra.adjoin_empty, Finset.coe_singleton, ← one_eq_span, one_eq_range,
LinearMap.mem_range, Algebra.mem_bot]
rfl⟩)
(fun a s has hs ih his => by
rw [← Set.union_singleton, Algebra.adjoin_union_coe_submodule] <;>
exact
fg.mul (ih fun i hi => his i <| Set.mem_insert_of_mem a hi)
(fg_adjoin_singleton_of_integral _ <| his a <| Set.mem_insert a s))
his
#align fg_adjoin_of_finite fg_adjoin_of_finite
theorem isNoetherian_adjoin_finset [IsNoetherianRing R] (s : Finset A)
(hs : ∀ x ∈ s, IsIntegral R x) : IsNoetherian R (Algebra.adjoin R (↑s : Set A)) :=
isNoetherian_of_fg_of_noetherian _ (fg_adjoin_of_finite s.finite_toSet hs)
#align is_noetherian_adjoin_finset isNoetherian_adjoin_finset
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/-- If `S` is a sub-`R`-algebra of `A` and `S` is finitely-generated as an `R`-module,
then all elements of `S` are integral over `R`. -/
theorem isIntegral_of_mem_of_fg (S : Subalgebra R A) (HS : S.toSubmodule.Fg) (x : A) (hx : x ∈ S) :
IsIntegral R x :=
by
-- say `x ∈ S`. We want to prove that `x` is integral over `R`.
-- Say `S` is generated as an `R`-module by the set `y`.
cases' HS with y hy
-- We can write `x` as `∑ rᵢ yᵢ` for `yᵢ ∈ Y`.
obtain ⟨lx, hlx1, hlx2⟩ :
∃ (l : A →₀ R)(H : l ∈ Finsupp.supported R R ↑y), (Finsupp.total A A R id) l = x := by
rwa [← @Finsupp.mem_span_image_iff_total A A R _ _ _ id (↑y) x, Set.image_id ↑y, hy]
-- Note that `y ⊆ S`.
have hyS : ∀ {p}, p ∈ y → p ∈ S := fun p hp =>
show p ∈ S.to_submodule by
rw [← hy]
exact subset_span hp
-- Now `S` is a subalgebra so the product of two elements of `y` is also in `S`.
have : ∀ jk : (↑(y ×ˢ y) : Set (A × A)), jk.1.1 * jk.1.2 ∈ S.to_submodule := fun jk =>
S.mul_mem (hyS (Finset.mem_product.1 jk.2).1) (hyS (Finset.mem_product.1 jk.2).2)
rw [← hy, ← Set.image_id ↑y] at this
simp only [Finsupp.mem_span_image_iff_total] at this
-- Say `yᵢyⱼ = ∑rᵢⱼₖ yₖ`
choose ly hly1 hly2
-- Now let `S₀` be the subring of `R` generated by the `rᵢ` and the `rᵢⱼₖ`.
let S₀ : Subring R :=
Subring.closure ↑(lx.frange ∪ Finset.bunionᵢ Finset.univ (Finsupp.frange ∘ ly))
-- It suffices to prove that `x` is integral over `S₀`.
refine' isIntegral_ofSubring S₀ _
letI : CommRing S₀ := SubringClass.toCommRing S₀
letI : Algebra S₀ A := Algebra.ofSubring S₀
-- Claim: the `S₀`-module span (in `A`) of the set `y ∪ {1}` is closed under
-- multiplication (indeed, this is the motivation for the definition of `S₀`).
have :
span S₀ (insert 1 ↑y : Set A) * span S₀ (insert 1 ↑y : Set A) ≤ span S₀ (insert 1 ↑y : Set A) :=
by
rw [span_mul_span]
refine' span_le.2 fun z hz => _
rcases Set.mem_mul.1 hz with ⟨p, q, rfl | hp, hq, rfl⟩
· rw [one_mul]
exact subset_span hq
rcases hq with (rfl | hq)
· rw [mul_one]
exact subset_span (Or.inr hp)
erw [← hly2 ⟨(p, q), Finset.mem_product.2 ⟨hp, hq⟩⟩]
rw [Finsupp.total_apply, Finsupp.sum]
refine' (span S₀ (insert 1 ↑y : Set A)).sum_mem fun t ht => _
have : ly ⟨(p, q), Finset.mem_product.2 ⟨hp, hq⟩⟩ t ∈ S₀ :=
Subring.subset_closure
(Finset.mem_union_right _ <|
Finset.mem_bunionᵢ.2
⟨⟨(p, q), Finset.mem_product.2 ⟨hp, hq⟩⟩, Finset.mem_univ _,
Finsupp.mem_frange.2 ⟨Finsupp.mem_support_iff.1 ht, _, rfl⟩⟩)
change (⟨_, this⟩ : S₀) • t ∈ _
exact smul_mem _ _ (subset_span <| Or.inr <| hly1 _ ht)
-- Hence this span is a subring. Call this subring `S₁`.
let S₁ : Subring A :=
{ carrier := span S₀ (insert 1 ↑y : Set A)
one_mem' := subset_span <| Or.inl rfl
mul_mem' := fun p q hp hq => this <| mul_mem_mul hp hq
zero_mem' := (span S₀ (insert 1 ↑y : Set A)).zero_mem
add_mem' := fun _ _ => (span S₀ (insert 1 ↑y : Set A)).add_mem
neg_mem' := fun _ => (span S₀ (insert 1 ↑y : Set A)).neg_mem }
have : S₁ = Subalgebra.toSubring (Algebra.adjoin S₀ (↑y : Set A)) :=
by
ext z
suffices
z ∈ span (↥S₀) (insert 1 ↑y : Set A) ↔ z ∈ (Algebra.adjoin (↥S₀) (y : Set A)).toSubmodule by
simpa
constructor <;> intro hz
·
exact
(span_le.2 (Set.insert_subset.2 ⟨(Algebra.adjoin S₀ ↑y).one_mem, Algebra.subset_adjoin⟩)) hz
· rw [Subalgebra.mem_toSubmodule, Algebra.mem_adjoin_iff] at hz
suffices Subring.closure (Set.range ⇑(algebraMap (↥S₀) A) ∪ ↑y) ≤ S₁ by exact this hz
refine' Subring.closure_le.2 (Set.union_subset _ fun t ht => subset_span <| Or.inr ht)
rw [Set.range_subset_iff]
intro y
rw [Algebra.algebraMap_eq_smul_one]
exact smul_mem _ y (subset_span (Or.inl rfl))
have foo : ∀ z, z ∈ S₁ ↔ z ∈ Algebra.adjoin (↥S₀) (y : Set A)
simp [this]
haveI : IsNoetherianRing ↥S₀ := is_noetherian_subring_closure _ (Finset.finite_toSet _)
refine'
isIntegral_of_submodule_noetherian (Algebra.adjoin S₀ ↑y)
(isNoetherian_of_fg_of_noetherian _
⟨insert 1 y, by
rw [Finset.coe_insert]
ext z
simp [S₁]
convert foo z⟩)
_ _
rw [← hlx2, Finsupp.total_apply, Finsupp.sum]
refine' Subalgebra.sum_mem _ fun r hr => _
have : lx r ∈ S₀ :=
Subring.subset_closure (Finset.mem_union_left _ (Finset.mem_image_of_mem _ hr))
change (⟨_, this⟩ : S₀) • r ∈ _
rw [Finsupp.mem_supported] at hlx1
exact Subalgebra.smul_mem _ (Algebra.subset_adjoin <| hlx1 hr) _
#align is_integral_of_mem_of_fg isIntegral_of_mem_of_fg
theorem Module.End.isIntegral {M : Type _} [AddCommGroup M] [Module R M] [Module.Finite R M] :
Algebra.IsIntegral R (Module.End R M) :=
LinearMap.exists_monic_and_aeval_eq_zero R
#align module.End.is_integral Module.End.isIntegral
/-- Suppose `A` is an `R`-algebra, `M` is an `A`-module such that `a • m ≠ 0` for all non-zero `a`
and `m`. If `x : A` fixes a nontrivial f.g. `R`-submodule `N` of `M`, then `x` is `R`-integral. -/
theorem isIntegral_of_smul_mem_submodule {M : Type _} [AddCommGroup M] [Module R M] [Module A M]
[IsScalarTower R A M] [NoZeroSMulDivisors A M] (N : Submodule R M) (hN : N ≠ ⊥) (hN' : N.Fg)
(x : A) (hx : ∀ n ∈ N, x • n ∈ N) : IsIntegral R x :=
by
let A' : Subalgebra R A :=
{ carrier := { x | ∀ n ∈ N, x • n ∈ N }
mul_mem' := fun a b ha hb n hn => smul_smul a b n ▸ ha _ (hb _ hn)
one_mem' := fun n hn => (one_smul A n).symm ▸ hn
add_mem' := fun a b ha hb n hn => (add_smul a b n).symm ▸ N.add_mem (ha _ hn) (hb _ hn)
zero_mem' := fun n hn => (zero_smul A n).symm ▸ N.zero_mem
algebraMap_mem' := fun r n hn => (algebraMap_smul A r n).symm ▸ N.smul_mem r hn }
let f : A' →ₐ[R] Module.End R N :=
AlgHom.ofLinearMap
{ toFun := fun x => (DistribMulAction.toLinearMap R M x).restrict x.Prop
map_add' := fun x y => LinearMap.ext fun n => Subtype.ext <| add_smul x y n
map_smul' := fun r s => LinearMap.ext fun n => Subtype.ext <| smul_assoc r s n }
(LinearMap.ext fun n => Subtype.ext <| one_smul _ _) fun x y =>
LinearMap.ext fun n => Subtype.ext <| mul_smul x y n
obtain ⟨a, ha₁, ha₂⟩ : ∃ a ∈ N, a ≠ (0 : M) :=
by
by_contra h'
push_neg at h'
apply hN
rwa [eq_bot_iff]
have : Function.Injective f :=
by
show Function.Injective f.to_linear_map
rw [← LinearMap.ker_eq_bot, eq_bot_iff]
intro s hs
have : s.1 • a = 0 := congr_arg Subtype.val (LinearMap.congr_fun hs ⟨a, ha₁⟩)
exact Subtype.ext ((eq_zero_or_eq_zero_of_smul_eq_zero this).resolve_right ha₂)
show IsIntegral R (A'.val ⟨x, hx⟩)
rw [isIntegral_algHom_iff A'.val Subtype.val_injective, ← isIntegral_algHom_iff f this]
haveI : Module.Finite R N := by rwa [Module.finite_def, Submodule.fg_top]
apply Module.End.isIntegral
#align is_integral_of_smul_mem_submodule isIntegral_of_smul_mem_submodule
variable {f}
theorem RingHom.Finite.to_isIntegral (h : f.Finite) : f.IsIntegral :=
letI := f.to_algebra
fun x => isIntegral_of_mem_of_fg ⊤ h.1 _ trivial
#align ring_hom.finite.to_is_integral RingHom.Finite.to_isIntegral
alias RingHom.Finite.to_isIntegral ← RingHom.IsIntegral.of_finite
#align ring_hom.is_integral.of_finite RingHom.IsIntegral.of_finite
theorem RingHom.IsIntegral.to_finite (h : f.IsIntegral) (h' : f.FiniteType) : f.Finite :=
by
letI := f.to_algebra
obtain ⟨s, hs⟩ := h'
constructor
change (⊤ : Subalgebra R S).toSubmodule.Fg
rw [← hs]
exact fg_adjoin_of_finite (Set.toFinite _) fun x _ => h x
#align ring_hom.is_integral.to_finite RingHom.IsIntegral.to_finite
alias RingHom.IsIntegral.to_finite ← RingHom.Finite.of_isIntegral_of_finiteType
#align ring_hom.finite.of_is_integral_of_finite_type RingHom.Finite.of_isIntegral_of_finiteType
/-- finite = integral + finite type -/
theorem RingHom.finite_iff_isIntegral_and_finiteType : f.Finite ↔ f.IsIntegral ∧ f.FiniteType :=
⟨fun h => ⟨h.to_isIntegral, h.to_finiteType⟩, fun ⟨h, h'⟩ => h.toFinite h'⟩
#align ring_hom.finite_iff_is_integral_and_finite_type RingHom.finite_iff_isIntegral_and_finiteType
theorem Algebra.IsIntegral.finite (h : Algebra.IsIntegral R A) [h' : Algebra.FiniteType R A] :
Module.Finite R A :=
by
have :=
h.to_finite
(by
delta RingHom.FiniteType
convert h'
ext
exact (Algebra.smul_def _ _).symm)
delta RingHom.Finite at this; convert this; ext; exact Algebra.smul_def _ _
#align algebra.is_integral.finite Algebra.IsIntegral.finite
theorem Algebra.IsIntegral.of_finite [h : Module.Finite R A] : Algebra.IsIntegral R A :=
by
apply RingHom.Finite.to_isIntegral
delta RingHom.Finite; convert h; ext; exact (Algebra.smul_def _ _).symm
#align algebra.is_integral.of_finite Algebra.IsIntegral.of_finite
/-- finite = integral + finite type -/
theorem Algebra.finite_iff_isIntegral_and_finiteType :
Module.Finite R A ↔ Algebra.IsIntegral R A ∧ Algebra.FiniteType R A :=
⟨fun h => ⟨Algebra.IsIntegral.of_finite, inferInstance⟩, fun ⟨h, h'⟩ => h.finite⟩
#align algebra.finite_iff_is_integral_and_finite_type Algebra.finite_iff_isIntegral_and_finiteType
variable (f)
theorem RingHom.is_integral_of_mem_closure {x y z : S} (hx : f.IsIntegralElem x)
(hy : f.IsIntegralElem y) (hz : z ∈ Subring.closure ({x, y} : Set S)) : f.IsIntegralElem z :=
by
letI : Algebra R S := f.to_algebra
have := (fg_adjoin_singleton_of_integral x hx).mul (fg_adjoin_singleton_of_integral y hy)
rw [← Algebra.adjoin_union_coe_submodule, Set.singleton_union] at this
exact
isIntegral_of_mem_of_fg (Algebra.adjoin R {x, y}) this z
(Algebra.mem_adjoin_iff.2 <| Subring.closure_mono (Set.subset_union_right _ _) hz)
#align ring_hom.is_integral_of_mem_closure RingHom.is_integral_of_mem_closure
theorem isIntegral_of_mem_closure {x y z : A} (hx : IsIntegral R x) (hy : IsIntegral R y)
(hz : z ∈ Subring.closure ({x, y} : Set A)) : IsIntegral R z :=
(algebraMap R A).is_integral_of_mem_closure hx hy hz
#align is_integral_of_mem_closure isIntegral_of_mem_closure
theorem RingHom.is_integral_zero : f.IsIntegralElem 0 :=
f.map_zero ▸ f.is_integral_map
#align ring_hom.is_integral_zero RingHom.is_integral_zero
theorem isIntegral_zero : IsIntegral R (0 : A) :=
(algebraMap R A).is_integral_zero
#align is_integral_zero isIntegral_zero
theorem RingHom.is_integral_one : f.IsIntegralElem 1 :=
f.map_one ▸ f.is_integral_map
#align ring_hom.is_integral_one RingHom.is_integral_one
theorem isIntegral_one : IsIntegral R (1 : A) :=
(algebraMap R A).is_integral_one
#align is_integral_one isIntegral_one
theorem RingHom.is_integral_add {x y : S} (hx : f.IsIntegralElem x) (hy : f.IsIntegralElem y) :
f.IsIntegralElem (x + y) :=
f.is_integral_of_mem_closure hx hy <|
Subring.add_mem _ (Subring.subset_closure (Or.inl rfl)) (Subring.subset_closure (Or.inr rfl))
#align ring_hom.is_integral_add RingHom.is_integral_add
theorem isIntegral_add {x y : A} (hx : IsIntegral R x) (hy : IsIntegral R y) :
IsIntegral R (x + y) :=
(algebraMap R A).is_integral_add hx hy
#align is_integral_add isIntegral_add
theorem RingHom.is_integral_neg {x : S} (hx : f.IsIntegralElem x) : f.IsIntegralElem (-x) :=
f.is_integral_of_mem_closure hx hx (Subring.neg_mem _ (Subring.subset_closure (Or.inl rfl)))
#align ring_hom.is_integral_neg RingHom.is_integral_neg
theorem isIntegral_neg {x : A} (hx : IsIntegral R x) : IsIntegral R (-x) :=
(algebraMap R A).is_integral_neg hx
#align is_integral_neg isIntegral_neg
theorem RingHom.is_integral_sub {x y : S} (hx : f.IsIntegralElem x) (hy : f.IsIntegralElem y) :
f.IsIntegralElem (x - y) := by
simpa only [sub_eq_add_neg] using f.is_integral_add hx (f.is_integral_neg hy)
#align ring_hom.is_integral_sub RingHom.is_integral_sub
theorem isIntegral_sub {x y : A} (hx : IsIntegral R x) (hy : IsIntegral R y) :
IsIntegral R (x - y) :=
(algebraMap R A).is_integral_sub hx hy
#align is_integral_sub isIntegral_sub
theorem RingHom.is_integral_mul {x y : S} (hx : f.IsIntegralElem x) (hy : f.IsIntegralElem y) :
f.IsIntegralElem (x * y) :=
f.is_integral_of_mem_closure hx hy
(Subring.mul_mem _ (Subring.subset_closure (Or.inl rfl)) (Subring.subset_closure (Or.inr rfl)))
#align ring_hom.is_integral_mul RingHom.is_integral_mul
theorem isIntegral_mul {x y : A} (hx : IsIntegral R x) (hy : IsIntegral R y) :
IsIntegral R (x * y) :=
(algebraMap R A).is_integral_mul hx hy
#align is_integral_mul isIntegral_mul
theorem isIntegral_smul [Algebra S A] [Algebra R S] [IsScalarTower R S A] {x : A} (r : R)
(hx : IsIntegral S x) : IsIntegral S (r • x) :=
by
rw [Algebra.smul_def, IsScalarTower.algebraMap_apply R S A]
exact isIntegral_mul isIntegral_algebraMap hx
#align is_integral_smul isIntegral_smul
theorem isIntegral_of_pow {x : A} {n : ℕ} (hn : 0 < n) (hx : IsIntegral R <| x ^ n) :
IsIntegral R x := by
rcases hx with ⟨p, ⟨hmonic, heval⟩⟩
exact
⟨expand R n p, monic.expand hn hmonic, by
rwa [eval₂_eq_eval_map, map_expand, expand_eval, ← eval₂_eq_eval_map]⟩
#align is_integral_of_pow isIntegral_of_pow
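-- The witness above: if `x ^ n` is a root of the monic `p`, then `x` is a root
-- of `expand R n p`, i.e. `p` with `X` replaced by `X ^ n`, which is again monic
-- when `0 < n`.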
variable (R A)
/-- The integral closure of R in an R-algebra A. -/
def integralClosure : Subalgebra R A
where
carrier := { r | IsIntegral R r }
zero_mem' := isIntegral_zero
one_mem' := isIntegral_one
add_mem' _ _ := isIntegral_add
mul_mem' _ _ := isIntegral_mul
algebraMap_mem' x := isIntegral_algebraMap
#align integral_closure integralClosure
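-- Closure under `+` and `*` is the substantial part of this definition; it rests
-- on `isIntegral_add` and `isIntegral_mul` above, i.e. on the finitely generated
-- submodule characterisation of integrality.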
theorem mem_integralClosure_iff_mem_fg {r : A} :
r ∈ integralClosure R A ↔ ∃ M : Subalgebra R A, M.toSubmodule.Fg ∧ r ∈ M :=
⟨fun hr =>
⟨Algebra.adjoin R {r}, fg_adjoin_singleton_of_integral _ hr, Algebra.subset_adjoin rfl⟩,
fun ⟨M, Hf, hrM⟩ => isIntegral_of_mem_of_fg M Hf _ hrM⟩
#align mem_integral_closure_iff_mem_fg mem_integralClosure_iff_mem_fg
variable {R} {A}
theorem adjoin_le_integralClosure {x : A} (hx : IsIntegral R x) :
Algebra.adjoin R {x} ≤ integralClosure R A :=
by
rw [Algebra.adjoin_le_iff]
simp only [SetLike.mem_coe, Set.singleton_subset_iff]
exact hx
#align adjoin_le_integral_closure adjoin_le_integralClosure
theorem le_integralClosure_iff_isIntegral {S : Subalgebra R A} :
S ≤ integralClosure R A ↔ Algebra.IsIntegral R S :=
SetLike.forall.symm.trans
(forall_congr' fun x =>
show IsIntegral R (algebraMap S A x) ↔ IsIntegral R x from
isIntegral_algebraMap_iff Subtype.coe_injective)
#align le_integral_closure_iff_is_integral le_integralClosure_iff_isIntegral
theorem isIntegral_sup {S T : Subalgebra R A} :
Algebra.IsIntegral R ↥(S ⊔ T) ↔ Algebra.IsIntegral R S ∧ Algebra.IsIntegral R T := by
simp only [← le_integralClosure_iff_isIntegral, sup_le_iff]
#align is_integral_sup isIntegral_sup
/-- Mapping an integral closure along an `alg_equiv` gives the integral closure. -/
theorem integralClosure_map_algEquiv (f : A ≃ₐ[R] B) :
(integralClosure R A).map (f : A →ₐ[R] B) = integralClosure R B :=
by
ext y
rw [Subalgebra.mem_map]
constructor
· rintro ⟨x, hx, rfl⟩
exact map_isIntegral f hx
· intro hy
use f.symm y, map_isIntegral (f.symm : B →ₐ[R] A) hy
simp
#align integral_closure_map_alg_equiv integralClosure_map_algEquiv
theorem integralClosure.isIntegral (x : integralClosure R A) : IsIntegral R x :=
let ⟨p, hpm, hpx⟩ := x.2
⟨p, hpm,
Subtype.eq <| by
rwa [← aeval_def, Subtype.val_eq_coe, ← Subalgebra.val_apply, aeval_alg_hom_apply] at hpx⟩
#align integral_closure.is_integral integralClosure.isIntegral
theorem RingHom.is_integral_of_is_integral_mul_unit (x y : S) (r : R) (hr : f r * y = 1)
(hx : f.IsIntegralElem (x * y)) : f.IsIntegralElem x :=
by
obtain ⟨p, ⟨p_monic, hp⟩⟩ := hx
refine' ⟨scale_roots p r, ⟨(monic_scale_roots_iff r).2 p_monic, _⟩⟩
convert scale_roots_eval₂_eq_zero f hp
rw [mul_comm x y, ← mul_assoc, hr, one_mul]
#align ring_hom.is_integral_of_is_integral_mul_unit RingHom.is_integral_of_is_integral_mul_unit
theorem isIntegral_of_isIntegral_mul_unit {x y : A} {r : R} (hr : algebraMap R A r * y = 1)
(hx : IsIntegral R (x * y)) : IsIntegral R x :=
(algebraMap R A).is_integral_of_is_integral_mul_unit x y r hr hx
#align is_integral_of_is_integral_mul_unit isIntegral_of_isIntegral_mul_unit
/-- A generalization of `is_integral_of_mem_closure`, bootstrapped up from that lemma. -/
theorem isIntegral_of_mem_closure' (G : Set A) (hG : ∀ x ∈ G, IsIntegral R x) :
∀ x ∈ Subring.closure G, IsIntegral R x := fun x hx =>
Subring.closure_induction hx hG isIntegral_zero isIntegral_one (fun _ _ => isIntegral_add)
(fun _ => isIntegral_neg) fun _ _ => isIntegral_mul
#align is_integral_of_mem_closure' isIntegral_of_mem_closure'
theorem is_integral_of_mem_closure'' {S : Type _} [CommRing S] {f : R →+* S} (G : Set S)
(hG : ∀ x ∈ G, f.IsIntegralElem x) : ∀ x ∈ Subring.closure G, f.IsIntegralElem x := fun x hx =>
@isIntegral_of_mem_closure' R S _ _ f.toAlgebra G hG x hx
#align is_integral_of_mem_closure'' is_integral_of_mem_closure''
theorem IsIntegral.pow {x : A} (h : IsIntegral R x) (n : ℕ) : IsIntegral R (x ^ n) :=
(integralClosure R A).pow_mem h n
#align is_integral.pow IsIntegral.pow
theorem IsIntegral.nsmul {x : A} (h : IsIntegral R x) (n : ℕ) : IsIntegral R (n • x) :=
(integralClosure R A).nsmul_mem h n
#align is_integral.nsmul IsIntegral.nsmul
theorem IsIntegral.zsmul {x : A} (h : IsIntegral R x) (n : ℤ) : IsIntegral R (n • x) :=
(integralClosure R A).zsmul_mem h n
#align is_integral.zsmul IsIntegral.zsmul
theorem IsIntegral.multiset_prod {s : Multiset A} (h : ∀ x ∈ s, IsIntegral R x) :
IsIntegral R s.Prod :=
(integralClosure R A).multiset_prod_mem h
#align is_integral.multiset_prod IsIntegral.multiset_prod
theorem IsIntegral.multiset_sum {s : Multiset A} (h : ∀ x ∈ s, IsIntegral R x) :
IsIntegral R s.Sum :=
(integralClosure R A).multiset_sum_mem h
#align is_integral.multiset_sum IsIntegral.multiset_sum
theorem IsIntegral.prod {α : Type _} {s : Finset α} (f : α → A) (h : ∀ x ∈ s, IsIntegral R (f x)) :
IsIntegral R (∏ x in s, f x) :=
(integralClosure R A).prod_mem h
#align is_integral.prod IsIntegral.prod
theorem IsIntegral.sum {α : Type _} {s : Finset α} (f : α → A) (h : ∀ x ∈ s, IsIntegral R (f x)) :
IsIntegral R (∑ x in s, f x) :=
(integralClosure R A).sum_mem h
#align is_integral.sum IsIntegral.sum
theorem IsIntegral.det {n : Type _} [Fintype n] [DecidableEq n] {M : Matrix n n A}
(h : ∀ i j, IsIntegral R (M i j)) : IsIntegral R M.det :=
by
rw [Matrix.det_apply]
exact IsIntegral.sum _ fun σ hσ => IsIntegral.zsmul (IsIntegral.prod _ fun i hi => h _ _) _
#align is_integral.det IsIntegral.det
@[simp]
theorem IsIntegral.pow_iff {x : A} {n : ℕ} (hn : 0 < n) : IsIntegral R (x ^ n) ↔ IsIntegral R x :=
⟨isIntegral_of_pow hn, fun hx => IsIntegral.pow hx n⟩
#align is_integral.pow_iff IsIntegral.pow_iff
open TensorProduct
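-- Added proof sketch: `1 ⊗ₜ y` is a root of `p.map (algebraMap R A)`, so
-- `x ⊗ₜ y = x * (1 ⊗ₜ y)` is a root of `(p.map (algebraMap R A)).scaleRoots x`,
-- which is monic by `Polynomial.monic_scaleRoots_iff`.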
theorem IsIntegral.tmul (x : A) {y : B} (h : IsIntegral R y) : IsIntegral A (x ⊗ₜ[R] y) :=
by
obtain ⟨p, hp, hp'⟩ := h
refine' ⟨(p.map (algebraMap R A)).scaleRoots x, _, _⟩
· rw [Polynomial.monic_scaleRoots_iff]
exact hp.map _
convert @Polynomial.scaleRoots_eval₂_mul (A ⊗[R] B) A _ _ _
    Algebra.TensorProduct.includeLeft.toRingHom (1 ⊗ₜ y) x using 2
· simp only [AlgHom.toRingHom_eq_coe, AlgHom.coe_toRingHom, mul_one, one_mul,
    Algebra.TensorProduct.includeLeft_apply, Algebra.TensorProduct.tmul_mul_tmul]
convert (MulZeroClass.mul_zero _).symm
rw [Polynomial.eval₂_map, Algebra.TensorProduct.includeLeft_comp_algebraMap, ←
Polynomial.eval₂_map]
convert Polynomial.eval₂_at_apply Algebra.TensorProduct.includeRight.toRingHom y
rw [Polynomial.eval_map, hp', _root_.map_zero]
#align is_integral.tmul IsIntegral.tmul
section
variable (p : R[X]) (x : S)
/-- The monic polynomial whose roots are `p.leading_coeff * x` for roots `x` of `p`. -/
noncomputable def normalizeScaleRoots (p : R[X]) : R[X] :=
∑ i in p.support,
monomial i (if i = p.natDegree then 1 else p.coeff i * p.leadingCoeff ^ (p.natDegree - 1 - i))
#align normalize_scale_roots normalizeScaleRoots
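-- Added note: for `p = ∑ pᵢ * Xⁱ` with `n = p.natDegree` and
-- `c = p.leadingCoeff`, this is `Xⁿ + ∑_{i < n} pᵢ * c ^ (n - 1 - i) * Xⁱ`;
-- evaluating it at `f c * x` gives `f c ^ (n - 1) * p.eval₂ f x`, see
-- `normalizeScaleRoots_eval₂_leadingCoeff_mul` below.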
theorem normalizeScaleRoots_coeff_mul_leadingCoeff_pow (i : ℕ) (hp : 1 ≤ natDegree p) :
(normalizeScaleRoots p).coeff i * p.leadingCoeff ^ i =
p.coeff i * p.leadingCoeff ^ (p.natDegree - 1) :=
by
simp only [normalizeScaleRoots, finset_sum_coeff, coeff_monomial, Finset.sum_ite_eq', one_mul,
MulZeroClass.zero_mul, mem_support_iff, ite_mul, Ne.def, ite_not]
split_ifs with h₁ h₂
· simp [h₁]
· rw [h₂, leading_coeff, ← pow_succ, tsub_add_cancel_of_le hp]
· rw [mul_assoc, ← pow_add, tsub_add_cancel_of_le]
apply Nat.le_pred_of_lt
rw [lt_iff_le_and_ne]
exact ⟨le_nat_degree_of_ne_zero h₁, h₂⟩
#align normalize_scale_roots_coeff_mul_leading_coeff_pow normalizeScaleRoots_coeff_mul_leadingCoeff_pow
theorem leadingCoeff_smul_normalizeScaleRoots (p : R[X]) :
p.leadingCoeff • normalizeScaleRoots p = scaleRoots p p.leadingCoeff :=
by
ext
simp only [coeff_scale_roots, normalizeScaleRoots, coeff_monomial, coeff_smul, Finset.smul_sum,
Ne.def, Finset.sum_ite_eq', finset_sum_coeff, smul_ite, smul_zero, mem_support_iff]
split_ifs with h₁ h₂
· simp [*]
· simp [*]
· rw [Algebra.id.smul_eq_mul, mul_comm, mul_assoc, ← pow_succ', tsub_right_comm,
tsub_add_cancel_of_le]
rw [Nat.succ_le_iff]
exact tsub_pos_of_lt (lt_of_le_of_ne (le_nat_degree_of_ne_zero h₁) h₂)
#align leading_coeff_smul_normalize_scale_roots leadingCoeff_smul_normalizeScaleRoots
theorem normalizeScaleRoots_support : (normalizeScaleRoots p).support ≤ p.support :=
by
intro x
contrapose
simp only [not_mem_support_iff, normalizeScaleRoots, finset_sum_coeff, coeff_monomial,
Finset.sum_ite_eq', mem_support_iff, Ne.def, Classical.not_not, ite_eq_right_iff]
intro h₁ h₂
exact (h₂ h₁).rec _
#align normalize_scale_roots_support normalizeScaleRoots_support
theorem normalizeScaleRoots_degree : (normalizeScaleRoots p).degree = p.degree :=
by
apply le_antisymm
· exact Finset.sup_mono (normalizeScaleRoots_support p)
· rw [← degree_scale_roots, ← leadingCoeff_smul_normalizeScaleRoots]
exact degree_smul_le _ _
#align normalize_scale_roots_degree normalizeScaleRoots_degree
theorem normalizeScaleRoots_eval₂_leadingCoeff_mul (h : 1 ≤ p.natDegree) (f : R →+* S) (x : S) :
(normalizeScaleRoots p).eval₂ f (f p.leadingCoeff * x) =
f p.leadingCoeff ^ (p.natDegree - 1) * p.eval₂ f x :=
by
rw [eval₂_eq_sum_range, eval₂_eq_sum_range, Finset.mul_sum]
apply Finset.sum_congr
· rw [nat_degree_eq_of_degree_eq (normalizeScaleRoots_degree p)]
intro n hn
rw [mul_pow, ← mul_assoc, ← f.map_pow, ← f.map_mul,
normalizeScaleRoots_coeff_mul_leadingCoeff_pow _ _ h, f.map_mul, f.map_pow]
ring
#align normalize_scale_roots_eval₂_leading_coeff_mul normalizeScaleRoots_eval₂_leadingCoeff_mul
theorem normalizeScaleRoots_monic (h : p ≠ 0) : (normalizeScaleRoots p).Monic :=
by
delta monic leading_coeff
rw [nat_degree_eq_of_degree_eq (normalizeScaleRoots_degree p)]
suffices p = 0 → (0 : R) = 1 by simpa [normalizeScaleRoots, coeff_monomial]
exact fun h' => (h h').rec _
#align normalize_scale_roots_monic normalizeScaleRoots_monic
/-- Given `p : R[X]` and `x : S` such that `p.eval₂ f x = 0`,
the element `f p.leading_coeff * x` is integral along `f`. -/
theorem RingHom.isIntegralElem_leadingCoeff_mul (h : p.eval₂ f x = 0) :
f.IsIntegralElem (f p.leadingCoeff * x) :=
by
by_cases h' : 1 ≤ p.nat_degree
· use normalizeScaleRoots p
have : p ≠ 0 := fun h'' => by
rw [h'', nat_degree_zero] at h'
exact Nat.not_succ_le_zero 0 h'
use normalizeScaleRoots_monic p this
rw [normalizeScaleRoots_eval₂_leadingCoeff_mul p h' f x, h, MulZeroClass.mul_zero]
· by_cases hp : p.map f = 0
· apply_fun fun q => coeff q p.nat_degree at hp
rw [coeff_map, coeff_zero, coeff_nat_degree] at hp
rw [hp, MulZeroClass.zero_mul]
exact f.is_integral_zero
· rw [Nat.one_le_iff_ne_zero, Classical.not_not] at h'
rw [eq_C_of_nat_degree_eq_zero h', eval₂_C] at h
suffices p.map f = 0 by exact (hp this).rec _
rw [eq_C_of_nat_degree_eq_zero h', map_C, h, C_eq_zero]
#align ring_hom.is_integral_elem_leading_coeff_mul RingHom.isIntegralElem_leadingCoeff_mul
/-- If `x : S` is a root of `p : R[X]`,
then `p.leading_coeff • x : S` is integral over `R`. -/
theorem isIntegral_leadingCoeff_smul [Algebra R S] (h : aeval x p = 0) :
IsIntegral R (p.leadingCoeff • x) :=
by
rw [aeval_def] at h
rw [Algebra.smul_def]
exact (algebraMap R S).isIntegralElem_leadingCoeff_mul p x h
#align is_integral_leading_coeff_smul isIntegral_leadingCoeff_smul
end
end
section IsIntegralClosure
/- ./././Mathport/Syntax/Translate/Command.lean:388:30: infer kinds are unsupported in Lean 4: #[`algebraMap_injective] [] -/
/-- `is_integral_closure A R B` is the characteristic predicate stating `A` is
the integral closure of `R` in `B`,
i.e. that an element of `B` is integral over `R` iff it is an element of (the image of) `A`.
-/
class IsIntegralClosure (A R B : Type _) [CommRing R] [CommSemiring A] [CommRing B] [Algebra R B]
[Algebra A B] : Prop where
algebraMap_injective : Function.Injective (algebraMap A B)
isIntegral_iff : ∀ {x : B}, IsIntegral R x ↔ ∃ y, algebraMap A B y = x
#align is_integral_closure IsIntegralClosure
instance integralClosure.isIntegralClosure (R A : Type _) [CommRing R] [CommRing A] [Algebra R A] :
IsIntegralClosure (integralClosure R A) R A :=
⟨Subtype.coe_injective, fun x =>
⟨fun h => ⟨⟨x, h⟩, rfl⟩, by
rintro ⟨⟨_, h⟩, rfl⟩
exact h⟩⟩
#align integral_closure.is_integral_closure integralClosure.isIntegralClosure
namespace IsIntegralClosure
variable {R A B : Type _} [CommRing R] [CommRing A] [CommRing B]
variable [Algebra R B] [Algebra A B] [IsIntegralClosure A R B]
variable (R) {A} (B)
protected theorem isIntegral [Algebra R A] [IsScalarTower R A B] (x : A) : IsIntegral R x :=
(isIntegral_algebraMap_iff (algebraMap_injective A R B)).mp <|
show IsIntegral R (algebraMap A B x) from isIntegral_iff.mpr ⟨x, rfl⟩
#align is_integral_closure.is_integral IsIntegralClosure.isIntegral
theorem isIntegral_algebra [Algebra R A] [IsScalarTower R A B] : Algebra.IsIntegral R A := fun x =>
IsIntegralClosure.isIntegral R B x
#align is_integral_closure.is_integral_algebra IsIntegralClosure.isIntegral_algebra
theorem noZeroSMulDivisors [Algebra R A] [IsScalarTower R A B] [NoZeroSMulDivisors R B] :
NoZeroSMulDivisors R A :=
by
refine'
Function.Injective.noZeroSMulDivisors _ (IsIntegralClosure.algebraMap_injective A R B)
(map_zero _) fun _ _ => _
simp only [Algebra.algebraMap_eq_smul_one, IsScalarTower.smul_assoc]
#align is_integral_closure.no_zero_smul_divisors IsIntegralClosure.noZeroSMulDivisors
variable {R} (A) {B}
/-- If `x : B` is integral over `R`, then it is an element of the integral closure of `R` in `B`. -/
noncomputable def mk' (x : B) (hx : IsIntegral R x) : A :=
Classical.choose (isIntegral_iff.mp hx)
#align is_integral_closure.mk' IsIntegralClosure.mk'
@[simp]
theorem algebraMap_mk' (x : B) (hx : IsIntegral R x) : algebraMap A B (mk' A x hx) = x :=
Classical.choose_spec (isIntegral_iff.mp hx)
#align is_integral_closure.algebra_map_mk' IsIntegralClosure.algebraMap_mk'
@[simp]
theorem mk'_one (h : IsIntegral R (1 : B) := isIntegral_one) : mk' A 1 h = 1 :=
algebraMap_injective A R B <| by rw [algebra_map_mk', RingHom.map_one]
#align is_integral_closure.mk'_one IsIntegralClosure.mk'_one
@[simp]
theorem mk'_zero (h : IsIntegral R (0 : B) := isIntegral_zero) : mk' A 0 h = 0 :=
algebraMap_injective A R B <| by rw [algebra_map_mk', RingHom.map_zero]
#align is_integral_closure.mk'_zero IsIntegralClosure.mk'_zero
@[simp]
theorem mk'_add (x y : B) (hx : IsIntegral R x) (hy : IsIntegral R y) :
mk' A (x + y) (isIntegral_add hx hy) = mk' A x hx + mk' A y hy :=
algebraMap_injective A R B <| by simp only [algebra_map_mk', RingHom.map_add]
#align is_integral_closure.mk'_add IsIntegralClosure.mk'_add
@[simp]
theorem mk'_mul (x y : B) (hx : IsIntegral R x) (hy : IsIntegral R y) :
mk' A (x * y) (isIntegral_mul hx hy) = mk' A x hx * mk' A y hy :=
algebraMap_injective A R B <| by simp only [algebra_map_mk', RingHom.map_mul]
#align is_integral_closure.mk'_mul IsIntegralClosure.mk'_mul
@[simp]
theorem mk'_algebraMap [Algebra R A] [IsScalarTower R A B] (x : R)
(h : IsIntegral R (algebraMap R B x) := isIntegral_algebraMap) :
IsIntegralClosure.mk' A (algebraMap R B x) h = algebraMap R A x :=
algebraMap_injective A R B <| by rw [algebra_map_mk', ← IsScalarTower.algebraMap_apply]
#align is_integral_closure.mk'_algebra_map IsIntegralClosure.mk'_algebraMap
section lift
variable {R} (A B) {S : Type _} [CommRing S] [Algebra R S] [Algebra S B] [IsScalarTower R S B]
variable [Algebra R A] [IsScalarTower R A B] (h : Algebra.IsIntegral R S)
/-- If `B / S / R` is a tower of ring extensions where `S` is integral over `R`,
then `S` maps (uniquely) into an integral closure `B / A / R`. -/
noncomputable def lift : S →ₐ[R] A
where
toFun x := mk' A (algebraMap S B x) (IsIntegral.algebraMap (h x))
map_one' := by simp only [RingHom.map_one, mk'_one]
map_zero' := by simp only [RingHom.map_zero, mk'_zero]
map_add' x y := by simp_rw [← mk'_add, RingHom.map_add]
map_mul' x y := by simp_rw [← mk'_mul, RingHom.map_mul]
commutes' x := by simp_rw [← IsScalarTower.algebraMap_apply, mk'_algebra_map]
#align is_integral_closure.lift IsIntegralClosure.lift
@[simp]
theorem algebraMap_lift (x : S) : algebraMap A B (lift A B h x) = algebraMap S B x :=
algebraMap_mk' _ _ _
#align is_integral_closure.algebra_map_lift IsIntegralClosure.algebraMap_lift
end lift
section Equiv
variable (R A B) (A' : Type _) [CommRing A'] [Algebra A' B] [IsIntegralClosure A' R B]
variable [Algebra R A] [Algebra R A'] [IsScalarTower R A B] [IsScalarTower R A' B]
/-- Integral closures are all isomorphic to each other. -/
noncomputable def equiv : A ≃ₐ[R] A' :=
AlgEquiv.ofAlgHom (lift _ B (isIntegral_algebra R B)) (lift _ B (isIntegral_algebra R B))
(by
ext x
apply algebra_map_injective A' R B
simp)
(by
ext x
apply algebra_map_injective A R B
simp)
#align is_integral_closure.equiv IsIntegralClosure.equiv
@[simp]
theorem algebraMap_equiv (x : A) : algebraMap A' B (equiv R A B A' x) = algebraMap A B x :=
algebraMap_lift _ _ _ _
#align is_integral_closure.algebra_map_equiv IsIntegralClosure.algebraMap_equiv
end Equiv
end IsIntegralClosure
end IsIntegralClosure
section Algebra
open Algebra
variable {R A B S T : Type _}
variable [CommRing R] [CommRing A] [CommRing B] [CommRing S] [CommRing T]
variable [Algebra A B] [Algebra R B] (f : R →+* S) (g : S →+* T)
theorem isIntegral_trans_aux (x : B) {p : A[X]} (pmonic : Monic p) (hp : aeval x p = 0) :
IsIntegral (adjoin R (↑(p.map <| algebraMap A B).frange : Set B)) x :=
by
generalize hS : (↑(p.map <| algebraMap A B).frange : Set B) = S
have coeffs_mem : ∀ i, (p.map <| algebraMap A B).coeff i ∈ adjoin R S :=
by
intro i
by_cases hi : (p.map <| algebraMap A B).coeff i = 0
· rw [hi]
exact Subalgebra.zero_mem _
rw [← hS]
exact subset_adjoin (coeff_mem_frange _ _ hi)
obtain ⟨q, hq⟩ :
∃ q : (adjoin R S)[X], q.map (algebraMap (adjoin R S) B) = (p.map <| algebraMap A B) :=
by
rw [← Set.mem_range]
exact (Polynomial.mem_map_range _).2 fun i => ⟨⟨_, coeffs_mem i⟩, rfl⟩
use q
constructor
· suffices h : (q.map (algebraMap (adjoin R S) B)).Monic
· refine' monic_of_injective _ h
exact Subtype.val_injective
· rw [hq]
exact pmonic.map _
· convert hp using 1
replace hq := congr_arg (eval x) hq
convert hq using 1 <;> symm <;> apply eval_map
#align is_integral_trans_aux isIntegral_trans_aux
variable [Algebra R A] [IsScalarTower R A B]
/-- If `A` is an `R`-algebra all of whose elements are integral over `R`,
and `x` is an element of an `A`-algebra that is integral over `A`, then `x` is integral over `R`. -/
theorem isIntegral_trans (A_int : Algebra.IsIntegral R A) (x : B) (hx : IsIntegral A x) :
IsIntegral R x := by
rcases hx with ⟨p, pmonic, hp⟩
let S : Set B := ↑(p.map <| algebraMap A B).frange
refine' isIntegral_of_mem_of_fg (adjoin R (S ∪ {x})) _ _ (subset_adjoin <| Or.inr rfl)
refine' fg_trans (fg_adjoin_of_finite (Finset.finite_toSet _) fun x hx => _) _
· rw [Finset.mem_coe, frange, Finset.mem_image] at hx
rcases hx with ⟨i, _, rfl⟩
rw [coeff_map]
exact map_isIntegral (IsScalarTower.toAlgHom R A B) (A_int _)
· apply fg_adjoin_singleton_of_integral
exact isIntegral_trans_aux _ pmonic hp
#align is_integral_trans isIntegral_trans
/-- If `A` is an `R`-algebra all of whose elements are integral over `R`,
and `B` is an `A`-algebra all of whose elements are integral over `A`,
then all elements of `B` are integral over `R`. -/
theorem Algebra.isIntegral_trans (hA : Algebra.IsIntegral R A) (hB : Algebra.IsIntegral A B) :
Algebra.IsIntegral R B := fun x => isIntegral_trans hA x (hB x)
#align algebra.is_integral_trans Algebra.isIntegral_trans
theorem RingHom.isIntegral_trans (hf : f.IsIntegral) (hg : g.IsIntegral) : (g.comp f).IsIntegral :=
@Algebra.isIntegral_trans R S T _ _ _ g.toAlgebra (g.comp f).toAlgebra f.toAlgebra
(@IsScalarTower.of_algebraMap_eq R S T _ _ _ f.toAlgebra g.toAlgebra (g.comp f).toAlgebra
(RingHom.comp_apply g f))
hf hg
#align ring_hom.is_integral_trans RingHom.isIntegral_trans
theorem RingHom.isIntegral_of_surjective (hf : Function.Surjective f) : f.IsIntegral := fun x =>
(hf x).recOn fun y hy => (hy ▸ f.is_integral_map : f.IsIntegralElem x)
#align ring_hom.is_integral_of_surjective RingHom.isIntegral_of_surjective
theorem isIntegral_of_surjective (h : Function.Surjective (algebraMap R A)) :
Algebra.IsIntegral R A :=
(algebraMap R A).isIntegral_of_surjective h
#align is_integral_of_surjective isIntegral_of_surjective
/-- If `R → A → B` is an algebra tower with `A → B` injective
and the entire tower is an integral extension, then so is `R → A`. -/
theorem isIntegral_tower_bot_of_isIntegral (H : Function.Injective (algebraMap A B)) {x : A}
(h : IsIntegral R (algebraMap A B x)) : IsIntegral R x :=
by
rcases h with ⟨p, ⟨hp, hp'⟩⟩
refine' ⟨p, ⟨hp, _⟩⟩
rw [IsScalarTower.algebraMap_eq R A B, ← eval₂_map, eval₂_hom, ←
RingHom.map_zero (algebraMap A B)] at hp'
rw [eval₂_eq_eval_map]
exact H hp'
#align is_integral_tower_bot_of_is_integral isIntegral_tower_bot_of_isIntegral
theorem RingHom.isIntegral_tower_bot_of_isIntegral (hg : Function.Injective g)
(hfg : (g.comp f).IsIntegral) : f.IsIntegral := fun x =>
@isIntegral_tower_bot_of_isIntegral R S T _ _ _ g.toAlgebra (g.comp f).toAlgebra f.toAlgebra
(@IsScalarTower.of_algebraMap_eq R S T _ _ _ f.toAlgebra g.toAlgebra (g.comp f).toAlgebra
(RingHom.comp_apply g f))
hg x (hfg (g x))
#align ring_hom.is_integral_tower_bot_of_is_integral RingHom.isIntegral_tower_bot_of_isIntegral
theorem isIntegral_tower_bot_of_isIntegral_field {R A B : Type _} [CommRing R] [Field A]
[CommRing B] [Nontrivial B] [Algebra R A] [Algebra A B] [Algebra R B] [IsScalarTower R A B]
{x : A} (h : IsIntegral R (algebraMap A B x)) : IsIntegral R x :=
isIntegral_tower_bot_of_isIntegral (algebraMap A B).Injective h
#align is_integral_tower_bot_of_is_integral_field isIntegral_tower_bot_of_isIntegral_field
theorem RingHom.isIntegralElem_of_isIntegralElem_comp {x : T} (h : (g.comp f).IsIntegralElem x) :
g.IsIntegralElem x :=
let ⟨p, ⟨hp, hp'⟩⟩ := h
⟨p.map f, hp.map f, by rwa [← eval₂_map] at hp'⟩
#align ring_hom.is_integral_elem_of_is_integral_elem_comp RingHom.isIntegralElem_of_isIntegralElem_comp
theorem RingHom.isIntegral_tower_top_of_isIntegral (h : (g.comp f).IsIntegral) : g.IsIntegral :=
fun x => RingHom.isIntegralElem_of_isIntegralElem_comp f g (h x)
#align ring_hom.is_integral_tower_top_of_is_integral RingHom.isIntegral_tower_top_of_isIntegral
/-- If `R → A → B` is an algebra tower
and the entire tower is an integral extension, then so is `A → B`. -/
theorem isIntegral_tower_top_of_isIntegral {x : B} (h : IsIntegral R x) : IsIntegral A x :=
by
rcases h with ⟨p, ⟨hp, hp'⟩⟩
refine' ⟨p.map (algebraMap R A), ⟨hp.map (algebraMap R A), _⟩⟩
rw [IsScalarTower.algebraMap_eq R A B, ← eval₂_map] at hp'
exact hp'
#align is_integral_tower_top_of_is_integral isIntegral_tower_top_of_isIntegral
theorem RingHom.isIntegral_quotient_of_isIntegral {I : Ideal S} (hf : f.IsIntegral) :
(Ideal.quotientMap I f le_rfl).IsIntegral :=
by
rintro ⟨x⟩
obtain ⟨p, ⟨p_monic, hpx⟩⟩ := hf x
refine' ⟨p.map (Ideal.Quotient.mk _), ⟨p_monic.map _, _⟩⟩
simpa only [hom_eval₂, eval₂_map] using congr_arg (Ideal.Quotient.mk I) hpx
#align ring_hom.is_integral_quotient_of_is_integral RingHom.isIntegral_quotient_of_isIntegral
theorem isIntegral_quotient_of_isIntegral {I : Ideal A} (hRA : Algebra.IsIntegral R A) :
Algebra.IsIntegral (R ⧸ I.comap (algebraMap R A)) (A ⧸ I) :=
(algebraMap R A).isIntegral_quotient_of_isIntegral hRA
#align is_integral_quotient_of_is_integral isIntegral_quotient_of_isIntegral
theorem isIntegral_quotientMap_iff {I : Ideal S} :
(Ideal.quotientMap I f le_rfl).IsIntegral ↔
((Ideal.Quotient.mk I).comp f : R →+* S ⧸ I).IsIntegral :=
by
let g := Ideal.Quotient.mk (I.comap f)
have := Ideal.quotientMap_comp_mk le_rfl
refine' ⟨fun h => _, fun h => RingHom.isIntegral_tower_top_of_isIntegral g _ (this ▸ h)⟩
refine' this ▸ RingHom.isIntegral_trans g (Ideal.quotientMap I f le_rfl) _ h
exact RingHom.isIntegral_of_surjective g Ideal.Quotient.mk_surjective
#align is_integral_quotient_map_iff isIntegral_quotientMap_iff
/-- If the integral extension `R → S` is injective, and `S` is a field, then `R` is also a field. -/
theorem isField_of_isIntegral_of_isField {R S : Type _} [CommRing R] [Nontrivial R] [CommRing S]
[IsDomain S] [Algebra R S] (H : Algebra.IsIntegral R S)
(hRS : Function.Injective (algebraMap R S)) (hS : IsField S) : IsField R :=
by
refine' ⟨⟨0, 1, zero_ne_one⟩, mul_comm, fun a ha => _⟩
-- Let `a_inv` be the inverse of `algebra_map R S a`,
-- then we need to show that `a_inv` is of the form `algebra_map R S b`.
obtain ⟨a_inv, ha_inv⟩ := hS.mul_inv_cancel fun h => ha (hRS (trans h (RingHom.map_zero _).symm))
-- Let `p : R[X]` be monic with root `a_inv`,
-- and `q` be `p` with coefficients reversed (so `q(a) = q'(a) * a + 1`).
-- We claim that `q(a) = 0`, so `-q'(a)` is the inverse of `a`.
obtain ⟨p, p_monic, hp⟩ := H a_inv
use -∑ i : ℕ in Finset.range p.nat_degree, p.coeff i * a ^ (p.nat_degree - i - 1)
-- `q(a) = 0`, because multiplying everything with `a_inv^n` gives `p(a_inv) = 0`.
-- TODO: this could be a lemma for `polynomial.reverse`.
have hq : (∑ i : ℕ in Finset.range (p.nat_degree + 1), p.coeff i * a ^ (p.nat_degree - i)) = 0 :=
by
apply (injective_iff_map_eq_zero (algebraMap R S)).mp hRS
have a_inv_ne_zero : a_inv ≠ 0 := right_ne_zero_of_mul (mt ha_inv.symm.trans one_ne_zero)
refine' (mul_eq_zero.mp _).resolve_right (pow_ne_zero p.nat_degree a_inv_ne_zero)
rw [eval₂_eq_sum_range] at hp
rw [RingHom.map_sum, Finset.sum_mul]
refine' (Finset.sum_congr rfl fun i hi => _).trans hp
rw [RingHom.map_mul, mul_assoc]
congr
have : a_inv ^ p.nat_degree = a_inv ^ (p.nat_degree - i) * a_inv ^ i := by
rw [← pow_add a_inv, tsub_add_cancel_of_le (Nat.le_of_lt_succ (finset.mem_range.mp hi))]
rw [RingHom.map_pow, this, ← mul_assoc, ← mul_pow, ha_inv, one_pow, one_mul]
-- Since `q(a) = 0` and `q(a) = q'(a) * a + 1`, we have `a * -q'(a) = 1`.
-- TODO: we could use a lemma for `polynomial.div_X` here.
rw [Finset.sum_range_succ_comm, p_monic.coeff_nat_degree, one_mul, tsub_self, pow_zero,
add_eq_zero_iff_eq_neg, eq_comm] at hq
rw [mul_comm, neg_mul, Finset.sum_mul]
convert hq using 2
refine' Finset.sum_congr rfl fun i hi => _
have : 1 ≤ p.nat_degree - i := le_tsub_of_add_le_left (finset.mem_range.mp hi)
rw [mul_assoc, ← pow_succ', tsub_add_cancel_of_le this]
#align is_field_of_is_integral_of_is_field isField_of_isIntegral_of_isField
theorem isField_of_isIntegral_of_is_field' {R S : Type _} [CommRing R] [CommRing S] [IsDomain S]
[Algebra R S] (H : Algebra.IsIntegral R S) (hR : IsField R) : IsField S :=
by
letI := hR.to_field
refine' ⟨⟨0, 1, zero_ne_one⟩, mul_comm, fun x hx => _⟩
let A := Algebra.adjoin R ({x} : Set S)
haveI : IsNoetherian R A :=
isNoetherian_of_fg_of_noetherian A.to_submodule (fg_adjoin_singleton_of_integral x (H x))
haveI : Module.Finite R A := Module.IsNoetherian.finite R A
obtain ⟨y, hy⟩ :=
LinearMap.surjective_of_injective
(@LinearMap.mulLeft_injective R A _ _ _ _ ⟨x, subset_adjoin (Set.mem_singleton x)⟩ fun h =>
hx (subtype.ext_iff.mp h))
1
exact ⟨y, subtype.ext_iff.mp hy⟩
#align is_field_of_is_integral_of_is_field' isField_of_isIntegral_of_is_field'
theorem Algebra.IsIntegral.isField_iff_isField {R S : Type _} [CommRing R] [Nontrivial R]
[CommRing S] [IsDomain S] [Algebra R S] (H : Algebra.IsIntegral R S)
(hRS : Function.Injective (algebraMap R S)) : IsField R ↔ IsField S :=
⟨isField_of_isIntegral_of_is_field' H, isField_of_isIntegral_of_isField H hRS⟩
#align algebra.is_integral.is_field_iff_is_field Algebra.IsIntegral.isField_iff_isField
end Algebra
theorem integralClosure_idem {R : Type _} {A : Type _} [CommRing R] [CommRing A] [Algebra R A] :
integralClosure (integralClosure R A : Set A) A = ⊥ :=
eq_bot_iff.2 fun x hx =>
Algebra.mem_bot.2
⟨⟨x,
@isIntegral_trans _ _ _ _ _ _ _ _ (integralClosure R A).Algebra _
integralClosure.isIntegral x hx⟩,
rfl⟩
#align integral_closure_idem integralClosure_idem
section IsDomain
variable {R S : Type _} [CommRing R] [CommRing S] [IsDomain S] [Algebra R S]
instance : IsDomain (integralClosure R S) :=
inferInstance
theorem roots_mem_integralClosure {f : R[X]} (hf : f.Monic) {a : S}
(ha : a ∈ (f.map <| algebraMap R S).roots) : a ∈ integralClosure R S :=
⟨f, hf, (eval₂_eq_eval_map _).trans <| (mem_roots <| (hf.map _).NeZero).1 ha⟩
#align roots_mem_integral_closure roots_mem_integralClosure
end IsDomain
|
import algebra
import data.real.basic
-- This file contains lemmas that are used in `linear_combination.lean`
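-- Added usage note: `linear_combination` presumably scales a hypothesis
-- `h : a = b` with `left_mul_both_sides`, sums scaled hypotheses with
-- `sum_two_equations`, and reduces the goal `x = y` to `x - y = 0` via
-- `all_on_left_equiv` / `left_minus_right`.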
lemma left_mul_both_sides {α} [hmul : has_mul α] (x y coeff : α) (h : x = y) :
coeff * x = coeff * y :=
by apply congr_arg (has_mul.mul coeff) h
lemma sum_two_equations {α} [hadd : has_add α] (x1 y1 x2 y2 : α) (h1 : x1 = y1) (h2: x2 = y2) :
x1 + x2 = y1 + y2 :=
by convert congr (congr_arg has_add.add h1) h2
lemma left_minus_right {α} [ha : add_group α] (x y : α) (h : x = y) :
x - y = 0 :=
by apply sub_eq_zero.mpr h
lemma all_on_left_equiv {α} [ha : add_group α] (x y : α) :
(x = y) = (x - y = 0) :=
begin
simp,
apply iff.intro,
{ apply left_minus_right },
{ intro h0,
exact sub_eq_zero.mp h0 }
end
|
theory Sorting_Quicksort_Partition
imports Sorting_Quicksort_Scheme
begin
(* TODO: Move. Found useful for ATPs *)
lemma strict_itrans: "a < c \<Longrightarrow> a < b \<or> b < c" for a b c :: "_::linorder"
by auto
context weak_ordering begin
subsection \<open>Find Median\<close>
definition "move_median_to_first ri ai bi ci (xs::'a list) \<equiv> doN {
ASSERT (ai\<noteq>bi \<and> ai\<noteq>ci \<and> bi\<noteq>ci \<and> ri\<noteq>ai \<and> ri\<noteq>bi \<and> ri\<noteq>ci);
a \<leftarrow> mop_list_get xs ai;
b \<leftarrow> mop_list_get xs bi;
c \<leftarrow> mop_list_get xs ci;
if a\<^bold><b then (
if b\<^bold><c then
mop_list_swap xs ri bi
else if a\<^bold><c then
mop_list_swap xs ri ci
else
mop_list_swap xs ri ai
)
else if a\<^bold><c then
mop_list_swap xs ri ai
else if b\<^bold><c then
mop_list_swap xs ri ci
else
mop_list_swap xs ri bi
}"
lemma move_median_to_first_alt: "move_median_to_first ri ai bi ci (xs::'a list) = doN {
ASSERT (ai\<noteq>bi \<and> ai\<noteq>ci \<and> bi\<noteq>ci \<and> ri\<noteq>ai \<and> ri\<noteq>bi \<and> ri\<noteq>ci);
if\<^sub>N mop_cmp_idxs xs ai bi then (
if\<^sub>N mop_cmp_idxs xs bi ci then
mop_list_swap xs ri bi
else if\<^sub>N mop_cmp_idxs xs ai ci then
mop_list_swap xs ri ci
else
mop_list_swap xs ri ai
)
else if\<^sub>N mop_cmp_idxs xs ai ci then
mop_list_swap xs ri ai
else if\<^sub>N mop_cmp_idxs xs bi ci then
mop_list_swap xs ri ci
else
mop_list_swap xs ri bi
}"
unfolding move_median_to_first_def
by (auto simp: pw_eq_iff refine_pw_simps split!: if_splits)
lemma move_median_to_first_correct:
"\<lbrakk> ri<ai; ai<bi; bi<ci; ci<length xs \<rbrakk> \<Longrightarrow>
move_median_to_first ri ai bi ci xs
\<le> SPEC (\<lambda>xs'. \<exists>i\<in>{ai,bi,ci}.
xs' = swap xs ri i
\<and> (\<exists>j\<in>{ai,bi,ci}-{i}. xs!i\<^bold>\<le>xs!j)
\<and> (\<exists>j\<in>{ai,bi,ci}-{i}. xs!i\<^bold>\<ge>xs!j)
)"
unfolding move_median_to_first_def
apply refine_vcg
supply aux = bexI[where P="\<lambda>x. _=_ x \<and> _ x", OF conjI[OF refl]]
apply ((rule aux)?; insert connex,auto simp: unfold_lt_to_le)+
done
lemma move_median_to_first_correct':
"\<lbrakk> ri<ai; ai<bi; bi<ci; ci<length xs \<rbrakk> \<Longrightarrow>
move_median_to_first ri ai bi ci xs
\<le> SPEC (\<lambda>xs'. slice_eq_mset ri (ci+1) xs' xs
\<and> (\<exists>i\<in>{ai,bi,ci}. xs'!i\<^bold>\<le>xs'!ri)
\<and> (\<exists>i\<in>{ai,bi,ci}. xs'!i\<^bold>\<ge>xs'!ri)
)"
apply (rule order_trans[OF move_median_to_first_correct])
by auto
(* TODO: Clean up the proof below to use a more concise aux lemma! *)
lemma move_median_to_first_correct'':
"\<lbrakk> ri<ai; ai<bi; bi<ci; ci<length xs \<rbrakk> \<Longrightarrow>
move_median_to_first ri ai bi ci xs
\<le> SPEC (\<lambda>xs'. slice_eq_mset ri (ci+1) xs' xs
\<and> (\<exists>i\<in>{ai..ci}. xs'!i\<^bold>\<le>xs'!ri)
\<and> (\<exists>i\<in>{ai..ci}. xs'!i\<^bold>\<ge>xs'!ri)
)"
apply (rule order_trans[OF move_median_to_first_correct'])
by auto
end
context sort_impl_context begin
sepref_register move_median_to_first
sepref_def move_median_to_first_impl [llvm_inline] is "uncurry4 (PR_CONST move_median_to_first)" :: "size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>d \<rightarrow>\<^sub>a arr_assn"
unfolding move_median_to_first_alt PR_CONST_def
by sepref
end
context weak_ordering begin
subsection \<open>Hoare Partitioning Scheme\<close>
definition "ungrd_qsp_next_l_spec xs pi li hi \<equiv>
doN {
ASSERT (pi<li \<and> pi<hi \<and> hi\<le>length xs);
ASSERT (\<exists>i\<in>{li..<hi}. xs!i \<^bold>\<ge> xs!pi);
SPEC (\<lambda>li'. li\<le>li' \<and> li'<hi \<and> (\<forall>i\<in>{li..<li'}. xs!i\<^bold><xs!pi) \<and> xs!li'\<^bold>\<ge>xs!pi)
}"
definition "ungrd_qsp_next_h_spec xs pi hi \<equiv>
doN {
ASSERT (pi<length xs \<and> hi\<le>length xs \<and> (\<exists>i\<in>{pi<..<hi}. (xs!i) \<^bold>\<le> xs!pi));
SPEC (\<lambda>hi'. hi'<hi \<and> (\<forall>i\<in>{hi'<..<hi}. xs!i\<^bold>>xs!pi) \<and> xs!hi'\<^bold>\<le>xs!pi)
}"
definition qsp_next_l :: "'a list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat nres" where
"qsp_next_l xs pi li hi \<equiv> doN {
monadic_WHILEIT (\<lambda>li'. (\<exists>i\<in>{li'..<hi}. xs!i\<^bold>\<ge>xs!pi) \<and> li\<le>li' \<and> (\<forall>i\<in>{li..<li'}. xs!i\<^bold><xs!pi))
(\<lambda>li. doN {ASSERT (li\<noteq>pi); mop_cmp_idxs xs li pi}) (\<lambda>li. RETURN (li + 1)) li
}"
lemma qsp_next_l_refine: "(qsp_next_l,PR_CONST ungrd_qsp_next_l_spec)\<in>Id\<rightarrow>Id\<rightarrow>Id\<rightarrow>Id\<rightarrow>\<langle>Id\<rangle>nres_rel"
unfolding qsp_next_l_def ungrd_qsp_next_l_spec_def PR_CONST_def
apply (intro fun_relI; clarsimp)
subgoal for xs p li hi
apply (refine_vcg monadic_WHILEIT_rule[where R="measure (\<lambda>li. hi - li)"])
apply simp_all
subgoal by auto
apply safe
subgoal by (metis atLeastLessThan_iff leI le_less_Suc_eq wo_leD)
subgoal by (metis atLeastLessThan_iff leI le_less_Suc_eq)
subgoal using less_eq_Suc_le by force
subgoal by auto
subgoal by (auto simp: unfold_le_to_lt)
done
done
definition qsp_next_h :: "'a list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat nres" where
"qsp_next_h xs pi hi \<equiv> doN {
ASSERT (hi>0);
let hi = hi - 1;
ASSERT (hi<length xs);
monadic_WHILEIT (\<lambda>hi'. hi'\<le>hi \<and> (\<exists>i\<le>hi'. xs!i\<^bold>\<le>xs!pi) \<and> (\<forall>i\<in>{hi'<..hi}. xs!i\<^bold>>xs!pi))
(\<lambda>hi. doN {ASSERT(pi\<noteq>hi); mop_cmp_idxs xs pi hi}) (\<lambda>hi. doN { ASSERT(hi>0); RETURN (hi - 1)}) hi
}"
lemma qsp_next_h_refine: "(qsp_next_h,PR_CONST (ungrd_qsp_next_h_spec)) \<in> Id \<rightarrow> Id \<rightarrow> Id \<rightarrow> \<langle>Id\<rangle>nres_rel"
unfolding qsp_next_h_def ungrd_qsp_next_h_spec_def PR_CONST_def
apply (refine_vcg monadic_WHILEIT_rule[where R="measure id"] split_ifI)
apply (all \<open>(determ \<open>elim conjE exE\<close>)?\<close>)
apply simp_all
subgoal by force
subgoal by (meson greaterThanLessThan_iff nat_le_Suc_less_imp)
subgoal by (meson greaterThanAtMost_iff greaterThanLessThan_iff nat_le_Suc_less_imp wo_leD)
subgoal by (metis gr0I le_zero_eq unfold_lt_to_le)
subgoal by (metis One_nat_def le_step_down_nat wo_leD)
subgoal by (metis Suc_pred greaterThanAtMost_iff linorder_neqE_nat not_less_eq)
subgoal by (meson greaterThanAtMost_iff greaterThanLessThan_iff nat_le_Suc_less_imp)
subgoal using wo_leI by blast
done
definition "qs_partition li\<^sub>0 hi\<^sub>0 pi xs\<^sub>0 \<equiv> doN {
ASSERT (pi < li\<^sub>0 \<and> li\<^sub>0<hi\<^sub>0 \<and> hi\<^sub>0\<le>length xs\<^sub>0);
\<comment> \<open>Initialize\<close>
li \<leftarrow> ungrd_qsp_next_l_spec xs\<^sub>0 pi li\<^sub>0 hi\<^sub>0;
hi \<leftarrow> ungrd_qsp_next_h_spec xs\<^sub>0 pi hi\<^sub>0;
ASSERT (li\<^sub>0\<le>hi);
(xs,li,hi) \<leftarrow> WHILEIT
(\<lambda>(xs,li,hi).
li\<^sub>0\<le>li \<and> hi<hi\<^sub>0
\<and> li<hi\<^sub>0 \<and> hi\<ge>li\<^sub>0
\<and> slice_eq_mset li\<^sub>0 hi\<^sub>0 xs xs\<^sub>0
\<and> xs!pi = xs\<^sub>0!pi
\<and> (\<forall>i\<in>{li\<^sub>0..<li}. xs!i \<^bold>\<le> xs\<^sub>0!pi)
\<and> xs!li \<^bold>\<ge> xs\<^sub>0!pi
\<and> (\<forall>i\<in>{hi<..<hi\<^sub>0}. xs!i \<^bold>\<ge> xs\<^sub>0!pi)
\<and> xs!hi \<^bold>\<le> xs\<^sub>0!pi
)
(\<lambda>(xs,li,hi). li<hi)
(\<lambda>(xs,li,hi). doN {
ASSERT(li<hi \<and> li<length xs \<and> hi<length xs \<and> li\<noteq>hi);
xs \<leftarrow> mop_list_swap xs li hi;
let li = li + 1;
li \<leftarrow> ungrd_qsp_next_l_spec xs pi li hi\<^sub>0;
hi \<leftarrow> ungrd_qsp_next_h_spec xs pi hi;
RETURN (xs,li,hi)
})
(xs\<^sub>0,li,hi);
RETURN (xs,li)
}"
lemma qs_partition_correct:
"\<lbrakk> pi<li; li<hi; hi\<le>length xs\<^sub>0; \<exists>i\<in>{li..<hi}. xs\<^sub>0!pi\<^bold>\<le>xs\<^sub>0!i; \<exists>i\<in>{li..<hi}. xs\<^sub>0!i\<^bold>\<le>xs\<^sub>0!pi \<rbrakk> \<Longrightarrow> qs_partition li hi pi xs\<^sub>0
\<le> SPEC (\<lambda>(xs,i). slice_eq_mset li hi xs xs\<^sub>0 \<and> li\<le>i \<and> i<hi \<and> (\<forall>i\<in>{li..<i}. xs!i\<^bold>\<le>xs\<^sub>0!pi) \<and> (\<forall>i\<in>{i..<hi}. xs!i\<^bold>\<ge>xs\<^sub>0!pi) )"
unfolding qs_partition_def ungrd_qsp_next_l_spec_def ungrd_qsp_next_h_spec_def
apply (refine_vcg WHILEIT_rule[where R="measure (\<lambda>(_,_,hi). hi)"])
supply [[put_named_ss HOL_ss]]
apply (all \<open>(clarsimp;fail)?\<close>)
apply clarsimp_all
supply [[put_named_ss Main_ss]]
apply (simp_all add: slice_eq_mset_eq_length unfold_lt_to_le)
subgoal by fastforce
subgoal by auto
subgoal
by (metis le_trans order.strict_implies_order slice_eq_mset_eq_length swap_length)
subgoal apply (clarsimp simp: swap_def)
by (metis (full_types) More_List.swap_def atLeastSucLessThan_greaterThanLessThan greaterThanLessThan_iff less_le_trans swap_nth2)
subgoal
by (metis (mono_tags) greaterThanLessThan_iff leD le_less_trans less_le_trans nat_le_linear not_less_eq_eq slice_eq_mset_eq_length swap_indep swap_nth1)
subgoal
by (smt Suc_le_lessD dual_order.trans greaterThanLessThan_iff leI less_imp_le_nat swap_indep swap_length swap_nth1)
subgoal
by (smt Suc_le_lessD atLeastLessThan_iff le_less_trans less_imp_le_nat slice_eq_mset_eq_length slice_eq_mset_swap(2))
subgoal apply clarsimp
by (metis less_irrefl less_imp_not_less less_le_trans swap_indep)
subgoal apply clarsimp
by (smt Suc_leI atLeastLessThan_iff le_def less_le_trans less_Suc_eq swap_indep swap_length swap_nth1)
subgoal apply clarsimp
by (metis le_def less_trans swap_indep)
subgoal apply clarsimp
by (smt greaterThanLessThan_iff le_def less_le_trans le_neq_implies_less less_imp_le_nat slice_eq_mset_eq_length swap_indep swap_nth2)
subgoal
by (metis le_def less_trans swap_indep)
subgoal
by (metis greaterThanLessThan_iff strict_itrans le_neq_implies_less)
done
definition "partition_pivot xs\<^sub>0 l h \<equiv> doN {
ASSERT (l\<le>h \<and> h\<le>length xs\<^sub>0 \<and> h-l\<ge>4);
let m = l + (h-l) div 2;
xs\<^sub>1 \<leftarrow> move_median_to_first l (l+1) m (h-1) xs\<^sub>0;
ASSERT (l<length xs\<^sub>1 \<and> length xs\<^sub>1 = length xs\<^sub>0);
(xs,m) \<leftarrow> qs_partition (l+1) h l xs\<^sub>1;
\<comment> \<open>TODO: Use an auxiliary lemma instead of this assertion chain!\<close>
ASSERT (l<m \<and> m<h);
ASSERT ((\<forall>i\<in>{l+1..<m}. xs!i\<^bold>\<le>xs\<^sub>1!l) \<and> xs!l\<^bold>\<le>xs\<^sub>1!l);
ASSERT (\<forall>i\<in>{l..<m}. xs!i\<^bold>\<le>xs\<^sub>1!l);
ASSERT (\<forall>i\<in>{m..<h}. xs\<^sub>1!l\<^bold>\<le>xs!i);
RETURN (xs,m)
}"
lemma slice_LT_I_aux:
assumes B: "l<m" "m<h" "h\<le>length xs"
assumes BND: "\<forall>i\<in>{l..<m}. xs!i\<^bold>\<le>p" "\<forall>i\<in>{m..<h}. p\<^bold>\<le>xs!i"
shows "slice_LT (\<^bold>\<le>) (slice l m xs) (slice m h xs)"
unfolding slice_LT_def
using B apply (clarsimp simp: in_set_conv_nth slice_nth)
subgoal for i j
apply (rule trans[OF BND(1)[THEN bspec, of "l+i"] BND(2)[THEN bspec, of "m+j"]])
by auto
done
lemma partition_pivot_correct: "\<lbrakk>(xs,xs')\<in>Id; (l,l')\<in>Id; (h,h')\<in>Id\<rbrakk>
\<Longrightarrow> partition_pivot xs l h \<le> \<Down>Id (partition3_spec xs' l' h')"
unfolding partition_pivot_def partition3_spec_def
apply (refine_vcg move_median_to_first_correct'' qs_partition_correct)
apply (all \<open>auto 0 3 dest: slice_eq_mset_eq_length; fail\<close>) [17]
apply clarsimp
subgoal for xs\<^sub>1 xs\<^sub>2 i m j
apply (subst slice_eq_mset_nth_outside, assumption)
apply (auto dest: slice_eq_mset_eq_length)
done
subgoal apply clarsimp by (metis (full_types) Suc_leI atLeastLessThan_iff le_neq_implies_less)
subgoal by auto
subgoal
apply simp
by (metis Suc_le_eq le_add2 le_refl order.strict_trans plus_1_eq_Suc slice_eq_mset_subslice slice_eq_mset_trans)
apply (all \<open>auto; fail\<close>) [2]
subgoal by (auto dest: slice_eq_mset_eq_length intro!: slice_LT_I_aux)
done
end
context sort_impl_context begin
sepref_register ungrd_qsp_next_l_spec ungrd_qsp_next_h_spec
(* TODO: We can get rid of the restriction on length xs: the stopper element will always lie at an index < h, which is size_t representable! *)
sepref_definition qsp_next_l_impl [llvm_inline] is "uncurry3 (qsp_next_l)" :: "(arr_assn)\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a size_assn"
unfolding qsp_next_l_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
lemmas [sepref_fr_rules] = qsp_next_l_impl.refine[FCOMP qsp_next_l_refine]
sepref_definition qsp_next_h_impl [llvm_inline] is "uncurry2 (qsp_next_h)" :: "(arr_assn)\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a size_assn"
unfolding qsp_next_h_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
lemmas [sepref_fr_rules] = qsp_next_h_impl.refine[FCOMP qsp_next_h_refine]
sepref_register qs_partition
sepref_def qs_partition_impl (*[llvm_inline]*) is "uncurry3 (PR_CONST qs_partition)" :: "size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>d \<rightarrow>\<^sub>a arr_assn \<times>\<^sub>a size_assn"
unfolding qs_partition_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
supply [dest] = slice_eq_mset_eq_length
by sepref
(*sepref_register qs_partitionXXX
sepref_def qs_partitionXXX_impl (*[llvm_inline]*) is "uncurry3 (PR_CONST qs_partitionXXX)" :: "[\<lambda>(((l,h),p),xs). length xs < max_snat LENGTH(size_t)]\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>d \<rightarrow> arr_assn \<times>\<^sub>a size_assn"
unfolding qs_partitionXXX_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
supply [dest] = slice_eq_mset_eq_length
by sepref
*)
sepref_register partition_pivot
sepref_def partition_pivot_impl [llvm_inline] is "uncurry2 (PR_CONST partition_pivot)" :: "arr_assn\<^sup>d *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a arr_assn \<times>\<^sub>a size_assn"
unfolding partition_pivot_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
end
subsection \<open>Parameterization\<close>
context parameterized_weak_ordering begin
thm WO.qsp_next_l_def
definition qsp_next_l_param :: "'cparam \<Rightarrow> 'a list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat nres" where
"qsp_next_l_param cparam xs pi li hi \<equiv> doN {
monadic_WHILEIT (\<lambda>_. True)
(\<lambda>li. doN {ASSERT (li\<noteq>pi); pcmp_idxs2 cparam xs li pi})
(\<lambda>li. doN {ASSERT (li<hi); RETURN (li + 1)}) li
}"
lemma qsp_next_l_param_refine[refine]: "\<lbrakk>
(xs',xs)\<in>cdom_list_rel cparam; (p',p)\<in>Id; (i',i)\<in>Id; (h',h)\<in>Id
\<rbrakk> \<Longrightarrow> qsp_next_l_param cparam xs' p' i' h' \<le>\<Down>nat_rel (WO.ungrd_qsp_next_l_spec cparam xs p i h)"
proof (goal_cases)
case 1
then have "qsp_next_l_param cparam xs' p' i' h' \<le>\<Down>nat_rel (WO.qsp_next_l cparam xs p i h)"
unfolding qsp_next_l_param_def WO.qsp_next_l_def
apply refine_rcg
by auto
also note WO.qsp_next_l_refine[param_fo, OF IdI IdI IdI IdI, of cparam xs p i h, THEN nres_relD]
finally show ?case unfolding PR_CONST_def .
qed
definition qsp_next_h_param :: "'cparam \<Rightarrow> 'a list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat nres" where
"qsp_next_h_param cparam xs pi hi \<equiv> doN {
ASSERT (hi>0);
let hi = hi - 1;
ASSERT (hi<length xs);
monadic_WHILEIT (\<lambda>_. True)
(\<lambda>hi. doN {ASSERT(pi\<noteq>hi); pcmp_idxs2 cparam xs pi hi})
(\<lambda>hi. doN { ASSERT(hi>0); RETURN (hi - 1)}) hi
}"
lemma qsp_next_h_param_refine[refine]: "\<lbrakk>
(xs',xs)\<in>cdom_list_rel cparam; (p',p)\<in>Id; (h',h)\<in>Id
\<rbrakk> \<Longrightarrow> qsp_next_h_param cparam xs' p' h' \<le>\<Down>nat_rel (WO.ungrd_qsp_next_h_spec cparam xs p h)"
proof goal_cases
case 1
then have "qsp_next_h_param cparam xs' p' h' \<le>\<Down>nat_rel (WO.qsp_next_h cparam xs p h)"
unfolding qsp_next_h_param_def WO.qsp_next_h_def
apply refine_rcg
by (auto simp: cdom_list_rel_alt in_br_conv)
also note WO.qsp_next_h_refine[param_fo, THEN nres_relD]
finally show ?thesis by simp
qed
definition "qs_partition_param cparam li\<^sub>0 hi\<^sub>0 pi xs\<^sub>0 \<equiv> doN {
ASSERT (pi < li\<^sub>0 \<and> li\<^sub>0<hi\<^sub>0 \<and> hi\<^sub>0\<le>length xs\<^sub>0);
\<comment> \<open>Initialize\<close>
li \<leftarrow> qsp_next_l_param cparam xs\<^sub>0 pi li\<^sub>0 hi\<^sub>0;
hi \<leftarrow> qsp_next_h_param cparam xs\<^sub>0 pi hi\<^sub>0;
ASSERT (li\<^sub>0\<le>hi);
(xs,li,hi) \<leftarrow> WHILEIT
(\<lambda>_. True)
(\<lambda>(xs,li,hi). li<hi)
(\<lambda>(xs,li,hi). doN {
ASSERT(li<hi \<and> li<length xs \<and> hi<length xs \<and> li\<noteq>hi);
xs \<leftarrow> mop_list_swap xs li hi;
let li = li + 1;
li \<leftarrow> qsp_next_l_param cparam xs pi li hi\<^sub>0;
hi \<leftarrow> qsp_next_h_param cparam xs pi hi;
RETURN (xs,li,hi)
})
(xs\<^sub>0,li,hi);
RETURN (xs,li)
}"
lemma qs_partition_param_refine[refine]: "\<lbrakk>
(li',li)\<in>Id; (hi',hi)\<in>Id; (pi',pi)\<in>Id; (xs',xs)\<in>cdom_list_rel cparam
\<rbrakk> \<Longrightarrow> qs_partition_param cparam li' hi' pi' xs'
\<le> \<Down>(cdom_list_rel cparam \<times>\<^sub>r nat_rel) (WO.qs_partition cparam li hi pi xs)"
unfolding qs_partition_param_def WO.qs_partition_def
supply [refine_dref_RELATES] = RELATESI[of "cdom_list_rel cparam"]
apply refine_rcg
apply refine_dref_type
apply (auto simp: cdom_list_rel_alt in_br_conv)
done
definition "move_median_to_first_param cparam ri ai bi ci (xs::'a list) = doN {
ASSERT (ai \<noteq> bi \<and> ai \<noteq> ci \<and> bi \<noteq> ci \<and> ri \<noteq> ai \<and> ri \<noteq> bi \<and> ri \<noteq> ci);
if\<^sub>N pcmp_idxs2 cparam xs ai bi then (
if\<^sub>N pcmp_idxs2 cparam xs bi ci then
mop_list_swap xs ri bi
else if\<^sub>N pcmp_idxs2 cparam xs ai ci then
mop_list_swap xs ri ci
else
mop_list_swap xs ri ai
)
else if\<^sub>N pcmp_idxs2 cparam xs ai ci then
mop_list_swap xs ri ai
else if\<^sub>N pcmp_idxs2 cparam xs bi ci then
mop_list_swap xs ri ci
else
mop_list_swap xs ri bi
}"
(* TODO:Move *)
lemma mop_list_swap_cdom_refine[refine]: "\<lbrakk>
(xs',xs)\<in>cdom_list_rel cparam; (i',i)\<in>Id; (j',j)\<in>Id
\<rbrakk> \<Longrightarrow> mop_list_swap xs' i' j' \<le> \<Down> (cdom_list_rel cparam) (mop_list_swap xs i j)"
apply simp
apply refine_rcg
apply (clarsimp_all simp: cdom_list_rel_def list_rel_imp_same_length)
apply (parametricity)
by auto
lemma move_median_to_first_param_refine[refine]: "\<lbrakk>
(ri',ri)\<in>Id; (ai',ai)\<in>Id; (bi',bi)\<in>Id; (ci',ci)\<in>Id; (xs',xs)\<in>cdom_list_rel cparam
\<rbrakk> \<Longrightarrow> move_median_to_first_param cparam ri' ai' bi' ci' xs'
\<le> \<Down>(cdom_list_rel cparam) (WO.move_median_to_first cparam ri ai bi ci xs)"
unfolding move_median_to_first_param_def WO.move_median_to_first_alt
apply refine_rcg
by auto
definition "partition_pivot_param cparam xs\<^sub>0 l h \<equiv> doN {
ASSERT (l\<le>h \<and> h\<le>length xs\<^sub>0 \<and> h-l\<ge>4);
let m = l + (h-l) div 2;
xs\<^sub>1 \<leftarrow> move_median_to_first_param cparam l (l+1) m (h-1) xs\<^sub>0;
ASSERT (l<length xs\<^sub>1 \<and> length xs\<^sub>1 = length xs\<^sub>0);
(xs,m) \<leftarrow> qs_partition_param cparam (l+1) h l xs\<^sub>1;
RETURN (xs,m)
}"
lemma partition_pivot_param_refine[refine]: "\<lbrakk> (xs',xs)\<in>cdom_list_rel cparam; (l',l)\<in>Id; (h',h)\<in>Id
\<rbrakk> \<Longrightarrow> partition_pivot_param cparam xs' l' h'
\<le> \<Down>(cdom_list_rel cparam \<times>\<^sub>r nat_rel) (WO.partition_pivot cparam xs l h)"
unfolding partition_pivot_param_def WO.partition_pivot_def
apply refine_rcg
apply (auto simp: cdom_list_rel_alt in_br_conv)
done
end
context parameterized_sort_impl_context begin
(* TODO: Move *)
abbreviation "arr_assn \<equiv> wo_assn"
sepref_register qsp_next_l_param qsp_next_h_param
(* TODO: We can get rid of the restriction on length xs: the stopper element will always lie at an index < h, which is size_t representable! *)
sepref_def qsp_next_l_impl [llvm_inline] is "uncurry4 (PR_CONST qsp_next_l_param)"
:: "cparam_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a size_assn"
unfolding qsp_next_l_param_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
sepref_def qsp_next_h_impl [llvm_inline] is "uncurry3 (PR_CONST qsp_next_h_param)" :: "cparam_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a size_assn"
unfolding qsp_next_h_param_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
sepref_register qs_partition_param
sepref_def qs_partition_impl is "uncurry4 (PR_CONST qs_partition_param)" :: "cparam_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>d \<rightarrow>\<^sub>a arr_assn \<times>\<^sub>a size_assn"
unfolding qs_partition_param_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
supply [dest] = slice_eq_mset_eq_length
by sepref
sepref_register move_median_to_first_param
sepref_def move_median_to_first_param_impl (*[llvm_inline] *)
is "uncurry5 (PR_CONST move_median_to_first_param)"
:: "cparam_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k *\<^sub>a (arr_assn)\<^sup>d \<rightarrow>\<^sub>a arr_assn"
unfolding move_median_to_first_param_def PR_CONST_def
by sepref
sepref_register partition_pivot_param
sepref_def partition_pivot_impl (*[llvm_inline] *)
is "uncurry3 (PR_CONST partition_pivot_param)"
:: "cparam_assn\<^sup>k *\<^sub>a arr_assn\<^sup>d *\<^sub>a size_assn\<^sup>k *\<^sub>a size_assn\<^sup>k \<rightarrow>\<^sub>a arr_assn \<times>\<^sub>a size_assn"
unfolding partition_pivot_param_def PR_CONST_def
apply (annot_snat_const "TYPE(size_t)")
by sepref
end
end
|
Formal statement is: lemma complex_cnj_neg_numeral [simp]: "cnj (- numeral w) = - numeral w" Informal statement is: The complex conjugate of a negative numeral is the negative numeral. |
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE ViewPatterns #-}
module FokkerPlanck.GPUKernel where
import Data.Array.Accelerate as A
import Data.Array.Accelerate.Data.Complex as A
import qualified Data.Complex as C
import Debug.Trace
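-- Added note: 'moveParticle' adds a displacement of length r at angle theta
-- to a position of length rho at angle phi, in polar coordinates:
--   newRho = sqrt (rho^2 + r^2 + 2*r*rho*cos (theta - phi))   (law of cosines)
--   newPhi = phi + atan2 (r * sin (theta - phi)) (rho + r * cos (theta - phi))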
{-# INLINE moveParticle #-}
moveParticle ::
(A.RealFloat a, A.Num a, Elt a -- , FromIntegral Int a
)
=> Exp (a, a, a, a)
-> Exp (a, a, a, a)
moveParticle (unlift -> (phi, rho, theta, r)) =
let !x = theta - phi
!cosX = A.cos x
!newPhi = phi + (A.atan2 (r * A.sin x) (rho + r * cosX))
!newRho = A.sqrt $ (rho * rho + r * r + 2 * r * rho * cosX)
-- !xx = A.round $ newRho * A.cos newPhi :: Exp Int
-- !y = A.round $ newRho * A.sin newPhi :: Exp Int
-- !newRho' = A.sqrt . A.fromIntegral $ xx * xx + y * y
-- !newPhi' = A.atan2 (A.fromIntegral y) (A.fromIntegral xx)
in lift (newPhi, newRho, theta, r)
{-# INLINE normalizationTerm #-}
normalizationTerm ::
(A.Floating a, A.Num a, A.RealFloat a, A.Elt (Complex a))
=> Exp a
-> Exp a
-> Exp (Complex a)
normalizationTerm halfLogPeriod freq =
(lift $ 1 A.:+ ((-A.pi) * freq / halfLogPeriod)) /
(A.exp (lift $ halfLogPeriod A.:+ (-A.pi * freq)) -
A.exp (lift $ (-halfLogPeriod) A.:+ (A.pi * freq)))
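-- Added note: with half log-period t and frequency f, 'normalizationTerm'
-- computes (1 - i*pi*f/t) / (exp (t - i*pi*f) - exp (-t + i*pi*f)),
-- presumably the normalization constant of a log-periodic Fourier mode.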
-- {-# INLINE coefficient #-}
-- coefficient ::
-- forall a. (A.Floating a, A.Num a, A.RealFloat a, A.Elt (Complex a), Prelude.Fractional a)
-- => Exp a
-- -> Exp a
-- -> Exp a
-- -> Exp a
-- -> Exp a
-- -> Exp (a, a, a, a)
-- -> Exp (A.Complex a)
-- coefficient halfLogPeriod rFreq thetaFreq rhoFreq phiFreq particle -- (unlift -> (phi, rho, theta, r))
-- =
-- let (phi, rho, theta, r) = unlift particle :: (Exp a, Exp a, Exp a, Exp a)
-- -- (normalizationTerm halfLogPeriod rhoFreq) *
-- -- (normalizationTerm halfLogPeriod rFreq) *
-- -- (lift (A.cos (phiFreq * phi + thetaFreq * (theta - phi)) A.:+ 0)) *
-- -- (A.cis $
-- -- (-2 * A.pi) * (rhoFreq * rho + rFreq * (r - rho)) /
-- -- (A.exp halfLogPeriod))
-- in (lift $
-- ((A.exp $ (-0.5) * (rho + r)) *
-- (A.cos (phiFreq * phi + thetaFreq * (theta - phi)))
-- ) A.:+
-- 0) *
-- (A.cis $ (-1) * ((rhoFreq * rho + rFreq * (r - rho)
-- ) -- * (2 * A.pi) / (A.exp halfLogPeriod)
-- -- + phiFreq * phi + thetaFreq * (theta - phi)
-- ))
-- -- (lift $ ((A.cos (phiFreq * phi + thetaFreq * (theta - phi) / 2))) A.:+ 0) *
-- -- (A.cis $ (-2 * A.pi) * (rhoFreq * rho + rFreq * (r - rho)) / (A.exp halfLogPeriod))
-- -- ((lift $ rho A.:+ 0) A.** (lift $ (-0.5) A.:+ (rFreq - rhoFreq))) *
-- -- ((lift $ r A.:+ 0) A.** (lift $ (-0.5) A.:+ (-rFreq)))
-- -- (A.cis $ (-A.pi) * (rhoFreq * rho + rFreq * (r - rho)
-- -- ) / (halfLogPeriod))
-- -- (A.cis $
-- -- (-A.pi) * (rhoFreq * (A.log rho) + rFreq * (A.log (r / rho))) /
-- -- halfLogPeriod)
-- -- in ((lift $ rho A.:+ 0) A.**
-- -- (lift $ 0 A.:+ (-A.pi) * (rhoFreq - rFreq) / halfLogPeriod)) *
-- -- ((lift $ r A.:+ 0) A.** (lift $ 0 A.:+ (-A.pi) * rFreq / halfLogPeriod)) *
-- -- (A.cis $ (-phiFreq) * phi - thetaFreq * (theta - phi))
coefficient ::
forall a. (A.Floating a, A.Num a, A.RealFloat a, A.Elt (Complex a), Prelude.Fractional a)
=> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp (a, a, a, a)
-> Exp (A.Complex a)
coefficient halfLogPeriod rFreq thetaFreq rhoFreq phiFreq particle =
let (phi, rho, theta, r) = unlift particle :: (Exp a, Exp a, Exp a, Exp a)
in (lift $
((A.exp $ (-0.5) * (rho + r)) *
(A.cos (phiFreq * phi + thetaFreq * (theta - phi)))) A.:+
0) *
(A.cis $ (-1) * (rhoFreq * rho + rFreq * (r + rho)))
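-- Added note: for one particle (phi, rho, theta, r), 'coefficient' evaluates
--   exp (-(rho + r)/2) * cos (phiFreq*phi + thetaFreq*(theta - phi))
--     * cis (-(rhoFreq*rho + rFreq*(r + rho)))
-- i.e. a damped angular-harmonic factor times a log-radial phase factor.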
-- {-# INLINE gpuKernel #-}
gpuKernel ::
forall a. (A.Eq a, A.Floating a, A.Num a, A.RealFloat a, A.Elt (Complex a), A.FromIntegral Int a, Prelude.Fractional a)
=> Exp a
-> Exp a
-> Exp (A.Complex a)
-> Acc (A.Vector (a, a, a, a))
-> Acc (A.Vector (a, a, a, a))
-> Acc (A.Vector (A.Complex a))
gpuKernel !maxScaleExp !halfLogPeriodExp !deltaLogRhoComplexExp freqArr particles =
let -- delta = constant 0.01
movedParticleArr =
compute .
A.map
(\particle ->
let (phi, rho, theta, r) =
unlift particle :: (Exp a, Exp a, Exp a, Exp a)
logRho = A.log rho
-- (A.fromIntegral $
-- (A.round (((A.log rho) + halfLogPeriodExp) / delta) :: Exp Int)) *
-- delta -
-- halfLogPeriodExp
-- newRho =
-- (A.fromIntegral $ (A.round (rho / delta) :: Exp Int)) * delta :: Exp a
in lift (phi, logRho, theta, (A.log r) :: Exp a)) -- .
-- afst .
-- A.filter
-- (\particle ->
-- let (_, rho, _, _) =
-- unlift particle :: (Exp a, Exp a, Exp a, Exp a)
-- in rho A.> (A.constant 0) ) -- .
-- A.map moveParticle
$
particles
in A.map
(\(unlift -> (rFreq, thetaFreq, rhoFreq, phiFreq))
-- (lift $ delta A.:+ 0) /
-- (lift $ (8 * A.pi * A.pi * halfLogPeriodExp) A.:+ 0) *
->
(sfoldl
(\s particle ->
s +
(coefficient
halfLogPeriodExp
rFreq
thetaFreq
rhoFreq
phiFreq
particle))
0
(constant Z)
movedParticleArr) * deltaLogRhoComplexExp
-- (lift $
-- (16 * A.pi * A.pi * halfLogPeriodExp * halfLogPeriodExp) A.:+ 0)
-- movedParticleArr
) $
freqArr
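-- Added note: 'convolveKernel' fixes one (thetaIdx, rIdx) frequency offset,
-- multiplies the input pointwise by the replicated coefficient slice and by
-- the correspondingly shifted harmonics, then sums out the (rho, phi)
-- frequency dimensions, leaving one complex value per (col, row) position.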
{-# INLINE convolveKernel #-}
convolveKernel ::
(A.Num a, A.RealFloat a, A.Elt (Complex a))
=> Acc (Array DIM4 (Complex a))
-> Acc (Array DIM4 (Complex a))
-> Acc (Scalar Int)
-> Acc (Scalar Int)
-> Acc (Array DIM4 (Complex a))
-> Acc (Vector (Complex a))
convolveKernel coefficients harmonics thetaIdx rIdx input =
let (Z :. numRhoFreq :. numPhiFreq :. cols :. rows) =
unlift . shape $ input :: (Z :. Exp Int :. Exp Int :. Exp Int :. Exp Int)
coefficientsArr =
A.replicate (lift (Z :. All :. All :. cols :. rows)) .
slice coefficients $
(lift (Z :. (the rIdx) :. (the thetaIdx) :. All :. All))
harmonicsArr =
backpermute
(shape input)
(\(unlift -> Z :. rho :. phi :. col :. row :: (Z :. Exp Int :. Exp Int :. Exp Int :. Exp Int)) ->
lift (Z :. (rho + the rIdx) :. (phi + the thetaIdx) :. col :. row))
harmonics
in flatten .
A.sum .
A.sum .
backpermute
(lift (Z :. cols :. rows :. numRhoFreq :. numPhiFreq))
(\(unlift -> Z :. col :. row :. rho :. phi :: (Z :. Exp Int :. Exp Int :. Exp Int :. Exp Int)) ->
lift (Z :. rho :. phi :. col :. row)) .
A.zipWith (*) harmonicsArr . A.zipWith (*) coefficientsArr $
input
{-# INLINE coefficient' #-}
coefficient' ::
forall a.
( A.Floating a
, A.Num a
, A.RealFloat a
, A.Elt (Complex a)
, Prelude.Fractional a
)
=> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp (a, a, a, a, a)
-> Exp (A.Complex a)
coefficient' sigma rFreq thetaFreq rhoFreq phiFreq particle =
let (phi, rho, theta, r, v) =
unlift particle :: (Exp a, Exp a, Exp a, Exp a, Exp a)
in (lift $ (v * (A.exp $ (sigma - 1) * (rho + r))) A.:+ 0) *
(A.cis $
(-1) *
(rhoFreq * rho + rFreq * (r - rho) + phiFreq * phi +
thetaFreq * (theta - phi)))
gpuKernel' ::
forall a.
( A.Eq a
, A.Floating a
, A.Num a
, A.RealFloat a
, A.Elt (Complex a)
, A.FromIntegral Int a
, Prelude.Fractional a
)
=> Exp a
-> Acc (A.Vector (a, a, a, a))
-> Acc (A.Vector (a, a, a, a, a))
-> Acc (A.Vector (A.Complex a))
gpuKernel' sigma freqArr xs =
A.map
(\(unlift -> (rFreq, thetaFreq, rhoFreq, phiFreq)) ->
A.sfoldl
(\s particle ->
s + (coefficient' sigma rFreq thetaFreq rhoFreq phiFreq particle))
0
(constant Z)
xs)
freqArr
{-# INLINE coefficient'' #-}
coefficient'' ::
forall a.
( A.Floating a
, A.Num a
, A.RealFloat a
, A.Elt (Complex a)
, Prelude.Fractional a
)
=> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp (a, a, a, a, a)
-> Exp (A.Complex a)
coefficient'' sigma period rFreq thetaFreq rhoFreq phiFreq particle =
let (phi, rho, theta, r, v) =
unlift particle :: (Exp a, Exp a, Exp a, Exp a, Exp a)
in lift
((v * A.exp ((sigma - 1) * rho) *
A.cos (phiFreq * phi + thetaFreq * (theta - phi))
) :+
0) *
A.cis ((-1) * (2 * A.pi / period * (rhoFreq * rho + rFreq * (r - rho))))
gpuKernel'' ::
forall a.
( A.Eq a
, A.Floating a
, A.Num a
, A.RealFloat a
, A.Elt (Complex a)
, A.FromIntegral Int a
, Prelude.Fractional a
)
=> Exp a
-> Exp a
-> Acc (A.Vector (a, a, a, a))
-> Acc (A.Vector (a, a, a, a, a))
-> Acc (A.Vector (A.Complex a))
gpuKernel'' sigma period freqArr xs =
A.map
(\(unlift -> (rFreq, thetaFreq, rhoFreq, phiFreq)) ->
A.sfoldl
(\s particle ->
s +
coefficient'' sigma period rFreq thetaFreq rhoFreq phiFreq particle)
0
(constant Z)
xs)
freqArr
{-# INLINE pinwheelAcc #-}
pinwheelAcc ::
forall a. (A.Floating a, A.Num a, A.RealFloat a, A.Elt (Complex a), Prelude.Fractional a)
=> Exp a
-> Exp a
-> Exp a
-> Exp a
-> Exp (a, a, a, a, A.Complex a)
-> Exp (A.Complex a)
pinwheelAcc rFreq thetaFreq rhoFreq phiFreq particle =
let (phi, rho, theta, r, v) =
unlift particle :: (Exp a, Exp a, Exp a, Exp a, Exp (A.Complex a))
in v * (lift $ ((A.exp $ (-0.5) * (rho + r))) A.:+ 0) *
(A.cis $
(-1) *
(rhoFreq * rho + rFreq * (r - rho) + phiFreq * phi +
thetaFreq * (theta - phi)))
pinwheelCoefficientsAcc ::
forall a.
( A.Eq a
, A.Floating a
, A.Num a
, A.RealFloat a
, A.Elt (Complex a)
, A.FromIntegral Int a
, Prelude.Fractional a
)
=> Acc (A.Vector (a, a, a, a))
-> Acc (A.Vector (a, a, a, a, A.Complex a))
-> Acc (A.Vector (A.Complex a))
pinwheelCoefficientsAcc freqArr xs =
A.map
(\(unlift -> (rFreq, thetaFreq, rhoFreq, phiFreq)) ->
A.sfoldl
(\s particle ->
s + (pinwheelAcc rFreq thetaFreq rhoFreq phiFreq particle))
0
(constant Z)
xs)
freqArr
|