# Number of subspaces of dimension d in F^n, if |F|=q
grassmann_count := (q::posint) -> (n,d) ->
 mul(q^(n-i)-1,i=0..d-1)/mul(q^(d-i)-1,i=0..d-1);

# Number of splittings of F^(n+m) as a sum of subspaces of
# dimensions n and m, if |F| = q.
splitting_count := (q::posint) -> (n,m) ->
 grassmann_count(q)(n+m,n) * q^(n*m);

tanabe_L := (p,n,r) -> (k) -> p^(n*(k+r-1));

# tanabe_N(p,n,r)(k) is the number of irreducible F-linear representations
# of Z_p^n that have dimension p^k over F.
tanabe_N := (p,n,r) -> (k) ->
 `if`(k = 0,p^(n*r),(p^n-1) * p^((n-1)*k + n*(r-1)));

# The socle of K^0(BGL_{p^k}(F)) is conjecturally generated by
# euler(V)^{N-1}, where N = tanabe_N_bar(n,v,k)
tanabe_N_bar := (p,n,r) -> proc(k)
 local j;
 p^(n*r) + (p^n-1)*p^(n*(r-1))*sum(p^((n-1)*j),j=1..k);
end:

tanabe_N_star := (p,n,r) -> proc(k)
 local j;
 p^(n*r-1) * (p-1) * sum(p^((n-1)*j),j=0..k-1);
end:

tanabe_N_lim := (p,n,r) -> (k) ->
 p^(n*r+n*k-k) * (1-p^(-n))/(1-p^(1-n));

# The natural numbers can be partitioned as the disjoint union of
# the sets tanabe_NN(p,n,r)(k), of size tanabe_N(p,n,r)(k).
tanabe_NN := (p,n,r) -> proc(k::nonnegint)
 if k = 0 then
  return [seq(i,i=0..tanabe_N_bar(p,n,r)(0)-1)];
 else
  return [seq(i,i=tanabe_N_bar(p,n,r)(k-1)..tanabe_N_bar(p,n,r)(k)-1)];
 fi;
end:

# tanabe_s(p,n,r)(m) is the unique k such that m lies in tanabe_NN(p,n,r)(k).
tanabe_s := (p,n,r) -> proc(m)
 local k;
 k := 0;
 while m >= tanabe_N_bar(p,n,r)(k) do k := k+1; od;
 return k;
end:

# The E^2 page of the AHSS converging to K_*(BV) has a trigrading
# with E^2_{ijk} = H_i(BV_k;K_j). It has even generators
# b[i] and u, and odd generators e[i] with
# |b[i]| = tanabe_b_trideg(p,n,r)(i) = [2i,0,1]
# |e[i]| = tanabe_e_trideg(p,n,r)(i) = [2i+1,0,1]
# |u|    = tanabe_u_trideg(p,n,r)    = [0,2,0].
tanabe_b_trideg := (p,n,r) -> (i) -> [2*i,0,1];
tanabe_e_trideg := (p,n,r) -> (i) -> [2*i+1,0,1];
tanabe_u_trideg := (p,n,r) -> [0,2,0];

# The only nontrivial differentials have the form d_r, where
# r = tanabe_AHSS_page(p,n,r)(k) for some k >= 0
# [From now on we refer to this as the k'th differential,
# and the page on which it occurs as the k'th page.]
tanabe_AHSS_page := (p,n,r) -> (k) -> 2*p^(n*(r+k)) - 1;

# Tridegree of the k'th differential
tanabe_AHSS_d_trideg := (p,n,r) -> (k) ->
 [1-2*p^(n*(r+k)),2*p^(n*(r+k))-2,0];

# The k'th page can be described in terms of generators
# bb[i,k] and ee[i,k], with tridegrees as follows:
tanabe_bb_trideg := (p,n,r) -> (i,k) ->
 [2*p^k*tanabe_N_bar(p,n,r)(k-1)+2*p^k*i,0,p^k];
tanabe_ee_trideg := (p,n,r) -> (i,k) ->
 [1+2*p^k*i-2*p^(n*(r+k))+2*p^k*tanabe_N_bar(p,n,r)(k),2*p^(n*(r+k))-2,p^k];
# (This gives all the odd generators, but not all the even ones.)

# The generators bb[i,k] and ee[i,k] can be expressed in terms of
# the original generators b[i] and e[i], as follows:
tanabe_bbb := (p,n,r) -> (i,k) -> b[i+tanabe_N_bar(p,n,r)(k-1)]^(p^k);
tanabe_eee := (p,n,r) -> (i,k) ->
 u^(p^(n*(r+k))-1) * e[i] *
 mul(b[i+tanabe_N_bar(p,n,r)(j)]^(p^j*(p-1)),j=0..k-1);

tanabe_AHSS_d_rule := (p,n,r) -> proc(k,m)
 local R,Nb,i,j,n0,n1;
 R := NULL;
 Nb := tanabe_N_bar(p,n,r);
 for j from 0 to tanabe_s(p,n,r)(m) do
  n0 := `if`(j = 0,0,Nb(j-1));
  n1 := Nb(j);
  if j <= k then
   for i from n0 to min(n1-1,m) do
    R := R,b[i]^(p^j) = 0;
   od;
  else
   for i from n0 to min(n1-1,m) do
    R := R,b[i]^(p^k) = tanabe_eee(p,n,r)(i-Nb(k),k);
   od;
  fi;
 od;
 return {R};
end:

# This is the part of the inverse Poincare series contributed by
# irreducible representations of dimension at most p^k.
tanabe_inverse_poincare_series := (p,n,r) -> (k) ->
 (1-t)^(p^(n*r)) * mul((1 - t^(p^j))^(p^(n*(r+j))*(1-1/p^n)),j=1..k);

tanabe_poincare_series := (p,n,r) -> (k) ->
 convert(series(1/tanabe_inverse_poincare_series(p,n,r)(k),t=0,p^k+1), polynom,t);

tanabe_trideg0 := (p,n,r) -> proc(x)
 if type(x,indexed) then
  if op(0,x) = b then return tanabe_b_trideg(p,n,r)(op(x));
  elif op(0,x) = e then return tanabe_e_trideg(p,n,r)(op(x));
  elif op(0,x) = bb then return tanabe_bb_trideg(p,n,r)(op(x));
  elif op(0,x) = ee then return tanabe_ee_trideg(p,n,r)(op(x));
  else return [0,0,0];
  fi;
 elif x = u then
  return tanabe_u_trideg(p,n,r);
 else
  return [0,0,0];
 fi;
end:

tanabe_trideg := (p,n,r) -> apply_deg(tanabe_trideg0(p,n,r));

######################################################################

check_tanabe_numbers := proc()
 local n,r,k,p,L,N,N_bar,N_star,N_lim,P,err;
 assume(n::posint); assume(r::posint); assume(k::posint); assume(p::posint);
 L := tanabe_L(p,n,r);
 N := tanabe_N(p,n,r);
 N_bar := tanabe_N_bar(p,n,r);
 N_star := tanabe_N_star(p,n,r);
 N_lim := tanabe_N_lim(p,n,r);
 _ASSERT(
  simplify(N_bar(k) - N_bar(k-1) - N(k)) = 0,
  "N_bar(k) - N_bar(k-1) = N(k)"
 );
 _ASSERT(
  simplify(N_star(k) - (N_bar(k) - p^(n*k + n*r - k))) = 0,
  "N_star(k) = N_bar(k) - p^(n*k + n*r - k)"
 );
 _ASSERT(
  simplify(N(0) + sum(p^j * N(j),j=1..k) - p^(n*(r+k))) = 0,
  "sum(p^j * N(j),j=0..k) = p^(n*(r+k))"
 );
 _ASSERT(
  simplify((1 - p^(-k)) * N(0) + sum(expand((1 - p^(j - k)) * N(j)),j=1..k) - N_star(k)) = 0,
  "sum((1 - p^(j-k)) * N(j),j=0..k) = N_star(k)"
 );
 _ASSERT(
  simplify(p^(k + 1) * N_star(k + 1) - p^k * N_star(k) - (p - 1) * p^k * N_bar(k)) = 0,
  "p^(k + 1) * N_star(k + 1) = p^k * N_star(k) + (p - 1) * p^k * N_bar(k)"
 );
 _ASSERT(
  simplify(p^k*(N_bar(k) - N_bar(k-1)) - (p^n-1)*L(k)) = 0,
  "p^k*(N_bar(k) - N_bar(k-1)) = (p^n-1)*L(k)"
 );
 _ASSERT(
  simplify(N_bar(k-1) - L(k)/p^k - N_star(k)) = 0,
  "N_bar(k-1) = L(k)/p^k + N_star(k)"
 );
 _ASSERT(
  simplify(factor(N_bar(k) - (N_star(k)*(p^n-1)/(p-1) + p^(n*r)))) = 0,
  "N_bar(k) = N_star(k)*(p^n-1)/(p-1) + p^(n*r)"
 );
 _ASSERT(
  simplify(sum(p^j*(p-1)*N_bar(j),j=0..k-1) - p^k*N_bar(k) + p^(n*(r+k))) = 0,
  "sum(p^j*(p-1)*N_bar(j),j=0..k-1) = p^k*N_bar(k) - p^(n*(r+k))"
 );
 _ASSERT(
  limit(simplify(N_bar(k)/N_lim(k)-1),k=infinity) = 0,
  "N_bar(k) is asymptotic to N_lim(k)"
 );
end:
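As a quick illustration of the counting functions above, here is a hypothetical Maple session; the expected values follow from the Gaussian binomial formula that grassmann_count implements:

# The Gaussian binomial [4,2]_2 counts the 35 two-dimensional subspaces of F_2^4:
# (2^4-1)*(2^3-1)/((2^2-1)*(2-1)) = 15*7/3 = 35
grassmann_count(2)(4,2);        # 35
# Each such subspace admits 2^(2*2) = 16 complements, giving 560 splittings:
splitting_count(2)(2,2);        # 35 * 16 = 560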
-- ---------------------------------------------------------------- [ Sign.idr ]
-- Module      : Crypto.Std.Sign
-- Description : Representation of digital signature schemes.
-- Copyright   : (c) Jan de Muijnck-Hughes
-- License     : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module Crypto.Std.Sign

import Crypto.Common
import Crypto.Message
import Crypto.Std.Key

||| Representation of a digital signature scheme.
class SignScheme s where
  ||| Generate a sign and verify key pair.
  signKeyGen : (Key Public Sign n, Key Private Sign m)

  ||| Sign a message and return a signed message.
  |||
  ||| @ sigkey The sender's signing key.
  ||| @ msg    The message to be signed.
  signMsg : (sigkey : Key Private Sign m)
         -> (msg : Vect a Bits32)
         -> Signed b

  ||| Verify a signed message and return the message.
  |||
  ||| @ verkey The sender's verification key.
  ||| @ sigmsg The signed message.
  verifyMsg : (verkey : Key Public Sign n)
           -> (sigmsg : Signed b)
           -> Maybe (Vect a Bits32)
-- --------------------------------------------------------------------- [ EOF ]
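A round-trip law that any lawful instance should satisfy, stated as a doc-comment sketch only (the pairing of `verkey` and `sigkey` via `signKeyGen` is assumed; this is not part of the original interface):

||| For a key pair (verkey, sigkey) produced by signKeyGen, verification
||| should undo signing:
|||
|||     verifyMsg verkey (signMsg sigkey msg) = Just msg
|||
||| and verification with any other key should return Nothing.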
Nike's inspiration for its team collection comes from the understanding that every team is special. While each style has its own unique design, every player will be armed with the most innovative technology. Find a variety of Nike Football Kits, Nike Women's Wear, Nike Training Wear and much more.
(* * Copyright 2014, General Dynamics C4 Systems * * This software may be distributed and modified according to the terms of * the GNU General Public License version 2. Note that NO WARRANTY is provided. * See "LICENSE_GPLv2.txt" for details. * * @TAG(GD_GPL) *) theory EmptyFail_AI imports "./$L4V_ARCH/ArchTcb_AI" begin context begin interpretation Arch . requalify_facts ef_machine_op_lift end lemmas [wp] = empty_fail_bind empty_fail_bindE empty_fail_get empty_fail_modify empty_fail_whenEs empty_fail_when empty_fail_gets empty_fail_assertE empty_fail_error_bits empty_fail_mapM_x empty_fail_mapM empty_fail_sequence_x ef_ignore_failure ef_machine_op_lift lemmas empty_fail_error_bits[simp] lemma sequence_empty_fail[wp]: "(\<And>m. m \<in> set ms \<Longrightarrow> empty_fail m) \<Longrightarrow> empty_fail (sequence ms)" apply (induct ms) apply (simp add: sequence_def | wp)+ done lemma sequenceE_empty_fail[wp]: "(\<And>m. m \<in> set ms \<Longrightarrow> empty_fail m) \<Longrightarrow> empty_fail (sequenceE ms)" apply (induct ms) apply (simp add: sequenceE_def | wp)+ done lemma sequenceE_x_empty_fail[wp]: "(\<And>m. m \<in> set ms \<Longrightarrow> empty_fail m) \<Longrightarrow> empty_fail (sequenceE_x ms)" apply (induct ms) apply (simp add: sequenceE_x_def | wp)+ done lemma mapME_empty_fail[wp]: "(\<And>x. empty_fail (m x)) \<Longrightarrow> empty_fail (mapME m xs)" by (clarsimp simp: mapME_def image_def | wp)+ lemma mapME_x_empty_fail[wp]: "(\<And>x. empty_fail (f x)) \<Longrightarrow> empty_fail (mapME_x f xs)" by (clarsimp simp: mapME_x_def | wp)+ lemma filterM_empty_fail[wp]: "(\<And>m. m \<in> set ms \<Longrightarrow> empty_fail (P m)) \<Longrightarrow> empty_fail (filterM P ms)" apply (induct ms) apply (simp | wp)+ done lemma zipWithM_x_empty_fail[wp]: "(\<And>x y. empty_fail (f x y)) \<Longrightarrow> empty_fail (zipWithM_x f xs ys)" by (clarsimp simp: zipWithM_x_def zipWith_def | wp)+ lemma zipWithM_empty_fail[wp]: "(\<And>x y. empty_fail (f x y)) \<Longrightarrow> empty_fail (zipWithM f xs ys)" by (clarsimp simp: zipWithM_def zipWith_def | wp)+ lemma handle'_empty_fail[wp]: "\<lbrakk>empty_fail f; \<And>e. empty_fail (handler e)\<rbrakk> \<Longrightarrow> empty_fail (f <handle2> handler)" apply (simp add: handleE'_def | wp)+ apply (case_tac x, simp_all) done lemma handle_empty_fail[wp]: "\<lbrakk>empty_fail f; \<And>e. 
empty_fail (handler e)\<rbrakk> \<Longrightarrow> empty_fail (f <handle> handler)" by (simp add: handleE_def | wp)+ lemma lookup_error_on_failure_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (lookup_error_on_failure a f)" by (simp add: lookup_error_on_failure_def | wp)+ lemma empty_on_failure_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (empty_on_failure f)" by (simp add: empty_on_failure_def catch_def split: sum.splits | wp)+ lemma unify_failure_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (unify_failure f)" by (simp add: unify_failure_def | wp)+ lemma split_if_empty_fail[wp]: "\<lbrakk>P \<Longrightarrow> empty_fail f; \<not> P \<Longrightarrow> empty_fail g\<rbrakk> \<Longrightarrow> empty_fail (if P then f else g)" by simp lemma const_on_failure_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (const_on_failure a f)" by (simp add: const_on_failure_def catch_def split: sum.splits | wp)+ lemma liftME_empty_fail[simp]: "empty_fail (liftME f m) = empty_fail m" apply (simp add: liftME_def) apply (rule iffI) apply (simp add: bindE_def) apply (drule empty_fail_bindD1) apply (simp | wp)+ done lemma select_empty_fail[wp]: "S \<noteq> {} \<Longrightarrow> empty_fail (select S)" by (simp add: empty_fail_def select_def) lemma select_f_empty_fail[wp]: "(fst S = {} \<Longrightarrow> snd S) \<Longrightarrow> empty_fail (select_f S)" by (simp add: select_f_def empty_fail_def) lemma select_ext_empty_fail: "S \<noteq> {} \<Longrightarrow> empty_fail (select_ext a S)" by (simp add: select_ext_def | wp)+ lemma do_extended_op_empty_fail[wp]: "empty_fail (do_extended_op f)" apply(simp add: do_extended_op_def) apply (wp | simp add: mk_ef_def split_def)+ done lemma do_machine_op_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (do_machine_op f)" apply (simp add: do_machine_op_def | wp)+ apply (simp add: empty_fail_def) apply (simp add: split_def) done lemma throw_on_false_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (throw_on_false ex f)" by (simp add: throw_on_false_def | wp)+ lemma without_preemption_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (without_preemption f)" by simp lemma put_empty_fail[wp]: "empty_fail (put f)" by (simp add: put_def empty_fail_def) crunch_ignore (empty_fail) (add: bind bindE lift liftE liftM "when" whenE unless unlessE return fail assert_opt mapM mapM_x sequence_x catch handleE do_extended_op cap_insert_ext empty_slot_ext create_cap_ext cap_swap_ext cap_move_ext reschedule_required switch_if_required_to attempt_switch_to set_thread_state_ext OR_choice OR_choiceE timer_tick) crunch (empty_fail) empty_fail[wp]: set_object, gets_the, get_register, get_cap (simp: split_def kernel_object.splits) lemma check_cap_at_empty_fail[wp]: "empty_fail m \<Longrightarrow> empty_fail (check_cap_at cap slot m)" by (simp add: check_cap_at_def | wp)+ lemma as_user_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (as_user t f)" apply (simp add: as_user_def | wp)+ apply (simp add: empty_fail_def) apply (case_tac xa) apply (simp | wp)+ done crunch (empty_fail) empty_fail[wp]: get_message_info (simp: split_def kernel_object.splits) lemma cap_fault_on_failure_empty_fail[wp]: "empty_fail f \<Longrightarrow> empty_fail (cap_fault_on_failure a b f)" by (simp add: cap_fault_on_failure_def | wp)+ lemma syscall_empty_fail[wp]: "\<lbrakk>empty_fail a; \<And>x. empty_fail (b x); \<And>x. empty_fail (c x); \<And>x. empty_fail (d x); \<And>x. 
empty_fail (e x)\<rbrakk> \<Longrightarrow> empty_fail (syscall a b c d e)" by (simp add: syscall_def split: sum.splits | wp | intro impI allI)+ definition spec_empty_fail where "spec_empty_fail m s \<equiv> fst (m s) = {} \<longrightarrow> snd (m s)" lemma drop_spec_empty_fail: "empty_fail m \<Longrightarrow> spec_empty_fail m s" by (simp add: empty_fail_def spec_empty_fail_def) lemma spec_empty_fail_bind: "\<lbrakk>spec_empty_fail f s; \<And>x. empty_fail (g x)\<rbrakk> \<Longrightarrow> spec_empty_fail (f >>= g) s" by (fastforce simp: bind_def spec_empty_fail_def empty_fail_def image_def split_def split_paired_Bex intro: prod_eqI) lemma spec_empty_fail_bindE: "\<lbrakk>spec_empty_fail f s; \<And>x. empty_fail (g x)\<rbrakk> \<Longrightarrow> spec_empty_fail (f >>=E g) s" by (fastforce simp: bindE_def lift_def split: sum.splits intro: spec_empty_fail_bind) lemma spec_empty_fail_bind': "\<lbrakk>spec_empty_fail f s; \<And>x s'. (x, s') \<in> fst (f s) \<Longrightarrow> spec_empty_fail (g x) s'\<rbrakk> \<Longrightarrow> spec_empty_fail (f >>= g) s" by (fastforce simp: bind_def spec_empty_fail_def image_def split_def split_paired_Bex intro: prod_eqI) lemma spec_empty_fail_bindE': "\<lbrakk>spec_empty_fail f s; \<And>x s'. (Inr x, s') \<in> fst (f s) \<Longrightarrow> spec_empty_fail (g x) s'\<rbrakk> \<Longrightarrow> spec_empty_fail (f >>=E g) s" apply (simp add: bindE_def) apply (rule spec_empty_fail_bind') apply simp apply (clarsimp simp: lift_def split: sum.splits | rule conjI | wp drop_spec_empty_fail)+ done lemma spec_empty_returnOk: "spec_empty_fail (returnOk x) s" apply (rule drop_spec_empty_fail) apply simp done lemma spec_empty_whenE: "spec_empty_fail f s \<Longrightarrow> spec_empty_fail (whenE P f) s" apply (simp add: whenE_def) apply (clarsimp simp: spec_empty_returnOk) done lemma use_spec_empty_fail: "(\<And>s. spec_empty_fail f s) \<Longrightarrow> empty_fail f" apply (simp add: empty_fail_def spec_empty_fail_def) done lemma resolve_address_bits_spec_empty_fail: notes spec_empty_fail_bindE'[wp_split] shows "spec_empty_fail (resolve_address_bits slot) s" unfolding resolve_address_bits_def proof (induct arbitrary: s rule: resolve_address_bits'.induct) case (1 z cap cref s') show ?case apply (simp add: resolve_address_bits'.simps) apply (case_tac cap, (wp | simp | intro impI conjI | rule "1.hyps" | rule drop_spec_empty_fail | simp add: whenE_def in_monad | force)+) done qed lemmas resolve_address_bits_empty_fail[wp] = resolve_address_bits_spec_empty_fail[THEN use_spec_empty_fail] crunch (empty_fail) empty_fail[wp]: set_register, lookup_slot_for_cnode_op, decode_untyped_invocation, range_check, lookup_source_slot, lookup_pivot_slot, cap_swap_for_delete, is_final_cap, set_cap, allActiveTCBs locale EmptyFail_AI_load_word = fixes state_ext_t :: "'state_ext::state_ext itself" assumes loadWord_empty_fail[wp]: "\<And>p. empty_fail (loadWord p)" assumes load_word_offs_empty_fail[wp]: "\<And>p offset. empty_fail (load_word_offs p offset :: (machine_word, 'state_ext) s_monad)" context EmptyFail_AI_load_word begin lemma get_extra_cptrs_empty_fail[wp]: fixes a b shows "empty_fail (get_extra_cptrs a b :: (cap_ref list, 'state_ext) s_monad)" apply (simp add: get_extra_cptrs_def) apply (cases a) apply (simp | wp loadWord_empty_fail load_word_offs_empty_fail)+ done end locale EmptyFail_AI_derive_cap = EmptyFail_AI_load_word state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + assumes derive_cap_empty_fail[wp]: "\<And>slot cap. 
empty_fail (derive_cap slot cap :: (cap, 'state_ext) se_monad)" context EmptyFail_AI_derive_cap begin lemma decode_cnode_invocation_empty_fail[wp]: "\<And>a b c d. empty_fail (decode_cnode_invocation a b c d :: (cnode_invocation, 'state_ext) se_monad)" by (simp add: decode_cnode_invocation_def split: invocation_label.splits list.splits | wp | intro impI conjI allI)+ end lemma decode_read_registers_empty_fail[wp]: "empty_fail (decode_read_registers data (ThreadCap p))" by (simp add: decode_read_registers_def split: list.splits cap.splits | wp | intro allI impI conjI)+ lemma decode_write_registers_empty_fail[wp]: "empty_fail (decode_write_registers data (ThreadCap p))" by (simp add: decode_write_registers_def split: list.splits cap.splits | wp | intro allI impI conjI)+ lemma decode_copy_registers_empty_fail[wp]: "empty_fail (decode_copy_registers data (ThreadCap p) ec)" by (simp add: decode_copy_registers_def split: list.splits cap.splits | wp | intro allI impI conjI)+ lemma alternative_empty_fail[wp]: "empty_fail f \<or> empty_fail g \<Longrightarrow> empty_fail (f \<sqinter> g)" by (auto simp: alternative_def empty_fail_def) lemma OR_choice_empty_fail[wp]: "\<lbrakk>empty_fail f; empty_fail g\<rbrakk> \<Longrightarrow> empty_fail (OR_choice c f g)" by (simp add: OR_choice_def mk_ef_def split_def | wp)+ lemma OR_choiceE_empty_fail[wp]: "\<lbrakk>empty_fail f; empty_fail g\<rbrakk> \<Longrightarrow> empty_fail (OR_choiceE c f g)" by (simp add: OR_choiceE_def mk_ef_def split_def | wp)+ lemmas empty_fail_return[wp] locale EmptyFail_AI_rec_del = EmptyFail_AI_derive_cap state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + assumes empty_slot_empty_fail[wp]: "\<And>slot irq. empty_fail (empty_slot slot irq :: (unit, 'state_ext) s_monad)" assumes finalise_cap_empty_fail[wp]: "\<And>cap final. 
empty_fail (finalise_cap cap final :: (cap \<times> irq option, 'state_ext) s_monad)" assumes preemption_point_empty_fail[wp]: "empty_fail (preemption_point :: (unit, 'state_ext) p_monad)" context EmptyFail_AI_rec_del begin lemma rec_del_spec_empty_fail: fixes call and s :: "'state_ext state" shows "spec_empty_fail (rec_del call) s" proof (induct rule: rec_del.induct, simp_all only: drop_spec_empty_fail[OF empty_fail] rec_del_fails) case (1 slot exposed s) show ?case apply (subst rec_del.simps) apply (simp only: split_def) apply (rule spec_empty_fail_bindE) apply (simp add: "1.hyps") apply (wp | simp)+ done next case (2 slot exposed s) show ?case apply (subst rec_del.simps) apply (rule spec_empty_fail_bindE') apply ((wp drop_spec_empty_fail | simp)+)[1] apply (simp | intro conjI impI)+ apply (wp drop_spec_empty_fail)[1] apply (rule spec_empty_fail_bindE') apply ((wp drop_spec_empty_fail | simp)+)[1] apply (rule spec_empty_fail_bindE') apply ((wp drop_spec_empty_fail | simp)+)[1] apply (simp add: split_def | intro conjI impI)+ apply ((wp drop_spec_empty_fail | simp)+)[3] apply (rule spec_empty_fail_bindE') apply ((wp drop_spec_empty_fail | simp)+)[1] apply (rule spec_empty_fail_bindE') apply (rule "2.hyps", simp+) apply (rule spec_empty_fail_bindE') apply (wp drop_spec_empty_fail)[1] apply (rule "2.hyps", simp+) done next case 3 show ?case apply (simp | wp drop_spec_empty_fail)+ done next case (4 ptr bits n slot s) show ?case apply (subst rec_del.simps) apply (rule spec_empty_fail_bindE') apply (wp drop_spec_empty_fail)[1] apply (rule spec_empty_fail_bindE) apply (rule "4.hyps", assumption+) apply (wp | simp)+ done qed lemma rec_del_empty_fail[wp]: "empty_fail (rec_del call :: (bool * irq option, 'state_ext) p_monad)" apply (simp add: empty_fail_def) apply (rule allI) apply (rule rec_del_spec_empty_fail[simplified spec_empty_fail_def]) done end locale EmptyFail_AI_cap_revoke = EmptyFail_AI_rec_del state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + assumes cap_delete_empty_fail[wp]: "\<And>cap. empty_fail (cap_delete cap :: (unit, 'state_ext) p_monad)" context EmptyFail_AI_cap_revoke begin lemma cap_revoke_spec_empty_fail: fixes slot and s :: "'state_ext state" shows "spec_empty_fail (cap_revoke slot) s" proof (induct rule: cap_revoke.induct) case (1 slot) show ?case apply (subst cap_revoke.simps) apply (rule spec_empty_fail_bindE', ((wp drop_spec_empty_fail | simp)+)[1])+ apply (simp add: whenE_def | intro conjI impI)+ apply (rule spec_empty_fail_bindE', ((wp drop_spec_empty_fail select_ext_empty_fail | simp)+)[1])+ apply (rule "1.hyps", simp+) apply (wp drop_spec_empty_fail)+ done qed lemma cap_revoke_empty_fail[wp]: "\<And>slot. 
empty_fail (cap_revoke slot :: (unit, 'state_ext) p_monad)" apply (simp add: empty_fail_def) apply (rule allI) apply (rule cap_revoke_spec_empty_fail[simplified spec_empty_fail_def]) done end locale EmptyFail_AI_schedule = EmptyFail_AI_cap_revoke state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + assumes switch_to_idle_thread_empty_fail[wp]: "empty_fail (switch_to_idle_thread :: (unit, 'state_ext) s_monad)" assumes get_thread_state_empty_fail[wp]: "empty_fail (get_thread_state ref :: (thread_state, 'state_ext) s_monad)" assumes guarded_switch_to_empty_fail[wp]: "empty_fail (guarded_switch_to thread :: (unit, 'state_ext) s_monad)" locale EmptyFail_AI_schedule_unit = EmptyFail_AI_schedule "TYPE(unit)" context EmptyFail_AI_schedule_unit begin lemma schedule_empty_fail[wp]: "empty_fail (schedule :: (unit,unit) s_monad)" apply (simp add: schedule_def) apply wp apply (rule disjI2) apply wp done end crunch (empty_fail) empty_fail[wp]: set_scheduler_action, next_domain, reschedule_required (simp: scheduler_action.split) locale EmptyFail_AI_schedule_det = EmptyFail_AI_schedule "TYPE(det_ext)" + assumes choose_thread_empty_fail[wp]: "empty_fail choose_thread" context EmptyFail_AI_schedule_det begin lemma schedule_empty_fail'[wp]: "empty_fail (schedule :: (unit,det_ext) s_monad)" apply (simp add: schedule_def) apply (wp | clarsimp split: scheduler_action.splits| intro impI conjI)+ done end locale EmptyFail_AI_call_kernel = EmptyFail_AI_schedule state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + assumes activate_thread_empty_fail[wp]: "empty_fail (activate_thread :: (unit, 'state_ext) s_monad)" assumes getActiveIRQ_empty_fail[wp]: "empty_fail getActiveIRQ" assumes handle_event_empty_fail[wp]: "\<And>event. empty_fail (handle_event event :: (unit, 'state_ext) p_monad)" assumes handle_interrupt_empty_fail[wp]: "\<And>interrupt. empty_fail (handle_interrupt interrupt :: (unit, 'state_ext) s_monad)" locale EmptyFail_AI_call_kernel_unit = EmptyFail_AI_schedule_unit + EmptyFail_AI_call_kernel "TYPE(unit)" context EmptyFail_AI_call_kernel_unit begin lemma call_kernel_empty_fail': "empty_fail (call_kernel a :: (unit,unit) s_monad)" apply (simp add: call_kernel_def) apply (wp | simp)+ done end locale EmptyFail_AI_call_kernel_det = EmptyFail_AI_schedule_det + EmptyFail_AI_call_kernel "TYPE(det_ext)" context EmptyFail_AI_call_kernel_det begin lemma call_kernel_empty_fail: "empty_fail (call_kernel a :: (unit,det_ext) s_monad)" apply (simp add: call_kernel_def) by (wp|simp)+ end end
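For intuition, the `empty_fail` predicate tracked by these lemmas says that a computation returning no results must have its failure flag set; the library's own `empty_fail_return` (referenced via `lemmas empty_fail_return[wp]` above) is provable directly from the definitions. A minimal sketch, assuming the standard `empty_fail_def` and `return_def` of the nondeterministic state monad used here:

lemma "empty_fail (return x)"
  by (simp add: empty_fail_def return_def)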
function [outputSignal, inputImages] = applyImagesToMovie(inputImages,inputMovie, varargin) % Applies images to a 3D movie matrix in order to get a signal based on a thresholded version of the image. % Biafra Ahanonu % started: 2013.10.11 % inputs % inputImages - [x y signalNo] of images, signals will be calculated for each image from the movie. % inputMovie - [x y frame] or char string path to the movie. % outputs % outputSignal - [signalNo frame] matrix of each signal's activity trace extracted directly from the movie. % inputImages - [x y signalNo], same as input. % changelog % 2014.02.17 [11:37:35] updated to have single inputs, bring notation in line with other programs % 2014.08.11 - obtain traces using linear indexing and reshaping, much faster than using bsxfun since don't have to make a large intermediate matrix. % 2017.01.14 [20:06:04] - support switched from [nSignals x y] to [x y nSignals]. % 2020.04.28 [17:10:37] - Output modified inputImages. % 2020.10.19 [11:27:33] - Supports inputMovie as a character path to the movie. % 2020.10.26 [17:08:16] - Finished updating to allow read from disk and binning. % 2020.10.27 [12:57:53] - Added support for weighted computation of signals based on pixel values in each image. % 2020.12.02 [00:21:28] - Remove parallelization across inputImages to reduce memory overhead and serialization memory issues (e.g. transferring a duplicate of the movie to all workers). % 2021.08.08 [19:30:20] - Updated to handle CIAtah v4.0 switch to all functions inside ciapkg package. % TODO % DONE: Change so that it accepts a movie and images, current implementation is too specific. % DONE: Add ability to use the values of the filter (multiple by indices). import ciapkg.api.* % import CIAtah functions in ciapkg package API. %======================== % Binary: 1 = input images already thresholded, 0 = not thresholded, applyImagesToMovie will threshold. options.alreadyThreshold = 0; % Binary: 1 = show the wait bar. options.waitbarOn = 1; % Float: value between 0 to 1, all values below this fraction of max value for each input image will be set to zero and not used for calculating output signal. options.threshold = 0.5; % Str: hierarchy name in hdf5 where movie data is located options.inputDatasetName = '/1'; % Int vector: list of specific frames to load. options.frameList = []; % Binary: 1 = read movie from HDD, 0 = load into RAM options.readMovieChunks = 0; % Int: Number of frames options.nFramesPerChunk = 5e3; % Binary: 1 = weight the output trace by the value of the individual pixels, 0 = all image pixels above threshold are weighted evenly when calculating activity trace. options.weightSignalByImage = 0; % OBSOLETE Binary: 1 = load the images/movies % inputDir, inputID, fileRegExp, PCAsuffix options.manualLoadSave = 0; % get options options = getOptions(options,varargin); % display(options) % unpack options into current workspace % fn=fieldnames(options); % for i=1:length(fn) % eval([fn{i} '=options.' fn{i} ';']); % end %======================== % Check maximum number of cores available % maxCores = feature('numCores'); % Open works = max core #, probably should do maxCores-1 for % stability... 
% matlabpool('open',maxCores); if ischar(inputMovie)==1 % options.readMovieChunks = 1; inputMoviePath = inputMovie; [movieInfo] = ciapkg.io.getMovieInfo(inputMoviePath,'frameList',options.frameList,'inputDatasetName',options.inputDatasetName); if options.readMovieChunks==0 inputMovie = loadMovieList(inputMovie,'frameList',options.frameList,'inputDatasetName',options.inputDatasetName); end % if options.readMovieChunks==0 % else % end else options.readMovieChunks = 0; end % get number of ICs and frames nImages = size(inputImages,3); nFrames = size(inputMovie,3); % nPts = nFrames; movieDims = size(inputMovie); % matrix multiply to get trace for each time-point reverseStr = ''; if options.alreadyThreshold==0 inputImages = thresholdImages(inputImages,'waitbarOn',1,'threshold',options.threshold); end disp(num2str([nanmin(inputMovie(:)) nanmax(inputMovie(:))])) % Only implement in Matlab 2017a and above if ~verLessThan('matlab', '9.2') D_updateParforProgress = parallel.pool.DataQueue; afterEach(D_updateParforProgress, @nUpdateParforProgress); p_updateParforProgress = 1; N_updateParforProgress = nImages; nInterval_updateParforProgress = round(nImages/30); %25 options_waitbarOn = options.waitbarOn; end if options.readMovieChunks==1 frameBins = 1:options.nFramesPerChunk:movieInfo.three; frameBins(end+1) = movieInfo.three; nBins = (length(frameBins)-1); outputSignalCell = cell([nBins 1]); % Pre-threshold the PCA-ICA filters to save time. inputImagesThres = thresholdImages(inputImages,'waitbarOn',1,'threshold',options.threshold); tmpOpts = options; for binNo = 1:nBins disp('+++++++++++++++++++++++++++++++++++++++++++++++++++++') if binNo==nBins framesToProcess = frameBins(binNo):frameBins(binNo+1); else framesToProcess = frameBins(binNo):(frameBins(binNo+1)-1); end fprintf('ROI signal extract %d to %d frames.\n',framesToProcess(1),framesToProcess(end)); tmpOpts.frameList = framesToProcess; tmpOpts.weightSignalByImage = options.weightSignalByImage; tmpOpts.alreadyThreshold = 1; tmpOpts.readMovieChunks = 0; % Prevent recursion loop. 
[outputSignalCell{binNo}, ~] = applyImagesToMovie(inputImagesThres,inputMoviePath,'options',tmpOpts); end % Combine all the frame chunks into a single [signalNo frames] size matrix outputSignal = cat(2,outputSignalCell{:}); else outputSignal = subfxn_runSignalExtraction(); end function outputSignal = subfxn_runSignalExtraction() % pre-allocate traces outputSignal = zeros(nImages,nFrames); opts_weightSignalByImage = options.weightSignalByImage; % parfor(imageNo = 1:nImages,2) for imageNo = 1:nImages iImage = squeeze(inputImages(:,:,imageNo)); % ======= % get the linear indices, much faster that way % tmpThres = squeeze(inputImagesThres(:,:,i)); tmpThres = iImage; nPts = size(inputMovie,3); movieDims = size(inputMovie); [x, y] = find(tmpThres~=0); nValid = length(x); xrepmat = repmat(x,[1 nPts])'; yrepmat = repmat(y,[1 nPts])'; framerepmat = repmat(1:nPts,[1 length(x)]); linearInd = sub2ind(movieDims, xrepmat(:),yrepmat(:), framerepmat(:)); if isempty(linearInd) disp('empty linearInd!!!') end tmpTrace = inputMovie(linearInd); % tmpTrace tmpTrace = reshape(tmpTrace,[nPts nValid]); if opts_weightSignalByImage==1 tmpWeights = tmpThres(tmpThres~=0); tmpWeights = tmpWeights/nanmax(tmpWeights(:)); tmpTrace = tmpTrace.*tmpWeights(:)'; end % imagesc(tmpTrace); colorbar;pause % size(tmpTrace) tmpTrace = squeeze(nanmean(tmpTrace,2)); % size(tmpTrace) % display(num2str([nanmin(tmpTrace(:)) nanmax(tmpTrace(:))])) % ======= % use bsxfun to matrix multiple 2D image to 3D movie % tmpTrace = nansum(nansum(bsxfun(@times,iImage,inputMovie),1),2); % normalize trace % tmpTrace = tmpTrace/mean(tmpTrace)-1; % ======= outputSignal(imageNo,:) = tmpTrace(:); if ~verLessThan('matlab', '9.2'); send(D_updateParforProgress, imageNo); end % Update progress bar % reverseStr = cmdWaitbar(imageNo,nImages,reverseStr,'inputStr','applying images to movie','displayEvery',5,'waitbarOn',options.waitbarOn); end end function nUpdateParforProgress(~) if ~verLessThan('matlab', '9.2') p_updateParforProgress = p_updateParforProgress + 1; pTmp = p_updateParforProgress; nTmp = N_updateParforProgress; if (mod(pTmp,nInterval_updateParforProgress)==0||pTmp==2||pTmp==nTmp)&&options_waitbarOn==1 if pTmp==nTmp fprintf('%d\n',round(pTmp/nTmp*100)) else fprintf('%d|',round(pTmp/nTmp*100)) end % cmdWaitbar(p,nSignals,'','inputStr','','waitbarOn',1); end % [p mod(p,nInterval)==0 (mod(p,nInterval)==0||p==nSignals)&&options_waitbarOn==1] end end end % normalize traces around zero % outputSignal = normalizeVector(outputSignal,'normRange','zeroCentered'); % if options.manualLoadSave==1 % %For each day, load the downsampled DFOF movie % files = getFileList(inputDir, fileRegExp); % % load movies, automatically concatenate % numMovies = length(files); % for tifMovie=1:numMovies % display(['loading ' num2str(tifMovie) '/' num2str(numMovies) ': ' files{tifMovie}]) % tmpDFOF = load_tif_movie(files{tifMovie},1); % if(tifMovie==1) % DFOF(:,:,:) = tmpDFOF.Movie; % else % DFOF(:,:,end+1:end+length(tmpDFOF.Movie)) = tmpDFOF.Movie; % end % end % filesToLoad={}; % filesToLoad{1} = [inputDir filesep inputID '_ICfilters' PCAsuffix '.mat']; % for i=1:length(filesToLoad) % display(['loading: ' filesToLoad{i}]); % load(filesToLoad{i}) % end % end % if options.manualLoadSave==1 % % save IC traces % savestring = [inputDir filesep inputID '_ICtraces_applied' '.mat']; % display(['saving: ' savestring]) % save(savestring,'IcaTraces'); % end
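The changelog above attributes this function's speed to obtaining traces via linear indexing and reshaping rather than `bsxfun`; here is a minimal standalone sketch of that trick (toy sizes and variable names are hypothetical):

% Toy illustration of the linear-indexing trick used in subfxn_runSignalExtraction.
movieToy = rand(4,4,10);                  % [x y frame]
maskToy = false(4,4); maskToy(2,3) = true; maskToy(3,3) = true;
[x, y] = find(maskToy);                   % pixels belonging to one cell image
nPts = size(movieToy,3);
xr = repmat(x,[1 nPts])'; yr = repmat(y,[1 nPts])';
fr = repmat(1:nPts,[1 length(x)]);
idx = sub2ind(size(movieToy), xr(:), yr(:), fr(:));
% One reshape plus a mean yields the [nPts 1] activity trace for this cell.
traceToy = mean(reshape(movieToy(idx), [nPts length(x)]), 2);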
StringOrInt : (x : Bool) -> Type
StringOrInt False = String
StringOrInt True = Int

valToString : (isInt : Bool) -> StringOrInt isInt -> String
valToString False str = trim str
valToString True n = cast n

valToString' : (isInt : Bool) ->
               (case isInt of
                     False => String
                     True => Int) -> String
valToString' False x = trim x
valToString' True x = cast x
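For example (a hypothetical REPL session; `trim` is the standard whitespace-stripping function assumed above):

-- valToString False " hello "  ==>  "hello"
-- valToString True 42          ==>  "42"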
lemma uniformly_continuous_on_minus[continuous_intros]:
  fixes f :: "'a::metric_space \<Rightarrow> 'b::real_normed_vector"
  shows "uniformly_continuous_on s f \<Longrightarrow> uniformly_continuous_on s (\<lambda>x. - f x)"
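The lemma is stated here without its proof; one plausible (untested) proof unfolds the definition and uses the fact that negation preserves distances, assuming HOL's `dist_minus`:

  unfolding uniformly_continuous_on_def
  by (simp add: dist_minus)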
{-# LANGUAGE FlexibleInstances #-}
module Marvin.Test.Metric where

import Marvin.API.Table.Internal

import Numeric.LinearAlgebra.HMatrix
import Numeric.LinearAlgebra.Data

import qualified Data.Vector as Vec
import qualified Data.Vector.Unboxed as UVec
import Data.Array

class Metric a where
  dist :: a -> a -> Double

instance Metric R where
  dist a b = abs $ a - b

instance Metric (Vector R) where
  dist a b = norm_Inf $ a - b

instance Metric (Matrix R) where
  dist a b = norm_Inf $ a - b

instance Metric (Vec.Vector R) where
  dist a b = Vec.foldl1 max $ Vec.zipWith (\x y -> abs (x - y)) a b

instance Metric (UVec.Vector R) where
  dist a b = UVec.foldl1 max $ UVec.zipWith (\x y -> abs (x - y)) a b

instance Metric NumericColumn where
  dist a b = dist (values a) (values b)

instance Metric NumericTable where
  dist a b = Vec.maximum $ Vec.zipWith dist (columnsVec a) (columnsVec b)

instance Metric [Double] where
  dist a b = maximum $ zipWith dist a b

instance Metric [[Double]] where
  dist a b = maximum $ zipWith dist a b

-- infinite norm
instance (Ix i, Metric a) => Metric (Array i a) where
  dist a b = maximum $ zipWith dist (elems a) (elems b)
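A couple of hypothetical evaluations of the sup-norm metric defined above (GHCi-style, values computed by hand):

-- >>> dist (3.0 :: Double) 1.5
-- 1.5
-- >>> dist [1.0, 2.0] [1.5, 1.0 :: Double]
-- 1.0   (the max of |1.0 - 1.5| and |2.0 - 1.0|)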
```python import numpy as np import sympy as sym import numba import pydae.build as db ``` ```python S_b = 90e3 U_b = 400.0 Z_b = U_b**2/S_b I_b = S_b/(np.sqrt(3)*U_b) Omega_b = 2*np.pi*50 R_s = 0.023/Z_b R_r = 0.024/Z_b X_s = 0.086/Z_b X_r = 0.196/Z_b X_m = 3.7/Z_b X_0 = X_s + X_m X1 = X_s + X_r*X_m/(X_r + X_m) T10 = (X_r + X_m)/(Omega_b*R_r) X1 ``` 0.15307854209445587 ```python S_b = 90e3 U_b = 400.0 Z_b = U_b**2/S_b I_b = S_b/(np.sqrt(3)*U_b) Omega_b = 2*np.pi*50 R_s = 0.023/Z_b R_r = 0.024/Z_b X_s = 0.086/Z_b X_r = 0.196/Z_b X_m = 3.7/Z_b X_0 = X_s + X_m X1 = X_s + X_r*X_m/(X_r + X_m) #X1 = X_s+X_m - X_m**2/(X_r+X_m) X1 ``` 0.15307854209445587 ```python S_b = 90e3 U_b = 400.0 Z_b = U_b**2/S_b I_b = S_b/(np.sqrt(3)*U_b) Omega_b = 2*np.pi*50 R_s = 0.023/Z_b R_r = 0.024/Z_b X_s = 0.086/Z_b X_r = 0.196/Z_b X_m = 3.7/Z_b X_0 = X_s + X_m X1 = X_s + X_r*X_m/(X_r + X_m) X1 = X_s - X_m**2/X_r T10 = (X_r + X_m)/(Omega_b*R_r) params = {'S_b':S_b,'U_b':U_b,'I_b':I_b, 'R_s':R_s,'X_0':X_0,'X1':X1,'T10':T10, # synnchronous machine d-axis parameters 'H_m':3.5,'Omega_b':2*np.pi*50, 'v_0':1,'theta_0':0.0, 'X_l':0.05} u_ini_dict = {'P_h':0.1, 'Q_h':0.0} # for the initialization problem u_run_dict = {'tau_m':0.8,'Q_c':0.0} # for the running problem (here initialization and running problem are the same) x_list = ['omega_r','e1d','e1q'] # [inductor current, PI integrator] y_ini_list = ['i_d','i_q','v_h','theta_h','tau_m','Q_c'] # for the initialization problem y_run_list = ['i_d','i_q','v_h','theta_h','P_h','Q_h'] # for the running problem (here initialization and running problem are the same) sys_vars = {'params':params, 'u_list':u_run_dict, 'x_list':x_list, 'y_list':y_run_list} exec(db.sym_gen_str()) # exec to generate the required symbolic varables and constants ``` ```python v_d = -v_h*sin(theta_h) v_q = v_h*cos(theta_h) tau_e = e1d*i_d + e1q*i_q sigma = 1-omega_r domega_r = 1/(2*H_m)*(tau_m - tau_e) de1d = Omega_b*sigma*e1q - (e1d + (X_0 - X1)*i_q)/T10 de1q =-Omega_b*sigma*e1d - (e1q - (X_0 - X1)*i_d)/T10 g_1 = R_s*i_d - X1*i_q - v_d + e1d g_2 = R_s*i_q + X1*i_d - v_q + e1q g_3 = -P_h - (v_h*v_0*sin(theta_h - theta_0))/X_l g_4 = -Q_c - Q_h + (v_h*v_0*cos(theta_h - theta_0))/X_l - v_h**2/X_l g_5 = -P_h - (v_d*i_d + v_q*i_q) g_6 = -Q_h - (v_q*i_d - v_d*i_q) h_1 = I_b*(i_d*i_d + i_q*i_q)**0.5 sys = {'name':'imib_milano_3rd', 'params':params, 'f':[domega_r,de1d,de1q], 'g':[g_1,g_2,g_3,g_4,g_5,g_6], 'g_ini':[g_1,g_2,g_3,g_4,g_5,g_6], 'x':x_list, 'y_ini':y_ini_list, 'y':y_run_list, 'u_run_dict':u_run_dict, 'u_ini_dict':u_ini_dict, 'h':[h_1]} sys = db.system(sys) db.sys2num(sys) ``` ```python sys['f'] ``` $\displaystyle \left[\begin{matrix}\frac{- e1d i_{d} - e1q i_{q} + \tau_{m}}{2 H_{m}}\\e1q \left(314.159265358979 - 314.159265358979 \omega_{r}\right) - \frac{e1d + 1.97654645790554 i_{q}}{T_{10}}\\e1d \left(314.159265358979 \omega_{r} - 314.159265358979\right) - \frac{e1q - 1.97654645790554 i_{d}}{T_{10}}\end{matrix}\right]$ ```python ```
(** Proof Reflection in Coq ; objects.v ; 050128 ; Dimitri Hendriks; Coq 8.0pl1 *) Require Export indices. Set Implicit Arguments. Section objects. Variable l1 l2 : list nat. Unset Elimination Schemes. (* Default trm_ind is useless *) Inductive trm : Set := | var : nat -> trm | fun_ : forall i : index l1, listn trm (select i) -> trm. (* trm_ind : (P:(trm->Prop)) ((n:nat)(P (var n))) ->((i:(index l1); l:(listn trm (select_expl nat l1 i))) (P (fun l))) ->(t:trm)(P t) *) Definition trms := listn trm. Definition emp := niln trm. Section ip1. Variable P : trm -> Prop. Variable P0 : forall n : nat, trms n -> Prop. Hypothesis h : forall n : nat, P (var n). Hypothesis h0 : forall (i : index l1) (l : trms (select i)), P0 l -> P (fun_ i l). Hypothesis h1 : P0 emp. Hypothesis h2 : forall (n : nat) (t : trm), P t -> forall l : trms n, P0 l -> P0 (consn t l). Fixpoint trm_ind' (t : trm) : P t := match t as x return (P x) with | var n => h n | fun_ i l => h0 i (listn_ind P0 h1 (fun n0 t0 v0 h => h2 (trm_ind' t0) h) l) end. End ip1. Section ip2. Variable P : trm -> Prop. Let P0 (n : nat) (l : trms n) : Prop := forall t : trm, inn t l -> P t. Hypothesis h : forall n : nat, P (var n). Hypothesis h0 : forall (i : index l1) (l : trms (select i)), P0 l -> P (fun_ i l). Let h1 : P0 emp := fun t => False_ind (P t). Let h2 (n : nat) (t : trm) (d : P t) (l : trms n) (d0 : P0 l) : P0 (consn t l) := fun u d1 => or_ind (fun d2 : u = t => eq_ind_r (fun t0 => P t0) d d2) (fun d2 : inn u l => d0 u d2) d1. Definition trm_ind := trm_ind' P P0 h h0 h1 h2. End ip2. Set Elimination Schemes. Inductive frm : Set := | top : frm | bot : frm | rel : forall i : index l2, trms (select i) -> frm | imp : frm -> frm -> frm | cnj : frm -> frm -> frm | dsj : frm -> frm -> frm | uvq : frm -> frm | exq : frm -> frm. Inductive prf : Set := | top_intro : prf | hyp : nat -> prf | bot_elim : prf -> frm -> prf | imp_intro : frm -> prf -> prf | imp_elim : prf -> prf -> prf | cnj_intro : prf -> prf -> prf | cnj_elim1 : prf -> prf | cnj_elim2 : prf -> prf | dsj_intro1 : frm -> prf -> prf | dsj_intro2 : frm -> prf -> prf | dsj_elim : prf -> prf -> prf -> prf | uvq_intro : prf -> prf | uvq_elim : trm -> prf -> prf | exq_intro : frm -> trm -> prf -> prf | exq_elim : prf -> prf -> prf. Section map_trm_sec. Variable g : nat -> nat -> nat. Fixpoint map_trm (n : nat) (t : trm) {struct t} : trm := match t with | var i => var (g n i) | fun_ i l => fun_ i (mapn (map_trm n) l) end. End map_trm_sec. Section map_var_sec. Variable g : nat -> trm -> trm. Fixpoint map_frm_var (n : nat) (p : frm) {struct p} : frm := match p with | top => top | bot => bot | rel i l => rel i (mapn (g n) l) | imp q r => imp (map_frm_var n q) (map_frm_var n r) | cnj q r => cnj (map_frm_var n q) (map_frm_var n r) | dsj q r => dsj (map_frm_var n q) (map_frm_var n r) | uvq q => uvq (map_frm_var (S n) q) | exq q => exq (map_frm_var (S n) q) end. 
Fixpoint map_prf_var (n : nat) (d : prf) {struct d} : prf := match d with | top_intro => top_intro | hyp i => hyp i | bot_elim e p => bot_elim (map_prf_var n e) (map_frm_var n p) | imp_intro p e => imp_intro (map_frm_var n p) (map_prf_var n e) | imp_elim e f => imp_elim (map_prf_var n e) (map_prf_var n f) | cnj_intro e f => cnj_intro (map_prf_var n e) (map_prf_var n f) | cnj_elim1 e => cnj_elim1 (map_prf_var n e) | cnj_elim2 e => cnj_elim2 (map_prf_var n e) | dsj_intro1 p e => dsj_intro1 (map_frm_var n p) (map_prf_var n e) | dsj_intro2 p e => dsj_intro2 (map_frm_var n p) (map_prf_var n e) | dsj_elim e e1 e2 => dsj_elim (map_prf_var n e) (map_prf_var n e1) (map_prf_var n e2) | uvq_intro e => uvq_intro (map_prf_var (S n) e) | uvq_elim t e => uvq_elim (g n t) (map_prf_var n e) | exq_intro p t e => exq_intro (map_frm_var (S n) p) (g n t) (map_prf_var n e) | exq_elim e f => exq_elim (map_prf_var n e) (map_prf_var (S n) f) end. End map_var_sec. Section map_hyp_sec. Variable g : nat -> nat -> nat. Fixpoint map_prf_hyp (n : nat) (d : prf) {struct d} : prf := match d with | top_intro => top_intro | hyp i => hyp (g n i) | bot_elim d p => bot_elim (map_prf_hyp n d) p | imp_intro p d => imp_intro p (map_prf_hyp (S n) d) | imp_elim d e => imp_elim (map_prf_hyp n d) (map_prf_hyp n e) | cnj_intro d e => cnj_intro (map_prf_hyp n d) (map_prf_hyp n e) | cnj_elim1 d => cnj_elim1 (map_prf_hyp n d) | cnj_elim2 d => cnj_elim2 (map_prf_hyp n d) | dsj_intro1 p d => dsj_intro1 p (map_prf_hyp n d) | dsj_intro2 p d => dsj_intro2 p (map_prf_hyp n d) | dsj_elim d e1 e2 => dsj_elim (map_prf_hyp n d) (map_prf_hyp (S n) e1) (map_prf_hyp (S n) e2) | uvq_intro d => uvq_intro (map_prf_hyp n d) | uvq_elim t d => uvq_elim t (map_prf_hyp n d) | exq_intro p t d => exq_intro p t (map_prf_hyp n d) | exq_elim d e => exq_elim (map_prf_hyp n d) (map_prf_hyp (S n) e) end. End map_hyp_sec. Section boolpreds. Fixpoint trm_eqb (t u : trm) {struct u} : bool := match t, u with | var i, var j => nat_eqb i j | fun_ i l, fun_ j w => index_eqb l1 i j && listn_eqb trm_eqb l w | _, _ => false end. Definition trms_eqb := listn_eqb trm_eqb. Fixpoint frm_eqb (p q : frm) {struct q} : bool := match p, q with | top, top => true | bot, bot => true | rel i l, rel j w => index_eqb l2 i j && trms_eqb l w | imp c d, imp e f => frm_eqb c e && frm_eqb d f | cnj c d, cnj e f => frm_eqb c e && frm_eqb d f | dsj c d, dsj e f => frm_eqb c e && frm_eqb d f | uvq c, uvq d => frm_eqb c d | exq c, exq d => frm_eqb c d | _, _ => false end. Fixpoint free_inb_trm (n : nat) (t : trm) {struct t} : bool := match t with | var m => nat_eqb n m | fun_ i l => mapn_orb (free_inb_trm n) l end. Definition free_inb_trms (n k : nat) (l : trms k) := mapn_orb (free_inb_trm n) l. Fixpoint free_inb_frm (n : nat) (p : frm) {struct p} : bool := match p with | top => false | bot => false | rel i l => free_inb_trms n l | imp q r => free_inb_frm n q || free_inb_frm n r | cnj q r => free_inb_frm n q || free_inb_frm n r | dsj q r => free_inb_frm n q || free_inb_frm n r | uvq q => free_inb_frm (S n) q | exq q => free_inb_frm (S n) q end. End boolpreds. End objects. Notation Var := (var _) (only parsing). Notation Top := (top _ _) (only parsing). Notation Bot := (bot _ _) (only parsing). Notation Hyp := (hyp _ _) (only parsing). Notation Top_intro := (top_intro _ _) (only parsing).
%-------------------------------------------------------------------------------------------------------------------------------------------------
\section{Genetic Algorithm}
\label{section:GA}
%-------------------------------------------------------------------------------------------------------------------------------------------------
One of the drawbacks of using a Reinforcement Learning strategy is that the training schema can potentially lead to a localized solution space. Such a solution might not have evaluated all possible strategies. This drawback can be countered by the robustness of methods such as Genetic Algorithms, which carry out a more thorough search. We use such a method to evolve a Neural Network controller managing the bot in a one-unit-versus-one-unit fight. This methodology is generally called \emph{Neuroevolution}.
%-------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Representation}
Our objective for the neural network is to make informed decisions by interpreting the game state. Hence, a representation for the game state needs to be formulated. This representation needs to be minimal, since the number of inputs to the neural network directly determines the size of the chromosome to be evolved. In this approach, we use 8 inputs, 4 for each unit involved in the combat:
\begin{enumerate}
\item \emph{Shots to Death} - This heuristic uses the fact that any unit in the game, at any given time, will be eliminated by a particular opponent unit in a certain number of attack commands. This parameter represents several values at the same time - \emph{Hit-points}, \emph{Armor}, \emph{Opponent's Damage} and \emph{Damage Multiplier} - combined in the equation
\begin{equation}
\label{equation:nShots}
\text{shots}=\left \lceil\frac{\text{HP}}{(\text{damage}\times\text{times}-\text{armor}) \times\text{mul}} \right \rceil \text{,}
\end{equation}
(a worked sketch of this computation appears at the end of this section). This input is normalized over the maximum value of shots left to death when the game starts (maximum hit-points).
\item \emph{Distance} - The distance of the unit from the opponent, normalized over its weapon range.
\item \emph{Cooldown Left} - The number of updates left before the unit can shoot again, normalized by the larger weapon cooldown time of the two units.
\item \emph{Speed} - The unit's top movement speed, again normalized by the maximum speed of the two units.
\end{enumerate}
The neural network provides 3 output values, each corresponding to an action's desirability ({\bf Move Towards Enemy}, {\bf Move Away from Enemy} or {\bf Attack}). The action with the maximum desirability is performed.
%-------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Algorithm Parameters}
\begin{enumerate}
\item \emph{Fitness}: The fitness of a particular chromosome is an integer value representing a Win ($1$) or a Loss ($-1$). To average out inaccuracy in fitness due to randomness, each chromosome is tested $20$ times and the fitness is averaged. An alternate method to judge the fitness of a chromosome is to take the average difference in hit-points of the two units at the end of a match.
\item \emph{Choosing parents}: The natural selection method used in this project is Elite Selection.
In a population of $N$ chromosomes, each reproduction cycle selects $\frac{N}{3}$ parents for breeding. However, the selection method is modified so that the chromosome with the best fitness is always selected (so that the fittest solution found so far is never lost). The rest of the parents are drawn uniformly from the top and bottom portions of the population. This scheme is comparable to the Roulette-Wheel selection methodology, where segments of the total population have certain selection probabilities.
\item \emph{Crossover}: The selected parents reproduce via a simple one-point crossover; the split point is normally distributed.
\item \emph{Mutation}: Each child is mutated with a predefined mutation probability (the standard value used in our experiments is $0.2$). A recommended strategy is to mutate a gene representing a neural-network connection weight by adding a Gaussian random number; this leaves the weight unbounded within any particular range and enhances progression through the search space.
\item \emph{Population Refresh}: All newly produced children, two per parent pair (i.e.\ $\frac{N}{3}$ parents generate $\frac{N}{3}$ children), automatically replace the $\frac{N}{3}$ chromosomes at the bottom of the population (skipping any chromosome that was selected for parenting). The remaining $\frac{N}{3}$ of the population is completely mutated, meaning that each of their genes mutates with probability $1.0$.
\item \emph{Optimization}: Genetic Algorithms need many fitness evaluations, especially here, where a match to test the fitness of a chromosome takes approximately 7 seconds. One performance optimization made for the project is therefore to preserve the fitness value of chromosomes that have not changed; in general, these are the chromosomes that were selected for parenting.
\end{enumerate}
%-------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Results}
%%%
\begin{table}
\caption{Average winning rate}
\begin{center}
% Table generated by Excel2LaTeX from sheet 'Sheet1'
\begin{tabular}{|r|r|r|r|}
\hline
 & {\bf mean} & {\bf stdev} & {\bf p-value} \\ \hline
\emph{8.8.4.3 (ref)} & $0.025$ & $0.099$ & --- \\ \hline
\emph{8.8.3} & $0.019$ & $0.099$ & $0.459$ \\ \hline
\emph{8.1.3} & $0.014$ & $0.104$ & $0.094$ \\ \hline
\end{tabular}
\label{table:winningRate}
\end{center}
\end{table}
% ---------
\begin{enumerate}
\item Neural Network Structure - While designing the agent, we wanted the neural network to process the inputs efficiently and converge on near-optimal output values, but there is no obvious way to relate the input values to the agent's playing strategy. To test which structure of the neural network achieves the most efficient results for the bot, we run T-tests with $3$ different structures of the network:
\begin{itemize}
\item Inputs (8), Hidden Layer (1), Output Layer (3)
\item Inputs (8), Hidden Layer (8), Output Layer (3)
\item Inputs (8), Hidden Layer (8), Hidden Layer (4), Output Layer (3)
\end{itemize}
The agent is trained in each case for $60$ iterations, and the best chromosome in each pool then plays the game to create a sample space for testing.
The results of the T-test carried out on the three samples are presented in Table \ref{table:winningRate}. The samples are compared with respect to the $8,8,4,3$ network. Comparing it with the $8,8,3$ network, the relatively high p-value signifies that the two samples are similar. This means that using the larger network is overkill, as it provides output quality similar to that of the smaller network. This, however, is not the case with the $8,1,3$ network: the low p-value means that the samples are not similar. The decision then falls to the network with the higher success rate (already measured by the average fitness value of the chromosomes).
\begin{figure}[htp]
\centerline{\includegraphics[width=1.0\columnwidth]{fig_GA_AverageFitness}}
\caption{Graph showing the progression of Average Fitness of populations of size $10$, $20$ and $30$, respectively; NN configuration is $8,8,3$}
\label{fig:AverageFitness}
\end{figure}
\item Population Size - We are also interested in the rate of convergence of the Genetic Algorithm, as well as its dependence on the population size. Generally a larger population size is preferred, since the higher number of chromosomes provides a larger search space in which to find a fit individual. But this places considerable overhead on the system, which must process the individuals introduced in each iteration of the larger population. To assess this parameter, we look at Figure \ref{fig:AverageFitness}, which compares the progression of the average fitness of populations of sizes $10$, $20$ and $30$. The population of size $10$ seems too small and converges slowly compared to the other two. The other two populations are very close in average performance, and hence the smaller, more efficient population size is selected.
\begin{figure}[htp]
\centerline{\includegraphics[width=1.0\columnwidth]{fig_GA_Fitness_MinAvgMax}}
\caption{Graph showing the minimum, average and maximum fitness of a population of size $10$; NN configuration is $8,8,3$}
\label{fig:minAvgMaxFitness}
\end{figure}
\item Fitness Spread - In a genetic algorithm, the reproduction and discard rules introduce new individuals that spread the search over a much broader spectrum. This counters the tendency of other computational intelligence algorithms to get stuck in local minima. A way to test the spread of the population is to graph the minimum, average and maximum fitness of a population (size $= 10$), as shown in Figure \ref{fig:minAvgMaxFitness}. A large distance between the minimum and maximum fitness supports the conclusion that the population keeps introducing diverse individuals.
\end{enumerate}
%%%
\begin{figure}[htp]
\centerline{\includegraphics[width=1.0\columnwidth]{fig_GA_Speciation_Top2}}
\caption{Graph of specie difference between the top two chromosomes}
\label{fig:specieTop2}
\end{figure}
\begin{figure}[htp]
\centerline{\includegraphics[width=1.0\columnwidth]{fig_GA_Speciation_next2}}
\caption{Graph of specie difference between $2^{nd}$ and $3^{rd}$ ranked chromosomes}
\label{fig:specieNext2}
\end{figure}
%-------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Discussion}
According to our tests with the network structure and the population size, the best solution for our one-versus-one approach is a network structure of $8,8,3$ and a population size of $20$.
Although the bigger network also evolved well, in terms of performance we clearly have to choose the smaller one. According to Figure \ref{fig:AverageFitness}, a population size of $20$ scores roughly as well as a population size of $30$; taking into account that the smaller population evolves $\frac{1}{3}$ faster, we have to choose a population size of $20$. If we measure the cumulative distance between the genes of the top two candidates in the populations, depicted in Figure \ref{fig:specieTop2}, we can see that larger populations hamper the chances of evolving because the top candidates are preserved more strongly. This can be attributed to the elite selection methodology we employ, which gives the $1^{st}$ and $2^{nd}$ chromosomes an increasingly high probability of being selected for parenting. The pattern becomes much more varied if we calculate the distance between the $2^{nd}$ and $3^{rd}$ chromosomes (Figure \ref{fig:specieNext2}), which reflects the higher chance that chromosomes in those positions are mutated.
%%%
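As promised in the Representation subsection, here is a small worked sketch of the shots-to-death heuristic of Equation~\ref{equation:nShots} (a Python sketch with hypothetical unit values, not code from the bot itself):

\begin{verbatim}
import math

def shots_to_death(hp, damage, times, armor, mul):
    # Equation (nShots): ceil(HP / ((damage*times - armor) * mul))
    return math.ceil(hp / ((damage * times - armor) * mul))

# Hypothetical unit: 80 HP, facing 10 damage x 1 hit, 2 armor, multiplier 1.0
print(shots_to_death(80, 10, 1, 2, 1.0))  # -> 10
\end{verbatim}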
-- Logical consistency of IPL

open import Library

module Consistency (Base : Set) where

import Formulas     ; open module Form = Formulas     Base
import Derivations  ; open module Der  = Derivations  Base
import NfModelMonad ; open module NfM  = NfModelMonad Base

open Normalization caseTreeMonad using (norm)

-- No variable in the empty context.

noVar : ∀{A} → Hyp A ε → ⊥
noVar ()

-- No neutral in the empty context.

noNe : ∀{A} → Ne ε A → ⊥
noNe (hyp ())
noNe (impE t u) = noNe t
noNe (andE₁ t) = noNe t
noNe (andE₂ t) = noNe t

-- No normal proof of False in the empty context.

noNf : Nf ε False → ⊥
noNf (orE t t₁ t₂) = noNe t
noNf (falseE t) = noNe t

-- No proof of False in the empty context.

consistency : ε ⊢ False → ⊥
consistency = noNf ∘ norm
// Copyright (c) 2020 fortiss GmbH
//
// Authors: Julian Bernhard, Klemens Esterle, Patrick Hart and
// Tobias Kessler
//
// This work is licensed under the terms of the MIT license.
// For a copy, see <https://opensource.org/licenses/MIT>.

#ifndef BARK_MODELS_EXECUTION_INTERPOLATION_INTERPOLATE_HPP_
#define BARK_MODELS_EXECUTION_INTERPOLATION_INTERPOLATE_HPP_

#include <Eigen/Core>

#include "bark/models/execution/execution_model.hpp"

namespace bark {
namespace models {
namespace execution {

using bark::commons::ParamsPtr;
using dynamic::State;

class ExecutionModelInterpolate : public ExecutionModel {
 public:
  explicit ExecutionModelInterpolate(const ParamsPtr& params)
      : ExecutionModel(params) {}

  ~ExecutionModelInterpolate() {}

  /**
   * @brief Checks if the world time is within the trajectory
   * @retval boolean: true if contained
   */
  bool CheckIfWorldTimeIsWithinTrajectory(const Trajectory& trajectory,
                                          const double& world_time) const;

  /**
   * @brief Find exact time in trajectory
   * @retval BARK state and whether it was found
   */
  std::pair<State, bool> CheckIfTimeExactIsInTrajectory(
      const Trajectory& trajectory, const double& world_time) const;

  /**
   * @brief Find lower time point in trajectory for world_time
   * @retval Trajectory row-id and whether it was found
   */
  std::pair<int, bool> FindClosestLowerTrajectoryRow(
      const Trajectory& trajectory, const double& world_time) const;

  /**
   * @brief Interpolates between two states
   * @retval State: interpolated state
   */
  State Interpolate(const State& p0, const State& p1,
                    const double& time) const;

  /**
   * @brief Interpolates on trajectory
   */
  virtual void Execute(const double& new_world_time,
                       const dynamic::Trajectory& trajectory,
                       const dynamic::DynamicModelPtr dynamic_model);

  virtual std::shared_ptr<ExecutionModel> Clone() const;
};

inline std::shared_ptr<ExecutionModel> ExecutionModelInterpolate::Clone() const {
  std::shared_ptr<ExecutionModelInterpolate> model_ptr =
      std::make_shared<ExecutionModelInterpolate>(*this);
  return std::dynamic_pointer_cast<ExecutionModel>(model_ptr);
}

}  // namespace execution
}  // namespace models
}  // namespace bark

#endif  // BARK_MODELS_EXECUTION_INTERPOLATION_INTERPOLATE_HPP_
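`Interpolate` is only declared in this header; a plausible linear interpolation between two state rows might look like the sketch below. This is a sketch only, assuming the first entry of a state row holds its time stamp — BARK's actual `State` layout and member implementation may differ:

#include <Eigen/Core>

// Sketch: affine blend of two state rows sampled at times p0(0) and p1(0),
// evaluated at world time t; falls back to p0 when the two stamps coincide.
inline Eigen::VectorXd InterpolateLinearSketch(const Eigen::VectorXd& p0,
                                               const Eigen::VectorXd& p1,
                                               double t) {
  const double t0 = p0(0);
  const double t1 = p1(0);
  const double lambda = (t1 != t0) ? (t - t0) / (t1 - t0) : 0.0;
  return p0 + lambda * (p1 - p0);
}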
# coding=UTF-8
# ex:ts=4:sw=4:et=on

# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.

import numpy as np

from .R0models import R0G1Model, R0G2Model, R0G3Model, R0G4Model, R0G5Model, R0G6Model  # @UnusedImport
from .R1models import R1G2Model, R1G3Model, R1G4Model  # @UnusedImport
from .R2models import R2G2Model, R2G3Model  # @UnusedImport
from .R3models import R3G2Model  # @UnusedImport

# Overview of what is implemented:
#   x = currently implemented
#   np = not possible
#   -/o = not yet implemented
#   o = priority
#
#       G1  G2  G3  G4  G5  G6
#   R0  x   x   x   x   x   x
#   R1  np  x   x   x   -   -
#   R2  np  x   x   -   -   -
#   R3  np  x   -   -   -   -

RGbounds = np.array([
    [1, 1, 1, 1, 1, 1],
    [-1, 1, 1, 1, 0, 0],
    [-1, 1, 1, 0, 0, 0],
    [-1, 1, 0, 0, 0, 0],
])

def get_Gbounds_for_R(R, G):
    global RGbounds
    maxR, maxG = RGbounds.shape
    low, upp = 1, 6
    if R >= 0 and R < maxR:
        bounds = RGbounds[R]
        low, upp = 1 + np.argmax(bounds == 1), maxG - np.argmax(bounds[::-1] == 1)
    else:
        raise ValueError("Cannot yet handle R%d!" % R)
    return (low, upp, max(min(G, upp), low))

def get_Rbounds_for_G(G, R):
    global RGbounds
    maxR, maxG = RGbounds.shape
    low, upp = 0, 0
    if G >= 1 and G <= maxG:
        bounds = RGbounds[:, G - 1]
        low, upp = np.argmax(bounds == 1), maxR - np.argmax(bounds[::-1] == 1) - 1
    else:
        raise ValueError("Cannot yet handle %d layer structures!" % G)
    return (low, upp, max(min(R, upp), low))

def get_correct_probability_model(R, G):
    global RGbounds
    if (RGbounds[R, G - 1] > 0):
        return globals()["R%dG%dModel" % (R, G)]
    else:
        raise ValueError("Cannot (yet) handle R%d for %d layer structures!" % (R, G))
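A minimal usage sketch of the helpers above: the requested pair is clamped into the supported range before the model class is looked up (the values chosen here are illustrative):

```python
# Request an R1, two-layer (G2) model; clamp both parameters first.
R, G = 1, 2
_, _, R = get_Rbounds_for_G(G, R)   # valid R range for G=2 is 0..3
_, _, G = get_Gbounds_for_R(R, G)   # valid G range for R=1 is 2..4
model_class = get_correct_probability_model(R, G)   # -> R1G2Model
```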
% =========================================
% COMMAND: _DEBUG
% =========================================
\newpage
\section{\_DEBUG}
\label{cmd:_DEBUG}

\paragraph{Syntax:}
\subparagraph{}
\texttt{\_DEBUG <string>}

\paragraph{Purpose:}
\subparagraph{}
Prints a text string to \texttt{stderr} for debugging purposes.
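A short usage example follows; the string shown is illustrative, and any text accepted by \texttt{<string>} may be used.

\paragraph{Example:}
\subparagraph{}
\texttt{\_DEBUG entering main loop}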
# ***Introduction to Radar Using Python and MATLAB***
## Andy Harrison - Copyright (C) 2019 Artech House

<br/>

# Reflection and Transmission
***

Referring to Section 2.6, practical radar problems involve waves propagating in bounded regions and interacting with media of differing constitutive parameters. These media may include the targets of interest to the radar system as well as other regions such as rain, buildings, trees, and birds. Therefore, it is beneficial to study the reflection and refraction of electromagnetic waves occurring when a wave traveling in a given medium impinges on another medium with a different set of constitutive parameters.

The reflection and transmission coefficients for perpendicular polarization are given by (Equations 2.102 and 2.103)

\begin{align}
\Gamma_{{\scriptstyle TE}} &= \frac{\eta_2 \cos \theta_i - \eta_1 \cos \theta_t}{\eta_2 \cos \theta_i + \eta_1 \cos \theta_t},\\ \nonumber \\
\mathrm{T}_{{\scriptstyle TE}} &= \frac{2\, \eta_2 \cos \theta_i}{\eta_2 \cos \theta_i + \eta_1 \cos \theta_t}\nonumber
\end{align}

The reflection and transmission coefficients for parallel polarization are given by (Equations 2.114 and 2.115)

\begin{align}
\Gamma_{{\scriptstyle TM}} &= \frac{\eta_2 \cos \theta_t - \eta_1 \cos \theta_i}{\eta_2 \cos \theta_t + \eta_1 \cos \theta_i} \label{eq:parallel_reflection_coefficient},\\ \nonumber \\
\mathrm{T}_{{\scriptstyle TM}} &= \frac{2 \, \eta_2 \cos \theta_i}{\eta_2 \cos \theta_t + \eta_1 \cos \theta_i}.
\end{align}

***

Begin by getting the library path

```python
import lib_path
```

Set the operating frequency (Hz), the relative permittivity, the relative permeability, and the conductivity (S/m) for the different regions using the `array` routine from `numpy`

```python
from numpy import array

frequency = 300e6
relative_permittivity = array([1.3, 2.8])
relative_permeability = array([1.0, 1.0])
conductivity = array([0.01, 0.01])
```

Set up the keyword args

```python
kwargs = {'frequency': frequency,
          'relative_permittivity': relative_permittivity,
          'relative_permeability': relative_permeability,
          'conductivity': conductivity}
```

Calculate the critical angle and Brewster angle using the `plane_waves` routine from `wave_propagation`

```python
from Libs.wave_propagation import plane_waves

critical_angle = plane_waves.critical_angle(**kwargs)
brewster_angle = plane_waves.brewster_angle(**kwargs)

# Display the results
print('Critical Angle {:.1f}'.format(critical_angle))
print('Brewster Angle {:.1f}'.format(brewster_angle))
```

    Critical Angle 81.1+51.0j
    Brewster Angle 54.8+3.0j

Set the incident angles using the `linspace` routine from `numpy`

```python
from numpy import linspace
from scipy.constants import pi

incident_angle = linspace(0., 0.5 * pi, 1000)
```

Set up the keyword args

```python
kwargs = {'frequency': frequency,
          'incident_angle': incident_angle,
          'relative_permittivity': relative_permittivity,
          'relative_permeability': relative_permeability,
          'conductivity': conductivity}
```

Calculate the reflection and transmission coefficients using the `plane_waves` routines from `wave_propagation`

```python
reflection_coefficient_te, transmission_coefficient_te, \
reflection_coefficient_tm, transmission_coefficient_tm = plane_waves.reflection_transmission(**kwargs)
```

Display the reflection and transmission coefficients using the `matplotlib` routines

```python
from matplotlib import pyplot as plt
from numpy import degrees

# Set the figure size
plt.rcParams["figure.figsize"] = (15, 10)

# Set up the axes
fig, axes1 = plt.subplots()
axes2 = axes1.twinx()

# Display the reflection coefficients
axes1.plot(degrees(incident_angle), abs(reflection_coefficient_te), 'b', label='|$\Gamma_{TE}$|')
axes1.plot(degrees(incident_angle), abs(reflection_coefficient_tm), 'b--', label='|$\Gamma_{TM}$|')

# Display the transmission coefficients
axes2.plot(degrees(incident_angle), abs(transmission_coefficient_te), 'r', label='|$T_{TE}$|')
axes2.plot(degrees(incident_angle), abs(transmission_coefficient_tm), 'r--', label='|$T_{TM}$|')

# Set the plot title and labels
axes1.set_title('Plane Wave Reflection and Transmission', size=14)
axes1.set_xlabel('Incident Angle (degrees)', size=12)
axes1.set_ylabel('|Reflection Coefficient|', size=12)
axes2.set_ylabel('|Transmission Coefficient|', size=12)

# Set the tick label size
axes1.tick_params(labelsize=12)
axes2.tick_params(labelsize=12)

# Set the legend
axes1.legend(loc='upper right', prop={'size': 10})
axes2.legend(loc='upper left', prop={'size': 10})

# Turn on the grid
axes1.grid(linestyle=':', linewidth=0.5)
```
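As a quick cross-check of Equations 2.102 through 2.115 quoted above, the coefficients can also be computed directly for the lossless case (conductivity neglected); this standalone sketch is not part of the book's `Libs` package:

```python
from numpy import arcsin, cos, linspace, pi, sin, sqrt

# Lossless intrinsic impedances (normalized) and refractive indices.
eps1, eps2 = 1.3, 2.8
mu1, mu2 = 1.0, 1.0
eta1, eta2 = sqrt(mu1 / eps1), sqrt(mu2 / eps2)
n1, n2 = sqrt(mu1 * eps1), sqrt(mu2 * eps2)

# Snell's law gives the transmission angle for each incident angle.
theta_i = linspace(0.0, 0.5 * pi, 1000)
theta_t = arcsin(n1 / n2 * sin(theta_i))

# Equations 2.102 and 2.103 (perpendicular / TE polarization).
gamma_te = (eta2 * cos(theta_i) - eta1 * cos(theta_t)) / (eta2 * cos(theta_i) + eta1 * cos(theta_t))
tau_te = 2 * eta2 * cos(theta_i) / (eta2 * cos(theta_i) + eta1 * cos(theta_t))

# Equations 2.114 and 2.115 (parallel / TM polarization).
gamma_tm = (eta2 * cos(theta_t) - eta1 * cos(theta_i)) / (eta2 * cos(theta_t) + eta1 * cos(theta_i))
tau_tm = 2 * eta2 * cos(theta_i) / (eta2 * cos(theta_t) + eta1 * cos(theta_i))
```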
Formal statement is: lemma islimpt_eq_infinite_cball: "x islimpt S \<longleftrightarrow> (\<forall>e>0. infinite(S \<inter> cball x e))" Informal statement is: A point $x$ is a limit point of a set $S$ if and only if for every $\epsilon > 0$, the intersection of $S$ with the closed ball of radius $\epsilon$ centered at $x$ is infinite.
lemma linear: "linear f"
import tactic class myclass (X : Type) := (op : (X → X) → X) variables (X Y Z : Type) [myclass X] [myclass Y] [myclass Z] namespace myclass structure hom := (f : X → Y) (map' : ∀ (a : X → X) (b : Y → Y), (∀ x, f (a x) = b (f x)) → f (op a) = op b) structure edge := (R : X → Y → Prop) (h : ∀ (a : X → X) (b : Y → Y), (∀ x y, R x y → R (a x) (b y)) → R (op a) (op b)) def diag : edge X X := { R := eq, h := begin intros a b h, congr, ext, apply h, refl, end } example (e : edge X Y) : myclass (Σ' x y, e.R x y) := ⟨λ a, begin cases e, dsimp at *, end⟩ structure hom2 := (f : X → Y) (f2 : (X → X) → (Y → Y)) (hf2 : ∀ (a : X → X) (b : Y → Y), (∀ x, f (a x) = b (f x)) ↔ f2 a = b) (map : ∀ (a : X → X), f (op a) = op (f2 a)) example {X Y : Type} (f : X → Y) (f2 : (X → X) → (Y → Y)) (hf2 : ∀ (a : X → X) (b : Y → Y), (∀ x, f (a x) = b (f x)) ↔ f2 a = b) : function.bijective f ∨ (∀ x y : Y, x = y) := begin rw [or_iff_not_imp_right], intro h, push_neg at h, rcases h with ⟨y₁, y₂, hy⟩, classical, split, intros x₁ x₂ h, have := hf2 id (equiv.swap y₁ y₂), dsimp at this, end lemma lemma1 {X Y : Type} (f : X → Y) (hf : function.has_right_inverse f) : ∃ f2 : (X → X) → (Y → Y), (∀ (a : X → X) (b : Y → Y), (∀ x, f (a x) = b (f x)) ↔ f2 a = b) := begin cases hf with g hg, refine ⟨λ a, f ∘ a ∘ g, _⟩, intros a b, simp only [function.funext_iff, function.comp_app], split, { assume h y, rw [← hg y, h, hg y, hg y] }, { assume h x, rw [← h], delta function.right_inverse function.left_inverse at hg, } end -- example : false := begin -- have := h (@empty.elim unit) (λ _, id) _ (), -- cases this, -- cases this_w, -- intros,simp, -- end def hom2_to_edge (f : hom2 X Y) : edge X Y := { R := λ x y, f.f x = y, h := λ a b h, begin rw [f.map], end } def comp2 (f : hom2 X Y) (g : hom2 Y Z) : hom2 X Z := { f := λ x, g.f (f.f x), f2 := λ a, g.f2 (f.f2 a), hf2 := λ a c, begin split, { have hg2 := g.hf2, have hf2 := f.hf2, intros h, rw [(hg2 _ _).1], clear hg2, intro y, have := f.hf2 a (λ _, f.f2 a y), simp only [function.funext_iff] at this, rw ← this.2, rw h, }, { intros h x, have := (f.hf2 a _).2 rfl, rw [this, (g.hf2 _ _).2], rw h } end, map := λ a, by rw [f.map, g.map] } instance : has_coe_to_fun (hom X Y) (λ _, X → Y) := { coe := hom.f } def id : hom X X := { f := id, map' := λ a b h, have h : a = b, from funext h, by rw h; refl } variables {X Y Z} lemma hom.map (f : hom X Y) : ∀ (a : X → X) (b : Y → Y), (∀ x, f (a x) = b (f x)) → f (op a) = op b := f.map' def comp (f : hom X Y) (g : hom Y Z) : hom X Z := { f := λ x, g (f x), map' := λ a c h, begin cases f with f hf, cases g with g hg, unfold_coes at *, dsimp at *, end } def to_functor {X : Type} (f : (X → X) → X) : (X → X → Type) → X → Type := λ R x, Σ g : {g : X → X // f g = x}, ∀ a b, g.1 x = b → R a b def to_functor_map {X : Type} (f : (X → X) → X) (R₁ R₂ : (X → X → Type)) (i : Π x y, R₁ x y → R₂ x y) : Π x, to_functor f R₁ x → to_functor f R₂ x := λ x g, ⟨g.1, λ a b h, i _ _ (g.2 _ _ h)⟩ variables (X Y) def hom₃ := Σ f : X → Y, Π (i : Y → Y → Type) (x : X), to_functor op (λ x y : X, i (f x) (f y)) x → to_functor op i (f x) end myclass
The Lesbian Gay Bisexual Transgender Queer Intersex Asexual Resource Center opened on January 31, 1994. Its presence on the UC Davis campus was the result of a recommendation made by the Chancellor's Committee on Gay, Lesbian & Bisexual Issues in 1992. In the Fall of 1993, a group of students, staff and faculty convened to prepare the Center for its grand opening. The center has been located in several different spaces on campus over the years, starting out in one of the Temporary Buildings near Engineering III, moving to University House Rm 105 circa 1996, and then to the University House Annex in 2000. Its last move was to the Student Community Center in the winter of 2012. It's a very important part of the Rainbow Community, especially among UC Davis students. Current Staff (2012/2013) Interim Director: elizabeth coté MailTo(eacote AT ucdavis DOT edu) Interim Assistant Director: Alfredo Del Cid MailTo(adelcid AT ucdavis DOT edu) Interim Office Coordinator: Joanna Villegas MailTo(jovillegas AT ucdavis DOT edu) Community Counselor: Jezzie Zimbardo MailTo(jzimbardo AT ucdavis DOT edu) LGBTRC Interns: Graduate Student Researcher: Elisa Oceguera MailTo(lgbtgsr AT ucdavis DOT edu) Giovanni Lopez MailTo(oglopez AT hotmail DOT com) Hazel Quintanilla MailTo(hazelgq AT ucdavis DOT edu) Mo Merritt MailTo(memerritt AT ucdavis DOT edu) Val Valles MailTo(vavalles AT ucdavis DOT edu) Dog: Pocket Resources The LGBTRC offers many resources for LGBT/Queer students and allies. Resources include an extensive library of over 1100 books, a collection of current magazines and newspapers, a growing DVD and VHS video library of both entertaining and educational films, several varieties of organizational and educational pamphlets, and much more! Did we mention that we now have wireless internet access? Yay! Volunteer positions are always available and there are a couple of computers that volunteers can use. The center works closely with the Gender and Sexuality Commission, the WRRC (Women's Center) and the CCC as well as holding meetings for pertinent organizations such as La Familia, APIQ, The Bivisibility Project, SAME LOVE, BlackOUT and Gender Group. The LGBTRC is responsible for organizing a number of great events throughout the year, most notably, awareness weeks: Pride Week and TransAction Week in the fall and Intersex Awareness Week and Beyond the Binary in winter. Programs Queer Welcome Queer Welcome is the LGBT Resource Center’s first event of the year and takes place on the first day of classes in the fall quarter to kick off the year. Queer Welcome is an opportunity for new and returning students, faculty and staff to get connected to the community and find support and resources. Different student organizations and campus departments set up information booths and hundreds of people come by to enjoy the food and music! Pride Week Pride Week is an annual week of educational, entertaining, and interactive events on LGBTIQ issues, as well as other intersecting identities. During Pride Week, the center hosts events such as Visibility Day, a chance for the queer community to hang out on the quad and participate in a meet and greet; Safe Zone training, a three-hour training designed to raise awareness and discuss ways to make the spaces we live and work in more welcoming and safer for LGBTIQ people. Every year we have a keynote speaker who talks about current issues and intersecting identities within the LGBTIQ community.
TransAction Week TransAction Week is one of the LGBT Resource Center’s annual weeks and is designed to raise awareness on issues impacting transgender people and to celebrate the transgender community. Some events include a keynote speaker; Trans 101, an educational workshop; TransForming Body Image, a program exploring transgender body image; Genderpalooza, a fun event exploring gender; and Trans Safe Zone, a two-hour training to increase awareness and sensitivity to transgender issues. Beyond the Binary Beyond the Binary is an LGBT Resource Center annual awareness week dedicated to educating about and celebrating non-monosexual, and many other intersecting, identities. Events during this week include a social visibility kickoff, keynote speaker, panels and workshops, Safe Zone training, and a Bi Visibility Project closed non-monosexual discussion. Intersex Awareness Week Intersex Awareness Week is one of the four educational weeks the LGBT Resource Center plans annually. Intersex Awareness Week events and programs are held on campus to raise awareness about intersex issues to the UC Davis campus. Lavender Grad This annual event recognizes graduating undergrad and grad students’ contributions to UC Davis and the queer community. Lavender Graduation is an opportunity for students, faculty, staff and community members to celebrate the accomplishments of UC Davis lesbian, gay, bisexual, transgender, intersex, queer and ally (LGBTQIA) graduates. Lavender Graduation also provides a space for graduates to recognize significant people in their lives who have helped them achieve their goals (such as family members, a partner or close friend). LGBTRC Volunteer Program Volunteers help make many of the programs and services offered by the UC Davis Lesbian, Gay, Bisexual, Transgender Resource Center possible. Volunteers assist with various projects including updating resources, compiling program evaluations, advertising events, and supporting existing programs such as TransAction Week and Safe Zone. Queer Leadership Retreat Some of the objectives of the Queer Leadership Retreat include: Empowering LGBTIQ students Building community Providing a safe space for students to develop leadership skills Developing strategies for consciousness-raising within our various communities Setting community goals Educating ourselves and raising awareness about issues w/in the community The retreat will include workshops and team-building activities, as well as academic resources and an alumni panel. The retreat will provide a unique opportunity for students to network, learn, and share their own knowledge and experiences. Crafternoons Crafternoons is a weekly social and crafting event that runs from 2:30 – 5:00 p.m. on Fridays. New craft projects are taught each week, and all supplies are provided. We encourage folks to stop in and check out the event. Crafternoons are a great way to meet people and de-stress at the end of the week! Safe Zone Safe Zone is a three-hour training designed to raise awareness and discuss ways to make the spaces we live and work in more welcoming and safe for LGBTQ people. After completing the training, you have the option of receiving a sign to designate your space as a Safe Zone. Safe Zone is offered twice a quarter. Transgender Safe Zone is a two-hour training created to raise awareness and understanding about gender identity and issues faced by Transgender people. Transgender Safe Zone is offered once a quarter.
LGBTRC Internship Program Serve as a community liaison for the diverse Lesbian, Gay, Bisexual, Transgender, Intersex, Queer, Questioning (LGBTIQQ) community. Work to create an open, safe, inclusive space and community that promotes learning, discovery, and scholarship about gender and sexual identities. Work collaboratively with other community centers (WRRC, SRRC, CCC, etc). Serve as liaison to students, student organizations, faculty, Ethnic Studies Departments, administrators, and staff to maintain communication and flow of information. Work with various campus student interns and leaders (i.e.: Culture Day Interns, ASUCD, Gender and Sexuality Commission, SRRC Interns, PAC’s, MIP Interns, etc.). Serve as a representative to promote the mission and goals of the LGBT Resource Center. Outreach to campus and community groups through tabling, planning programs, conducting training, etc. Assess, address, educate about, and advocate for the issues and needs of underrepresented and underserved communities. Implement and deliver programs on various community issues and topics that support the mission and goals of the LGBT Resource Center and promote retention of LGBT students. Write reports on programs and activities. Work with the career staff, other interns, and volunteers in planning, implementation, publicity, and evaluation of LGBT Resource Center programs. Opportunities to attend national, regional, and statewide conferences/meetings. Asexual Awareness Week The 2011 Asexual Awareness Week was from October 23 to 29. It is held annually in late October as a time of education and awareness around asexual, demisexual and grey-asexual issues and experiences. It is a national event that began in 2011. February 2010 Vandalism February 26, 2010: The LGBT Resource Center was vandalized with derogatory and hateful words that target the Queer community. The Center asked that the vandalism be kept up for a time in order to ensure that this hate crime does not go unnoticed by the campus community and to take this opportunity to educate the campus about struggles that our community continues to face. March 1, 2010: Town hall meeting at 5:30 in the ARC ballroom for the community to come together and express any concerns and to collectively decide what actions should take place in the future. 20041212 23:59:47 isn't it a shame it's stuck in an alley back behind Voorhies? Users/JamesDawe i know. it's almost like the university's closet. Users/BrentLaabs Heh Brent, nice choice of words... :P Users/PaulAmnuaypayoat 20060413 23:46:12 I once walked in, and got the coldest looks and most disapproving looks from the 5 people at the tables. I haven't been back since. Users/ArielSanJose 20090109 20:06:21 I came in to check out the LGBT Center early last quarter and it was the most uncomfortable experience. There was this unwelcoming, judgmental vibe from the other stagnant people in the room. Ugh. Never again. Users/strawberry 20090110 20:33:57 I'd like to add to the above comments. I went here once, no one went out of their way to acknowledge that someone completely new had walked in. They are not very welcoming. I've heard the same thing from a TON of friends. I have no interest in going back, but I think that's what the people there want because it's clearly their exclusive hangout. Something should be done about this. Users/OscarSabino 20090304 11:57:49 Sorry about the negative experiences that some of you have faced.
As a current volunteer for the LGBTRC I will notify the staff about these issues. I will work on making the atmosphere of the center warmer and more inviting. Just to point out, there are a lot of organization meetings that take place at the LGBTRC. You could have by chance walked in during a committee meeting, which does not excuse unfriendly actions, but it may be a factor. Users/ThUn 20090711 16:23:09 I went in a couple weeks ago and talked to a really nice person (Angela?) that worked there. She was very inviting and helpful, so for those of you who had bad experiences, I would recommend giving it another try. Users/Pringlessong 20100301 15:00:19 That vandalism is a real shame. That kind of thing makes me pretty upset. Users/DagonJones Based on the graffiti, it is most likely that the person or people responsible for the vandalism do not go to UC Davis. They mention anger towards the Cal Bears. It is most likely the work of underage vandals who don't know that the UC Davis mascot is not a bear. MaxLucas Sh! People are trying to play the victims here so that students keep paying for diversity-oriented causes on campus! But I agree, looks more like kids being stupid than actual homophobia and intolerance. On a related note, I started seeing more graffiti around campus these days. Correlation with Third and B closing? Users/hankim What do you mean by people are trying to play the victim to have students pay for diversity-oriented causes? Are you saying that we don't need these events and programs on campus? Because without them, the hate crime would've been way worse than a simple spray painting tag. Someone could've actually been hurt, or assaulted, without whatever level of safety we have built up to today. We need to continue our programming that helps to educate the community so that we can prevent acts of hate, and other oppressive acts like this, from reoccurring in the future. Users/ThUn I do not see how having a bunch of events that only people like you go to really keeps people from getting assaulted for the way they are, not that Davis is a place where that is likely to happen. Users/hankim The purpose of the events is that at each event we have, if even one person from the ally community, or non-LGBTQQIA community, comes out and educates her/himself, then that message spreads amongst the community. In turn, this promotes a better understanding of LGBTQQIA issues and struggles. I didn't say that we can make someone less violent, but we can increase visibility and support for our community. Sitting back in silence only causes us to be forgotten, and with that our struggles would be forgotten or meaningless. To say that these events are for people like you is to further reinforce an us vs. them dynamic. And by saying that, you are oppressing our community, making it clear that there is a majority that does not support us. That majority has privilege, something that we do not have. Abusing privilege only further reinforces the oppression and hate that we've seen in recent events at UCSD, UCR, UCI, and here at UCD. Users/hankim So what are the struggles and issues of the LGBT community that need so desperately to be heard? Users/hankim Thong H. Huynh? Granted, that was racially motivated. But yeah, hate-motivated crimes can happen anywhere, including Davis. Users/WilliamLewis I know they can happen anywhere, but from what I have seen of Davis, the demographics of the type of people living here make it an unlikely place for that to happen, which of course does not mean it will not happen.
Anyway, my main point was that the type of people who should probably learn a little more about diversity are not going to go, and only the choir would be preached to. Users/hankim Violent people are violent. While the LGBTRC's programs may promote understanding of LGBT issues, I doubt that they have made anyone less violent. Users/WilliamLewis Guns don't kill people, violent people kill people. Also, I assume the GO BEARS message is actually support for wiki:wikipedia:Bear (gay culture) Bear Community. BL BL, it said FU Cal Bears. ThUn It says FU Cal Bears. Users/hankim I agree with Users/DagonJones, and will add that it's pretty disgusting to see people come on the DavisWiki and try to minimize and even dismiss the impact of this hate crime on the members of our Davis and campus communities who clearly were the intended targets, and on our community as a whole, and to read cynical accusations that public demonstrations of support for them are some subversive attempt to chisel money out of people. Way to kick someone when they're down. Users/DukeMcAdow 20100306 01:25:41 in solidarity with the lgbtrc for increasing people's awareness on lgbtqi issues. in solidarity against homophobia and bigotry. did the center join in solidarity with the strike? and looking at the history of your locations, i was wondering if you'd try to move the center or if you think you'll stay in that location? Users/JessicaRockwell 20100908 11:51:04 I feel very uneasy and uncomfortable when I step in here. Some people say hey, how's it going? and most of the time I don't want to answer and have lame small talk. These hey, how's it going greetings seem like BS small talk. I'm sure if someone's totally new and has never been in this center it would be nice to say that and introduce the center, but for someone who stops by often, I go there for a reason; it may be to talk to a specific person, use the computer, look at the library, or whatever. I just wanna go in there and do what I have to do and leave. Users/anonymon They are probably just trying to be friendly and welcoming. You might try replying, fine, how are you? and keep walking. At least then you've acknowledged them; no one says you have to stop and chat if you don't want to. Users/CovertProfessor Fine, thanks. acknowledges their comment and ends the small talk in a polite way. Then just do what you have to do and leave. 20100909 03:32:53 I guess you're right. I just feel HORRIBLE saying, thanks, I'm fine and moving on. I feel like I should ask back. Users/anonymon I don't think there is anything wrong with it, and if there is, I think it is worse to say nothing. Most of the time, hey, how's it going? really just means hi, anyway. People generally don't really want to know how you are. :) Users/CovertProfessor I volunteered there last quarter and I found the people to be nice. I think one problem is that it is impossible to distinguish the people who work/volunteer there from the people who are just stopping by. Some sort of uniform, or even just a simple yarn-necklace name tag, would be nice. From my experience, everyone I talked to was nice; just remember this is an established community, so in a way you will be sort of an outsider at first.
You may feel a little weird walking in, because it seems like everybody knows everybody else already (and everybody really does know everybody else), but people are accommodating and friendly, though this friendliness may be tempered slightly by work we are trying to accomplish (I wasn't very social as I spent most of my time helping put the listserv together). Any stares you get are probably because we don't really know why you're there, or we're waiting to see if anybody else recognizes you. A lot of the volunteers are new, so when somebody walks in it's hard for us (well, I didn't volunteer this quarter so not really us, but whatever) to know how to welcome you. Some people just want to become part of the local gay community, some people are part of university staff, others are on a scavenger hunt for some class. We (again with the inclusiveness :P) deal with an odd mix of official tasks and just being friendly to people who come in, which sometimes leads to official tasks, so sometimes it takes a moment for us to move from one frame of mind to another. Also, coming later in the day helps; there tend to be more people just hanging out around then, rather than earlier in the morning (when I volunteered). If you've never been part of a gay community, haven't been part of the Davis gay community, or have never been to the LGBTRC before, I recommend telling someone that, so that we can realize you're there to meet people and not to just chill and do school work (as people often do). It's not exactly the greatest place to meet people, but it's definitely a place. Users/AndrewJacobs
The convex hull of two points $a$ and $b$ is the set of all points of the form $u a + v b$ where $u$ and $v$ are non-negative real numbers that sum to $1$.
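In symbols, writing $\operatorname{conv}$ for the convex hull, the statement above reads:

$$\operatorname{conv}\{a, b\} \;=\; \{\, u\,a + v\,b \mid u \ge 0,\; v \ge 0,\; u + v = 1 \,\}.$$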
section {* Standalone Verification Component Based on KAT *} theory VC_KAT_scratch imports Main GCD (*"$ISABELLE_HOME/src/HOL/Eisbach/Eisbach"*) begin subsection {* KAT: Definition and Basic Properties *} text {* Most proofs are fully automated when using SMT solvers. We have added proofs without these tools for the AFP, but SMT proofs can be commented in if wanted. *} notation times (infixl "\<cdot>" 70) class plus_ord = plus + ord + assumes less_eq_def: "x \<le> y \<longleftrightarrow> x + y = y" and less_def: "x < y \<longleftrightarrow> x \<le> y \<and> x \<noteq> y" class dioid = semiring + one + zero + plus_ord + assumes add_idem [simp]: "x + x = x" and mult_onel [simp]: "1 \<cdot> x = x" and mult_oner [simp]: "x \<cdot> 1 = x" and add_zerol [simp]: "0 + x = x" and annil [simp]: "0 \<cdot> x = 0" and annir [simp]: "x \<cdot> 0 = 0" begin subclass monoid_mult by (standard, simp_all) subclass order apply (standard, simp_all add: less_def less_eq_def add_commute) apply auto[1] by (metis add_assoc) lemma mult_isol: "x \<le> y \<Longrightarrow> z \<cdot> x \<le> z \<cdot> y" by (metis distrib_left less_eq_def) lemma mult_isor: "x \<le> y \<Longrightarrow> x \<cdot> z \<le> y \<cdot> z" by (metis distrib_right less_eq_def) lemma add_iso: "x \<le> y \<Longrightarrow> x + z \<le> y + z" by (metis (no_types, lifting) abel_semigroup.commute add.abel_semigroup_axioms add.semigroup_axioms add_idem less_eq_def semigroup.assoc) lemma add_lub: "x + y \<le> z \<longleftrightarrow> x \<le> z \<and> y \<le> z" by (metis add_assoc add_commute less_eq_def order.ordering_axioms ordering.refl) end class kleene_algebra = dioid + fixes star :: "'a \<Rightarrow> 'a" ("_\<^sup>\<star>" [101] 100) assumes star_unfoldl: "1 + x \<cdot> x\<^sup>\<star> \<le> x\<^sup>\<star>" and star_unfoldr: "1 + x\<^sup>\<star> \<cdot> x \<le> x\<^sup>\<star>" and star_inductl: "z + x \<cdot> y \<le> y \<Longrightarrow> x\<^sup>\<star> \<cdot> z \<le> y" and star_inductr: "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x\<^sup>\<star> \<le> y" begin lemma star_sim: "x \<cdot> y \<le> z \<cdot> x \<Longrightarrow> x \<cdot> y\<^sup>\<star> \<le> z\<^sup>\<star> \<cdot> x" (* by (sm add_commute add_assoc add_idem distrib_left distrib_right less_eq_def mult_onel star_inductr star_unfoldr mult_assoc) *) proof - assume "x \<cdot> y \<le> z \<cdot> x" hence "x + z\<^sup>\<star> \<cdot> x \<cdot> y \<le> x + z\<^sup>\<star> \<cdot> z \<cdot> x" by (metis add_lub distrib_left eq_refl less_eq_def mult_assoc) also have "... 
\<le> z\<^sup>\<star> \<cdot> x" using add_lub mult_isor star_unfoldr by fastforce finally show ?thesis by (simp add: star_inductr) qed end class kat = kleene_algebra + fixes at :: "'a \<Rightarrow> 'a" assumes test_one [simp]: "at (at 1) = 1" and test_mult [simp]: "at (at (at (at x) \<cdot> at (at y))) = at (at y) \<cdot> at (at x)" and test_mult_comp [simp]: "at x \<cdot> at (at x) = 0" and test_de_morgan: "at x + at y = at (at (at x) \<cdot> at (at y))" begin definition t_op :: "'a \<Rightarrow> 'a" ("t_" [100] 101) where "t x = at (at x)" lemma t_n [simp]: "t (at x) = at x" by (metis add_idem test_de_morgan test_mult t_op_def) lemma t_comm: "t x \<cdot> t y = t y \<cdot> t x" by (metis add_commute test_de_morgan test_mult t_op_def) lemma t_idem [simp]: "t x \<cdot> t x = t x" by (metis add_idem test_de_morgan test_mult t_op_def) lemma t_mult_closed [simp]: "t (t x \<cdot> t y) = t x \<cdot> t y" using t_comm t_op_def by auto subsection{* Propositional Hoare Logic *} definition H :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where "H p x q \<longleftrightarrow> t p \<cdot> x \<le> x \<cdot> t q" definition if_then_else :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" ("if _ then _ else _ fi" [64,64,64] 63) where "if p then x else y fi = t p \<cdot> x + at p \<cdot> y" definition while :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("while _ do _ od" [64,64] 63) where "while p do x od = (t p \<cdot> x)\<^sup>\<star> \<cdot> at p" definition while_inv :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" ("while _ inv _ do _ od" [64,64,64] 63) where "while p inv i do x od = while p do x od" lemma H_skip: "H p 1 p" by (simp add: H_def) lemma H_cons: "t p \<le> t p' \<Longrightarrow> t q' \<le> t q \<Longrightarrow> H p' x q' \<Longrightarrow> H p x q" by (meson H_def mult_isol mult_isor order.trans) lemma H_seq: "H r y q \<Longrightarrow> H p x r \<Longrightarrow> H p (x \<cdot> y) q" (* by (smt H_def add.semigroup_axioms distrib_left distrib_right less_eq_def mult.semigroup_axioms semigroup.assoc)*) proof - assume h1: "H p x r" and h2: "H r y q" hence h3: "t p \<cdot> x \<le> x \<cdot> t r" and h4: "t r \<cdot> y \<le> y \<cdot> t q" using H_def apply blast using H_def h2 by blast hence "t p \<cdot> x \<cdot> y \<le> x \<cdot> t r \<cdot> y" using mult_isor by blast also have "... \<le> x \<cdot> y \<cdot> t q" by (simp add: h4 mult_isol mult_assoc) finally show ?thesis by (simp add: H_def mult_assoc) qed lemma H_cond: "H (t p \<cdot> t r) x q \<Longrightarrow> H (t p \<cdot> at r) y q \<Longrightarrow> H p (if r then x else y fi) q" (* by (smt H_def abel_semigroup.commute abel_semigroup.left_commute if_then_else_def add.abel_semigroup_axioms distrib_left distrib_right less_eq_def mult.semigroup_axioms t_comm t_idem t_mult_closed t_n semigroup.assoc)*) proof - assume h1: "H (t p \<cdot> t r) x q" and h2: "H (t p \<cdot> at r) y q" hence h3: "t r \<cdot> t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t q" and h4: "at r \<cdot> t p \<cdot> at r \<cdot> y \<le> at r \<cdot> y \<cdot> t q" by (simp add: H_def mult_isol mult_assoc, metis H_def h2 mult_isol mult_assoc t_mult_closed t_n) hence h5: "t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t q" and h6: "t p \<cdot> at r \<cdot> y \<le> at r \<cdot> y \<cdot> t q" by (simp add: mult_assoc t_comm, metis h4 mult_assoc t_comm t_idem t_n) have "t p \<cdot> (t r \<cdot> x + at r \<cdot> y) = t p \<cdot> t r \<cdot> x + t p \<cdot> at r \<cdot> y" by (simp add: distrib_left mult_assoc) also have "... 
\<le> t r \<cdot> x \<cdot> t q + t p \<cdot> at r \<cdot> y" using h5 add_iso by blast also have "... \<le> t r \<cdot> x \<cdot> t q + at r \<cdot> y \<cdot> t q" by (simp add: add_commute h6 add_iso) finally show ?thesis by (simp add: H_def if_then_else_def distrib_right) qed lemma H_loop: "H (t p \<cdot> t r) x p \<Longrightarrow> H p (while r do x od) (t p \<cdot> at r)" (* by (smt while_def H_def distrib_left less_eq_def mult.semigroup_axioms semigroup.assoc star_sim t_comm t_idem t_mult_closed mult_isor test_mult t_n t_op_def)*) proof - assume "H (t p \<cdot> t r) x p" hence "t r \<cdot> t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t p" by (metis H_def distrib_left less_eq_def mult_assoc t_mult_closed) hence "t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t p" by (simp add: mult_assoc t_comm) hence "t p \<cdot> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<le> (t r \<cdot> x)\<^sup>\<star> \<cdot> t p \<cdot> at r" by (metis mult_isor star_sim mult_assoc) hence "t p \<cdot> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<le> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<cdot> t p \<cdot> at r" by (metis mult_assoc t_comm t_idem t_n) thus ?thesis by (metis H_def mult_assoc t_mult_closed t_n while_def) qed lemma H_while_inv: "t p \<le> t i \<Longrightarrow> t i \<cdot> at r \<le> t q \<Longrightarrow> H (t i \<cdot> t r) x i \<Longrightarrow> H p (while r inv i do x od) q" by (metis H_cons H_loop t_mult_closed t_n while_inv_def) end subsection{* Soundness *} notation relcomp (infixl ";" 70) interpretation rel_d: dioid Id "{}" "op \<union>" "op ;" "op \<subseteq>" "op \<subset>" by (standard, auto) lemma (in dioid) power_inductl: "z + x \<cdot> y \<le> y \<Longrightarrow> x ^ i \<cdot> z \<le> y" by (induct i, simp add: add_lub, smt add_lub distrib_left less_eq_def power.power_Suc mult_assoc) lemma (in dioid) power_inductr: "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x ^ i \<le> y" by (induct i, simp add: add_lub, smt add_lub combine_common_factor less_eq_def power_Suc2 mult_assoc) lemma power_is_relpow: "rel_d.power X i = X ^^ i" by (induct i, simp_all add: relpow_commute) lemma rel_star_def: "X^* = (\<Union>i. rel_d.power X i)" by (simp add: power_is_relpow rtrancl_is_UN_relpow) lemma rel_star_contl: "X ; Y^* = (\<Union>i. X ; rel_d.power Y i)" by (simp add: rel_star_def relcomp_UNION_distrib) lemma rel_star_contr: "X^* ; Y = (\<Union>i. (rel_d.power X i) ; Y)" by (simp add: rel_star_def relcomp_UNION_distrib2) definition rel_at :: "'a rel \<Rightarrow> 'a rel" where "rel_at X = Id \<inter> - X" interpretation rel_kat: kat Id "{}" "op \<union>" "op ;" "op \<subseteq>" "op \<subset>" rtrancl rel_at apply standard apply auto[2] by (auto simp: rel_star_contr rel_d.power_inductl rel_star_contl SUP_least rel_d.power_inductr rel_at_def) subsection{* Embedding Predicates in Relations *} type_synonym 'a pred = "'a \<Rightarrow> bool" abbreviation p2r :: "'a pred \<Rightarrow> 'a rel" ("\<lceil>_\<rceil>") where "\<lceil>P\<rceil> \<equiv> {(s,s) |s. P s}" lemma t_p2r [simp]: "rel_kat.t_op \<lceil>P\<rceil> = \<lceil>P\<rceil>" by (auto simp add: rel_kat.t_op_def rel_at_def) lemma p2r_neg_hom [simp]: "rel_at \<lceil>P\<rceil> = \<lceil>\<lambda>s. \<not> P s\<rceil>" by (auto simp: rel_at_def) lemma p2r_conj_hom [simp]: "\<lceil>P\<rceil> \<inter> \<lceil>Q\<rceil> = \<lceil>\<lambda>s. P s \<and> Q s\<rceil>" by auto lemma p2r_conj_hom_var [simp]: "\<lceil>P\<rceil> ; \<lceil>Q\<rceil> = \<lceil>\<lambda>s. 
P s \<and> Q s\<rceil>" by auto lemma p2r_disj_hom [simp]: "\<lceil>P\<rceil> \<union> \<lceil>Q\<rceil> = \<lceil>\<lambda>s. P s \<or> Q s\<rceil>" by auto lemma impl_prop [simp]: "\<lceil>P\<rceil> \<subseteq> \<lceil>Q\<rceil> \<longleftrightarrow> (\<forall>s. P s \<longrightarrow> Q s)" by auto subsection {* Store and Assignment *} type_synonym 'a store = "string \<Rightarrow> 'a" definition gets :: "string \<Rightarrow> ('a store \<Rightarrow> 'a) \<Rightarrow> 'a store rel" ("_ ::= _" [70, 65] 61) where "v ::= e = {(s,s (v := e s)) |s. True}" lemma H_assign: "rel_kat.H \<lceil>\<lambda>s. P (s (v := e s))\<rceil> (v ::= e) \<lceil>P\<rceil>" by (auto simp: gets_def rel_kat.H_def rel_kat.t_op_def rel_at_def) lemma H_assign_var: "(\<forall>s. P s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> rel_kat.H \<lceil>P\<rceil> (v ::= e) \<lceil>Q\<rceil>" by (auto simp: gets_def rel_kat.H_def rel_kat.t_op_def rel_at_def) subsection {* Simplifications *} abbreviation H_sugar :: "'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a pred \<Rightarrow> bool" ("PRE _ _ POST _" [64,64,64] 63) where "PRE P X POST Q \<equiv> rel_kat.H \<lceil>P\<rceil> X \<lceil>Q\<rceil>" abbreviation if_then_else_sugar :: "'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a rel \<Rightarrow> 'a rel" ("IF _ THEN _ ELSE _ FI" [64,64,64] 63) where "IF P THEN X ELSE Y FI \<equiv> rel_kat.if_then_else \<lceil>P\<rceil> X Y" abbreviation while_inv_sugar :: "'a pred \<Rightarrow> 'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a rel" ("WHILE _ INV _ DO _ OD" [64,64,64] 63) where "WHILE P INV I DO X OD \<equiv> rel_kat.while_inv \<lceil>P\<rceil> \<lceil>I\<rceil> X" subsection {* Examples *} lemma euclid: "PRE (\<lambda>s::nat store. s ''x'' = x \<and> s ''y'' = y) (WHILE (\<lambda>s. s ''y'' \<noteq> 0) INV (\<lambda>s. gcd (s ''x'') (s ''y'') = gcd x y) DO (''z'' ::= (\<lambda>s. s ''y'')); (''y'' ::= (\<lambda>s. s ''x'' mod s ''y'')); (''x'' ::= (\<lambda>s. s ''z'')) OD) POST (\<lambda>s. s ''x'' = gcd x y)" apply (rule rel_kat.H_while_inv, simp_all, clarsimp) apply (intro rel_kat.H_seq) apply (subst H_assign, simp)+ apply (rule H_assign_var) using gcd_red_nat by auto section {* Refinement Component Based on KAT *} subsection {* Definition of RKAT *} class rkat = kat + fixes R :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" assumes R1: "H p (R p q) q" and R2: "H p x q \<Longrightarrow> x \<le> R p q" begin subsection {* Refinement Laws *} lemma R_skip: "1 \<le> R p p" by (simp add: H_skip R2) lemma R_cons: "t p \<le> t p' \<Longrightarrow> t q' \<le> t q \<Longrightarrow> R p' q' \<le> R p q" by (simp add: H_cons R2 R1) lemma R_seq: "(R p r) \<cdot> (R r q) \<le> R p q" using H_seq R2 R1 by blast lemma R_cond: "if v then (R (t v \<cdot> t p) q) else (R (at v \<cdot> t p) q) fi \<le> R p q" by (metis H_cond R1 R2 t_comm t_n) lemma R_loop: "while q do (R (t p \<cdot> t q) p) od \<le> R p (t p \<cdot> at q)" by (simp add: H_loop R2 R1) end subsection {* Soundness *} definition rel_R :: "'a rel \<Rightarrow> 'a rel \<Rightarrow> 'a rel" where "rel_R P Q = \<Union>{X. rel_kat.H P X Q}" interpretation rel_rkat: rkat Id "{}" "op \<union>" "op ;" "op \<subseteq>" "op \<subset>" rtrancl rel_at rel_R by (standard, auto simp: rel_R_def rel_kat.H_def rel_kat.t_op_def rel_at_def) subsection {* Assignment Laws *} lemma R_assign: "(\<forall>s. P s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> (v ::= e) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>" by (simp add: H_assign_var rel_rkat.R2) lemma R_assignr: "(\<forall>s. 
Q' s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> (rel_R \<lceil>P\<rceil> \<lceil>Q'\<rceil>) ; (v ::= e) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>" proof - assume a1: "\<forall>s. Q' s \<longrightarrow> Q (s(v := e s))" have "\<forall>p pa cs f. \<exists>fa. (p fa \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>) \<and> (\<not> pa (fa(cs := f fa::'a)) \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>)" using R_assign by blast hence "v ::= e \<subseteq> rel_R \<lceil>Q'\<rceil> \<lceil>Q\<rceil>" using a1 by blast thus ?thesis by (meson dual_order.trans rel_d.mult_isol rel_rkat.R_seq) qed lemma R_assignl: "(\<forall>s. P s \<longrightarrow> P' (s (v := e s))) \<Longrightarrow> (v ::= e) ; (rel_R \<lceil>P'\<rceil> \<lceil>Q\<rceil>) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>" proof - assume a1: "\<forall>s. P s \<longrightarrow> P' (s(v := e s))" have "\<forall>p pa cs f. \<exists>fa. (p fa \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>) \<and> (\<not> pa (fa(cs := f fa::'a)) \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>)" using R_assign by blast then have "v ::= e \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>P'\<rceil>" using a1 by blast then show ?thesis by (meson dual_order.trans rel_d.mult_isor rel_rkat.R_seq) qed subsection {* Example *} lemma var_swap_ref1: "rel_R \<lceil>\<lambda>s. s ''x'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<supseteq> (''z'' ::= (\<lambda>s. s ''x'')); rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>" by (rule R_assignl, auto) lemma var_swap_ref2: "rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<supseteq> (''x'' ::= (\<lambda>s. s ''y'')); rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''x'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>" by (rule R_assignl, auto) lemma var_swap_ref3: "rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''x'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<supseteq> (''y'' ::= (\<lambda>s. s ''z'')); rel_R \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>" by (rule R_assignl, auto) lemma var_swap_ref_var: "rel_R \<lceil>\<lambda>s. s ''x'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<supseteq> (''z'' ::= (\<lambda>s. s ''x'')); (''x'' ::= (\<lambda>s. s ''y'')); (''y'' ::= (\<lambda>s. s ''z''))" using var_swap_ref1 var_swap_ref2 var_swap_ref3 rel_rkat.R_skip by fastforce end
theory Approx_Beta imports DBM_Zone_Semantics Regions_Beta Closure begin chapter \<open>Correctness of \<open>\<beta>\<close>-approximation from \<open>\<alpha>\<close>-regions\<close> text \<open>Instantiating real\<close> instantiation real :: linordered_ab_monoid_add begin definition neutral_real: "\<one> = (0 :: real)" instance by standard (auto simp: neutral_real) end text \<open>Merging the locales for the two types of regions\<close> locale Regions = fixes X and k :: "'c \<Rightarrow> nat" and v :: "'c \<Rightarrow> nat" and n :: nat and not_in_X assumes finite: "finite X" assumes clock_numbering: "clock_numbering' v n" "\<forall>k\<le>n. k > 0 \<longrightarrow> (\<exists>c \<in> X. v c = k)" "\<forall> c \<in> X. v c \<le> n" assumes not_in_X: "not_in_X \<notin> X" assumes non_empty: "X \<noteq> {}" begin definition \<R>_def: "\<R> \<equiv> {Regions.region X I r | I r. Regions.valid_region X k I r}" definition \<R>\<^sub>\<beta>_def: "\<R>\<^sub>\<beta> \<equiv> {Regions_Beta.region X I J r | I J r. Regions_Beta.valid_region X k I J r}" definition V_def: "V \<equiv> {v . \<forall> x \<in> X. v x \<ge> 0}" sublocale alpha_interp: AlphaClosure X k \<R> V by (unfold_locales) (auto simp: finite \<R>_def V_def) sublocale beta_interp: Beta_Regions' X k \<R>\<^sub>\<beta> V v n not_in_X using finite non_empty clock_numbering not_in_X by (unfold_locales) (auto simp: \<R>\<^sub>\<beta>_def V_def) abbreviation "Approx\<^sub>\<beta> \<equiv> beta_interp.Approx\<^sub>\<beta>" section \<open>Preparing Bouyer's Theorem\<close> lemma region_dbm: assumes "R \<in> \<R>" defines "v' \<equiv> \<lambda> i. THE c. c \<in> X \<and> v c = i" obtains M where"[M]\<^bsub>v,n\<^esub> = R" and "\<forall> i \<le> n. \<forall> j \<le> n. M i 0 = \<infinity> \<and> j > 0 \<and> i \<noteq> j\<longrightarrow> M i j = \<infinity> \<and> M j i = \<infinity>" and "\<forall> i \<le> n. M i i = Le 0" and "\<forall> i \<le> n. \<forall> j \<le> n. i > 0 \<and> j > 0 \<and> M i 0 \<noteq> \<infinity> \<and> M j 0 \<noteq> \<infinity> \<longrightarrow> (\<exists> d :: int. (- k (v' j) \<le> d \<and> d \<le> k (v' i) \<and> M i j = Le d \<and> M j i = Le (-d)) \<or> (- k (v' j) \<le> d - 1 \<and> d \<le> k (v' i) \<and> M i j = Lt d \<and> M j i = Lt (-d + 1)))" and "\<forall> i \<le> n. i > 0 \<and> M i 0 \<noteq> \<infinity> \<longrightarrow> (\<exists> d :: int. d \<le> k (v' i) \<and> d \<ge> 0 \<and> (M i 0 = Le d \<and> M 0 i = Le (-d) \<or> M i 0 = Lt d \<and> M 0 i = Lt (-d + 1)))" and "\<forall> i \<le> n. i > 0 \<longrightarrow> (\<exists> d :: int. - k (v' i) \<le> d \<and> d \<le> 0 \<and> (M 0 i = Le d \<or> M 0 i = Lt d))" and "\<forall> i. \<forall> j. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>" and "\<forall> i \<le> n. \<forall> j \<le> n. M i j \<noteq> \<infinity> \<and> i > 0 \<and> j > 0 \<longrightarrow> (\<exists> d:: int. (M i j = Le d \<or> M i j = Lt d) \<and> (- k (v' j)) \<le> d \<and> d \<le> k (v' i))" proof - from assms obtain I r where R: "R = region X I r" "valid_region X k I r" unfolding \<R>_def by blast let ?X\<^sub>0 = "{x \<in> X. \<exists>d. 
I x = Regions.intv.Intv d}" define f where "f x = (if isIntv (I x) then Lt (intv_const (I x) + 1) else if isConst (I x) then Le (intv_const (I x)) else \<infinity>)" for x define g where "g x = (if isIntv (I x) then Lt (- intv_const (I x)) else if isConst (I x) then Le (- intv_const (I x)) else Lt (- k x))" for x define h where "h x y = (if isIntv (I x) \<and> isIntv (I y) then if (y, x) \<in> r \<and> (x, y) \<notin> r then Lt (int (intv_const (I x)) - intv_const (I y) + 1) else if (x, y) \<in> r \<and> (y, x) \<notin> r then Lt (int (intv_const (I x)) - intv_const (I y)) else Le (int (intv_const (I x)) - intv_const (I y)) else if isConst (I x) \<and> isConst (I y) then Le (int (intv_const (I x)) - intv_const (I y)) else if isIntv (I x) \<and> isConst (I y) then Lt (int (intv_const (I x)) + 1 - intv_const (I y)) else if isConst (I x) \<and> isIntv (I y) then Lt (int (intv_const (I x)) - intv_const (I y)) else \<infinity>)" for x y let ?M = "\<lambda> i j. if i = 0 then if j = 0 then Le 0 else g (v' j) else if j = 0 then f (v' i) else if i = j then Le 0 else h (v' i) (v' j)" have "[?M]\<^bsub>v,n\<^esub> \<subseteq> R" proof fix u assume u: "u \<in> [?M]\<^bsub>v,n\<^esub>" show "u \<in> R" unfolding R proof (standard, goal_cases) case 1 show ?case proof fix c assume c: "c \<in> X" with clock_numbering have c2: "v c \<le> n" "v c > 0" "v' (v c) = c" unfolding v'_def by auto with u have "dbm_entry_val u None (Some c) (g c)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto then show "0 \<le> u c" by (cases "isIntv (I c)"; cases "isConst (I c)") (auto simp: g_def) qed next case 2 show ?case proof fix c assume c: "c \<in> X" with clock_numbering have c2: "v c \<le> n" "v c > 0" "v' (v c) = c" unfolding v'_def by auto with u have *: "dbm_entry_val u None (Some c) (g c)" "dbm_entry_val u (Some c) None (f c)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto show "intv_elem c u (I c)" proof (cases "I c") case (Const d) then have "\<not> isIntv (I c)" "isConst (I c)" by auto with * Const show ?thesis unfolding g_def f_def using Const by auto next case (Intv d) then have "isIntv (I c)" "\<not> isConst (I c)" by auto with * Intv show ?thesis unfolding g_def f_def by auto next case (Greater d) then have "\<not> isIntv (I c)" "\<not> isConst (I c)" by auto with * Greater R(2) c show ?thesis unfolding g_def f_def by fastforce qed qed next show "?X\<^sub>0 = ?X\<^sub>0" .. show "\<forall>x \<in> ?X\<^sub>0. \<forall> y \<in> ?X\<^sub>0. 
(x, y) \<in> r \<longleftrightarrow> frac (u x) \<le> frac (u y)" proof (standard, standard) fix x y assume A: "x \<in> ?X\<^sub>0" "y \<in> ?X\<^sub>0" show "(x, y) \<in> r \<longleftrightarrow> frac (u x) \<le> frac (u y)" proof (cases "x = y") case True have "refl_on ?X\<^sub>0 r" using R(2) by auto with A True show ?thesis unfolding refl_on_def by auto next case False from A obtain d d' where AA: "I x = Intv d" "I y = Intv d'" "isIntv (I x)" "isIntv (I y)" "\<not> isConst (I x)" "\<not> isConst (I y)" by auto from A False clock_numbering have B: "v x \<le> n" "v x > 0" "v' (v x) = x" "v y \<le> n" "v y > 0" "v' (v y) = y" "v x \<noteq> v y" unfolding v'_def by auto with u have *: "dbm_entry_val u (Some x) (Some y) (h x y)" "dbm_entry_val u (Some y) (Some x) (h y x)" "dbm_entry_val u None (Some x) (g x)" "dbm_entry_val u (Some x) None (f x)" "dbm_entry_val u None (Some y) (g y)" "dbm_entry_val u (Some y) None (f y)" unfolding DBM_zone_repr_def DBM_val_bounded_def by force+ show "(x, y) \<in> r \<longleftrightarrow> frac (u x) \<le> frac (u y)" proof assume C: "(x, y) \<in> r" show "frac (u x) \<le> frac (u y)" proof (cases "(y, x) \<in> r") case False with * AA C have **: "u x - u y < int d - d'" "d < u x" "u x < d + 1" "d' < u y" "u y < d' + 1" unfolding f_def g_def h_def by auto from nat_intv_frac_decomp[OF **(2,3)] nat_intv_frac_decomp[OF **(4,5)] **(1) show "frac (u x) \<le> frac (u y)" by simp next case True with * AA C have **: "u x - u y \<le> int d - d'" "d < u x" "u x < d + 1" "d' < u y" "u y < d' + 1" unfolding f_def g_def h_def by auto from nat_intv_frac_decomp[OF **(2,3)] nat_intv_frac_decomp[OF **(4,5)] **(1) show "frac (u x) \<le> frac (u y)" by simp qed next assume "frac (u x) \<le> frac (u y)" show "(x, y) \<in> r" proof (rule ccontr) assume C: "(x,y) \<notin> r" moreover from R(2) have "total_on ?X\<^sub>0 r" by auto ultimately have "(y, x) \<in> r" using False A unfolding total_on_def by auto with *(2-) AA C have **: "u y - u x < int d' - d" "d < u x" "u x < d + 1" "d' < u y" "u y < d' + 1" unfolding f_def g_def h_def by auto from nat_intv_frac_decomp[OF **(2,3)] nat_intv_frac_decomp[OF **(4,5)] **(1) have "frac (u y) < frac (u x)" by simp with \<open>frac _ \<le> _\<close> show False by auto qed qed qed qed qed qed moreover have "R \<subseteq> [?M]\<^bsub>v,n\<^esub>" proof fix u assume u: "u \<in> R" show "u \<in> [?M]\<^bsub>v,n\<^esub>" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (safe, goal_cases) case 1 then show ?case by auto next case (2 c) with clock_numbering have "c \<in> X" by metis with clock_numbering have *: "c \<in> X" "v c > 0" "v' (v c) = c" unfolding v'_def by auto with R u have "intv_elem c u (I c)" "valid_intv (k c) (I c)" by auto then have "dbm_entry_val u None (Some c) (g c)" unfolding g_def by (cases "I c") auto with * show ?case by auto next case (3 c) with clock_numbering have "c \<in> X" by metis with clock_numbering have *: "c \<in> X" "v c > 0" "v' (v c) = c" unfolding v'_def by auto with R u have "intv_elem c u (I c)" "valid_intv (k c) (I c)" by auto then have "dbm_entry_val u (Some c) None (f c)" unfolding f_def by (cases "I c") auto with * show ?case by auto next case (4 c1 c2) with clock_numbering have "c1 \<in> X" "c2 \<in> X" by metis+ with clock_numbering have *: "c1 \<in> X" "v c1 > 0" "v' (v c1) = c1" "c2 \<in> X" "v c2 > 0" "v' (v c2) = c2" unfolding v'_def by auto with R u have "intv_elem c1 u (I c1)" "valid_intv (k c1) (I c1)" "intv_elem c2 u (I c2)" "valid_intv (k c2) (I c2)" by auto then have "dbm_entry_val u (Some 
c1) (Some c2) (h c1 c2)" unfolding h_def proof(cases "I c1", cases "I c2", fastforce+, cases "I c2", fastforce, goal_cases) case (1 d d') then show ?case proof (cases "(c2, c1) \<in> r", goal_cases) case 1 show ?case proof (cases "(c1, c2) \<in> r") case True with 1 *(1,4) R(1) u have "frac (u c1) = frac (u c2)" by auto with 1 have "u c1 - u c2 = real d - d'" by (fastforce dest: nat_intv_frac_decomp) with 1 show ?thesis by auto next case False with 1 show ?thesis by auto qed next case 2 show ?case proof (cases "c1 = c2") case True then show ?thesis by auto next case False with 2 R(2) *(1,4) have "(c1, c2) \<in> r" by (fastforce simp: total_on_def) with 2 *(1,4) R(1) u have "frac (u c1) < frac (u c2)" by auto with 2 have "u c1 - u c2 < real d - d'" by (fastforce dest: nat_intv_frac_decomp) with 2 show ?thesis by auto qed qed qed fastforce+ then show ?case proof (cases "v c1 = v c2", goal_cases) case True with * clock_numbering have "c1 = c2" by auto then show ?thesis by auto next case 2 with * show ?case by auto qed qed qed ultimately have "[?M]\<^bsub>v,n\<^esub> = R" by blast moreover have "\<forall> i \<le> n. \<forall> j \<le> n. ?M i 0 = \<infinity> \<and> j > 0 \<and> i \<noteq> j \<longrightarrow> ?M i j = \<infinity> \<and> ?M j i = \<infinity>" unfolding f_def h_def by auto moreover have "\<forall> i \<le> n. ?M i i = Le 0" by auto moreover { fix i j assume A: "i \<le> n" "j \<le> n" "i > 0" "j > 0" "?M i 0 \<noteq> \<infinity>" "?M j 0 \<noteq> \<infinity>" with clock_numbering(2) obtain c1 c2 where B: "v c1 = i" "v c2 = j" "c1 \<in> X" "c2 \<in> X" by meson with clock_numbering(1) A have C: "v' i = c1" "v' j = c2" unfolding v'_def by force+ from R(2) B have valid: "valid_intv (k c1) (I c1)" "valid_intv (k c2) (I c2)" by auto have "\<exists> d :: int. 
(- k (v' j) \<le> d \<and> d \<le> k (v' i) \<and> ?M i j = Le d \<and> ?M j i = Le (-d) \<or> (- k (v' j) \<le> d - 1 \<and> d \<le> k (v' i) \<and> ?M i j = Lt d \<and> ?M j i = Lt (-d + 1)))" proof (cases "i = j") case True then show ?thesis by auto next case False then show ?thesis proof (cases "I c1", goal_cases) case 1 then show ?case proof (cases "I c2") case Const let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from Const 1 have "isConst (I c1)" "isConst (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Intv let ?d = "int(intv_const (I c1)) - int (intv_const (I c2))" from Intv 1 have "isConst (I c1)" "isIntv (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Greater then have "\<not> isIntv (I c2)" "\<not> isConst (I c2)" by auto with A 1(1) C have False unfolding f_def by simp then show ?thesis by fast qed next case 2 then show ?case proof (cases "I c2") case Const let ?d = "int (intv_const (I c1)) + 1 - int (intv_const (I c2))" from Const 2 have "isIntv (I c1)" "isConst (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Intv with 2 have *: "isIntv (I c1)" "isIntv (I c2)" by auto from Intv A(1-4) C show ?thesis apply simp proof (standard, goal_cases) case 1 show ?case proof (cases "(c2, c1) \<in> r") case True note T = this show ?thesis proof (cases "(c1, c2) \<in> r") case True let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from True T * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case False let ?d = "int (intv_const (I c1)) - int (intv_const (I c2)) + 1" from False T * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto qed next case False let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from False * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto qed qed next case Greater then have "\<not> isIntv (I c2)" "\<not> isConst (I c2)" by auto with A 2(1) C have False unfolding f_def by simp then show ?thesis by fast qed next case 3 then have "\<not> isIntv (I c1)" "\<not> isConst (I c1)" by auto with A 3(1) C have False unfolding f_def by simp then show ?thesis by fast qed qed } moreover { fix i assume A: "i \<le> n" "i > 0" "?M i 0 \<noteq> \<infinity>" with clock_numbering(2) obtain c1 where B: "v c1 = i" "c1 \<in> X" by meson with clock_numbering(1) A have C: "v' i = c1" unfolding v'_def by force+ from R(2) B have valid: "valid_intv (k c1) (I c1)" by auto have "\<exists> d :: int. 
d \<le> k (v' i) \<and> d \<ge> 0 \<and> (?M i 0 = Le d \<and> ?M 0 i = Le (-d) \<or> ?M i 0 = Lt d \<and> ?M 0 i = Lt (-d + 1))" proof (cases "i = 0") case True then show ?thesis by auto next case False then show ?thesis proof (cases "I c1", goal_cases) case 1 let ?d = "int (intv_const (I c1))" from 1 have "isConst (I c1)" "\<not> isIntv (I c1)" by auto with A C valid show ?thesis unfolding f_def g_def by (intro exI[where x = ?d]) auto next case 2 let ?d = "int (intv_const (I c1)) + 1" from 2 have "isIntv(I c1)" "\<not> isConst (I c1)" by auto with A C valid show ?thesis unfolding f_def g_def by (intro exI[where x = ?d]) auto next case 3 then have "\<not> isIntv (I c1)" "\<not> isConst (I c1)" by auto with A 3(1) C have False unfolding f_def by simp then show ?thesis by fast qed qed } moreover { fix i assume A: "i \<le> n" "i > 0" with clock_numbering(2) obtain c1 where B: "v c1 = i" "c1 \<in> X" by meson with clock_numbering(1) A have C: "v' i = c1" unfolding v'_def by force+ from R(2) B have valid: "valid_intv (k c1) (I c1)" by auto have "\<exists> d :: int. - k (v' i) \<le> d \<and> d \<le> 0 \<and> (?M 0 i = Le d \<or> ?M 0 i = Lt d)" proof (cases "i = 0") case True then show ?thesis by auto next case False then show ?thesis proof (cases "I c1", goal_cases) case 1 let ?d = "- int (intv_const (I c1))" from 1 have "isConst (I c1)" "\<not> isIntv (I c1)" by auto with A C valid show ?thesis unfolding f_def g_def by (intro exI[where x = ?d]) auto next case 2 let ?d = "- int (intv_const (I c1))" from 2 have "isIntv(I c1)" "\<not> isConst (I c1)" by auto with A C valid show ?thesis unfolding f_def g_def by (intro exI[where x = ?d]) auto next case 3 let ?d = "- (k c1)" from 3 have "\<not> isIntv (I c1)" "\<not> isConst (I c1)" by auto with A C show ?thesis unfolding g_def by (intro exI[where x = ?d]) auto qed qed } moreover have "\<forall> i. \<forall> j. ?M i j \<noteq> \<infinity> \<longrightarrow> get_const (?M i j) \<in> \<int>" unfolding f_def g_def h_def by auto moreover have "\<forall> i \<le> n. \<forall> j \<le> n. i > 0 \<and> j > 0 \<and> ?M i j \<noteq> \<infinity> \<longrightarrow> (\<exists> d:: int. 
(?M i j = Le d \<or> ?M i j = Lt d) \<and> (- k (v' j)) \<le> d \<and> d \<le> k (v' i))" proof (auto, goal_cases) case A: (1 i j) with clock_numbering(2) obtain c1 c2 where B: "v c1 = i" "c1 \<in> X" "v c2 = j" "c2 \<in> X" by meson with clock_numbering(1) A have C: "v' i = c1" "v' j = c2" unfolding v'_def by force+ from R(2) B have valid: "valid_intv (k c1) (I c1)" "valid_intv (k c2) (I c2)" by auto with A B C show ?case proof (simp, goal_cases) case 1 show ?case proof (cases "I c1", goal_cases) case 1 then show ?case proof (cases "I c2") case Const let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from Const 1 have "isConst (I c1)" "isConst (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Intv let ?d = "int(intv_const (I c1)) - int (intv_const (I c2))" from Intv 1 have "isConst (I c1)" "isIntv (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Greater then have "\<not> isIntv (I c2)" "\<not> isConst (I c2)" by auto with A 1(1) C show ?thesis unfolding h_def by simp qed next case 2 then show ?case proof (cases "I c2") case Const let ?d = "int (intv_const (I c1)) + 1 - int (intv_const (I c2))" from Const 2 have "isIntv (I c1)" "isConst (I c2)" by auto with A(1-4) C valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case Intv with 2 have *: "isIntv (I c1)" "isIntv (I c2)" by auto from Intv A(1-4) C show ?thesis proof goal_cases case 1 show ?case proof (cases "(c2, c1) \<in> r") case True note T = this show ?thesis proof (cases "(c1, c2) \<in> r") case True let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from True T * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto next case False let ?d = "int (intv_const (I c1)) - int (intv_const (I c2)) + 1" from False T * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto qed next case False let ?d = "int (intv_const (I c1)) - int (intv_const (I c2))" from False * valid show ?thesis unfolding h_def by (intro exI[where x = ?d]) auto qed qed next case Greater then have "\<not> isIntv (I c2)" "\<not> isConst (I c2)" by auto with A 2(1) C show ?thesis unfolding h_def by simp qed next case 3 then have "\<not> isIntv (I c1)" "\<not> isConst (I c1)" by auto with A 3(1) C show ?thesis unfolding h_def by simp qed qed qed moreover show ?thesis apply (rule that) apply (rule calculation(1)) apply (rule calculation(2)) apply (rule calculation(3)) apply (blast intro: calculation)+ apply (rule calculation(7)) using calculation(8) apply blast done qed lemma len_inf_elem: "(a, b) \<in> set (arcs i j xs) \<Longrightarrow> M a b = \<infinity> \<Longrightarrow> len M i j xs = \<infinity>" apply (induction rule: arcs.induct) apply (auto simp: mult) apply (rename_tac a' b' x xs) apply (case_tac "M a' x") by auto lemma dbm_add_strict_right_mono_neutral: "a < Le d \<Longrightarrow> a + Le (-d) < Le 0" unfolding less mult by (cases a) (auto elim!: dbm_lt.cases) lemma dbm_lt_not_inf_less[intro]: "A \<noteq> \<infinity> \<Longrightarrow> A \<prec> \<infinity>" by (cases A) auto lemma add_inf[simp]: "a + \<infinity> = \<infinity>" "\<infinity> + a = \<infinity>" unfolding mult by (cases a) auto lemma inf_lt[simp,dest!]: "\<infinity> < x \<Longrightarrow> False" by (cases x) (auto simp: less) lemma zone_diag_lt: assumes "a \<le> n" "b \<le> n" and C: "v c1 = a" "v c2 = b" and not0: "a > 0" "b > 0" shows "[(\<lambda> i j. 
if i = a \<and> j = b then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c1 - u c2 < d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (standard, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> \<open>b \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case 2 with not0 show ?case by auto next case 3 with not0 show ?case by auto next case (4 u' y z) show ?case proof (cases "v y = a \<and> v z = b") case True with 4 clock_numbering C \<open>a \<le> n\<close> \<open>b \<le> n\<close> have "u' y - u' z < d" by metis with True show ?thesis by auto next case False then show ?thesis by auto qed qed qed lemma zone_diag_le: assumes "a \<le> n" "b \<le> n" and C: "v c1 = a" "v c2 = b" and not0: "a > 0" "b > 0" shows "[(\<lambda> i j. if i = a \<and> j = b then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c1 - u c2 \<le> d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (rule, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> \<open>b \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case 2 with not0 show ?case by auto next case 3 with not0 show ?case by auto next case (4 u' y z) show ?case proof (cases "v y = a \<and> v z = b") case True with 4 clock_numbering C \<open>a \<le> n\<close> \<open>b \<le> n\<close> have "u' y - u' z \<le> d" by metis with True show ?thesis by auto next case False then show ?thesis by auto qed qed qed lemma zone_diag_lt_2: assumes "a \<le> n" and C: "v c = a" and not0: "a > 0" shows "[(\<lambda> i j. if i = a \<and> j = 0 then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c < d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (rule, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case 2 with not0 show ?case by auto next case (3 u c) show ?case proof (cases "v c = a") case False then show ?thesis by auto next case True with 3 clock_numbering C \<open>a \<le> n\<close> have "u c < d" by metis with C show ?thesis by auto qed next case (4 u' y z) from clock_numbering(1) have "0 < v z" by auto then show ?case by auto qed qed lemma zone_diag_le_2: assumes "a \<le> n" and C: "v c = a" and not0: "a > 0" shows "[(\<lambda> i j. if i = a \<and> j = 0 then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c \<le> d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (rule, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case 2 with not0 show ?case by auto next case (3 u c) show ?case proof (cases "v c = a") case False then show ?thesis by auto next case True with 3 clock_numbering C \<open>a \<le> n\<close> have "u c \<le> d" by metis with C show ?thesis by auto qed next case (4 u' y z) from clock_numbering(1) have "0 < v z" by auto then show ?case by auto qed qed lemma zone_diag_lt_3: assumes "a \<le> n" and C: "v c = a" and not0: "a > 0" shows "[(\<lambda> i j. if i = 0 \<and> j = a then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. 
- u c < d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (rule, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case (2 u c) show ?case proof (cases "v c = a", goal_cases) case False then show ?thesis by auto next case True with 2 clock_numbering C \<open>a \<le> n\<close> have "- u c < d" by metis with C show ?thesis by auto qed next case (3 u) with not0 show ?case by auto next case (4 u' y z) from clock_numbering(1) have "0 < v y" by auto then show ?case by auto qed qed lemma len_int_closed: "\<forall> i j. (M i j :: real) \<in> \<int> \<Longrightarrow> len M i j xs \<in> \<int>" by (induction xs arbitrary: i) auto lemma get_const_distr: "a \<noteq> \<infinity> \<Longrightarrow> b \<noteq> \<infinity> \<Longrightarrow> get_const (a + b) = get_const a + get_const b" by (cases a) (cases b, auto simp: mult)+ lemma len_int_dbm_closed: "\<forall> (i, j) \<in> set (arcs i j xs). (get_const (M i j) :: real) \<in> \<int> \<and> M i j \<noteq> \<infinity> \<Longrightarrow> get_const (len M i j xs) \<in> \<int> \<and> len M i j xs \<noteq> \<infinity>" by (induction xs arbitrary: i) (auto simp: get_const_distr, simp add: dbm_add_not_inf mult) lemma zone_diag_le_3: assumes "a \<le> n" and C: "v c = a" and not0: "a > 0" shows "[(\<lambda> i j. if i = 0 \<and> j = a then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. - u c \<le> d}" unfolding DBM_zone_repr_def DBM_val_bounded_def proof (rule, goal_cases) case 1 then show ?case using \<open>a \<le> n\<close> C by fastforce next case 2 then show ?case proof (safe, goal_cases) case 1 from not0 show ?case unfolding dbm_le_def by auto next case (2 u c) show ?case proof (cases "v c = a") case False then show ?thesis by auto next case True with 2 clock_numbering C \<open>a \<le> n\<close> have "- u c \<le> d" by metis with C show ?thesis by auto qed next case (3 u) with not0 show ?case by auto next case (4 u' y z) from clock_numbering(1) have "0 < v y" by auto then show ?case by auto qed qed lemma dbm_lt': assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M a b \<le> Lt d" "a \<le> n" "b \<le> n" "v c1 = a" "v c2 = b" "a > 0" "b > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 < d}" proof - from assms have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = a \<and> j = b then Lt d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_lt[OF \<open>a \<le> n\<close> \<open>b \<le> n\<close> assms(5-)] have "[(\<lambda> i j. if i = a \<and> j = b then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c1 - u c2 < d}" by blast moreover from assms have "[M]\<^bsub>v,n\<^esub> \<subseteq> V" by auto ultimately show ?thesis by auto qed lemma dbm_lt'2: assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M a 0 \<le> Lt d" "a \<le> n" "v c1 = a" "a > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 < d}" proof - from assms(2) have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = a \<and> j = 0 then Lt d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_lt_2[OF \<open>a \<le> n\<close> assms(4,5)] have "[(\<lambda> i j. if i = a \<and> j = 0 then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. 
u c1 < d}" by blast ultimately show ?thesis using assms(1) by auto qed lemma dbm_lt'3: assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M 0 a \<le> Lt d" "a \<le> n" "v c1 = a" "a > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. - u c1 < d}" proof - from assms(2) have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = 0 \<and> j = a then Lt d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_lt_3[OF \<open>a \<le> n\<close> assms(4,5)] have "[(\<lambda> i j. if i = 0 \<and> j = a then Lt d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. - u c1 < d}" by blast ultimately show ?thesis using assms(1) by auto qed lemma dbm_le': assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M a b \<le> Le d" "a \<le> n" "b \<le> n" "v c1 = a" "v c2 = b" "a > 0" "b > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 \<le> d}" proof - from assms have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = a \<and> j = b then Le d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_le[OF \<open>a \<le> n\<close> \<open>b \<le> n\<close> assms(5-)] have "[(\<lambda> i j. if i = a \<and> j = b then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c1 - u c2 \<le> d}" by blast moreover from assms have "[M]\<^bsub>v,n\<^esub> \<subseteq> V" by auto ultimately show ?thesis by auto qed lemma dbm_le'2: assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M a 0 \<le> Le d" "a \<le> n" "v c1 = a" "a > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 \<le> d}" proof - from assms(2) have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = a \<and> j = 0 then Le d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_le_2[OF \<open>a \<le> n\<close> assms(4,5)] have "[(\<lambda> i j. if i = a \<and> j = 0 then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. u c1 \<le> d}" by blast ultimately show ?thesis using assms(1) by auto qed lemma dbm_le'3: assumes "[M]\<^bsub>v,n\<^esub> \<subseteq> V" "M 0 a \<le> Le d" "a \<le> n" "v c1 = a" "a > 0" shows "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. - u c1 \<le> d}" proof - from assms(2) have "[M]\<^bsub>v,n\<^esub> \<subseteq> [(\<lambda> i j. if i = 0 \<and> j = a then Le d else \<infinity>)]\<^bsub>v,n\<^esub>" apply safe apply (rule DBM_le_subset) unfolding less_eq dbm_le_def by auto moreover from zone_diag_le_3[OF \<open>a \<le> n\<close> assms(4,5)] have "[(\<lambda> i j. if i = 0 \<and> j = a then Le d else \<infinity>)]\<^bsub>v,n\<^esub> = {u. - u c1 \<le> d}" by blast ultimately show ?thesis using assms(1) by auto qed lemma int_zone_dbm: assumes "\<forall> (_,d) \<in> collect_clock_pairs cc. d \<in> \<int>" "\<forall> c \<in> collect_clks cc. v c \<le> n" obtains M where "{u. u \<turnstile> cc} = [M]\<^bsub>v,n\<^esub>" and "dbm_int M n" using int_zone_dbm[OF _ assms] clock_numbering(1) by auto lemma non_empty_dbm_diag_set': assumes "clock_numbering' v n" "\<forall>i\<le>n. \<forall>j\<le>n. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>" "[M]\<^bsub>v,n\<^esub> \<noteq> {}" obtains M' where "[M]\<^bsub>v,n\<^esub> = [M']\<^bsub>v,n\<^esub> \<and> (\<forall>i\<le>n. \<forall>j\<le>n. M' i j \<noteq> \<infinity> \<longrightarrow> get_const (M' i j) \<in> \<int>) \<and> (\<forall> i \<le> n. 
theorem region_zone_intersect_empty_approx_correct: assumes "R \<in> \<R>" "Z \<subseteq> V" "R \<inter> Z = {}" "vabstr Z M" shows "R \<inter> Approx\<^sub>\<beta> Z = {}" proof - define v' where "v' i = (THE c. c \<in> X \<and> v c = i)" for i
from region_dbm[OF assms(1)] obtain M\<^sub>R where M\<^sub>R: "[M\<^sub>R]\<^bsub>v,n\<^esub> = R" "\<forall>i\<le>n. \<forall>j\<le>n. M\<^sub>R i 0 = \<infinity> \<and> 0 < j \<and> i \<noteq> j \<longrightarrow> M\<^sub>R i j = \<infinity> \<and> M\<^sub>R j i = \<infinity>" "\<forall>i\<le>n. M\<^sub>R i i = Le 0"
"\<forall>i\<le>n. \<forall>j\<le>n. 0 < i \<and> 0 < j \<and> M\<^sub>R i 0 \<noteq> \<infinity> \<and> M\<^sub>R j 0 \<noteq> \<infinity> \<longrightarrow> (\<exists>d. - int (k (THE c. c \<in> X \<and> v c = j)) \<le> d \<and> d \<le> int (k (THE c. c \<in> X \<and> v c = i)) \<and> M\<^sub>R i j = Le d \<and> M\<^sub>R j i = Le (real_of_int (- d)) \<or> - int (k (THE c. c \<in> X \<and> v c = j)) \<le> d - 1 \<and> d \<le> int (k (THE c. c \<in> X \<and> v c = i)) \<and> M\<^sub>R i j = Lt d \<and> M\<^sub>R j i = Lt (real_of_int (- d + 1)))"
"\<forall>i\<le>n. 0 < i \<and> M\<^sub>R i 0 \<noteq> \<infinity> \<longrightarrow> (\<exists>d\<le>int (k (THE c. c \<in> X \<and> v c = i)). d \<ge> 0 \<and> (M\<^sub>R i 0 = Le d \<and> M\<^sub>R 0 i = Le (real_of_int (- d)) \<or> M\<^sub>R i 0 = Lt d \<and> M\<^sub>R 0 i = Lt (real_of_int (- d + 1))))"
"\<forall>i\<le>n. 0 < i \<longrightarrow> (\<exists>d\<ge>- int (k (THE c. c \<in> X \<and> v c = i)). d \<le> 0 \<and> (M\<^sub>R 0 i = Le d \<or> M\<^sub>R 0 i = Lt d))"
"\<forall>i j. M\<^sub>R i j \<noteq> \<infinity> \<longrightarrow> get_const (M\<^sub>R i j) \<in> \<int>"
"\<forall>i\<le>n. \<forall>j\<le>n. M\<^sub>R i j \<noteq> \<infinity> \<and> 0 < i \<and> 0 < j \<longrightarrow> (\<exists>d. (M\<^sub>R i j = Le d \<or> M\<^sub>R i j = Lt d) \<and> - int (k (THE c. c \<in> X \<and> v c = j)) \<le> d \<and> d \<le> int (k (THE c. c \<in> X \<and> v c = i)))" .
show ?thesis proof (cases "R = {}") case True then show ?thesis by auto next case False from clock_numbering(2) have cn_weak: "\<forall>k\<le>n. 0 < k \<longrightarrow> (\<exists> c. v c = k)" by auto show ?thesis proof (cases "Z = {}") case True then show ?thesis using beta_interp.apx_empty by blast next case False from assms(4) have "Z = [M]\<^bsub>v,n\<^esub>" "\<forall> i\<le>n. \<forall> j\<le>n. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>" by auto from this(1) non_empty_dbm_diag_set'[OF clock_numbering(1) this(2)] \<open>Z \<noteq> {}\<close> obtain M where M: "Z = [M]\<^bsub>v,n\<^esub> \<and> (\<forall>i\<le>n. \<forall>j\<le>n. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>) \<and> (\<forall>i\<le>n.
M i i = \<one>)" by auto with not_empty_cyc_free[OF cn_weak] False have "cyc_free M n" by auto then have "cycle_free M n" using cycle_free_diag_equiv by auto from M have "Z = [FW M n]\<^bsub>v,n\<^esub>" unfolding neutral by (auto intro!: FW_zone_equiv[OF cn_weak]) moreover from fw_canonical[OF \<open>cycle_free M _\<close>] M have "canonical (FW M n) n" unfolding neutral by auto moreover from FW_int_preservation M have "\<forall>i\<le>n. \<forall>j\<le>n. FW M n i j \<noteq> \<infinity> \<longrightarrow> get_const (FW M n i j) \<in> \<int>" by auto ultimately obtain M where M: "[M]\<^bsub>v,n\<^esub> = Z" "canonical M n" "\<forall>i\<le>n. \<forall>j\<le>n. M i j \<noteq> \<infinity> \<longrightarrow> get_const (M i j) \<in> \<int>" by blast let ?M = "\<lambda> i j. min (M i j) (M\<^sub>R i j)" from M(1) M\<^sub>R(1) assms have "[M]\<^bsub>v,n\<^esub> \<inter> [M\<^sub>R]\<^bsub>v,n\<^esub> = {}" by auto moreover from DBM_le_subset[folded less_eq, of n ?M M] have "[?M]\<^bsub>v,n\<^esub> \<subseteq> [M]\<^bsub>v,n\<^esub>" by auto moreover from DBM_le_subset[folded less_eq, of n ?M M\<^sub>R] have "[?M]\<^bsub>v,n\<^esub> \<subseteq> [M\<^sub>R]\<^bsub>v,n\<^esub>" by auto ultimately have "[?M]\<^bsub>v,n\<^esub> = {}" by blast then have "\<not> cyc_free ?M n" using cyc_free_not_empty[of n ?M v] clock_numbering(1) by auto then obtain i xs where xs: "i \<le> n" "set xs \<subseteq> {0..n}" "len ?M i i xs < \<one>" by auto from this(1,2) canonical_shorten_rotate_neg_cycle[OF M(2) this(2,1,3)] obtain i ys where ys: "len ?M i i ys < \<one>" "set ys \<subseteq> {0..n}" "successive (\<lambda>(a, b). ?M a b = M a b) (arcs i i ys)" "i \<le> n" and distinct: "distinct ys" "i \<notin> set ys" and cycle_closes: "ys \<noteq> [] \<longrightarrow> ?M i (hd ys) \<noteq> M i (hd ys) \<or> ?M (last ys) i \<noteq> M (last ys) i" by fastforce have one_M_aux: "len ?M i j ys = len M\<^sub>R i j ys" if "\<forall> (a,b) \<in> set (arcs i j ys). M a b \<ge> M\<^sub>R a b" for j using that by (induction ys arbitrary: i) (auto simp: min_def) have one_M: "\<exists> (a,b) \<in> set (arcs i i ys). M a b < M\<^sub>R a b" proof (rule ccontr, goal_cases) case 1 then have "\<forall>(a, b)\<in>set (arcs i i ys). M\<^sub>R a b \<le> M a b" by auto from one_M_aux[OF this] have "len ?M i i ys = len M\<^sub>R i i ys" . with Nil ys(1) xs(3) have "len M\<^sub>R i i ys < \<one>" by simp from DBM_val_bounded_neg_cycle[OF _ \<open>i \<le> n\<close> \<open>set ys \<subseteq> _\<close> this cn_weak] have "[M\<^sub>R]\<^bsub>v,n\<^esub> = {}" unfolding DBM_zone_repr_def by auto with \<open>R \<noteq> {}\<close> M\<^sub>R(1) show False by auto qed have one_M_R_aux: "len ?M i j ys = len M i j ys" if "\<forall> (a,b) \<in> set (arcs i j ys). M a b \<le> M\<^sub>R a b" for j using that by (induction ys arbitrary: i) (auto simp: min_def) have one_M_R: "\<exists> (a,b) \<in> set (arcs i i ys). M a b > M\<^sub>R a b" proof (rule ccontr, goal_cases) case 1 then have "\<forall>(a, b)\<in>set (arcs i i ys). M\<^sub>R a b \<ge> M a b" by auto from one_M_R_aux[OF this] have "len ?M i i ys = len M i i ys" . 
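(* Informal reading of this contradiction argument: ?M is the pointwise
   minimum of M and M\<^sub>R, and ys is a negative cycle for ?M.  Under the
   ccontr assumption M a b \<le> M\<^sub>R a b on every arc, the cycle has the same
   length in M alone, so M has a negative cycle and Z = [M]\<^bsub>v,n\<^esub> would be
   empty, contradicting \<open>Z \<noteq> {}\<close>.  The fact one_M above is the symmetric
   statement, obtained in the same way from \<open>R \<noteq> {}\<close>. *)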
with Nil ys(1) xs(3) have "len M i i ys < \<one>" by simp from DBM_val_bounded_neg_cycle[OF _ \<open>i \<le> n\<close> \<open>set ys \<subseteq> _\<close> this cn_weak] have "[M]\<^bsub>v,n\<^esub> = {}" unfolding DBM_zone_repr_def by auto with \<open>Z \<noteq> {}\<close> M(1) show False by auto qed have 0: "(0,0) \<notin> set (arcs i i ys)" proof (cases "ys = []") case False with distinct show ?thesis using arcs_distinct1 by blast next case True with ys(1) have "?M i i < \<one>" by auto then have "M i i < \<one> \<or> M\<^sub>R i i < \<one>" by (simp add: min_less_iff_disj) from one_M one_M_R True show ?thesis by auto qed { fix a b assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "a > 0" from aux1[OF ys(4,4,2) A] have C2: "a \<le> n" by auto then obtain c1 where C: "v c1 = a" "c1 \<in> X" using clock_numbering(2) not0 unfolding v'_def by meson then have "v' a = c1" using clock_numbering C2 not0 unfolding v'_def by fastforce with C C2 have "\<exists> c \<in> X. v c = a \<and> v' a = c" "a \<le> n" by auto } note clock_dest_1 = this { fix a b assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "b > 0" from aux1[OF ys(4,4,2) A] have C2: "b \<le> n" by auto then obtain c2 where C: "v c2 = b" "c2 \<in> X" using clock_numbering(2) not0 unfolding v'_def by meson then have "v' b = c2" using clock_numbering C2 not0 unfolding v'_def by fastforce with C C2 have "\<exists> c \<in> X. v c = b \<and> v' b = c" "b \<le> n" by auto } note clock_dest_2 = this have clock_dest: "\<And> a b. (a,b) \<in> set (arcs i i ys) \<Longrightarrow> a > 0 \<Longrightarrow> b > 0 \<Longrightarrow> \<exists> c1 \<in> X. \<exists> c2 \<in> X. v c1 = a \<and> v c2 = b \<and> v' a = c1 \<and> v' b = c2 &&& a \<le> n &&& b \<le> n" using clock_dest_1 clock_dest_2 by (auto) presburger { fix a assume A: "(a,0) \<in> set (arcs i i ys)" assume not0: "a > 0" assume bounded: "M\<^sub>R a 0 \<noteq> \<infinity>" assume lt: "M a 0 < M\<^sub>R a 0" from clock_dest_1[OF A not0] obtain c1 where C: "v c1 = a" "c1 \<in> X" "v' a = c1" and C2: "a \<le> n" by blast from C2 not0 bounded M\<^sub>R(5) obtain d :: int where *: "d \<le> int (k (v' a))" "M\<^sub>R a 0 = Le d \<and> M\<^sub>R 0 a = Le (- d) \<or> M\<^sub>R a 0 = Lt d \<and> M\<^sub>R 0 a = Lt (- d + 1)" unfolding v'_def by auto with C have **: "d \<le> int (k c1)" by auto from *(2) have ?thesis proof (standard, goal_cases) case 1 with lt have "M a 0 < Le d" by auto then have "M a 0 \<le> Lt d" unfolding less less_eq dbm_le_def by (fastforce elim!: dbm_lt.cases) from dbm_lt'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 < d}" by auto from beta_interp.\<beta>_boundedness_lt'[OF ** C(2) this] have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 < d}" . moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) None (M\<^sub>R a 0)" "dbm_entry_val u None (Some c1) (M\<^sub>R 0 a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto then have "u c1 = d" using 1 by auto then have "u \<notin> {u \<in> V. 
u c1 < d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto next case 2 from 2 lt have "M a 0 \<noteq> \<infinity>" by auto with dbm_entry_int[OF this] M(3) \<open>a \<le> n\<close> obtain d' :: int where d': "M a 0 = Le d' \<or> M a 0 = Lt d'" by auto then have "M a 0 \<le> Le (d - 1)" using lt 2 apply (auto simp: less_eq dbm_le_def less) apply (cases rule: dbm_lt.cases) apply auto apply rule apply (cases rule: dbm_lt.cases) by auto with lt have "M a 0 \<le> Le (d - 1)" by auto from dbm_le'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 \<le> d - 1}" by auto from beta_interp.\<beta>_boundedness_le'[OF _ C(2) this] ** have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 \<le> d - 1}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u None (Some c1) (M\<^sub>R 0 a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto then have "u c1 > d - 1" using 2 by auto then have "u \<notin> {u \<in> V. u c1 \<le> d - 1}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed } note bounded_zero_1 = this { fix a assume A: "(0,a) \<in> set (arcs i i ys)" assume not0: "a > 0" assume bounded: "M\<^sub>R a 0 \<noteq> \<infinity>" assume lt: "M 0 a < M\<^sub>R 0 a" from clock_dest_2[OF A not0] obtain c1 where C: "v c1 = a" "c1 \<in> X" "v' a = c1" and C2: "a \<le> n" by blast from C2 not0 bounded M\<^sub>R(5) obtain d :: int where *: "d \<le> int (k (v' a))" "M\<^sub>R a 0 = Le d \<and> M\<^sub>R 0 a = Le (- d) \<or> M\<^sub>R a 0 = Lt d \<and> M\<^sub>R 0 a = Lt (- d + 1)" unfolding v'_def by auto with C have **: "- int (k c1) \<le> - d" by auto from *(2) have ?thesis proof (standard, goal_cases) case 1 with lt have "M 0 a < Le (-d)" by auto then have "M 0 a \<le> Lt (-d)" unfolding less less_eq dbm_le_def by (fastforce elim!: dbm_lt.cases) from dbm_lt'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. d < u c1}" by auto from beta_interp.\<beta>_boundedness_gt'[OF _ C(2) this] ** have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. - u c1 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) None (M\<^sub>R a 0)" "dbm_entry_val u None (Some c1) (M\<^sub>R 0 a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with 1 have "u \<notin> {u \<in> V. - u c1 < -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto next case 2 from 2 lt have "M 0 a \<noteq> \<infinity>" by auto with dbm_entry_int[OF this] M(3) \<open>a \<le> n\<close> obtain d' :: int where d': "M 0 a = Le d' \<or> M 0 a = Lt d'" by auto then have "M 0 a \<le> Le (-d)" using lt 2 apply (auto simp: less_eq dbm_le_def less) apply (cases rule: dbm_lt.cases) apply auto apply rule apply (metis get_const.simps(2) 2 of_int_less_iff of_int_minus zless_add1_eq) apply (cases rule: dbm_lt.cases) apply auto apply (rule dbm_lt.intros(5)) by (simp add: int_lt_Suc_le) from dbm_le'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. d \<le> u c1}" by auto from beta_interp.\<beta>_boundedness_ge'[OF _ C(2) this] ** have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. 
- u c1 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) None (M\<^sub>R a 0)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with 2 have "u \<notin> {u \<in> V. - u c1 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed } note bounded_zero_2 = this { fix a b c c1 c2 assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "a > 0" "b > 0" assume lt: "M a b = Lt c" assume neg: "M a b + M\<^sub>R b a < \<one>" assume C: "v c1 = a" "v c2 = b" "c1 \<in> X" "c2 \<in> X" and C2: "a \<le> n" "b \<le> n" assume valid: "-k c2 \<le> -get_const (M\<^sub>R b a)" "-get_const (M\<^sub>R b a) \<le> k c1" from neg have "M\<^sub>R b a \<noteq> \<infinity>" by auto then obtain d where *: "M\<^sub>R b a = Le d \<or> M\<^sub>R b a = Lt d" by (cases "M\<^sub>R b a", auto)+ with M\<^sub>R(7) \<open>_ _ _ \<noteq> \<infinity>\<close> have "d \<in> \<int>" by fastforce with * obtain d :: int where *: "M\<^sub>R b a = Le d \<or> M\<^sub>R b a = Lt d" using Ints_cases by auto with valid have valid: "- k c2 \<le> -d" "-d \<le> k c1" by auto from * neg lt have "M a b \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_lt'[OF assms(2)[folded M(1)] this C2 C(1,2) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 < - d}" . from beta_interp.\<beta>_boundedness_diag_lt'[OF valid C(3,4) this] have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 - u c2 < -d}" . moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) (Some c1) (M\<^sub>R b a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with * have "u \<notin> {u \<in> V. u c1 - u c2 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note neg_sum_lt = this { fix a b assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "a > 0" "b > 0" assume neg: "M a b + M\<^sub>R b a < \<one>" from clock_dest[OF A not0] obtain c1 c2 where C: "v c1 = a" "v c2 = b" "c1 \<in> X" "c2 \<in> X" and C2: "a \<le> n" "b \<le> n" by blast then have C3: "v' a = c1" "v' b = c2" unfolding v'_def using clock_numbering(1) by auto from neg have inf: "M a b \<noteq> \<infinity>" "M\<^sub>R b a \<noteq> \<infinity>" by auto from M\<^sub>R(8) inf not0 C(3,4) C2 C3 obtain d :: int where d: "M\<^sub>R b a = Le d \<or> M\<^sub>R b a = Lt d" "- int (k c1) \<le> d" "d \<le> int (k c2)" unfolding v'_def by auto from inf obtain c where c: "M a b = Le c \<or> M a b = Lt c" by (cases "M a b") auto { assume **: "M a b \<le> Lt (-d)" from dbm_lt'[OF assms(2)[folded M(1)] this C2 C(1,2) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 < (- d)}" . from beta_interp.\<beta>_boundedness_diag_lt'[OF _ _ C(3,4) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 - u c2 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) (Some c1) (M\<^sub>R b a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. u c1 - u c2 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M a b \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . 
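(* Integrality bookkeeping for the remaining cases, informally: both
   constants are integers, so M a b + M\<^sub>R b a < Le 0 yields the strict
   bound M a b \<le> Lt (-d) whenever M a b is strict or M\<^sub>R b a is weak.
   Only the combination M a b = Le c with M\<^sub>R b a = Lt d gives merely
   M a b \<le> Le (-d); that case is handled below via dbm_le' and
   \<beta>_boundedness_diag_le'. *)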
next case 1 note A = this from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg d have "M a b \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 2 with A neg d have "M a b \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'[OF assms(2)[folded M(1)] this C2 C(1,2) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 \<le> - d}" . from beta_interp.\<beta>_boundedness_diag_le'[OF _ _ C(3,4) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 - u c2 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) (Some c1) (M\<^sub>R b a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. u c1 - u c2 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_1 = this { fix a b assume A: "(a,0) \<in> set (arcs i i ys)" assume not0: "a > 0" assume neg: "M a 0 + M\<^sub>R 0 a < \<one>" from clock_dest_1[OF A not0] obtain c1 where C: "v c1 = a" "c1 \<in> X" and C2: "a \<le> n" by blast with clock_numbering(1) have C3: "v' a = c1" unfolding v'_def by auto from neg have inf: "M a 0 \<noteq> \<infinity>" "M\<^sub>R 0 a \<noteq> \<infinity>" by auto from M\<^sub>R(6) not0 C2 C3 obtain d :: int where d: "M\<^sub>R 0 a = Le d \<or> M\<^sub>R 0 a = Lt d" "- int (k c1) \<le> d" "d \<le> 0" unfolding v'_def by auto from inf obtain c where c: "M a 0 = Le c \<or> M a 0 = Lt c" by (cases "M a 0") auto { assume "M a 0 \<le> Lt (-d)" from dbm_lt'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 < - d}" . from beta_interp.\<beta>_boundedness_lt'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u None (Some c1) (M\<^sub>R 0 a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. u c1 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M a 0 \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 1 note A = this from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg d have "M a 0 \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 2 with A neg d have "M a 0 \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 \<le> - d}" . from beta_interp.\<beta>_boundedness_le'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u None (Some c1) (M\<^sub>R 0 a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. 
u c1 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_1' = this { fix a b assume A: "(0,b) \<in> set (arcs i i ys)" assume not0: "b > 0" assume neg: "M 0 b + M\<^sub>R b 0 < \<one>" from clock_dest_2[OF A not0] obtain c2 where C: "v c2 = b" "c2 \<in> X" and C2: "b \<le> n" by blast with clock_numbering(1) have C3: "v' b = c2" unfolding v'_def by auto from neg have "M 0 b \<noteq> \<infinity>" "M\<^sub>R b 0 \<noteq> \<infinity>" by auto with M\<^sub>R(5) not0 C2 C3 obtain d :: int where d: "M\<^sub>R b 0 = Le d \<or> M\<^sub>R b 0 = Lt d" "d \<le> k c2" unfolding v'_def by fastforce from \<open>M 0 b \<noteq> \<infinity>\<close> obtain c where c: "M 0 b = Le c \<or> M 0 b = Lt c" by (cases "M 0 b") auto { assume "M 0 b \<le> Lt (-d)" from dbm_lt'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 > d}" by simp from beta_interp.\<beta>_boundedness_gt'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. - u c2 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) None (M\<^sub>R b 0)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. - u c2 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M 0 b \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case A: 1 from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg have "M 0 b \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 2 with A neg c have "M 0 b \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 \<ge> d}" by simp from beta_interp.\<beta>_boundedness_ge'[OF _ C(2) this] d(2) have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. - u c2 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) None (M\<^sub>R b 0)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. - u c2 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_1'' = this { fix a b assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "b > 0" "a > 0" assume neg: "M\<^sub>R a b + M b a < \<one>" from clock_dest[OF A not0(2,1)] obtain c1 c2 where C: "v c1 = a" "v c2 = b" "c1 \<in> X" "c2 \<in> X" and C2: "a \<le> n" "b \<le> n" by blast then have C3: "v' a = c1" "v' b = c2" unfolding v'_def using clock_numbering(1) by auto from neg have inf: "M b a \<noteq> \<infinity>" "M\<^sub>R a b \<noteq> \<infinity>" by auto with M\<^sub>R(8) not0 C(3,4) C2 C3 obtain d :: int where d: "M\<^sub>R a b = Le d \<or> M\<^sub>R a b = Lt d" "d \<ge> -int (k c2)" "d \<le> int (k c1)" unfolding v'_def by blast from inf obtain c where c: "M b a = Le c \<or> M b a = Lt c" by (cases "M b a") auto { assume "M b a \<le> Lt (-d)" from dbm_lt'[OF assms(2)[folded M(1)] this C2(2,1) C(2,1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 - u c1 < - d}" . 
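(* Lifting step, informally: \<beta>_boundedness_diag_lt' transfers the
   inclusion [M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 - u c1 < -d} from the zone to
   Approx\<^sub>\<beta>.  This is sound because d stems from the region entry
   M\<^sub>R a b together with the bounds - k c2 \<le> d \<le> k c1, so the constant
   -d respects the clock ceilings used by the \<beta>-normalisation. *)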
from beta_interp.\<beta>_boundedness_diag_lt'[OF _ _ C(4,3) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c2 - u c1 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) (Some c2) (M\<^sub>R a b)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. u c2 - u c1 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M b a \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case A: 1 from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg d have "M b a \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 2 with A neg d have "M b a \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'[OF assms(2)[folded M(1)] this C2(2,1) C(2,1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 - u c1 \<le> - d}" . from beta_interp.\<beta>_boundedness_diag_le'[OF _ _ C(4,3) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c2 - u c1 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) (Some c2) (M\<^sub>R a b)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. u c2 - u c1 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_2 = this { fix a b assume A: "(a,0) \<in> set (arcs i i ys)" assume not0: "a > 0" assume neg: "M\<^sub>R a 0 + M 0 a < \<one>" from clock_dest_1[OF A not0] obtain c1 where C: "v c1 = a" "c1 \<in> X" and C2: "a \<le> n" by blast with clock_numbering(1) have C3: "v' a = c1" unfolding v'_def by auto from neg have inf: "M 0 a \<noteq> \<infinity>" "M\<^sub>R a 0 \<noteq> \<infinity>" by auto with M\<^sub>R(5) not0 C2 C3 obtain d :: int where d: "M\<^sub>R a 0 = Le d \<or> M\<^sub>R a 0 = Lt d" "d \<le> int (k c1)" "d \<ge> 0" unfolding v'_def by auto from inf obtain c where c: "M 0 a = Le c \<or> M 0 a = Lt c" by (cases "M 0 a") auto { assume "M 0 a \<le> Lt (-d)" from dbm_lt'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 > d}" by simp from beta_interp.\<beta>_boundedness_gt'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 > d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) None (M\<^sub>R a 0)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. u c1 > d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M 0 a \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case A: 1 from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg d have "M 0 a \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . 
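(* Remaining weak case below (M 0 a = Le c with M\<^sub>R a 0 = Lt d):
   integrality only gives M 0 a \<le> Le (-d), so the zone satisfies
   u c1 \<ge> d, while every u \<in> R satisfies u c1 < d by the region entry
   M\<^sub>R a 0 = Lt d; dbm_le'3 together with \<beta>_boundedness_ge' then
   separates R from Approx\<^sub>\<beta> Z. *)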
next case 2 with A neg d have "M 0 a \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'3[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 \<ge> d}" by simp from beta_interp.\<beta>_boundedness_ge'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 \<ge> d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) None (M\<^sub>R a 0)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. u c1 \<ge> d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_2' = this { fix a b assume A: "(0,b) \<in> set (arcs i i ys)" assume not0: "b > 0" assume neg: "M\<^sub>R 0 b + M b 0 < \<one>" from clock_dest_2[OF A not0] obtain c2 where C: "v c2 = b" "c2 \<in> X" and C2: "b \<le> n" by blast with clock_numbering(1) have C3: "v' b = c2" unfolding v'_def by auto from neg have "M b 0 \<noteq> \<infinity>" "M\<^sub>R 0 b \<noteq> \<infinity>" by auto with M\<^sub>R(6) not0 C2 C3 obtain d :: int where d: "M\<^sub>R 0 b = Le d \<or> M\<^sub>R 0 b = Lt d" "-d \<le> k c2" unfolding v'_def by fastforce from \<open>M b 0 \<noteq> \<infinity>\<close> obtain c where c: "M b 0 = Le c \<or> M b 0 = Lt c" by (cases "M b 0") auto { assume "M b 0 \<le> Lt (-d)" from dbm_lt'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 < - d}" by simp from beta_interp.\<beta>_boundedness_lt'[OF _ C(2) this] d have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c2 < -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u None (Some c2) (M\<^sub>R 0 b)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with d have "u \<notin> {u \<in> V. u c2 < -d}" by auto } ultimately have ?thesis using M\<^sub>R(1) M(1) by auto } note aux = this from c have ?thesis proof (standard, goal_cases) case 2 with neg d have "M b 0 \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 1 note A = this from d(1) show ?thesis proof (standard, goal_cases) case 1 with A neg have "M b 0 \<le> Lt (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) with aux show ?thesis . next case 2 with A neg c have "M b 0 \<le> Le (-d)" unfolding less_eq dbm_le_def mult neutral less by (auto elim!: dbm_lt.cases) from dbm_le'2[OF assms(2)[folded M(1)] this C2 C(1) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c2 \<le> - d}" by simp from beta_interp.\<beta>_boundedness_le'[OF _ C(2) this] d(2) have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c2 \<le> -d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u None (Some c2) (M\<^sub>R 0 b)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with A 2 have "u \<notin> {u \<in> V. 
u c2 \<le> -d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed qed } note neg_sum_2'' = this { fix a b assume A: "(a,b) \<in> set (arcs i i ys)" assume not0: "a > 0" "b > 0" assume bounded: "M\<^sub>R a 0 \<noteq> \<infinity>" "M\<^sub>R b 0 \<noteq> \<infinity>" assume lt: "M a b < M\<^sub>R a b" from clock_dest[OF A not0] obtain c1 c2 where C: "v c1 = a" "v c2 = b" "c1 \<in> X" "c2 \<in> X" and C2: "a \<le> n" "b \<le> n" by blast from C C2 clock_numbering(1,3) have C3: "v' b = c2" "v' a = c1" unfolding v'_def by blast+ with C C2 not0 bounded M\<^sub>R(4) obtain d :: int where *: "- int (k c2) \<le> d \<and> d \<le> int (k c1) \<and> M\<^sub>R a b = Le d \<and> M\<^sub>R b a = Le (- d) \<or> - int (k c2) \<le> d - 1 \<and> d \<le> int (k c1) \<and> M\<^sub>R a b = Lt d \<and> M\<^sub>R b a = Lt (- d + 1)" unfolding v'_def by force from * have ?thesis proof (standard, goal_cases) case 1 with lt have "M a b < Le d" by auto then have "M a b \<le> Lt d" unfolding less less_eq dbm_le_def by (fastforce elim!: dbm_lt.cases) from dbm_lt'[OF assms(2)[folded M(1)] this C2 C(1,2) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 < d}" . from beta_interp.\<beta>_boundedness_diag_lt'[OF _ _ C(3,4) this] 1 have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 - u c2 < d}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c1) (Some c2) (M\<^sub>R a b)" "dbm_entry_val u (Some c2) (Some c1) (M\<^sub>R b a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with 1 have "u \<notin> {u \<in> V. u c1 - u c2 < d}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto next case 2 with lt have "M a b \<noteq> \<infinity>" by auto with dbm_entry_int[OF this] M(3) \<open>a \<le> n\<close> \<open>b \<le> n\<close> obtain d' :: int where d': "M a b = Le d' \<or> M a b = Lt d'" by auto then have "M a b \<le> Le (d - 1)" using lt 2 apply (auto simp: less_eq dbm_le_def less) apply (cases rule: dbm_lt.cases) apply auto apply (rule dbm_lt.intros) apply (cases rule: dbm_lt.cases) by auto with lt have "M a b \<le> Le (d - 1)" by auto from dbm_le'[OF assms(2)[folded M(1)] this C2 C(1,2) not0] have "[M]\<^bsub>v,n\<^esub> \<subseteq> {u \<in> V. u c1 - u c2 \<le> d - 1}" . from beta_interp.\<beta>_boundedness_diag_le'[OF _ _ C(3,4) this] 2 have "Approx\<^sub>\<beta> ([M]\<^bsub>v,n\<^esub>) \<subseteq> {u \<in> V. u c1 - u c2 \<le> d - 1}" by auto moreover { fix u assume u: "u \<in> [M\<^sub>R]\<^bsub>v,n\<^esub>" with C C2 have "dbm_entry_val u (Some c2) (Some c1) (M\<^sub>R b a)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with 2 have "u \<notin> {u \<in> V. u c1 - u c2 \<le> d - 1}" by auto } ultimately show ?thesis using M\<^sub>R(1) M(1) by auto qed } note bounded = this { assume not_bounded: "\<forall> (a,b) \<in> set (arcs i i ys). M a b < M\<^sub>R a b \<longrightarrow> M\<^sub>R a 0 = \<infinity> \<or> M\<^sub>R b 0 = \<infinity>" have "\<exists> y z zs. set zs \<union> {0, y, z} = set (i # ys) \<and> len ?M 0 0 (y # z # zs) < Le 0 \<and> (\<forall> (a,b) \<in> set (arcs 0 0 (y # z # zs)). 
M a b < M\<^sub>R a b \<longrightarrow> a = y \<and> b = z) \<and> M y z < M\<^sub>R y z \<and> distinct (0 # y # z # zs) \<or> ?thesis" proof (cases ys) case Nil show ?thesis proof (cases "M i i < M\<^sub>R i i") case True then have "?M i i = M i i" by (simp add: min.strict_order_iff) with Nil ys(1) xs(3) have *: "M i i < \<one>" by simp with neg_cycle_empty[OF cn_weak _ \<open>i \<le> n\<close>, of "[]" M] have "[M]\<^bsub>v,n\<^esub> = {}" by auto with \<open>Z \<noteq> {}\<close> M(1) show ?thesis by auto next case False then have "?M i i = M\<^sub>R i i" by (simp add: min_absorb2) with Nil ys(1) xs(3) have "M\<^sub>R i i < \<one>" by simp with neg_cycle_empty[OF cn_weak _ \<open>i \<le> n\<close>, of "[]" M\<^sub>R] have "[M\<^sub>R]\<^bsub>v,n\<^esub> = {}" by auto with \<open>R \<noteq> {}\<close> M\<^sub>R(1) show ?thesis by auto qed next case (Cons w ws) note ws = this show ?thesis proof (cases ws) case Nil with ws ys xs(3) have *: "?M i w + ?M w i < \<one>" "?M w i = M w i \<longrightarrow> ?M i w \<noteq> M i w" "(i, w) \<in> set (arcs i i ys)" by auto have "R \<inter> Approx\<^sub>\<beta> Z = {}" proof (cases "?M w i = M w i") case True with *(2) have "?M i w = M\<^sub>R i w" unfolding min_def by auto with *(1) True have neg: "M\<^sub>R i w + M w i < \<one>" by auto show ?thesis proof (cases "i = 0") case True show ?thesis proof (cases "w = 0") case True with 0 \<open>i = 0\<close> *(3) show ?thesis by auto next case False with \<open>i = 0\<close> neg_sum_2'' *(3) neg show ?thesis by blast qed next case False show ?thesis proof (cases "w = 0") case True with \<open>i \<noteq> 0\<close> neg_sum_2' *(3) neg show ?thesis by blast next case False with \<open>i \<noteq> 0\<close> neg_sum_2 *(3) neg show ?thesis by blast qed qed next case False have "M\<^sub>R w i < M w i" proof (rule ccontr, goal_cases) case 1 then have "M\<^sub>R w i \<ge> M w i" by auto with False show False unfolding min_def by auto qed with one_M ws Nil have "M i w < M\<^sub>R i w" by auto then have "?M i w = M i w" unfolding min_def by auto moreover from False *(2) have "?M w i = M\<^sub>R w i" unfolding min_def by auto ultimately have neg: "M i w + M\<^sub>R w i < \<one>" using *(1) by auto show ?thesis proof (cases "i = 0") case True show ?thesis proof (cases "w = 0") case True with 0 \<open>i = 0\<close> *(3) show ?thesis by auto next case False with \<open>i = 0\<close> neg_sum_1'' *(3) neg show ?thesis by blast qed next case False show ?thesis proof (cases "w = 0") case True with \<open>i \<noteq> 0\<close> neg_sum_1' *(3) neg show ?thesis by blast next case False with \<open>i \<noteq> 0\<close> neg_sum_1 *(3) neg show ?thesis by blast qed qed qed then show ?thesis by simp next case zs: (Cons z zs) from one_M obtain a b where *: "(a,b) \<in> set (arcs i i ys)" "M a b < M\<^sub>R a b" by fastforce from cycle_rotate_3'[OF _ *(1) ys(3)] ws cycle_closes obtain ws' where ws': "len ?M i i ys = len ?M a a (b # ws')" "set (a # b # ws') = set (i # ys)" "1 + length ws' = length ys" "set (arcs i i ys) = set (arcs a a (b # ws'))" and successive: "successive (\<lambda>(a, b). ?M a b = M a b) (arcs a a (b # ws') @ [(a, b)])" by blast from successive have successive_arcs: "successive (\<lambda>(a, b). 
?M a b = M a b) (arcs a b (b # ws' @ [a]))" using arcs_decomp_tail by auto from ws'(4) one_M_R *(2) obtain c d where **: "(c,d) \<in> set (arcs a a (b # ws'))" "M c d > M\<^sub>R c d" "(a,b) \<noteq> (c,d)" by fastforce from card_distinct[of "a # b # ws'"] distinct_card[of "i # ys"] ws'(2,3) distinct have distinct: "distinct (a # b # ws')" by simp from ws zs ws'(3) have "ws' \<noteq> []" by auto then obtain z zs where z: "ws' = zs @ [z]" by (metis append_butlast_last_id) then have "b # ws' = (b # zs) @ [z]" by simp with len_decomp[OF this, of ?M a a] arcs_decomp_tail have rotated: "len ?M a a (b # ws') = len ?M z z (a # b # zs)" "set (arcs a a (b # ws')) = set (arcs z z (a # b # zs))" by (auto simp add: comm) from ys(1) xs(3) ws'(1) have "len ?M a a (b # ws') < \<one>" by auto from ws'(2) ys(2) \<open>i \<le> n\<close> z have n_bounds: "a \<le> n" "b \<le> n" "set ws' \<subseteq> {0..n}" "z \<le> n" by auto from * have a_b: "?M a b = M a b" by (simp add: min.strict_order_iff) from successive successive_split[of _ "arcs a z (b # zs)" "[(z,a), (a,b)]"] have first: "successive (\<lambda>(a, b). ?M a b = M a b) (arcs a z (b # zs))" and last_two: "successive (\<lambda>(a, b). ?M a b = M a b) [(z, a), (a, b)]" using arcs_decomp_tail z by auto from * not_bounded have not_bounded': "M\<^sub>R a 0 = \<infinity> \<or> M\<^sub>R b 0 = \<infinity>" by auto from this(1) have "z = 0" proof assume inf: "M\<^sub>R b 0 = \<infinity>" from a_b successive obtain z where z: "(b,z) \<in> set (arcs b a ws')" "?M b z \<noteq> M b z" by (cases ws') auto then have "?M b z = M\<^sub>R b z" by (meson min_def) from arcs_distinct2[OF _ _ _ _ z(1)] distinct have "b \<noteq> z" by auto from z n_bounds have "z \<le> n" apply (induction ws' arbitrary: b) apply auto[] apply (rename_tac ws' b) apply (case_tac ws') apply auto done have "M\<^sub>R b z = \<infinity>" proof (cases "z = 0") case True with inf show ?thesis by auto next case False with inf M\<^sub>R(2) \<open>b \<noteq> z\<close> \<open>z \<le> n\<close> \<open>b \<le> n\<close> show ?thesis by blast qed with \<open>?M b z = M\<^sub>R b z\<close> have "len ?M b a ws' = \<infinity>" by (auto intro: len_inf_elem[OF z(1)]) then have "\<infinity> = len ?M a a (b # ws')" by simp with \<open>len ?M a a _ < \<one>\<close> show ?thesis by auto next assume inf: "M\<^sub>R a 0 = \<infinity>" show "z = 0" proof (rule ccontr) assume "z \<noteq> 0" with last_two a_b have "?M z a = M\<^sub>R z a" by (auto simp: min_def) from distinct z have "a \<noteq> z" by auto with \<open>z \<noteq> 0\<close> \<open>a \<le> n\<close> \<open>z \<le> n\<close> M\<^sub>R(2) inf have "M\<^sub>R z a = \<infinity>" by blast with \<open>?M z a = M\<^sub>R z a\<close> have "len ?M z z (a # b # zs) = \<infinity>" by (auto intro: len_inf_elem) with \<open>len ?M a a _ < \<one>\<close> rotated show False by auto qed qed { fix c d assume A: "(c, d) \<in> set (arcs 0 0 (a # b # zs))" "M c d < M\<^sub>R c d" then have *: "?M c d = M c d" by (simp add: min.strict_order_iff) from rotated(2) A \<open>z = 0\<close> not_bounded ws'(4) have **: "M\<^sub>R c 0 = \<infinity> \<or> M\<^sub>R d 0 = \<infinity>" by auto { assume inf: "M\<^sub>R c 0 = \<infinity>" fix x assume x: "(x, c) \<in> set (arcs a 0 (b # zs))" "?M x c \<noteq> M x c" from x(2) have "?M x c = M\<^sub>R x c" unfolding min_def by auto from arcs_elem[OF x(1)] z \<open>z = 0\<close> have "x \<in> set (a # b # ws')" "c \<in> set (a # b # ws')" by auto with n_bounds have "x \<le> n" "c \<le> n" by auto have "x = 0" proof (rule ccontr) assume "x 
\<noteq> 0" from distinct z arcs_distinct1[OF _ _ _ _ x(1)] \<open>z = 0\<close>have "x \<noteq> c" by auto with \<open>x \<noteq> 0\<close> \<open>c \<le> n\<close> \<open>x \<le> n\<close> M\<^sub>R(2) inf have "M\<^sub>R x c = \<infinity>" by blast with \<open>?M x c = M\<^sub>R x c\<close> have "len ?M a 0 (b # zs) = \<infinity>" by (fastforce intro: len_inf_elem[OF x(1)]) with \<open>z = 0\<close> have "len ?M z z (a # b # zs) = \<infinity>" by auto with \<open>len ?M a a _ < \<one>\<close> rotated show False by auto qed with arcs_distinct_dest1[OF _ x(1), of z] z distinct x \<open>z = 0\<close> have False by auto } note c_0_inf = this have "a = c \<and> b = d" proof (cases "(c, d) = (0, a)") case True with last_two \<open>z = 0\<close> * a_b have False by auto then show ?thesis by simp next case False show ?thesis proof (rule ccontr, goal_cases) case 1 with False A(1) have ***: "(c, d) \<in> set (arcs b 0 zs)" by auto from successive z \<open>z = 0\<close> have "successive (\<lambda>(a, b). ?M a b = M a b) ([(a, b)] @ arcs b 0 zs @ [(0, a), (a, b)])" by (simp add: arcs_decomp) then have ****: "successive (\<lambda>(a, b). ?M a b = M a b) (arcs b 0 zs)" using successive_split[of _ "[(a, b)]" "arcs b 0 zs @ [(0, a), (a, b)]"] successive_split[of _ "arcs b 0 zs" "[(0, a), (a, b)]"] by auto from successive_predecessor[OF *** _ this] successive z obtain x where x: "(x, c) \<in> set (arcs a 0 (b # zs))" "?M x c \<noteq> M x c" proof (cases "c = b") case False then have "zs \<noteq> []" using *** by auto from successive_predecessor[OF *** False **** _ this] * obtain x where x: "(zs = [c] \<and> x = b \<or> (\<exists>ys. zs = c # d # ys \<and> x = b) \<or> (\<exists>ys. zs = ys @ [x, c] \<and> d = 0) \<or> (\<exists>ys ws. zs = ys @ x # c # d # ws))" "?M x c \<noteq> M x c" by blast+ from this(1) have "(x, c) \<in> set (arcs a 0 (b # zs))" using arcs_decomp by auto with x(2) show ?thesis by (auto intro: that) next case True have ****: "successive (\<lambda>(a, b). ?M a b = M a b) (arcs a 0 (b # zs))" using first \<open>z = 0\<close> arcs_decomp successive_arcs z by auto show ?thesis proof (cases zs) case Nil with **** True *** * show ?thesis by (auto intro: that) next case (Cons u us) with *** True distinct z \<open>z = 0\<close> have "distinct (b # u # us @ [0])" by auto from arcs_distinct_fix[OF this] *** True Cons have "d = u" by auto with **** * Cons True show ?thesis by (auto intro: that) qed qed show False proof (cases "d = 0") case True from ** show False proof assume "M\<^sub>R c 0 = \<infinity>" from c_0_inf[OF this x] show False . next assume "M\<^sub>R d 0 = \<infinity>" with \<open>d = 0\<close> M\<^sub>R(3) show False by auto qed next case False with *** have "zs \<noteq> []" by auto from successive_successor[OF \<open>(c,d) \<in> set (arcs b 0 zs)\<close> False **** _ this] * obtain e where "(zs = [d] \<and> e = 0 \<or> (\<exists>ys. zs = d # e # ys) \<or> (\<exists>ys. zs = ys @ [c, d] \<and> e = 0) \<or> (\<exists>ys ws. 
zs = ys @ c # d # e # ws))" "?M d e \<noteq> M d e" by blast then have e: "(d, e) \<in> set (arcs b 0 zs)" "?M d e \<noteq> M d e" using arcs_decomp by auto from ** show False proof assume inf: "M\<^sub>R d 0 = \<infinity>" from e have "?M d e = M\<^sub>R d e" by (meson min_def) from arcs_distinct2[OF _ _ _ _ e(1)] z \<open>z = 0\<close> distinct have "d \<noteq> e" by auto from z n_bounds have "set zs \<subseteq> {0..n}" by auto with e have "e \<le> n" apply (induction zs arbitrary: d) apply auto apply (case_tac zs) apply auto done from n_bounds z arcs_elem(2)[OF A(1)] have "d \<le> n" by auto have "M\<^sub>R d e = \<infinity>" proof (cases "e = 0") case True with inf show ?thesis by auto next case False with inf M\<^sub>R(2) \<open>d \<noteq> e\<close> \<open>e \<le> n\<close> \<open>d \<le> n\<close> show ?thesis by blast qed with \<open>?M d e = M\<^sub>R d e\<close> have "len ?M b 0 zs = \<infinity>" by (auto intro: len_inf_elem[OF e(1)]) with \<open>z = 0\<close> rotated have "\<infinity> = len ?M a a (b # ws')" by simp with \<open>len ?M a a _ < \<one>\<close> show ?thesis by auto next assume "M\<^sub>R c 0 = \<infinity>" from c_0_inf[OF this x] show False . qed qed qed qed } then have "\<forall>(c, d)\<in>set (arcs 0 0 (a # b # zs)). M c d < M\<^sub>R c d \<longrightarrow> c = a \<and> d = b" by blast moreover from ys(1) xs(3) have "len ?M i i ys < Le 0" unfolding neutral by auto moreover with rotated ws'(1) have "len ?M z z (a # b # zs) < Le 0" by auto moreover from \<open>z = 0\<close> z ws'(2) have "set zs \<union> {0, a, b} = set (i # ys)" by auto moreover from \<open>z = 0\<close> distinct z have "distinct (0 # a # b # zs)" by auto ultimately show ?thesis using \<open>z = 0\<close> \<open>M a b < M\<^sub>R a b\<close> by blast qed qed note * = this { assume "\<not> ?thesis" with * obtain y z zs where *: "set zs \<union> {0, y, z} = set (i # ys)" "len ?M 0 0 (y # z # zs) < Le 0" "\<forall>(a, b)\<in>set (arcs 0 0 (y # z # zs)). M a b < M\<^sub>R a b \<longrightarrow> a = y \<and> b = z" "M y z < M\<^sub>R y z" and distinct': "distinct (0 # y # z # zs)" by blast then have "y \<noteq> 0" "z \<noteq> 0" by auto let ?r = "len M\<^sub>R z 0 zs" have "\<forall>(a, b)\<in>set (arcs z 0 zs). 
?M a b = M\<^sub>R a b" proof (safe, goal_cases) case A: (1 a b) have "M\<^sub>R a b \<le> M a b" proof (rule ccontr, goal_cases) case 1 with *(3) A have "a = y" "b = z" by auto with A distinct' arcs_distinct3[OF _ A, of y] show False by auto qed then show ?case by (simp add: min_def) qed then have r: "len ?M z 0 zs = ?r" by (induction zs arbitrary: z) auto with *(2) have **: "?M 0 y + (?M y z + ?r) < Le 0" by simp from M\<^sub>R(1) \<open>R \<noteq> {}\<close> obtain u where u: "DBM_val_bounded v u M\<^sub>R n" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto from *(1) \<open>i \<le> n\<close> \<open>set ys \<subseteq> _\<close> have "y \<le> n" "z \<le> n" by fastforce+ from *(1) ys(2,4) have "set zs \<subseteq> {0 ..n}" by auto from \<open>y \<le> n\<close> \<open>z \<le> n\<close> clock_numbering(2) \<open>y \<noteq> 0\<close> \<open>z \<noteq> 0\<close> obtain c1 c2 where C: "c1 \<in> X" "c2 \<in> X" "v c1 = y" "v c2 = z" by blast+ with clock_numbering(1,3) have C2: "v' y = c1" "v' z = c2" unfolding v'_def by auto with C have "v (v' z) = z" by auto with DBM_val_bounded_len'1[OF u, of zs "v' z"] have "dbm_entry_val u (Some (v' z)) None ?r" using \<open>z \<le> n\<close> clock_numbering(2) \<open>set zs \<subseteq> _\<close> distinct' by force from len_inf_elem ** have tl_not_inf: "\<forall>(a, b)\<in>set (arcs z 0 zs). M\<^sub>R a b \<noteq> \<infinity>" by fastforce with M\<^sub>R(7) len_int_dbm_closed have "get_const ?r \<in> \<int> \<and> ?r \<noteq> \<infinity>" by blast then obtain r :: int where r': "?r = Le r \<or> ?r = Lt r" using Ints_cases by (cases ?r) auto from r' \<open>dbm_entry_val _ _ _ _\<close> C C2 have le: "u (v' z) \<le> r" by fastforce from arcs_ex_head obtain z' where "(z, z') \<in> set (arcs z 0 zs)" by blast then have z': "(z, z') \<in> set (arcs 0 0 (y # z # zs))" "(z, z') \<in> set (arcs z 0 zs)" by auto have "M\<^sub>R z 0 \<noteq> \<infinity>" proof (rule ccontr, goal_cases) case 1 then have inf: "M\<^sub>R z 0 = \<infinity>" by auto have "M\<^sub>R z z' = \<infinity>" proof (cases "z' = 0") case True with 1 show ?thesis by auto next case False from arcs_elem[OF z'(1)] *(1) \<open>i \<le> n\<close> \<open>set ys \<subseteq> _\<close> have "z' \<le> n" by fastforce moreover from distinct' *(1) arcs_distinct1[OF _ _ _ _ z'(1)] have "z \<noteq> z'" by auto ultimately show ?thesis using M\<^sub>R(2) \<open>z \<le> n\<close> False inf by blast qed with tl_not_inf z'(2) show False by auto qed with M\<^sub>R(5) \<open>z \<noteq> 0\<close> \<open>z \<le> n\<close> obtain d :: int where d: "M\<^sub>R z 0 = Le d \<and> M\<^sub>R 0 z = Le (-d) \<or> M\<^sub>R z 0 = Lt d \<and> M\<^sub>R 0 z = Lt (-d + 1)" "d \<le> k (v' z)" "0 \<le> d" unfolding v'_def by auto text \<open>Needs property that len of integral dbm entries is integral and definition of \<open>M_R\<close>\<close> from this (1) have rr: "?r \<ge> M\<^sub>R z 0" proof (standard, goal_cases) case A: 1 with u \<open>z \<le> n\<close> C C2 have *: "- u (v' z) \<le> -d" unfolding DBM_val_bounded_def by fastforce from r' show ?case proof (standard, goal_cases) case 1 with le * A show ?case unfolding less_eq dbm_le_def by fastforce next case 2 with \<open>dbm_entry_val _ _ _ _\<close> C C2 have "u (v' z) < r" by fastforce with * have "r > d" by auto with A 2 show ?case unfolding less_eq dbm_le_def by fastforce qed next case A: 2 with u \<open>z \<le> n\<close> C C2 have *: "- u (v' z) < -d + 1" unfolding DBM_val_bounded_def by fastforce from r' show ?case proof (standard, goal_cases) case 1 with le * A show 
?case unfolding less_eq dbm_le_def by fastforce next case 2 with \<open>dbm_entry_val _ _ _ _\<close> C C2 have "u (v' z) \<le> r" by fastforce with * have "r \<ge> d" by auto with A 2 show ?case unfolding less_eq dbm_le_def by fastforce qed qed with *(3) \<open>y \<noteq> 0\<close> have "M 0 y \<ge> M\<^sub>R 0 y" by fastforce then have "?M 0 y = M\<^sub>R 0 y" by (simp add: min.absorb2) moreover from *(4) have "?M y z = M y z" unfolding min_def by auto ultimately have **: "M\<^sub>R 0 y + (M y z + M\<^sub>R z 0) < Le 0" using ** add_mono_right[OF add_mono_right[OF rr], of "M\<^sub>R 0 y" "M y z"] by simp from ** have not_inf: "M\<^sub>R 0 y \<noteq> \<infinity>" "M y z \<noteq> \<infinity>" "M\<^sub>R z 0 \<noteq> \<infinity>" by auto from M\<^sub>R(6) \<open>y \<noteq> 0\<close> \<open>y \<le> n\<close> obtain c :: int where c: "M\<^sub>R 0 y = Le c \<or> M\<^sub>R 0 y = Lt c" "- k (v' y) \<le> c" "c \<le> 0" unfolding v'_def by auto have ?thesis proof (cases "M\<^sub>R 0 y + M\<^sub>R z 0 = Lt (c + d)") case True from ** have "(M\<^sub>R 0 y + M\<^sub>R z 0) + M y z < Le 0" using comm assoc by metis with True have **: "Lt (c + d) + M y z < Le 0" by simp then have "M y z \<le> Le (- (c + d))" unfolding less less_eq dbm_le_def mult by (cases "M y z") (fastforce elim!: dbm_lt.cases)+ from dbm_le'[OF assms(2)[folded M(1)] this \<open>y \<le> n\<close> \<open>z \<le> n\<close> C(3,4)] \<open>y \<noteq> 0\<close> \<open>z \<noteq> 0\<close> M have subs: "Z \<subseteq> {u \<in> V. u c1 - u c2 \<le> - (c + d)}" by blast with c d have "- k (v' z) \<le> - (c + d)" "- (c + d) \<le> k (v' y)" by auto with beta_interp.\<beta>_boundedness_diag_le'[OF _ _ C(1,2) subs] C2 have "Approx\<^sub>\<beta> Z \<subseteq> {u \<in> V. u c1 - u c2 \<le> - (c + d)}" by auto moreover { fix u assume u: "u \<in> R" with C \<open>y \<le> n\<close> \<open>z \<le> n\<close> M\<^sub>R(1) have "dbm_entry_val u (Some c2) None (M\<^sub>R z 0)" "dbm_entry_val u None (Some c1) (M\<^sub>R 0 y)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with True c d(1) have "u \<notin> {u \<in> V. u c1 - u c2 \<le> - (c + d)}" unfolding mult by auto } ultimately show ?thesis by blast next case False with c d have "M\<^sub>R 0 y + M\<^sub>R z 0 = Le (c + d)" unfolding mult by fastforce moreover from ** have "(M\<^sub>R 0 y + M\<^sub>R z 0) + M y z < Le 0" using comm assoc by metis ultimately have **: "Le (c + d) + M y z < Le 0" by simp then have "M y z \<le> Lt (- (c + d))" unfolding less less_eq dbm_le_def mult by (cases "M y z") (fastforce elim!: dbm_lt.cases)+ from dbm_lt'[OF assms(2)[folded M(1)] this \<open>y \<le> n\<close> \<open>z \<le> n\<close> C(3,4)] \<open>y \<noteq> 0\<close> \<open>z \<noteq> 0\<close> M have subs: "Z \<subseteq> {u \<in> V. u c1 - u c2 < - (c + d)}" by auto from c d(2-) C2 have "- k c2 \<le> - (c + d)" "- (c + d) \<le> k c1" by auto from beta_interp.\<beta>_boundedness_diag_lt'[OF this C(1,2) subs] have "Approx\<^sub>\<beta> Z \<subseteq> {u \<in> V. u c1 - u c2 < - (c + d)}" . moreover { fix u assume u: "u \<in> R" with C \<open>y \<le> n\<close> \<open>z \<le> n\<close> M\<^sub>R(1) have "dbm_entry_val u (Some c2) None (M\<^sub>R z 0)" "dbm_entry_val u None (Some c1) (M\<^sub>R 0 y)" unfolding DBM_zone_repr_def DBM_val_bounded_def by auto with c d(1) have "u \<notin> {u \<in> V. 
u c1 - u c2 < - (c + d)}" by auto } ultimately show ?thesis by auto qed } then have ?thesis by auto } with bounded 0 bounded_zero_1 bounded_zero_2 show ?thesis by blast qed qed qed section \<open>Nice Corollaries of Bouyer's Theorem\<close> lemma \<R>_V: "\<Union> \<R> = V" unfolding V_def \<R>_def using region_cover[of X _ k] by auto lemma regions_beta_V: "R \<in> \<R>\<^sub>\<beta> \<Longrightarrow> R \<subseteq> V" unfolding V_def \<R>\<^sub>\<beta>_def by auto lemma apx_V: "Z \<subseteq> V \<Longrightarrow> Approx\<^sub>\<beta> Z \<subseteq> V" proof (goal_cases) case 1 from beta_interp.apx_in[OF 1] obtain U where "Approx\<^sub>\<beta> Z = \<Union>U" "U \<subseteq> \<R>\<^sub>\<beta>" by auto with regions_beta_V show ?thesis by auto qed corollary approx_\<beta>_closure_\<alpha>: assumes "Z \<subseteq> V" "vabstr Z M" shows "Approx\<^sub>\<beta> Z \<subseteq> Closure\<^sub>\<alpha> Z" proof - note T = region_zone_intersect_empty_approx_correct[OF _ assms(1) _ assms(2-)] have "- \<Union>{R \<in> \<R>. R \<inter> Z \<noteq> {}} = \<Union>{R \<in> \<R>. R \<inter> Z = {}} \<union> - V" proof (safe, goal_cases) case 1 with \<R>_V show False by fast next case 2 then show ?case using alpha_interp.valid_regions_distinct_spec by fastforce next case 3 then show ?case using \<R>_V unfolding V_def by blast qed with T apx_V[OF assms(1)] have "Approx\<^sub>\<beta> Z \<inter> - \<Union>{R \<in> \<R>. R \<inter> Z \<noteq> {}} = {}" by auto then show ?thesis unfolding alpha_interp.cla_def by blast qed definition "V' \<equiv> {Z. Z \<subseteq> V \<and> (\<exists> M. vabstr Z M)}" corollary approx_\<beta>_closure_\<alpha>': "Z \<in> V' \<Longrightarrow> Approx\<^sub>\<beta> Z \<subseteq> Closure\<^sub>\<alpha> Z" using approx_\<beta>_closure_\<alpha> unfolding V'_def by auto text \<open>We could prove this more directly too (without using \<open>Closure\<^sub>\<alpha> Z\<close>), obviously\<close> lemma apx_empty_iff: assumes "Z \<subseteq> V" "vabstr Z M" shows "Z = {} \<longleftrightarrow> Approx\<^sub>\<beta> Z = {}" using alpha_interp.cla_empty_iff[OF assms(1)] approx_\<beta>_closure_\<alpha>[OF assms] beta_interp.apx_subset by auto lemma apx_empty_iff': assumes "Z \<in> V'" shows "Z = {} \<longleftrightarrow> Approx\<^sub>\<beta> Z = {}" using apx_empty_iff assms unfolding V'_def by force lemma apx_V': assumes "Z \<subseteq> V" shows "Approx\<^sub>\<beta> Z \<in> V'" proof (cases "Z = {}") case True with beta_interp.apx_empty beta_interp.empty_zone_dbm show ?thesis unfolding V'_def neutral by auto next case False then have non_empty: "Approx\<^sub>\<beta> Z \<noteq> {}" using beta_interp.apx_subset by blast from beta_interp.apx_in[OF assms] obtain U M where *: "Approx\<^sub>\<beta> Z = \<Union>U" "U \<subseteq> \<R>\<^sub>\<beta>" "Z \<subseteq> Approx\<^sub>\<beta> Z" "vabstr (Approx\<^sub>\<beta> Z) M" by blast moreover from * beta_interp.\<R>_union have "\<Union> U \<subseteq> V" by blast ultimately show ?thesis using *(1,4) unfolding V'_def by auto qed section \<open>A New Zone Semantics Abstracting with \<open>Approx\<^sub>\<beta>\<close>\<close> lemma step_z_V': assumes "A \<turnstile> \<langle>l,Z\<rangle> \<leadsto> \<langle>l',Z'\<rangle>" "valid_abstraction A X k" "\<forall>c\<in>clk_set A. 
v c \<le> n" "Z \<in> V'" shows "Z' \<in> V'" proof - from assms(3) clock_numbering have numbering: "global_clock_numbering A v n" by metis from assms(4) obtain M where M: "Z \<subseteq> V" "Z = [M]\<^bsub>v,n\<^esub>" "dbm_int M n" unfolding V'_def by auto from alpha_interp.step_z_V[OF assms(1) M(1)] M(2) assms(1) step_z_dbm_DBM[OF _ numbering] step_z_dbm_preserves_int[OF _ numbering assms(2) M(3)] obtain M' where M': "Z' \<subseteq> V" "Z' = [M']\<^bsub>v,n\<^esub>" "dbm_int M' n" by metis then show ?thesis unfolding V'_def by blast qed lemma steps_z_V': "A \<turnstile> \<langle>l,Z\<rangle> \<leadsto>* \<langle>l',Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> Z' \<in> V'" by (induction rule: steps_z.induct) (auto intro: step_z_V') subsection \<open>Single Step\<close> inductive step_z_beta :: "('a, 'c, t, 's) ta \<Rightarrow> 's \<Rightarrow> ('c, t) zone \<Rightarrow> 's \<Rightarrow> ('c, t) zone \<Rightarrow> bool" ("_ \<turnstile> \<langle>_, _\<rangle> \<leadsto>\<^sub>\<beta> \<langle>_, _\<rangle>" [61,61,61] 61) where step_beta: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto> \<langle>l', Z'\<rangle> \<Longrightarrow> A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Approx\<^sub>\<beta> Z'\<rangle>" inductive_cases[elim!]: "A \<turnstile> \<langle>l, u\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l',u'\<rangle>" declare step_z_beta.intros[intro] lemma step_z_alpha_sound: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l',Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> Z' \<noteq> {} \<Longrightarrow> \<exists> Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto> \<langle>l',Z''\<rangle> \<and> Z'' \<noteq> {}" apply (induction rule: step_z_beta.induct) apply (frule step_z_V') apply assumption+ apply (rotate_tac 4) apply (drule apx_empty_iff') by blast lemma step_z_alpha_complete: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto> \<langle>l',Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> Z' \<noteq> {} \<Longrightarrow> \<exists> Z''. 
A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Z''\<rangle> \<and> Z'' \<noteq> {}" apply (frule step_z_V') apply assumption+ apply (rotate_tac 4) apply (drule apx_empty_iff') by blast subsection \<open>Multi step\<close> inductive steps_z_beta :: "('a, 'c, t, 's) ta \<Rightarrow> 's \<Rightarrow> ('c, t) zone \<Rightarrow> 's \<Rightarrow> ('c, t) zone \<Rightarrow> bool" ("_ \<turnstile> \<langle>_, _\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>_, _\<rangle>" [61,61,61] 61) where refl: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l, Z\<rangle>" | step: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> A \<turnstile> \<langle>l', Z'\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l'', Z''\<rangle> \<Longrightarrow> A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'', Z''\<rangle>" declare steps_z_beta.intros[intro] lemma V'_V: "Z \<in> V' \<Longrightarrow> Z \<subseteq> V" unfolding V'_def by auto lemma steps_z_beta_V': "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow>\<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> Z' \<in> V'" proof (induction rule: steps_z_beta.induct) case refl then show ?case by fast next case (step A l Z l' Z' l'' Z'') from this(2) obtain Z''' where Z''': "A \<turnstile> \<langle>l', Z'\<rangle> \<leadsto> \<langle>l'',Z'''\<rangle>" "Z'' = Approx\<^sub>\<beta> Z'''" by auto from step_z_V'[OF this(1)] step have "Z''' \<in> V'" by auto from apx_V'[OF V'_V, OF this] Z'''(2) show ?case by auto qed lemma alpha_beta_step: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> \<exists> Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<alpha> \<langle>l', Z''\<rangle> \<and> Z' \<subseteq> Z''" apply (induction rule: step_z_beta.induct) apply (frule step_z_V') apply assumption+ apply (rotate_tac 4) apply (drule approx_\<beta>_closure_\<alpha>') apply auto done subsubsection \<open>Soundness\<close> lemma alpha_beta_step': "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> W \<subseteq> V \<Longrightarrow> Z \<subseteq> W \<Longrightarrow> \<exists> W'. A \<turnstile> \<langle>l, W\<rangle> \<leadsto>\<^sub>\<alpha> \<langle>l', W'\<rangle> \<and> Z' \<subseteq> W'" proof (induction rule: step_z_beta.induct) case (step_beta A l Z l' Z') from alpha_interp.step_z_mono[OF step_beta(1,6)] obtain W' where W': "A \<turnstile> \<langle>l, W\<rangle> \<leadsto> \<langle>l',W'\<rangle>" "Z' \<subseteq> W'" by blast from approx_\<beta>_closure_\<alpha>'[OF step_z_V'[OF step_beta(1-4)]] alpha_interp.cla_mono[OF this(2)] this(1) show ?case by auto qed lemma alpha_beta_steps: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> Z \<in> V' \<Longrightarrow> \<exists> Z''. 
A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<alpha>* \<langle>l', Z''\<rangle> \<and> Z' \<subseteq> Z''" proof (induction rule: steps_z_beta.induct) case refl then show ?case by auto next case (step A l Z l' Z' l'' Z'') then obtain Z''' where *: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<alpha>* \<langle>l',Z'''\<rangle>" "Z' \<subseteq> Z'''" by auto from alpha_beta_step'[OF step.hyps(2) step.prems(1,2) steps_z_beta_V'[OF step.hyps(1) step.prems] alpha_interp.steps_z_alpha_V[OF this(1) V'_V] this(2)] step.prems obtain W' where "A \<turnstile> \<langle>l', Z'''\<rangle> \<leadsto>\<^sub>\<alpha> \<langle>l'',W'\<rangle>" "Z'' \<subseteq> W'" by blast with * show ?case by auto qed corollary steps_z_beta_sound: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> \<forall>c\<in>clk_set A. v c \<le> n \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> Z \<in> V' \<Longrightarrow> Z' \<noteq> {} \<Longrightarrow> \<exists> Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l', Z''\<rangle> \<and> Z'' \<noteq> {}" proof (goal_cases) case 1 then have "Z \<subseteq> V" unfolding V'_def by auto from alpha_beta_steps[OF 1(1,3,2,4)] obtain Z''' where *: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<alpha>* \<langle>l',Z'''\<rangle>" "Z' \<subseteq> Z'''" by blast from alpha_interp.steps_z_alpha_closure_involutive[OF *(1) 1(3) \<open>Z \<subseteq> V\<close>] obtain Z'' where Z'': "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l',Z''\<rangle>" "Closure\<^sub>\<alpha> Z''' \<subseteq> Closure\<^sub>\<alpha> Z''" "Z'' \<subseteq> Z'''" by blast with alpha_interp.closure_subs[OF alpha_interp.steps_z_alpha_V[OF *(1) \<open>Z \<subseteq> V\<close>]] 1(5) alpha_interp.cla_empty_iff[OF alpha_interp.steps_z_V, OF this(1) \<open>Z \<subseteq> V\<close>] *(2) have "Z'' \<noteq> {}" by auto with Z'' show ?thesis by auto qed subsubsection \<open>Completeness\<close> lemma apx_mono: "Z' \<subseteq> V \<Longrightarrow> Z \<subseteq> Z' \<Longrightarrow> Approx\<^sub>\<beta> Z \<subseteq> Approx\<^sub>\<beta> Z'" proof (goal_cases) case 1 with beta_interp.apx_in have "Approx\<^sub>\<beta> Z' \<in> {S. \<exists>U M. S = \<Union>U \<and> U \<subseteq> \<R>\<^sub>\<beta> \<and> Z' \<subseteq> S \<and> beta_interp.vabstr S M \<and> beta_interp.normalized M}" by auto with 1 obtain U M where "Approx\<^sub>\<beta> Z' = \<Union>U" "U \<subseteq> \<R>\<^sub>\<beta>" "Z \<subseteq> Approx\<^sub>\<beta> Z'" "beta_interp.vabstr (Approx\<^sub>\<beta> Z') M" "beta_interp.normalized M" by auto with beta_interp.apx_min show ?thesis by auto qed lemma step_z_beta_mono: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Z'\<rangle> \<Longrightarrow> Z \<subseteq> W \<Longrightarrow> W \<subseteq> V \<Longrightarrow> \<exists> W'. 
A \<turnstile> \<langle>l, W\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', W'\<rangle> \<and> Z' \<subseteq> W'" proof (goal_cases) case 1 then obtain Z'' where *: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto> \<langle>l',Z''\<rangle>" "Z' = Approx\<^sub>\<beta> Z''" by auto from alpha_interp.step_z_mono[OF this(1) 1(2)] obtain W' where "A \<turnstile> \<langle>l, W\<rangle> \<leadsto> \<langle>l',W'\<rangle>" "Z'' \<subseteq> W'" by auto moreover with *(2) apx_mono[OF alpha_interp.step_z_V] \<open>W \<subseteq> V\<close> have "Z' \<subseteq> Approx\<^sub>\<beta> W'" by metis ultimately show ?case by blast qed lemma steps_z_beta_V: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> Z \<subseteq> V \<Longrightarrow> Z' \<subseteq> V" proof (induction rule: steps_z_beta.induct) case refl then show ?case by blast next case (step A l Z l' Z' l'' Z'') then obtain Z''' where "A \<turnstile> \<langle>l', Z'\<rangle> \<leadsto> \<langle>l'',Z'''\<rangle>" "Z'' = Approx\<^sub>\<beta> Z'''" by auto with alpha_interp.step_z_V[OF this(1)] apx_V step(3,4) show "Z'' \<subseteq> V" by auto qed lemma steps_z_beta_mono: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z'\<rangle> \<Longrightarrow> Z \<subseteq> W \<Longrightarrow> W \<subseteq> V \<Longrightarrow> \<exists> W'. A \<turnstile> \<langle>l, W\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', W'\<rangle> \<and> Z' \<subseteq> W'" proof (induction rule: steps_z_beta.induct) case refl then show ?case by auto next case (step A l Z l' Z' l'' Z'') then obtain W' where "A \<turnstile> \<langle>l, W\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l',W'\<rangle>" "Z' \<subseteq> W'" by auto with step_z_beta_mono[OF step(2) this(2) steps_z_beta_V[OF this(1) step(5)]] show ?case by blast qed lemma steps_z_beta_alt: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta> \<langle>l', Z'\<rangle> \<Longrightarrow> A \<turnstile> \<langle>l', Z'\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'', Z''\<rangle> \<Longrightarrow> A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'', Z''\<rangle>" by (rotate_tac, induction rule: steps_z_beta.induct) blast+ lemma steps_z_beta_complete: "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l', Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> Z \<subseteq> V \<Longrightarrow> \<exists> Z''. 
A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l',Z''\<rangle> \<and> Z' \<subseteq> Z''" proof (induction rule: steps_z.induct) case refl with apx_empty_iff show ?case by blast next case (step A l Z l' Z' l'' Z'') with alpha_interp.step_z_V[OF this(1,5)] obtain Z''' where "A \<turnstile> \<langle>l', Z'\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'',Z'''\<rangle>" "Z'' \<subseteq> Z'''" by blast with steps_z_beta_mono[OF this(1) beta_interp.apx_subset apx_V[OF alpha_interp.step_z_V[OF step(1,5)]]] obtain W' where "A \<turnstile> \<langle>l', Approx\<^sub>\<beta> Z'\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'', W'\<rangle>" " Z'' \<subseteq> W'" by auto moreover with step(1) have "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l'',W'\<rangle>" by (auto intro: steps_z_beta_alt) ultimately show ?case by auto qed lemma steps_z_beta_complete': "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l',Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> Z \<subseteq> V \<Longrightarrow> Z' \<noteq> {} \<Longrightarrow> \<exists> Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l',Z''\<rangle> \<and> Z'' \<noteq> {}" using steps_z_beta_complete by fast end end
# Simplified model of the lateral dynamics

## Dynamic model

The lean dynamics of a motorcycle can be approximated by the behavior of an inverted pendulum, if we treat the contact point $O$ between the wheel and the ground as a joint whose axis is parallel to the direction of travel. The following figure shows a diagram of the model, which can be read as a rear view of the motorcycle, where $G$ is the center of mass:

During rolling, the contact point between the wheel and the ground corresponds at each instant to different material points, both on the wheel and on the ground. If rolling occurs without slipping, that contact point always has zero velocity, so it constitutes the *instantaneous center of rotation* (ICR) of the wheel. Although the physical contact point has zero velocity at every instant, the location of the contact changes over time, tracing a path on the ground. The speed at which the contact point travels along this path is called the *succession velocity*, and it is easy to show that, when there is rolling without slipping and the motion is rectilinear, it coincides with the translational velocity of the wheel center or, equivalently, with the forward speed $v$ of the motorcycle. If the motion is not rectilinear but the radius of curvature $\rho$ of the trajectory is large enough, $v$ is still a good estimate of the succession velocity.

In our model, therefore, we will consider that the joint $O$ moves along the ground at a speed $v$ (taken as constant to simplify the analysis), following a curvilinear trajectory with radius of curvature $\rho$, which depends on the steering angle $u$ and on the wheelbase $l$ of the motorcycle, as shown next.

## Effect of the steering angle

To estimate the relationship between the steering angle $u$ and the radius of curvature $\rho$ of the trajectory, we consider a simplified model of the motorcycle like the one shown in the figure, in this case seen from above:

In this model, unlike in a real motorcycle, the steering axis is vertical and passes through the center of the front wheel. For this reason, when the motorcycle is not leaning, the wheelbase $l$ remains constant when the steering is turned by any angle $u$. Under these conditions, for the rigid-body velocity field of the chassis to be satisfied without wheel slip, the centers of curvature of the wheel trajectories must coincide at a single point, which is in turn the ICR of the chassis. It is easily shown that this point is the intersection of the axes of both wheels, as the previous figure shows. This yields the geometric relationship between the steering angle and the radii of curvature $\rho_f$ and $\rho_r$ of the trajectories of the front and rear wheels, respectively:

$$ \rho_f = \frac{l}{\sin u} \qquad \rho_r = \frac{l}{\tan u} $$

For small steering angles $u$, both radii can be approximated by a single value $\rho$:

$$ \rho \approx \frac{l}{u} $$

## Equation of motion

To study the motion of the pendulum, we use moving axes $\bar x \bar y \bar z$ with origin at $O$, such that $\bar y$ always points in the direction of travel (i.e., it is tangent to the trajectory) and $\bar z$ always points upwards, coinciding with the global $z$ axis.
For the local axes to remain tangent to the trajectory at all times, they must rotate with an angular velocity $\mathbf\Omega$:

$$ \mathbf \Omega = \begin{bmatrix} 0 \\ 0 \\ \Omega \end{bmatrix} = \begin{bmatrix} 0 \\ 0 \\ \frac{v}{\rho} \end{bmatrix} $$

which, being directed along $\bar z$, coincides with its expression $\bar{\mathbf\Omega}$ in local axes. We take the radius of curvature $\rho$ to be positive when the motorcycle turns left (positive angular velocity about the $z$ axis).

The angular velocity of the pendulum, expressed in the local axes $\bar x \bar y \bar z$, is the sum of this transport angular velocity $\bar{\mathbf\Omega}$ and a relative angular velocity about the $\bar y$ axis, caused by the variations of the lean angle $\varphi$:

$$ \bar{\mathbf\omega} = \begin{bmatrix} 0 \\ \dot\varphi \\ \frac{v}{\rho} \end{bmatrix} $$

To obtain the angular acceleration, this vector must be differentiated in the moving basis using Bour's formula (the transport theorem):

$$ \dot{\bar{\mathbf\omega}} = \left.\frac{d{\bar{\mathbf\omega}}}{dt}\right|_m + \bar{\mathbf\Omega}\times\bar{\mathbf\omega} = \begin{bmatrix} -\frac{v}{\rho}\dot\varphi \\ \ddot\varphi \\ 0 \end{bmatrix} $$

In this differentiation, the radius of curvature is treated as constant, since strictly speaking it is not a degree of freedom of the motion (it cannot be varied arbitrarily; it depends on the trajectory). Applying the rigid-body acceleration field, the acceleration of the center of mass in local axes, $\bar{\mathbf a}_G$, equals:

$$ \bar{\mathbf a}_G = \bar{\mathbf a}_O + \bar{\mathbf\omega}\times\left(\bar{\mathbf\omega}\times\bar{\mathbf{OG}}\right) + \dot{\bar{\mathbf\omega}}\times\bar{\mathbf{OG}} $$

where $\bar{\mathbf a}_O$ is the acceleration of the contact point $O$, which is the one corresponding to a curvilinear trajectory of radius $\rho$ traversed at constant speed $v$:

$$ \bar{\mathbf a}_O = \begin{bmatrix} -\frac{v^2}{\rho} \\ 0 \\ 0 \end{bmatrix} $$

Substituting this into the previous expression and evaluating the cross products gives the acceleration of the center of mass in local axes:

$$ \bar{\mathbf a}_G = \begin{bmatrix} -\frac{v^2}{\rho^2}\left(\rho + h\sin\varphi\right) - h\dot\varphi^2\sin\varphi + h\ddot\varphi\cos\varphi \\ 2h\frac{v}{\rho}\dot\varphi\cos\varphi \\ - h\dot\varphi^2\cos\varphi - h\ddot\varphi\sin\varphi \end{bmatrix} $$

Now that we have the acceleration of the center of mass, we can write the Newton-Euler equations for our system, considering only the dynamics in the $\bar x\bar z$ plane. From the first figure, where the components of the acceleration of $G$ are shown in blue, it can be seen that:

$$ \begin{align} & R_x = m\bar a_{Gx} \\ & R_z - mg = m\bar a_{Gz} \\ & R_zh\sin\varphi - R_xh\cos\varphi = I_G\ddot\varphi \end{align} $$

These equations are nonlinear, so they have to be linearized around the equilibrium position ($\varphi = 0$) in order to express the dynamics of the system in *state-space* form. For small values of $\varphi$ and $\dot\varphi$, we can assume $\sin\varphi \approx \varphi$, $\cos\varphi \approx 1$, $\varphi^2 \approx 0$ and $\dot\varphi^2 \approx 0$. Moreover, if $\rho$ is large, the terms in $1/\rho^2$ can be neglected.
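Before applying these simplifications by hand, the expression for $\bar{\mathbf a}_G$ above can be cross-checked symbolically. This is an optional sketch (it assumes the sympy package, which is not used elsewhere in this notebook, and takes $\bar{\mathbf{OG}} = (h\sin\varphi, 0, h\cos\varphi)$); it should reproduce the matrix shown above:

```python
import sympy as sp

t = sp.symbols('t')
v, rho, h = sp.symbols('v rho h', positive=True)
phi = sp.Function('varphi')(t)

Om = sp.Matrix([0, 0, v/rho])                      # angular velocity of the moving frame
w = sp.Matrix([0, phi.diff(t), v/rho])             # angular velocity of the pendulum, local axes
wdot = w.diff(t) + Om.cross(w)                     # Bour's formula (rho treated as constant)
OG = sp.Matrix([h*sp.sin(phi), 0, h*sp.cos(phi)])  # position of G relative to O
aO = sp.Matrix([-v**2/rho, 0, 0])                  # acceleration of the contact point O
aG = aO + w.cross(w.cross(OG)) + wdot.cross(OG)    # rigid-body acceleration field
print(sp.simplify(aG))
```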
Applying these simplifications, and substituting the values of $R_x$ and $R_z$ from the first two equations into the third, we obtain the linearized equation of motion:

$$ \left(I_G + mh^2\right)\ddot\varphi = mgh\varphi + mh\frac{v^2}{\rho} $$

Substituting the expression of $\rho$ as a function of $u$ yields the equation that will be used to design the controller:

$$ \left(I_G + mh^2\right)\ddot\varphi = mgh\varphi + mh\frac{v^2}{l}u $$

The parameters we will need to design the controller are therefore the total mass $m$, the moment of inertia $I_G$ about a longitudinal axis through the center of mass, the height $h$ of the center of mass and the wheelbase $l$, in addition to the forward speed $v$.

## Linearized system in state-space form

We now have the equation of motion linearized around the equilibrium position, in the form of a linear differential equation with constant coefficients. To design the controller, we want to rewrite it using the *state-space* representation, which for a generic SISO (*single input, single output*) system, with one input $u$ and one output $y$, takes the form:

$$ \begin{align} & \dot{\mathbf x}=\mathbf{Ax}+\mathbf{B}u \\ & y = \mathbf{Cx} + Du \end{align} $$

The first equation describes the dynamics of the system, modeled through a set of *states* collected in a vector $\mathbf x$, which are driven by an input $u$ (in our case, the steering angle). The matrix $\mathbf A$ and the vector $\mathbf B$ are constant, so this is a system of linear differential equations with constant coefficients. The second equation relates the output $y$, i.e. the quantity of the system that we want to know and/or control (the lean angle $\varphi$ in our system), to the states $\mathbf x$ and the input $u$, through a matrix $\mathbf C$ and a scalar $D$, also constant.

The equation of motion of our system is of second order, so we have to choose the state vector in such a way that it can be written as a first-order system. We can choose the states $\mathbf x$ as follows:

$$ \mathbf{x}= \begin{bmatrix} \varphi \\ \dot\varphi \end{bmatrix} \implies \dot{\mathbf{x}}= \begin{bmatrix} \dot\varphi \\ \ddot\varphi \end{bmatrix} $$

This allows us to rewrite the equation of motion in the form $\dot{\mathbf x}=\mathbf{Ax}+\mathbf{B}u$:

$$ \begin{bmatrix} \dot\varphi \\ \ddot\varphi \end{bmatrix} = \begin{bmatrix} 0 && 1 \\ \frac{mgh}{\left(I_G+mh^2\right)} && 0 \end{bmatrix} \begin{bmatrix} \varphi \\ \dot\varphi \end{bmatrix} + \begin{bmatrix} 0 \\ \frac{mhv^2}{l\left(I_G+mh^2\right)} \end{bmatrix}u $$

As mentioned above, the output $y$ of the system is the lean angle $\varphi$ itself, so the four terms $\mathbf A$, $\mathbf B$, $\mathbf C$ and $D$ are finally:

$$ \mathbf A = \begin{bmatrix} 0 && 1 \\ \frac{mgh}{\left(I_G+mh^2\right)} && 0 \end{bmatrix} \qquad \mathbf B = \begin{bmatrix} 0 \\ \frac{mhv^2}{l\left(I_G+mh^2\right)} \end{bmatrix} \qquad \mathbf C = \begin{bmatrix} 1 && 0 \end{bmatrix} \qquad D = 0 $$

To continue with the controller design, numeric values will be assigned to the parameters. Generic values are used here; once the motorcycle is built, the real values will have to be estimated and substituted. Every time a code cell is modified, it has to be run again using SHIFT+ENTER.
In addition, all the cells below it will also have to be run, or at least those affected by the changes. The options in the Jupyter menu can be used to run one cell and those below it, or all of them from the beginning, etc.

```python
import numpy as np

m = 100.0  # Mass in kg
I = 10.0   # Moment of inertia in kg·m2
v = 10.0   # Speed in m/s
l = 1.0    # Wheelbase in m
h = 1.0    # Height of the center of mass in m
g = 9.81   # Acceleration of gravity in m/s2

A = np.array([[0.0, 1.0], [m*g*h/(I + m*h**2), 0.0]])
B = np.array([[0], [m*h*v**2/l/(I + m*h**2)]])
C = np.array([[1.0, 0.0]])
D = np.array([[0]])

# Print the contents of A and B
print(A)
print(B)
```

# Controller design

## Stability of the system

In the control literature it is common to refer to the system we want to control, in general terms, as the *plant*. When there is no feedback, the system is known as the *open-loop plant*. If we consider the free system, without applying any input, we get a differential equation of the form:

$$ \dot{\mathbf x} = \mathbf A\mathbf x $$

A system is said to be *asymptotically stable* if, for any initial conditions, the state vector $\mathbf x$ tends to zero as time tends to infinity. It can be shown that the solution of such a system is stable when all the eigenvalues of $\mathbf A$ have negative real part. As can be seen, our system is not stable, since it has an eigenvalue with positive real part:

```python
# Check the eigenvalues of 'A'
print(np.real(np.linalg.eig(A)[0]))
```

## First approach: PID regulator

When a dynamic model of the system is not available, the simplest way to stabilize it is with a PID regulator. In this case, one could use a PID whose input is the error between the desired and the actual lean angle, and whose output is the steering angle. In general, what has to be done to stabilize a motorcycle is to turn the handlebar in the direction of the fall ([*steer into the fall*](https://youtu.be/o7nSQ2ycGX4)). For that, an estimate of the lean angle of the motorcycle is needed, and the simplest approximation is to use the static equilibrium angle, i.e. the one that keeps the motorcycle in equilibrium when turning at constant speed and radius. This angle can be estimated from the equation of motion, by setting the derivatives of the angle $\varphi$ to zero:

$$ mgh\varphi + mh\frac{v^2}{\rho} = 0 \implies \varphi \approx - \frac{v^2}{\rho g} = -\frac{v}{\rho}\frac{v}{g} = -\Omega\frac{v}{g} $$

where $\Omega$, which as we recall is the angular velocity of the reference frame $\bar x\bar y\bar z$, can be approximated for small lean angles by the reading of the vertical axis of the gyroscope, while $v$ can be estimated by fitting an *encoder* to the rear wheel. The problem with this kind of controller is that it is usually hard to tune, so, when possible, it is better to resort to more advanced methods, such as the state feedback described below.
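As a quick numerical illustration of this estimate (the yaw-rate value below is a made-up reading, not a measurement):

```python
# Static-equilibrium lean angle, phi = -Omega*v/g
Omega = 0.2          # hypothetical gyro yaw-rate reading in rad/s
phi_eq = -Omega*v/g  # equilibrium lean angle in rad
print(np.degrees(phi_eq))
```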
## Stabilization by state feedback

State feedback consists in defining the system input $u$ as a negative feedback of the state, in the following way:

$$ u = -\mathbf K\mathbf x $$

With this type of feedback, a gain matrix $\mathbf K$ that stabilizes the system, i.e. that makes all the eigenvalues have negative real part, can be determined analytically. To see it more clearly, the feedback can be substituted into the system equations:

$$ \dot{\mathbf x} = \mathbf{Ax} - \mathbf{BKx} = \left(\mathbf A - \mathbf{BK}\right)\mathbf x $$

We see that the stability of the system is now determined by the eigenvalues of $\mathbf A - \mathbf{BK}$. This means that the dynamics of the system can be altered at will, tuning the matrix $\mathbf K$ to place the eigenvalues wherever suits us best. This control method can be represented schematically as follows:

## Linear quadratic regulator (LQR)

The state feedback scheme leaves a difficult decision open to the designer: where should the eigenvalues of the system be placed? The literature describes several methods for deciding where to place the eigenvalues so that the system response exhibits given characteristics. These techniques are known as *pole placement* (a minimal sketch is shown below). In our project we will use a slightly more elaborate, yet easy to use, method: the LQR (*Linear Quadratic Regulator*). The LQR is just another way of computing the matrix $\mathbf K$, but instead of having to decide where to place the poles of the system, we look for the unique value of $\mathbf K$ that minimizes the following function:

$$ J = \int_0^\infty\left(\mathbf x^T\mathbf Q\mathbf x + Ru^2\right)dt $$

This function (written here in the specific form for a single-input system) combines the squared values, integrated over time, of the two quantities we want to minimize: the error in the states and the magnitude of the control input. Each term is weighted by a weight matrix, $\mathbf Q$ for the states and $R$ for the input, so we can decide which variables the controller should penalize the most. For example, assigning a large value to the diagonal element $Q_{ii}$ of $\mathbf Q$ makes errors in the state $x_i$ strongly penalized in the objective function $J$, so the controller will strive to keep them small. Conversely, doing the same with $R$ tells the controller to use the actuator associated with the input $u$ as little as possible.

By modifying the elements of $\mathbf Q$ and $R$, the behavior of the controller can be adjusted to our requirements, giving more weight to the error in the states or to the control effort. The former can be important if keeping the error small in some state is critical, and the latter, for instance, if high energy efficiency is a priority, even at the cost of lower control accuracy. In our case, it could be used to keep the controller from commanding excessively large steering angles.
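Since pole placement is only mentioned in passing above, here is a minimal sketch of it using the `place` function of the Python control module; the two pole locations below are arbitrary choices for illustration, not a tuning recommendation:

```python
from control import place

# Arbitrary closed-loop pole locations, chosen only for illustration
poles = [-2.0, -4.0]
K_pp = place(A, B, poles)

# The eigenvalues of A - B*K_pp should match the requested poles
print(np.linalg.eig(A - B@K_pp)[0])
```

With the LQR, instead of picking these locations by hand, the weights $\mathbf Q$ and $R$ just described determine where the poles end up.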
Once the weight matrices have been determined, the `lqr` function of the Python control module solves the optimization problem and directly returns the gain matrix $\mathbf K$ that minimizes the function $J$:

```python
from control import ss, lqr, ss2tf, minreal, forced_response

sys = ss(A, B, C, D)

Q = np.eye(2)  # Initial guess for 'Q'
Q[0, 0] = 10   # Adjust the weight of the angle 'phi'
R = 1          # Weight of the steering angle 'u'

K = lqr(sys, Q, R)[0]
```

To evaluate the behavior of the regulator, we simulate the system starting from an initial state away from equilibrium. For example, we place the motorcycle at an initial angle $\varphi_0$ of 20º while it travels in a straight line at the preset speed $v$ (10 m/s), so that the controller will generate a steering motion that stabilizes it upright. The reference input $r$ that appears in the controller diagram is left at zero (what this input is for will be explained later). As an exercise, it is interesting to check how the response changes when the LQR weights are modified:

```python
%%capture
from matplotlib.animation import FuncAnimation
import plotfunctions as pf
```

```python
# Assemble the closed-loop system
clsys = ss(A - B*K, B, C, D)

tf = 4.00  # Simulation length in seconds
dt = 0.01  # Simulation time step in seconds

# Simulate the response for an initial angle of 20 degrees
t = np.arange(0, tf, dt)
r = np.zeros(t.shape)
T, Y, X = forced_response(clsys, t, r, X0=[np.radians(20), 0])
pf.plot_response(T, X)
```

As can be seen, the angle $\varphi$ converges quickly to zero, stabilizing the system. We can verify that, once the feedback is applied, all the eigenvalues of the system now have negative real part:

```python
print(np.real(np.linalg.eig(A - B*K)[0]))
```

An animation of the resulting motion is shown below:

```python
FuncAnimation(pf.fg1, pf.anim, init_func=pf.init, frames=len(T),
              fargs=(X[[0, 1]], dt, h), interval=dt*1000, blit=True)
```

## Controlling the lean angle

When state feedback is applied, the resulting controller always tries to drive all the states to equilibrium; this is what is known as a *regulator*. So far, this has served to stabilize the system, keeping it balanced upright. If what we want is to assign an arbitrary value to some output, our regulator has to be turned into a *servomechanism*. The goal now is to make the output (in our case the lean angle $\varphi$) track a reference value $r$, instead of converging to zero. In this way, we will be able to steer the motorcycle without losing stability, since controlling the lean angle indirectly controls the radius of curvature of the trajectory.

Before explaining how this is achieved, it is important to introduce a new concept: the *type* of a system or plant. By definition, a plant is of type $N$ when, representing it as a unity-feedback system (like the one shown in the figure), the lowest-degree term of the denominator of the transfer function $G$ has degree $N$. That is, if the open-loop transfer function $G$ is written in the form:

$$ G(s) = \frac{K\left(T_as + 1\right)\left(T_bs + 1\right)\cdots\left(T_ms + 1\right)} {s^N\left(T_1s + 1\right)\left(T_2s + 1\right)\cdots\left(T_ps + 1\right)} $$

the system type corresponds to the exponent $N$ that appears in the denominator.
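As a quick illustration of the definition (a toy transfer function, unrelated to our plant), a single factor $s$ in the denominator makes a system type 1:

```python
from control import TransferFunction

# Toy example: one pole at the origin => type 1 system
G_toy = TransferFunction([1.0], [1.0, 1.0, 0.0])  # G(s) = 1/(s*(s + 1))
print(G_toy)
```

`TransferFunction` is used here instead of the `tf` shortcut so as not to clobber the simulation length variable `tf` defined above.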
The type of a system determines its steady-state tracking error $e$ for different kinds of input. For example, in a type 1 system the error tends to zero for a step input, to a finite value for a ramp input, and to infinity for a quadratic or higher-order input. In contrast, a type 0 system already shows a finite steady-state error for a step input, and its error grows without bound for a ramp input or higher.

To find the type of our system, we first have to represent it according to the structure in the figure. The transfer function $H$ of the complete system, between the input $r$ and the output $y$, can be obtained by replacing the tracking error $e$ with its value $r - y$:

$$ y = G\left(r - y\right) \implies H(s) = \frac{y}{r} = \frac{G}{1 + G} $$

The inverse change can then be applied to express any system with transfer function $H$ as a unity-feedback system:

$$ G(s) = \frac{H}{1 - H} $$

We now check the type of our system, feedback included, taking the lean angle as the only output. The `ss2tf` function returns the transfer function $H$ of a *state-space* system, and `minreal` (*minimal realization*) cancels the common factors of the numerator and the denominator:

```python
# Transfer function of the system with feedback
H = ss2tf(clsys)
# Compute G to transform into a unity-feedback system
minreal(H/(1 - H))
```

Looking at the denominator, it is clear that our closed-loop system is of type 0. That means that, for a step input, the output $\varphi$ will never reach the reference value $r$ in steady state. If we want to control the lean angle properly, we will have to turn the system into a type 1 system.

## Type 1 servomechanism for controlling the lean angle

There are several methods for converting a type 0 system into a type 1 system. The simplest is to scale the reference input $r$ by a precompensation gain $\bar N$ that makes the output we want to control (in our case the lean angle $\varphi$) reach the requested value in steady state. The figure shows a diagram of the original system, with the precompensation applied at the input:

When the precompensation is added, the open-loop system input becomes:

$$ u = -\mathbf{Kx} + \bar Nr $$

Let us study what happens to $\mathbf x$ and $u$ when the reference input $r$ is a unit step. We will assume that, in steady state, the output $y$ does reach the reference value (i.e., unity). Since the derivatives of the states $\dot{\mathbf x}$ are zero in steady state, we can write:

$$ \begin{bmatrix} \mathbf 0 \\ 1 \end{bmatrix} = \begin{bmatrix} \mathbf A && \mathbf B \\ \mathbf C && D \end{bmatrix} \begin{bmatrix} \mathbf{x_\infty} \\ u_\infty \end{bmatrix} $$

If the matrix of this linear system is invertible, solving it yields the values of $\mathbf x_\infty$ and $u_\infty$.
Since the reference $r$ is a unit step, the open-loop system input will reach the following value:

$$ u_\infty = -\mathbf{Kx}_\infty + \bar N $$

from which we obtain $\bar N$ as:

$$ \bar N = \mathbf{Kx}_\infty + u_\infty $$

```python
# Precompensation for the output 'xp' in steady state
xuinf = np.linalg.solve(np.block([[A, B], [C, D]]), [0, 0, 1])
Nb = K@xuinf[:-1] + xuinf[-1]
```

Before simulating the response, we can assemble the closed-loop system with precompensation at the input and check its type:

```python
# Closed-loop system, including the precompensation
clsyspc = ss(A - B*K, B*Nb, C, D)

# Transfer function of the system with feedback
H = ss2tf(clsyspc)
# Compute G(s) for the unity-feedback system
minreal(H/(1 - H))
```

We see that the system can be considered to be practically of type 1, since the degree-0 term of the denominator is zero or negligible (it varies depending on the platform where the code is run). To evaluate the behavior of the controller, a simple maneuver will be simulated, equivalent to taking a right-hand curve. Starting from equilibrium in a straight line, the desired angle is stepped from 0 to 20 degrees, held there for two seconds, and then set back to zero.

```python
# Simulate the step response
t = np.arange(0, tf, dt)
r = np.zeros(len(t))
r[t < 2] = np.radians(20)
T, Y, X = forced_response(clsyspc, t, r)
pf.plot_response(T, X)
```

We observe that the system does reach the target angle, and returns to vertical in a controlled way. Before implementing this controller, we have to be careful with the values of the input: since the model is linearized, they can take any value. This means that unrealistic steering angles, such as 120 degrees, may show up. It is therefore important to look at the angles the controller is actually using:

```python
pf.plot_steering(T, (Nb*r - K@X).T)
```

We see that, to reach the requested angle of 20 degrees (positive $\varphi$, to the right), the steering briefly turns 60 degrees (positive $u$, to the left), and once the desired lean angle is reached it holds a slight angle to the right. Later, when asked to return to vertical after 2 seconds, it gives an even stronger nudge to the right, which steers the motorcycle back towards the vertical. This is well known to anyone who has ridden a motorcycle, and is what is called *countersteering*: to turn to one side, you first have to nudge the handlebar towards the opposite side, which helps lean the motorcycle in the intended direction.

The problem is that a 60-degree turn falls outside the validity range of the linear model, apart from the fact that a handlebar stroke of that magnitude on a motorcycle travelling at 10 m/s does not seem very reasonable. To correct this, the LQR weights must be retuned, increasing the control effort weight $R$ to penalize the use of large angles, with the trade-off that the maneuver will be slower. It is interesting to try different values, re-running the code to see how the response changes (a short sketch of this exercise is included below, after the animation). An animation of the resulting motion is shown next:

```python
FuncAnimation(pf.fg1, pf.anim, init_func=pf.init, frames=len(T),
              fargs=(X[[0, 1]], dt, h), interval=dt*1000, blit=True)
```

The parameters obtained are already enough to control the lean angle of the motorcycle.
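As an example of the retuning exercise just suggested, the following sketch recomputes the gains with a larger control-effort weight (the value 25 is an arbitrary illustrative choice) and plots the resulting steering angle, which should show a smaller peak at the cost of a slower maneuver:

```python
# Illustrative retuning: a larger control-effort weight penalizes steering
K2 = lqr(sys, Q, 25)[0]
Nb2 = K2@xuinf[:-1] + xuinf[-1]  # recompute the precompensation for the new gains
T2, Y2, X2 = forced_response(ss(A - B*K2, B*Nb2, C, D), t, r)
pf.plot_steering(T2, (Nb2*r - K2@X2).T)
```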
The values printed below can be copied and pasted directly into an Arduino *sketch*, so that, to implement the controller, one only has to compute the steering angle as $u = -\mathbf{Kx} + \bar Nr$.

```python
print('const float K1 =%10.6f; // Gain phi' % K[0, 0])
print('const float K2 =%10.6f; // Gain phip' % K[0, 1])
print('const float Nb =%10.6f; // Precompensation' % Nb)
```

One of the main drawbacks of state feedback methods is that, to compute $u$, we need to know the value of all the states (in addition to the system parameters). This is a problem, because normally not all the states can be measured easily and accurately. In such cases, an *observer* is used, which consists of a model of the system that is integrated over time, correcting the integration errors from the sensor data. Since the use of observers embedded in the controller is beyond the scope of this project, a simplified state estimation will be used. In our system, the vector $\mathbf x$ contains two states: $\varphi$ and $\dot\varphi$. The first can be estimated using the method described above, based on the static equilibrium conditions. The second is very easy to measure, since it is provided directly by the reading of the longitudinal axis of the gyroscope. If this turned out not to be valid, a more advanced observer could be considered.

## Type 1 servomechanism with an integrator at the input

The problem with using precompensation is that the response is very sensitive to errors in the system parameters. Any deviation in the value of $\bar N$ will make the system type 0 again, and therefore give it a steady-state error. A more robust way to solve this is to add an integrator at the input, instead of a simple gain:

To use this control scheme, the system has to be augmented with an additional state $\xi$, which is the integral of the tracking error $r - y$. Its derivative is therefore:

$$ \dot\xi = r - y = r - \left(\mathbf{Cx} + Du\right) $$

and the input $u$ now becomes:

$$ u = -\mathbf K \mathbf x + k_I\xi $$

The complete equation of the dynamics of the augmented system, without including the feedback, can be written as:

$$ \begin{bmatrix} \dot{\mathbf x} \\ \dot\xi \end{bmatrix} = \begin{bmatrix} \mathbf A && \mathbf 0 \\ -\mathbf C && 0 \end{bmatrix} \begin{bmatrix} \mathbf{x} \\ \xi\end{bmatrix} + \begin{bmatrix} \mathbf B \\ -D \end{bmatrix}u + \begin{bmatrix} \mathbf 0 \\ 1 \end{bmatrix}r $$

We will study the response of this system to a step input, as time tends to infinity. To that end, we define the following quantities:

$$ \mathbf e = \begin{bmatrix} \mathbf{x} - \mathbf{x}_\infty \\ \xi - \xi_\infty \end{bmatrix} \qquad u_e = u - u_\infty $$

which represent the deviations of the state and the input from their own steady-state values. Since $r$ is a step, its value is always the same for $t>0$, so we can write the dynamics of the error as:

$$ \dot{\mathbf e} = \begin{bmatrix} \mathbf A && \mathbf 0 \\ -\mathbf C && 0 \end{bmatrix} \mathbf e + \begin{bmatrix} \mathbf B \\ -D \end{bmatrix}u_e = \hat{\mathbf A}\mathbf e + \hat{\mathbf B}u_e $$

Note that, analogously to what happened at the beginning, we have a system in *state-space* form whose states must converge asymptotically to zero.
That means there exist feedback gains $\hat{\mathbf K}$ that stabilize the system:

$$ \hat{\mathbf K} = \begin{bmatrix} \mathbf K && -k_I \end{bmatrix} $$

These gains can be computed, as before, with the LQR algorithm, bearing in mind that we now have three states, the last one being the integral of the tracking error in the lean angle:

```python
# Equations of the system with the errors as states
# The integral of the angle error is added as a third state
Ah = np.block([[A, np.zeros((2, 1))], [-C, 0]])
Bh = np.block([[B], [-D]])

# LQR controller for the error dynamics
Q = np.eye(3)  # Initial guess for 'Q'
Q[0, 0] = 1.0  # Adjust the weight of the angle 'phi'
Q[1, 1] = 0.0  # Adjust the weight of the angle derivative 'phip'
Q[2, 2] = 25   # Weight of the integral of the angle error 'xi'
R = 1          # Weight of the steering angle 'u'

# The 'lqr' function also accepts using just the A and B matrices
Kh = lqr(Ah, Bh, Q, R)[0]
Kn = Kh[0, :-1]
Ki = -Kh[0, -1]
```

To simulate the system, we will use an augmented model of the following form, obtained by substituting the value of $u$ into the system equation:

$$ \begin{bmatrix} \dot{\mathbf x} \\ \dot\xi \end{bmatrix} = \begin{bmatrix} \mathbf A - \mathbf{BK} && \mathbf Bk_I \\ D\mathbf K - \mathbf C && -Dk_I \end{bmatrix} \begin{bmatrix} \mathbf{x} \\ \xi\end{bmatrix} + \begin{bmatrix} \mathbf 0 \\ 1 \end{bmatrix}r $$

Before simulating, we verify that this system is of type 1:

```python
# Closed-loop system with the obtained gains
An = np.block([[A - B*Kn, B*Ki], [D*Kn - C, -D*Ki]])
Bn = [[0], [0], [1]]
Cn = [1, 0, 0]
Dn = 0
clisysint = ss(An, Bn, Cn, Dn)

# Transfer function of the system with feedback
H = ss2tf(clisysint)
# Compute G(s) for the unity-feedback system
minreal(H/(1 - H))
```

To check that this method is much more robust than precompensation, a simple test can be done. In the precompensation model, if an error in the model parameters is simulated by multiplying $\bar N$ by a factor, e.g. 0.9, before assembling the *state-space* system, the system will be seen to become type 0, even for factor values very close to unity. In contrast, doing the same with $k_I$ shows that the degree-0 term of the denominator stays close to zero for any value of $k_I$.

The response is shown graphically below, compared with the one obtained using precompensation (dotted line). As before, it is interesting to try different LQR weights, to see how the behavior of the system changes.

```python
# Response with the same input as in the precompensation model
T, Y, Xn = forced_response(clisysint, t, r)
pf.plot_response(T, Xn, X)
```

Before implementing the controller, we have to make sure that we are not saturating the actuator:

```python
pf.plot_steering(T, (Ki*Xn[2, :] - Kn@Xn[:2, :]).T)
```

We see that the steering angles now stay within much more realistic values. As always, it is interesting to see how the LQR weights affect the result, looking for a balance between a fast (but more abrupt) response and a smooth (but slower) behavior. An animation of the new motion is shown below:

```python
FuncAnimation(pf.fg1, pf.anim, init_func=pf.init, frames=len(T),
              fargs=(Xn[[0, 2]], dt, h), interval=dt*1000, blit=True)
```

Since everything is correct, we can now use the gains for the physical implementation of the controller.
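As a rough preview of that implementation, the following sketch steps the discrete-time control law against the linear model itself (a crude Euler integration standing in for the real motorcycle and its sensors), accumulating the integral state $\xi$ exactly as the microcontroller would:

```python
# Discrete-time control loop, with the linear model standing in for the plant
k = np.ravel(Kn)                         # state-feedback gains as a flat array
x_sim = np.array([np.radians(20), 0.0])  # initial lean angle and lean rate
xi = 0.0                                 # integral of the tracking error
r_ref = 0.0                              # reference lean angle (upright)
for _ in range(int(tf/dt)):
    xi += (r_ref - x_sim[0])*dt                              # accumulate the error
    u_cmd = -(k[0]*x_sim[0] + k[1]*x_sim[1]) + float(Ki)*xi  # u = -Kx + kI*xi
    x_sim = x_sim + (A@x_sim + B[:, 0]*u_cmd)*dt             # crude Euler step of the plant
print(np.degrees(x_sim[0]))              # should have settled near the reference
```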
The gains can be copied and pasted directly into an Arduino sketch, the steering angle now being $u= -\mathbf{Kx} + k_I\xi$. This means that, in addition to the previous states, the integral of the lean error has to be computed along the way, which simply amounts to accumulating that error, $\xi = r - \varphi$, multiplied by the time step, in a variable.

```python
print('const float K1 =%10.6f; // Gain for phi' % Kn[0, 0])
print('const float K2 =%10.6f; // Gain for phip' % Kn[0, 1])
print('const float Ki =%10.6f; // Integral gain' % Ki)
```

## Operation at different speeds

Everything done so far has involved treating the forward speed $v$ as just another model parameter, like the mass or the moment of inertia. That means the controller gains we obtain will be optimized to work at that particular speed. In practice, the controller will most likely work correctly within some range of speeds, but it will start to fail as we move far away from $v$.

One way to assess the sensitivity of the system is to compute the gains for different speeds: the less the gains vary when the speed changes, the more robust the controller will be. If the gains vary a lot and a controller able to work at different speeds is required, several gain vectors $\mathbf K_i$ will have to be computed, each one valid within a certain speed range. One can even study the variation of the gains as a function of speed and implement some interpolation strategy (a minimal sketch of this idea is given after the closing notes of this document).

# Bibliography

[1] Ogata, K. *Modern Control Engineering, 5th Ed.*, Pearson, 2010
[2] Williams II, R. L., Lawrence, D. A. *Linear State-Space Control Systems*, John Wiley & Sons, 2007
[3] Hespanha, J. P. *Linear Systems Theory, 2nd Ed.*, Princeton University Press, 2018
[4] Åström, K. J., Murray, R. M., [*Feedback Systems*](http://www.cds.caltech.edu/~murray/amwiki/index.php), Princeton University Press, 2008
[5] Messner, B. et al. [*Control Tutorials for Matlab and Simulink*](http://ctms.engin.umich.edu)

# About this document

Author: Urbano Lugrís Armesto
[Laboratorio de Ingeniería Mecánica](http://lim.ii.udc.es/)
[Escuela Politécnica Superior](https://eps.udc.es/)
[Universidad de A Coruña](https://www.udc.es/)

This document belongs to the course *Proyecto Interdisciplinar II*, taught in the fourth year of the Degrees in Mechanical Engineering and in Industrial Technology Engineering at the Escuela Politécnica Superior of the Universidad de A Coruña.
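As mentioned in the section on operation at different speeds, the gain-interpolation strategy can be sketched as follows. This is only an illustration: `design_gains()` is a hypothetical stand-in for rebuilding the state-space model at a given forward speed and rerunning the LQR design described above, and it returns dummy values here just so the example executes.

```python
import numpy as np

def design_gains(v):
    # Hypothetical stand-in: in practice this would rebuild A(v) and B
    # for the forward speed v and rerun the LQR design described above.
    return np.array([-20.0/v, -3.0/v, 4.0])

velocities = np.linspace(2.0, 6.0, 9)   # design grid [m/s]
gain_table = np.array([design_gains(v) for v in velocities])

def scheduled_gains(v):
    """Linearly interpolate each gain between the design speeds."""
    return np.array([np.interp(v, velocities, gain_table[:, i])
                     for i in range(gain_table.shape[1])])

print(scheduled_gains(3.7))   # gains blended between the 3.5 and 4.0 m/s designs
```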
% Created 2020-06-17 Wed 20:19
% Intended LaTeX compiler: pdflatex
\documentclass[11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{grffile}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{textcomp}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\author{Ryan Hammang}
\date{\today}
\title{Post-install Script}
\hypersetup{
 pdfauthor={Ryan Hammang},
 pdftitle={Post-install Script},
 pdfkeywords={},
 pdfsubject={},
 pdfcreator={Emacs 26.3 (Org mode 9.1.9)},
 pdflang={English}}
\begin{document}

\maketitle
\tableofcontents

\section{The script}
\label{sec:orgbe82fe9}
\begin{verbatim}
sudo apt install \
  r-cran-ggplot2 \
  ubuntu-restricted-addons ubuntu-restricted-extras
\end{verbatim}

\section{Latex}
\label{sec:org2c93cec}
\begin{itemize}
\item texmaker
\item emacs
\item gdebi
\item insynchq
\item texlive-latex-extra
\item texlive-publishers
\item pandoc
\item texlive-music
\item texlive-humanities
\end{itemize}

\section{surface-linux}
\label{sec:orgf8958ca}

\section{Java 8 Selection}
\label{sec:org870cf32}

\section{Emacs Config file}
\label{sec:org4e86b83}

\section{miniconda}
\label{sec:org76add3f}

\section{r-lang}
\label{sec:orgcc4ff74}
\begin{itemize}
\item wget rstudio
\end{itemize}

\section{postgresql}
\label{sec:org280fd24}
\begin{itemize}
\item wget
\end{itemize}
\end{document}
Formal statement is: lemma pos_less_divideR_eq [field_simps]: "a < b /\<^sub>R c \<longleftrightarrow> c *\<^sub>R a < b" if "c > 0" Informal statement is: If $c > 0$, then $a < b/c$ if and only if $ca < b$.
State Before:
F : Type ?u.140859
α : Type u_1
β : Type ?u.140865
γ : Type ?u.140868
ι : Type u_2
κ : Type ?u.140874
inst✝¹ : DistribLattice α
inst✝ : OrderBot α
s : Finset ι
t : Finset κ
f : ι → α
g : κ → α
a : α
⊢ _root_.Disjoint a (sup s f) ↔ ∀ ⦃i : ι⦄, i ∈ s → _root_.Disjoint a (f i)
State After: no goals
Tactic: simp only [disjoint_iff, sup_inf_distrib_left, Finset.sup_eq_bot_iff]
{-# OPTIONS --rewriting #-}

open import Agda.Builtin.Unit
open import Agda.Builtin.Equality
open import Agda.Builtin.Equality.Rewrite

data Box : Set → Set₁ where
  box : (A : Set) → Box A

data D (A : Set) : Set₁ where
  c : A → Box A → D A

postulate
  any : {A : Set} → A
  one : {A : Set} → D A
  rew : ∀ A → c any (box A) ≡ one

-- Jesper, 2020-06-17: Ideally Agda should reject the above rewrite
-- rule, since it causes reduction to be unstable under eta-conversion

works : c any (box ⊤) ≡ c tt (box ⊤)
works = refl

-- However, currently it is accepted, breaking subject reduction:

{-# REWRITE rew #-}

fails : c any (box ⊤) ≡ c tt (box ⊤)
fails = refl
# From Matthew Shun-Shin <[email protected]> 2018-01-14
require(rms)
set.seed(1)
m <- 50
d <- expand.grid(arm=c('a','b','c'), i=1 : m)
d$x <- runif(nrow(d))
d$y <- rnorm(nrow(d))
dd <- datadist(d)
options(datadist="dd")

f <- ols(y ~ x + arm, data=d)
summary(f, verbose=TRUE)
summary(f, conf.type='simult', verbose=TRUE)   # simult ignored

# Works
contrast(f, list(arm=c('c','b')), list(arm='a'))
contrast(f, list(arm=c('c','b')), list(arm="a"), conf.type='simultaneous')

g <- orm(y ~ x + arm, data=d)
summary(g, verbose=TRUE)
summary(g, conf.type='simultaneous', verbose=TRUE)   # simult ignored
contrast(g, list(arm=c('b','c')), list(arm='a'))
contrast(g, list(arm=c('b','c')), list(arm='a'), conf.type='simult')
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial1.ipynb" target="_parent"></a> # Neuromatch Academy: Week 3, Day4, Tutorial 1 # Deep Learning: Decoding Neural Responses By Jorge A. Menendez; reviewed by Carsen Stringer, Roozbeh Farhoodi, and Madineh Sarvestani #Tutorial Objectives In this tutorial, we'll use deep learning to decode stimulus information from the responses of sensory neurons. Specifically, we'll look at the activity of ~20,000 neurons in mouse primary visual cortex responding to oriented gratings recorded in [this study](https://www.biorxiv.org/content/10.1101/679324v2.abstract). Our task will be to decode the orientation of the presented stimulus from the responses of the whole population of neurons. We could do this in a number of ways, but here we'll use deep learning. Deep learning is particularly well-suited to this problem for a number of reasons: * The data are very high-dimensional: the neural response to a stimulus is a ~20,000 dimensional vector. Many machine learning techniques fail in such high dimensions, but deep learning actually thrives in this regime as long as you have enough data (which we do here!). * As you'll be able to see below, different neurons can respond quite differently to stimuli. This complex pattern of responses will, therefore, require non-linear methods to be decoded, which we can easily do with non-linear activation functions in deep networks. * Deep learning architectures are highly flexible, meaning we can easily adapt the architecture of our decoding model to optimize decoding. Here, we'll focus on a single architecture, but you'll see that it can easily be modified with few changes to the code. In this tutorial, we will learn how to: * Build a deep feed-forward network using PyTorch * Evaluate the network's outputs using PyTorch built-in loss functions * Compute gradients of the loss with respect to each parameter of the network using automatic differentiation * Implement stochastic gradient descent to optimize the network parameters ```python #@title Video: decoding from neural data & feed-forward network architectures and computation from IPython.display import YouTubeVideo video = YouTubeVideo(id="BXvfDj3AP_A", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video ``` Video available at https://youtu.be/BXvfDj3AP_A --- ## Setup **Don't forget to execute the hidden cells below!** ```python import os import numpy as np import torch from torch import nn from torch import optim import matplotlib as mpl from matplotlib import pyplot as plt ``` ```python # @title Download data # Download data from OSF data_filename = 'mouseV1.npy' if data_filename in os.listdir(): print('data already downloaded!') else: print('downloading data...') !wget -O mouseV1.npy https://osf.io/6g4nz/download ``` downloading data... --2020-07-01 23:04:54-- https://osf.io/6g4nz/download Resolving osf.io (osf.io)... 35.190.84.173 Connecting to osf.io (osf.io)|35.190.84.173|:443... connected. HTTP request sent, awaiting response... 302 FOUND Location: https://files.ca-1.osf.io/v1/resources/hygbm/providers/osfstorage/5edacf4208aad3008943c865?action=download&direct&version=1 [following] --2020-07-01 23:04:55-- https://files.ca-1.osf.io/v1/resources/hygbm/providers/osfstorage/5edacf4208aad3008943c865?action=download&direct&version=1 Resolving files.ca-1.osf.io (files.ca-1.osf.io)... 
35.241.38.243 Connecting to files.ca-1.osf.io (files.ca-1.osf.io)|35.241.38.243|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 867771933 (828M) [application/octet-stream] Saving to: ‘mouseV1.npy’ mouseV1.npy 100%[===================>] 827.57M 107MB/s in 6.6s 2020-07-01 23:05:02 (125 MB/s) - ‘mouseV1.npy’ saved [867771933/867771933] ```python #@title Figure Settings %matplotlib inline %config InlineBackend.figure_format='retina' mpl.rcParams['font.family'] = 'sans-serif' mpl.rcParams['axes.spines.right'] = False mpl.rcParams['axes.spines.top'] = False mpl.rcParams['lines.linewidth'] = 3 mpl.rcParams['legend.frameon'] = False mpl.rcParams['axes.labelsize'] = 16 mpl.rcParams['figure.figsize'] = (8, 6) ``` ```python #@title Helper Functions def load_data(): """Load mouse V1 data from Stringer et al. (2019) Data from study reported in this preprint: https://www.biorxiv.org/content/10.1101/679324v2.abstract These data comprise time-averaged responses of ~20,000 neurons to ~4,000 stimulus gratings of different orientations, recorded through Calcium imaginge. The responses have been normalized by spontanous levels of activity and then z-scored over stimuli, so expect negative numbers. This function returns the relevant data (neural responses and stimulus orientations) in a torch.Tensor of data type torch.float32 in order to match the default data type for nn.Parameters in Google Colab. Returns: resp (torch.Tensor): n_stimuli x n_neurons matrix of neural responses, each row contains the responses of each neuron to a given stimulus stimuli: (torch.Tensor): n_stimuli x 1 column vector with orientation of each stimulus, in radians """ data = np.load(data_filename, allow_pickle=True).item() # Neural responses resp = torch.tensor(data['resp'], dtype=torch.float32) # Stimuli stimuli = torch.tensor(data['stimuli'], dtype=torch.float32).unsqueeze(1) # add singleton dimension to make a column vector return resp, stimuli def plot_data_matrix(X): """Visualize data matrix of neural responses using a heatmap Args: X (torch.Tensor or np.ndarray): matrix of neural responses to visualize with a heatmap """ fig, ax = plt.subplots() cax = ax.imshow(X, cmap=mpl.cm.pink, vmin=np.percentile(X, 1), vmax=np.percentile(X, 99)) cbar = plt.colorbar(cax, ax=ax, label='normalized neural response') ax.set_aspect('auto') ax.set_xticks([]) ax.set_yticks([]) class progress_bar(): """Progress bar for displaying progress over neural network training Args: n_epochs (int): number of epochs that network will be trained for Attributes: bar (list of str): the current state of the progress bar counter (int): counter tracking the current number of epochs run n_epochs (int): total number of epochs to be run """ def __init__(self, n_epochs): self.bar = ['|'] for i in range(n_epochs): self.bar.append(' ') self.bar.append('|') self.counter = 0 self.n_epochs = n_epochs def update(self): """Update the progress bar and print it""" self.counter += 1 self.bar[self.counter] = '=' print('%s (epoch %i/%i complete)' % (''.join(self.bar), self.counter, self.n_epochs)) ``` --- ## Load and visualize data In the next cell, we have provided code to load the data and plot the matrix `resp` of neural responses. 
```python
# Load data
resp, stimuli = load_data()
n_stimuli, n_neurons = resp.shape
print(f'resp contains responses of {n_neurons} neurons to {n_stimuli} stimuli')

# Visualize data matrix
plot_data_matrix(resp[:300, :300].T)  # plot responses of first 300 neurons to first 300 stimuli
plt.xlabel('stimulus')
plt.ylabel('neuron')
plt.show()
```

### Exercise 1

Plot the tuning curve of a single neuron. The tuning curve should show the responses of that neuron to each stimulus as a function of the stimulus orientation. You'll need the following variables:
* `resp` contains the responses of every neuron to every stimulus. The $i$th column contains the responses of the $i$th neuron to each stimulus.
* `stimuli` contains the orientations of each stimulus

**Suggestions**
* convert the stimuli from radians to degrees to get a more interpretable $x$-axis

```python
def plot_tuning():
  """Plot the tuning curve of a random neuron"""
  ################################################################################
  ## TO DO for students: sample a random neuron and plot its tuning curve
  # neuron_indx = ...  # pick random neuron
  # plt.plot(..., ..., '.')  # plot its responses as a function of stimulus orientation
  raise NotImplementedError("Student exercise: write code for plotting tuning curve")
  ################################################################################
  plt.title('neuron %i' % neuron_indx)
  plt.xlabel('stimulus orientation ($^o$)')
  plt.ylabel('neural response')
  plt.xticks(np.linspace(0, 360, 5))
  plt.show()

# plot_tuning()  # UNCOMMENT
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_2a11ec87.py)

*Example output:*

---
## Building deep feed-forward networks with *PyTorch*

We'll now build a simple deep neural network that takes as input a vector of neural responses and outputs a single number representing the decoded stimulus orientation. Specifically, we'll build a deep network with one hidden layer.

Let $\mathbf{r}^{(n)} = \begin{bmatrix} r_1^{(n)} & r_2^{(n)} & \ldots & r_N^{(n)} \end{bmatrix}^T$ denote the vector of neural responses (of neurons $1, \ldots, N$) to the $n$th stimulus. The network we will use is described by the following set of equations:
\begin{align}
\mathbf{h}^{(n)} &= \mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in}, && [\mathbf{W}^{in}: M \times N], \\
y^{(n)} &= \mathbf{W}^{out} \mathbf{h}^{(n)} + \mathbf{b}^{out}, && [\mathbf{W}^{out}: 1 \times M],
\end{align}
where $y^{(n)}$ denotes the scalar output of the network: the decoded orientation of the $n$th stimulus. The $M$-dimensional vector $\mathbf{h}^{(n)}$ denotes the activations of the **hidden layer** of the network. The reason it is called hidden can be glimpsed from the schematic below, showing how the hidden layer is, in a sense, "hidden" from the input and output of the network. Deeper networks may have multiple hidden layers, but here we'll just focus on a single hidden layer.

*(schematic of the feed-forward network architecture omitted)*

We have provided code below for building such a network in PyTorch, using the PyTorch `nn.Module` object class. It contains three key ingredients:
* `__init__()` method to initialize its parameters, like in any other Python class. In this case, it takes a single argument which specifies the number of units in the hidden layer.
* `nn.Linear()` modules, which are built-in PyTorch objects containing all the weights and biases for a given network layer. See [here](https://pytorch.org/docs/master/generated/torch.nn.Linear.html) for documentation. It takes two arguments:
  * \# of inputs to that layer, and
  * \# of outputs from that layer

  For the input layer, for example, we have:
  * \# of inputs = \# of neurons whose responses are to be decoded ($N$)
  * \# of outputs = \# of hidden layer units ($M$)

  PyTorch will initialize all weights and biases randomly.
* `forward()` method, which takes as argument an input to the network and returns the network output. In our case, this comprises computing the output $y$ from a given input $\mathbf{r}$ using the above two equations. We provide the code for doing this using the built-in PyTorch `nn.Linear()` objects.

```python
#@title DeepNet

class DeepNet(nn.Module):
  """Deep Network with one hidden layer

  Args:
    n_hidden (int): number of units in hidden layer

  Attributes:
    in_layer (nn.Linear): weights and biases of input layer
    out_layer (nn.Linear): weights and biases of output layer
  """

  def __init__(self, n_hidden):
    super().__init__()  # needed to invoke the properties of the parent class nn.Module
    self.in_layer = nn.Linear(n_neurons, n_hidden)  # neural activity --> hidden units
    self.out_layer = nn.Linear(n_hidden, 1)  # hidden units --> output

  def forward(self, r):
    """Decode stimulus orientation from neural responses

    Args:
      r (torch.Tensor): vector of neural responses to decode, must be of
        length n_neurons. Can also be a tensor of shape p x n_neurons,
        containing p vectors of neural responses

    Returns:
      y (torch.Tensor): network outputs for each input provided in r. If
        r is a vector, then y is a 1D tensor of length 1. If r is a 2D
        tensor then y is a 2D tensor of shape p x 1.
    """
    h = self.in_layer(r)  # hidden representation
    y = self.out_layer(h)
    return y
```

We next provide code for running the deep network, i.e. for using the deep network to decode stimulus orientation from a vector of neural responses.

An important thing to note in this code snippet is the `.detach()` method. The PyTorch `nn.Module` class is special in that, behind the scenes, each of the variables linked to it are linked to each other in a computational graph, for the purposes of automatic differentiation (which will be discussed below). As a result, if you want to do anything that is not a `torch` operation to the parameters or outputs of an `nn.Module` class, you'll need to first "detach" it from its computational graph. This is what the `.detach()` method does. You'll see in this code snippet we need to call it on the outputs of the network so that we can pass it through the `np.rad2deg()` function.

```python
# Initialize a deep network with M=200 hidden units
net = DeepNet(200)

# Decode stimulus orientation from neural responses to the first stimulus
# in the data set. Note that the output of the network will be nonsense
# as its weights have been initialized randomly.
istim = 0  # index of first stimulus
r = resp[istim]  # neural responses to this stimulus
out = net(r)  # compute output from network using forward() method
ori_decode = np.rad2deg(out.detach())  # decoded orientation, in degrees -- need to use .detach() so that the torch.Tensor can be treated as a numpy array
ori_true = np.rad2deg(stimuli[istim])  # true stimulus orientation, in degrees
print('decoded orientation: %.2f degrees' % ori_decode)
print('true orientation: %.2f degrees' % ori_true)
```

    decoded orientation: -32.62 degrees
    true orientation: 150.13 degrees

Note that the deep network we constructed above comprises solely **linear** operations on each layer: each layer is just a weighted sum of the elements in the previous layer. We'll next incorporate a certain class of **non-linear** operations. Using non-linear layer-to-layer transformations, in fact, allows deep networks to perform much more complex computations, which is why non-linearities are always used in practice.

### Exercise 2

Create a new class `DeepNetReLU` by modifying our above deep network model to implement a non-linear transformation from the input layer to the hidden layer. In other words, modify it so that the hidden layer activations are given by
\begin{equation}
\mathbf{h}^{(n)} = \phi(\mathbf{W}^{in} \mathbf{r}^{(n)} + \mathbf{b}^{in})
\end{equation}
where $\phi$ is some non-linear function, referred to as the **activation function**. We'll use linear rectification:
\begin{equation}
\phi(x) =
\begin{cases}
x & \text{if } x > 0 \\
0 & \text{else}
\end{cases}
\end{equation}
which can be implemented in PyTorch using `torch.relu()`. Hidden layers with this activation function are typically referred to as "**Re**ctified **L**inear **U**nits", or **ReLU**'s.

**Hint**: you only need to modify the `forward()` method of the above `DeepNet()` class.

```python
class DeepNetReLU(nn.Module):

  def __init__(self, n_hidden):
    super().__init__()  # needed to invoke the properties of the parent class nn.Module
    ##############################################################
    ## TO DO for students: initialize network weights using nn.Linear() objects
    raise NotImplementedError("Student exercise: write code for initializing deep network")
    ##############################################################

  def forward(self, r):
    ##############################################################
    ## TO DO for students: write code for computing network output, using a
    ## rectified linear activation function for the hidden units
    raise NotImplementedError("Student exercise: write code for computing network output")
    ##############################################################
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_48c0b82d.py)

```python
#@title Video: loss functions & SGD
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="rx8d2EkV1Mg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```

    Video available at https://youtu.be/rx8d2EkV1Mg

---
## Evaluating loss functions

Because the weights of the network are currently randomly chosen, the outputs of the network are nonsense: the decoded stimulus orientation is nowhere close to the true stimulus orientation. We'll shortly write some code to change these weights so that the network does a better job of decoding. But to do so, we first need to define what we mean by "better".
One simple way of defining this is to use the **squared error**
\begin{equation}
L = (y - \tilde{y})^2
\end{equation}
where $y$ is the network output and $\tilde{y}$ is the true stimulus orientation. When the decoded stimulus orientation is far from the true stimulus orientation, $L$ will be large. We thus refer to $L$ as the **loss function**, as it quantifies how *bad* the network is at decoding stimulus orientation.

PyTorch actually carries with it a number of built-in loss functions. The one corresponding to squared error is called `nn.MSELoss()`. This will take as arguments a number of network outputs $y_1, y_2, \ldots, y_P$ and corresponding target outputs $\tilde{y}_1, \tilde{y}_2, \ldots, \tilde{y}_P$, and compute the **mean squared error (MSE)**
\begin{equation}
L = \frac{1}{P}\sum_{n=1}^P \left(y^{(n)} - \tilde{y}^{(n)}\right)^2
\end{equation}

We provide code below evaluating the mean squared error of our linear deep network for the neural responses to the first 100 stimuli.

```python
# PyTorch mean squared error loss function
loss_fn = nn.MSELoss()

# Evaluate mean squared error on neural responses
# to first 100 stimuli
istim = np.arange(100)  # indices of first 100 stimuli
out = net(resp[istim])  # decoded stimulus orientations
ori = stimuli[istim]  # true stimulus orientations
print('mean squared error: %.2f' % loss_fn(out, ori))
```

    mean squared error: 12.17

### Exercise 3

Write a function that will evaluate the mean squared error for a given set of neural responses. Use the initialized `loss_fn` above to compute the mean squared error. Then, initialize a deep network with $M=20$ rectified linear units in the hidden layer, and use your function to evaluate its mean squared error on population responses to 100 random stimuli.

```python
######################################################################
## TO DO for students:
##  - initialize a deep network with one hidden layer of 20 rectified
##    linear units, using the above implemented DeepNetReLU() class
##  - pick 100 random stimuli
##  - use the network to decode stimulus orientation from responses to
##    these stimuli
##  - evaluate the mean squared error using loss_fn()

# net = ...  # initialize deep network, using DeepNetReLU()
# istim = ...  # indices of 100 random stimuli
# out = ...  # use network to decode orientation from neural responses
# loss = ...  # evaluate mean squared error using loss_fn()
# print('mean squared error: %.2f' % loss)
######################################################################
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_70ebabdf.py)

---
## Optimization with stochastic gradient descent

Our goal is to modify the weights to make the mean squared error loss $L$ as small as possible over the whole data set. To do this, we'll use the **stochastic gradient descent (SGD)** algorithm, which consists of iterating four simple steps:
1. **Randomly sample a *mini-batch* of training data**. This is only strictly necessary when the full data set is too big to pass through the network all at once. That said, using mini-batches turns out to also help avoid overfitting.
2. **Evaluate the loss** at this mini-batch
3. **Compute the gradient of the loss** with respect to each of the network weights. In PyTorch, we can do this with one line of code: if the loss is stored in a variable `loss`, all you need to do is run
```
loss.backward()
```
PyTorch will then compute the gradients of this quantity with respect to each network parameter using [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), and store them behind the scenes.
4. **Update the network weights** by descending the gradient. In Pytorch, we can do this with one line of code by using built-in optimizers. The SGD optimizer is called `optim.SGD` (documentation [here](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD)), and takes as an argument
* the parameters to be updated
* the learning rate to use

For example, to optimize *all* the parameters of a network `net` using a learning rate of .001, one would use
```
optimizer = optim.SGD(net.parameters(), lr=.001)
```
where `.parameters()` is a method of the `nn.Module` class that returns a [Python generator object](https://wiki.python.org/moin/Generators) over all the parameters of that `nn.Module` class (in our case, $\mathbf{W}^{in}, \mathbf{b}^{in}, \mathbf{W}^{out}, \mathbf{b}^{out}$).

To update each of these parameters, we then need only call the `.step()` method of this optimizer:
```
optimizer.step()
```
This single line of code will execute the SGD updates for each parameter given to the optimizer when it was initialized. Note that this is true no matter how big/small the network is, allowing us to use the same two lines of code for any deep network model built using PyTorch.

Finally, an important detail to remember is that the gradients of each parameter need to be cleared before calling `.backward()`, or else PyTorch will try to accumulate gradients across iterations. This can again be done using built-in optimizers via the method `zero_grad()`, as follows:
```
optimizer.zero_grad()
```

For the mathematical details of the SGD algorithm, see the appendix.

### Exercise 4

1. Complete the function `train()` that optimizes the weights of a given network with stochastic gradient descent on some given training data. We've provided most of the code for iterating over steps 1-4 listed above. The only piece left for you to complete is step 2: evaluating the loss $L$ at the current mini-batch.

  **Hint:** you already wrote code for computing the mean squared error $L$ in the previous exercise.

  **Note:** this function has two nested loops. The outer loop is over epochs. The inner loop iterates the SGD steps 1-4 outlined above over random mini-batches within each epoch. (cf. appendix)

2. Initialize a deep network with one hidden layer of 20 rectified linear units and use this function to train it on some training data (with the given default SGD parameters).

  **Hint:** use your `DeepNetReLU()` class to initialize the deep network.

Note that SGD is essentially an algorithm for fitting the network's parameters to the given training data. Selecting this training data is thus crucial for ensuring that the optimized parameters **generalize** to unseen data they weren't trained on. In our case, for example, we want to make sure that our trained network is good at decoding stimulus orientations from neural responses to any orientation, not just those in our data set.
To ensure this, we will split up the full data set into a **training set** and a **test set**. We'll use the training set for optimizing the parameters with SGD, and then evaluate how good the optimized parameters are by using the trained network to decode stimulus orientations from neural responses in the test set. Good decoding performance on this test set should then be indicative of good decoding performance on the neurons' responses to any other stimulus orientation. This procedure is commonly used in machine learning (not just in deep learning) and is typically referred to as **cross-validation**.

When using this for SGD, it is also worth noting that the random subsampling of our data used to build the training set induces a certain amount of stochasticity into our gradient descent algorithm, which will help avoid potential local minima.

```python
def train(net, train_data, train_labels):
  """Run stochastic gradient descent for a given network

  Args:
    net (nn.Module): deep network whose parameters to optimize with SGD
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of
      the stimuli corresponding to each row of train_data
  """

  # Set SGD hyperparameters
  learning_rate = 1e-3  # learning rate for SGD
  n_epochs = 10  # number of epochs to run SGD
  batch_size = 250  # number of data points in each mini-batch

  # Initialize PyTorch SGD optimizer
  optimizer = optim.SGD(net.parameters(), lr=learning_rate)

  # Placeholder to save MSE at each iteration
  train_mse = []

  # Progress bar to track progress
  pbar = progress_bar(n_epochs)

  # Loop over epochs
  for i in range(n_epochs):

    # Split up training data into random non-overlapping mini-batches
    ishuffle = torch.randperm(train_data.shape[0])  # random ordering of training data
    minibatch_data = torch.split(train_data[ishuffle], batch_size)  # split train_data into minibatches
    minibatch_labels = torch.split(train_labels[ishuffle], batch_size)  # split train_labels into minibatches

    # Loop over mini-batches
    for r, ori in zip(minibatch_data, minibatch_labels):

      # Evaluate mean squared error loss
      ######################################################################
      ## TO DO for students: evaluate loss at current mini-batch
      # out = ...  # use network to decode orientation from neural responses in this minibatch
      # loss = ...  # evaluate mean squared error for this minibatch
      raise NotImplementedError("Student exercise: write code for evaluating loss at current mini-batch")
      ######################################################################

      # Store current mean squared error
      train_mse.append(loss.item())  # .item() transforms the tensor to a scalar and does .detach() for us

      # Compute gradients
      optimizer.zero_grad()  # clear gradients
      loss.backward()

      # Update weights
      optimizer.step()

    # Track progress
    pbar.update()

  # Plot the loss
  plt.plot(train_mse)
  plt.xlim([0, None])
  plt.ylim([0, None])
  plt.xlabel('iterations of stochastic gradient descent')
  plt.ylabel('mean squared error\non training data')
  plt.show()


# Split data into training set and testing set
n_train = int(0.75 * n_stimuli)  # putting 75% of data into training set
ishuffle = torch.randperm(n_stimuli)
itrain = ishuffle[:n_train]  # indices of data samples to include in training set
itest = ishuffle[n_train:]  # indices of data samples to include in testing set

# Initialize network and train it on training set
######################################################################
## TO DO for students: initialize deep network and train it on training set
# net = ...  # use M=20 hidden units
# train(net, ..., ...)
######################################################################
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_3455cc67.py)

*Example output:*

### Exercise 5

1. Complete the function `test()` that decodes stimulus orientations from a given set of test data and plots these against the true stimulus orientations, in degrees.
2. Use this function to evaluate the network trained in the previous exercise on data from the test set, indexed by `itest`.

**Hint:** don't forget to use `.detach()` to do any plotting or `numpy` operations on the network output

```python
def test(net, test_data, test_labels):
  """Decode stimulus orientation from neural responses in test data and
  plot against the true stimulus orientations, in degrees

  Args:
    net (nn.Module): deep network to use to decode stimulus orientation
    test_data (torch.Tensor): n_test x n_neurons tensor with neural
      responses to decode
    test_labels (torch.Tensor): n_test x 1 tensor with orientations of
      the stimuli corresponding to each row of test_data, in radians
  """

  ########################################################################
  ## TO DO for students: decode stimulus orientation from neural responses in test_data
  ## and plot against true stimulus orientations in test_labels
  # out = ...  # decode stimulus orientation for each population response in test set
  # ori_decode = ...  # transform from radians to degrees
  # ori_true = ...  # true stimulus orientations, in degrees
  raise NotImplementedError("Student exercise: write code for decoding stimulus orientation from neural responses in test set")
  ########################################################################

  # Plot
  plt.plot(ori_true, ori_decode, '.')  # plot true orientation vs decoded orientation
  plt.xlabel('true stimulus orientation ($^o$)')
  plt.ylabel('decoded stimulus orientation ($^o$)')
  axticks = np.linspace(0, 360, 5)
  plt.xticks(axticks)
  plt.yticks(axticks)
  plt.show()


###############################################################################
## TO DO for students: use test() function to evaluate network trained in previous exercise
## on the test set
# test(net, ..., ...)
###############################################################################
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_a7fbc2de.py)

*Example output:*

---
## Model criticism

Let's now take a step back and think about how our model is succeeding/failing and how to improve it.

### Exercise 6

1. Complete the function `decoding_error()` that computes the decoding error (decoded stimulus orientation minus true stimulus orientation, in degrees) on a given set of test data and plots it as a function of the true stimulus orientation.
2. Use this function to evaluate and plot the decoding error of the network trained in the previous exercise on data from the test set, indexed by `itest`.
3. Interpret what you see. Some questions to think about:
  * Are some stimulus orientations harder to decode than others?
  * If so, in what sense? Are the decoded orientations for these stimuli more variable and/or are they biased?
  * Can you explain this variability/bias? What makes these stimulus orientations different from the others?
  * Can you think of a way to modify the deep network in order to avoid this?

```python
def decoding_error(net, test_data, test_labels):
  """Plot decoding error as a function of true stimulus orientation, in degrees

  Args:
    net (nn.Module): deep network to use to decode stimulus orientation
    test_data (torch.Tensor): n_test x n_neurons tensor with neural
      responses to decode
    test_labels (torch.Tensor): n_test x 1 tensor with orientations of
      the stimuli corresponding to each row of test_data, in radians
  """

  ########################################################################
  ## TO DO for students: compute and plot the decoding error
  # out = ...  # decode stimulus orientation for each population response in test set
  # ori_decode = ...  # transform from radians to degrees
  # ori_true = ...  # true stimulus orientations, in degrees
  # error = ...  # decoding error, in degrees
  # plt.plot(..., ..., '.')  # plot decoding error as a function of true orientation
  raise NotImplementedError("Student exercise: write code for computing and plotting the decoding error")
  ########################################################################

  plt.xlabel('true stimulus orientation ($^o$)')
  plt.ylabel('decoding error ($^o$)')
  plt.xticks(np.linspace(0, 360, 5))
  plt.yticks(np.linspace(-360, 360, 9))
  plt.show()


###############################################################################
## TO DO for students: use decoding_error() function to plot decoding error on the test set
## for the network trained above
# decoding_error(net, ..., ...)
###############################################################################
```

[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_8af20f38.py)

*Example output:*

---
## Summary

We have now covered a number of common and powerful techniques for applying deep learning to decoding from neural data, some of which are common to almost any machine learning problem:
* Building and training neural network models with **PyTorch and automatic differentiation**
* Choosing and evaluating **loss functions**
* Implementing **stochastic gradient descent** by iterating over random **mini-batches** of training data
* Testing a trained model on unseen data by splitting the data into a **training set and test set**

---
# Appendix

## Stochastic gradient descent

Here we provide a more detailed explanation of the **stochastic gradient descent (SGD) algorithm** as applied to our decoding problem, and its implementation in PyTorch. This algorithm consists of iterating four simple steps:
1. **Randomly sample a *mini-batch* of training data.** In our case, that means randomly sampling a set of neural response vectors $\mathbf{r}^{(1)}, \mathbf{r}^{(2)}, \ldots, \mathbf{r}^{(P)}$ and corresponding stimulus orientations $\tilde{y}^{(1)}, \tilde{y}^{(2)}, \ldots, \tilde{y}^{(P)}$.
2. **Evaluate the loss** at this mini-batch. For a mean squared error loss, this is given by
\begin{equation}
L = \frac{1}{P}\sum_{n=1}^P (y^{(n)} - \tilde{y}^{(n)})^2
\end{equation}
where recall that $y^{(n)}$ denotes the stimulus orientation decoded from the population response $\mathbf{r}^{(n)}$ to the $n$th stimulus, and $\tilde{y}^{(n)}$ is the true orientation of this stimulus.
3. **Compute the gradient of the loss** with respect to each of the network weights. In our case, that entails computing the quantities
\begin{equation}
\frac{\partial L}{\partial \mathbf{W}^{in}}, \frac{\partial L}{\partial \mathbf{b}^{in}}, \frac{\partial L}{\partial \mathbf{W}^{out}}, \frac{\partial L}{\partial \mathbf{b}^{out}}
\end{equation}
Usually, this would require lots of math in order to derive these gradients, and lots of code to compute them. But this is where PyTorch comes to the rescue! Using a cool technique called [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), PyTorch will calculate these gradients automatically for you. If the loss is stored in a variable `loss`, all you need to do is run
```
loss.backward()
```
PyTorch will then compute the gradient of this quantity with respect to each network parameter. These are computed and stored behind the scenes, and can be accessed through the `.grad` attribute of each of the network's parameters. But, as you'll see next, we won't even need to.
4. **Update the network weights** by descending the gradient:
\begin{align}
\mathbf{W}^{in} &\leftarrow \mathbf{W}^{in} - \alpha \frac{\partial L}{\partial \mathbf{W}^{in}} \\
\mathbf{b}^{in} &\leftarrow \mathbf{b}^{in} - \alpha \frac{\partial L}{\partial \mathbf{b}^{in}} \\
\mathbf{W}^{out} &\leftarrow \mathbf{W}^{out} - \alpha \frac{\partial L}{\partial \mathbf{W}^{out}} \\
\mathbf{b}^{out} &\leftarrow \mathbf{b}^{out} - \alpha \frac{\partial L}{\partial \mathbf{b}^{out}}
\end{align}
where $\alpha$ is called the **learning rate**. This **hyperparameter** of the SGD algorithm controls how far we descend the gradient on each iteration. It should be as large as possible so that fewer iterations are needed, but not so large that the parameter updates skip over minima in the loss landscape.

In Pytorch we can implement all of these updates with just a single line of code. We first construct an SGD optimizer using the `optim.SGD` class (documentation [here](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD)), and tell it which parameters to update and what learning rate to use. For example, to optimize the parameters of a network `net` using a learning rate of .001, we construct an SGD optimizer as follows
```
optimizer = optim.SGD(net.parameters(), lr=.001)
```
where the method `net.parameters()` returns all of the parameters (i.e. weights and biases) in `net`. To update each of these parameters, we then need only call the `.step()` method of this optimizer:
```
optimizer.step()
```
This single line of code will execute each of the updates written above. Moreover, for more complicated networks with more layers and more parameters, this single line of code will suffice to update *all* of its parameters according to the analogous gradient descent equations.

## Epochs and mini-batches

Note that our above implementation of SGD contains two nested loops. The inner loop is over mini-batches, implementing steps 1-4 above. But take a close look at exactly how these mini-batches are being randomly sampled. Rather than sampling a totally random mini-batch of training data each iteration, we split up the full training data into non-overlapping mini-batches to ensure that these mini-batches will cover the whole training data set. Each time we loop over all the mini-batches covering the training data set is called an **epoch**. The outer loop is over **epochs**: we loop over the entire training data set `n_epoch` times.
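To tie the two loops together, here is a minimal, self-contained sketch of this epoch/mini-batch structure on a toy regression problem. The data and model below are illustrative stand-ins (not the tutorial's data set), chosen only so the sketch runs on its own:

```python
import torch
from torch import nn, optim

torch.manual_seed(0)
X = torch.randn(1000, 5)     # toy "neural responses"
y = X @ torch.randn(5, 1)    # toy "stimulus orientations"

net = nn.Linear(5, 1)        # stand-in for the deep network
loss_fn = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)

n_epochs, batch_size = 5, 100
for epoch in range(n_epochs):                    # outer loop: epochs
  ishuffle = torch.randperm(X.shape[0])          # random ordering of the data
  for xb, yb in zip(torch.split(X[ishuffle], batch_size),
                    torch.split(y[ishuffle], batch_size)):  # inner loop: mini-batches
    loss = loss_fn(net(xb), yb)  # step 2: evaluate loss on this mini-batch
    optimizer.zero_grad()        # clear old gradients
    loss.backward()              # step 3: compute gradients
    optimizer.step()             # step 4: update parameters
  print(f'epoch {epoch}: last mini-batch loss {loss.item():.4f}')
```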
      SUBROUTINE forces
      USE vmec_main, p5 => cp5
      USE realspace
      USE vforces
      USE vsvd, ONLY: torflux_edge => torflux
      IMPLICIT NONE
C-----------------------------------------------
C   L o c a l   P a r a m e t e r s
C-----------------------------------------------
      REAL(rprec), PARAMETER :: p25 = p5*p5
C-----------------------------------------------
C   L o c a l   V a r i a b l e s
C-----------------------------------------------
      INTEGER :: l, lk, ndim
      REAL(rprec), DIMENSION(:), ALLOCATABLE ::
     1   bsqr, gvvs, guvs, guus
      REAL(rprec), DIMENSION(:), POINTER :: gcon, lv_e, lu_e, lu_o
      REAL(rprec) :: rcon1, zcon1, dphids
C-----------------------------------------------
      ndim = 1+nrzt
!
!     POINTER ALIASES
!
      gcon => z1(:,0)
      lv_e => crmn_e;  lu_e => czmn_e;  lu_o => czmn_o

      ALLOCATE (bsqr(ndim), gvvs(ndim), guvs(ndim), guus(ndim),
     1   stat=l)
      IF (l .ne. 0) STOP 'Allocation error in VMEC FORCES'
!
!     ON ENTRY, ARMN=ZU,BRMN=ZS,AZMN=RU,BZMN=RS,LU=R*BSQ,LV = BSQ*SQRT(G)/R12
!     HERE, XS (X=Z,R) DO NOT INCLUDE DERIVATIVE OF EXPLICIT SQRT(S)
!     BSQ = |B|**2/2 + p
!     GIJ = (BsupI * BsupJ) * SQRT(G)  (I,J = U,V)
!     IT IS ESSENTIAL THAT LU,LV AT j=1 ARE ZERO INITIALLY
!
!     SOME OF THE BIGGER LOOPS WERE SPLIT TO FACILITATE CACHE
!     HITS, PIPELINING ON RISCS
!
!     FOR OPTIMIZATION ON CRAY, MUST USE COMPILER DIRECTIVES TO
!     GET VECTORIZATION OF LOOPS INVOLVING POINTERS!
!
!
!     ORIGIN OF VARIOUS TERMS
!
!     LU :  VARIATION OF DOMINANT .5*(RU-odd*Zodd - ZU-odd*Rodd) TERM
!           IN JACOBIAN
!
!     LV :  VARIATION OF R-TERM IN JACOBIAN
!
!     GVV:  VARIATION OF R**2-TERM AND Rv**2,Zv**2 IN gvv
!
!     GUU, GUV: VARIATION OF Ru, Rv, Zu, Zv IN guu, guv
!
      dphids = p25/torflux_edge

      lu_e(1:ndim:ns) = 0;  lv_e(1:ndim:ns) = 0
      guu(1:ndim:ns) = 0;  guv(1:ndim:ns) = 0;  gvv(1:ndim:ns) = 0
      guus = guu*shalf;  guvs = guv*shalf;  gvvs = gvv*shalf

CDIR$ IVDEP
      DO l = 1, ndim
         armn_e(l) = ohs*armn_e(l) * lu_e(l)
         azmn_e(l) =-ohs*azmn_e(l) * lu_e(l)
         brmn_e(l) = brmn_e(l) * lu_e(l)
         bzmn_e(l) =-bzmn_e(l) * lu_e(l)
         bsqr(l)   = phip(l)*lu_e(l)/shalf(l)
      END DO
CDIR$ IVDEP
      DO l = 1, ndim
         armn_o(l) = armn_e(l) *shalf(l)
         azmn_o(l) = azmn_e(l) *shalf(l)
         brmn_o(l) = brmn_e(l) *shalf(l)
         bzmn_o(l) = bzmn_e(l) *shalf(l)
      END DO
!
!     CONSTRUCT CYLINDRICAL FORCE KERNELS
!     NOTE: presg(ns+1) == 0, AND WILL BE "FILLED IN" AT EDGE
!     FOR FREE-BOUNDARY BY RBSQ
!
CDIR$ IVDEP
      DO l = 1, nrzt
         guu(l)  = p5*(guu(l) + guu(l+1))
         gvv(l)  = p5*(gvv(l) + gvv(l+1))
         bsqr(l) = dphids*(bsqr(l) + bsqr(l+1))
         guus(l) = p5*(guus(l) + guus(l+1))
         gvvs(l) = p5*(gvvs(l) + gvvs(l+1))
      END DO
CDIR$ IVDEP
      DO l = 1, nrzt
         armn_e(l) = armn_e(l+1) - armn_e(l) + p5*(lv_e(l) + lv_e(l+1))
     1             - gvv(l)*r1(l,0)
         azmn_e(l) = azmn_e(l+1) - azmn_e(l)
         brmn_e(l) = p5*(brmn_e(l) + brmn_e(l+1))
         bzmn_e(l) = p5*(bzmn_e(l) + bzmn_e(l+1))
      END DO
CDIR$ IVDEP
      DO l = 1, nrzt
         armn_e(l) = armn_e(l) - gvvs(l)*r1(l,1)
         brmn_e(l) = brmn_e(l) + bsqr(l)*z1(l,1)
     1             - guus(l)*ru(l,1) - guu(l)*ru(l,0)
         bzmn_e(l) = bzmn_e(l) - bsqr(l)*r1(l,1)
     1             - guus(l)*zu(l,1) - guu(l)*zu(l,0)
      END DO

      lv_e(1:nrzt+1) = lv_e(1:nrzt+1)*shalf(1:nrzt+1)

CDIR$ IVDEP
      DO l = 1, nrzt
         armn_o(l) = armn_o(l+1) - armn_o(l) - zu(l,0)*bsqr(l)
     1             + p5*(lv_e(l) + lv_e(l+1))
         azmn_o(l) = azmn_o(l+1) - azmn_o(l) + ru(l,0)*bsqr(l)
         brmn_o(l) = p5*(brmn_o(l) + brmn_o(l+1))
         bzmn_o(l) = p5*(bzmn_o(l) + bzmn_o(l+1))
      END DO

      guu(1:nrzt)  = guu(1:nrzt) * sqrts(1:nrzt)**2
      bsqr(1:nrzt) = gvv(1:nrzt) * sqrts(1:nrzt)**2

CDIR$ IVDEP
      DO l = 1, nrzt
         lu_o(l)   = dphids*(lu_e(l)*phip(l) + lu_e(l+1)*phip(l+1))
         armn_o(l) = armn_o(l) - (zu(l,1)*lu_o(l)
     1             + bsqr(l)*r1(l,1) + gvvs(l)*r1(l,0))
         azmn_o(l) = azmn_o(l) + ru(l,1)*lu_o(l)
         brmn_o(l) = brmn_o(l) + z1(l,1)*lu_o(l)
     1             - (guu(l)*ru(l,1) + guus(l)*ru(l,0))
         bzmn_o(l) = bzmn_o(l) - (r1(l,1)*lu_o(l)
     1             + guu(l)*zu(l,1) + guus(l)*zu(l,0))
      END DO

      IF (lthreed) THEN
CDIR$ IVDEP
         DO l = 1, nrzt
            guv(l)  = p5*(guv(l) + guv(l+1))
            guvs(l) = p5*(guvs(l) + guvs(l+1))
            brmn_e(l) = brmn_e(l) - (guv(l)*rv(l,0) + guvs(l)*rv(l,1))
            bzmn_e(l) = bzmn_e(l) - (guv(l)*zv(l,0) + guvs(l)*zv(l,1))
            crmn_e(l) = guv(l) *ru(l,0) + gvv(l) *rv(l,0)
     1                + gvvs(l)*rv(l,1) + guvs(l)*ru(l,1)
            czmn_e(l) = guv(l) *zu(l,0) + gvv(l) *zv(l,0)
     1                + gvvs(l)*zv(l,1) + guvs(l)*zu(l,1)
         END DO
CDIR$ IVDEP
         DO l = 1, nrzt
            guv(l) = guv(l) *sqrts(l)*sqrts(l)
            brmn_o(l) = brmn_o(l) - (guvs(l)*rv(l,0) + guv(l)*rv(l,1))
            bzmn_o(l) = bzmn_o(l) - (guvs(l)*zv(l,0) + guv(l)*zv(l,1))
            crmn_o(l) = guvs(l)*ru(l,0) + gvvs(l)*rv(l,0)
     1                + bsqr(l)*rv(l,1) + guv(l) *ru(l,1)
            czmn_o(l) = guvs(l)*zu(l,0) + gvvs(l)*zv(l,0)
     1                + bsqr(l)*zv(l,1) + guv(l) *zu(l,1)
         END DO
      ENDIF
!
!     ASSIGN EDGE FORCES (JS = NS) FOR FREE BOUNDARY CALCULATION
!
      IF (ivac .ge. 1) THEN
         lk = 0
CDIR$ IVDEP
         DO l = ns, nrzt, ns
            lk = lk+1
            armn_e(l) = armn_e(l) + zu0(l)*rbsq(lk)
            armn_o(l) = armn_o(l) + zu0(l)*rbsq(lk)
            azmn_e(l) = azmn_e(l) - ru0(l)*rbsq(lk)
            azmn_o(l) = azmn_o(l) - ru0(l)*rbsq(lk)
         END DO
         fz00_edge = SUM(wint(ns:nrzt:ns)*ru0(ns:nrzt:ns)*rbsq(1:nznt))
      ENDIF

 100  CONTINUE

      DEALLOCATE (bsqr, gvvs, guvs, guus, stat=l)
!
!     COMPUTE CONSTRAINT FORCE KERNELS
!
CDIR$ IVDEP
      DO l = 1, nrzt
         rcon1     = (rcon(l,0) - rcon0(l)) * gcon(l)
         zcon1     = (zcon(l,0) - zcon0(l)) * gcon(l)
         brmn_e(l) = brmn_e(l) + rcon1
         bzmn_e(l) = bzmn_e(l) + zcon1
         brmn_o(l) = brmn_o(l) + rcon1*sqrts(l)
         bzmn_o(l) = bzmn_o(l) + zcon1*sqrts(l)
         rcon(l,0) = ru0(l) * gcon(l)
         zcon(l,0) = zu0(l) * gcon(l)
         rcon(l,1) = rcon(l,0) * sqrts(l)
         zcon(l,1) = zcon(l,0) * sqrts(l)
      END DO

      END SUBROUTINE forces
`%||%` <- function(x, y) if (is.null(x)) y else x

from_json <- function(obj) {
  simplify_homogeneous_lists(jsonlite::fromJSON(obj, simplifyVector = FALSE))
}

has_names <- function(obj) {
  !is.null(names(obj)) && !is.null(Find(function(s) !is.na(s) && (s != ""), names(obj)))
}

enlist_if_named <- function(obj) {
  if (is.atomic(obj) && has_names(obj)) as.list(obj) else obj
}

recursively_enlist_if_named <- function(obj) {
  if (is.atomic(obj) || length(obj) == 0) {
    enlist_if_named(obj)
  } else {
    if (is.data.frame(obj)) {
      as.data.frame(lapply(obj, recursively_enlist_if_named))
    } else {
      lapply(obj, recursively_enlist_if_named)
    }
  }
}

to_json <- function(obj) {
  obj2 <- recursively_enlist_if_named(obj)
  as.character(jsonlite::toJSON(obj2, auto_unbox = TRUE))
}

#' Fix jsonlite's JSON simplification.
#'
#' @param object any R object derived from \code{\link[jsonlite]{fromJSON}}.
#' @param simple_check logical. This is needed to modify behavior for recursive function call.
#' @return the same object, with any list components where each element is
#'   an atomic vector of length 1 or NULL coerced to a single atomic vector.
#' @note See https://github.com/jeroenooms/jsonlite/issues/66 for more details.
#' @examples
#' \dontrun{
#' simplify_homogeneous_lists(jsonlite::fromJSON(
#'   '{ "numeric": [1,2], "list": [1, "a"] }', simplifyVector = FALSE))
#' # A list with atomic numeric vector in the "numeric" key and
#' # a list in the "list" key.
#' # list(numeric = c(1,2), list = list(1, "a"))
#' }
simplify_homogeneous_lists <- function(object, simple_check = TRUE) {
  if (isTRUE(simple_check) && is_simple_list(object)) {
    return(try_simplify(object))
  }
  if (is.list(object)) {
    if (all(vapply(object, terminal, logical(1)))) {
      type <- common_type(object)
      if (identical(type, "NULL")) {
        object
      } else if (is.na(type)) {
        object
      } else {
        vapply(object, tricky_identity, vector(type, 1), type)
      }
    } else {
      lapply(object, simplify_homogeneous_lists, simple_check = FALSE)
    }
  } else {
    object
  }
}

try_simplify <- function(lst) {
  if (any(vapply(lst, is.null, logical(1)))) {
    denull(lst)
  } else {
    simplify_homogeneous_lists(lst, simple_check = FALSE)
  }
}

is_simple_list <- function(lst) {
  is.list(lst) &&
    all(vapply(lst, Negate(is.list), logical(1))) &&
    all(vapply(lst, length, numeric(1)) <= 1)
}

denull <- function(lst) {
  Map(function(x) { if (is.null(x)) NA else x }, lst)
}

tricky_identity <- function(obj, type) {
  if (is.null(obj)) {
    as(NA, type)
  } else {
    as(obj, type)
  }
}

terminal <- function(x) {
  is.null(x) || (is.atomic(x) && length(x) == 1)
}

common_type <- function(x) {
  types <- vapply(Filter(Negate(is.null), x), class, character(1))
  if (length(types) == 0) {
    "NULL"
  } else if (length(unique(types)) == 1) {
    types[1]
  } else {
    NA
  }
}

packagefile <- function(file, ..., read = FALSE) {
  file <- system.file(file, ..., package = "microserver")
  if (isTRUE(read)) {
    paste(collapse = "\n", readLines(file))
  } else {
    file
  }
}
[STATEMENT]
lemma Qp_function_tuple_comp_closed:
  assumes "f \<in> carrier (Q\<^sub>p\<^bsup>n\<^esup>) \<rightarrow> carrier Q\<^sub>p"
  assumes "length fs = n"
  assumes "is_function_tuple Q\<^sub>p m fs"
  shows "function_tuple_comp Q\<^sub>p fs f \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<rightarrow> carrier Q\<^sub>p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. function_tuple_comp Q\<^sub>p fs f \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<rightarrow> carrier Q\<^sub>p
[PROOF STEP]
using assms function_tuple_comp_closed
[PROOF STATE]
proof (prove)
using this:
f \<in> carrier (Q\<^sub>p\<^bsup>n\<^esup>) \<rightarrow> carrier Q\<^sub>p
length fs = n
is_function_tuple Q\<^sub>p m fs
\<lbrakk>?f \<in> carrier (?R\<^bsup>?n\<^esup>) \<rightarrow> carrier ?R; length ?fs = ?n; is_function_tuple ?R ?m ?fs\<rbrakk> \<Longrightarrow> function_tuple_comp ?R ?fs ?f \<in> carrier (?R\<^bsup>?m\<^esup>) \<rightarrow> carrier ?R
goal (1 subgoal):
 1. function_tuple_comp Q\<^sub>p fs f \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<rightarrow> carrier Q\<^sub>p
[PROOF STEP]
by blast
In view of the great potential of this plant for the cosmetics industry, we dedicated an intensive research programme, funded within the EU project, to the detailed characterisation of the saponins present in different parts of the plant. This paper describes the first study of seasonal variations in the surface activity of soapwort, collected from the wild in Poland in 2016. The plants were dried under different conditions and separated into roots/rhizomes, stems, leaves and flowers/fruits. The respective extracts were tested for their ability to reduce surface tension (crucial for cleansing properties) and to froth (crucial for foaming). In the course of the study we observed that the surface activity of the extracts from individual organs varied significantly over the plant's life cycle. Since soapwort is a perennial plant, these changes probably reflect alterations in saponin content related to its production and storage in the plant for the next season.
import os import time, pickle from DLplatform.parameters import Parameters from typing import List import numpy as np class LearningLogger(): ''' Class with all the logging files creation needed for monitoring of the learning process. All the logs are written along with the current timestamp in milliseconds. ''' # log files names are hardcoded _learnerLossFile = 'losses.txt' _learnerPredLabelFile = 'predictions.txt' _learnerViolationsFile = 'violations.txt' _learnerBalancingFile = 'balancing.txt' _learnerRegistrationsFile = 'registrations.txt' _learnerBalancingRequestFile = 'balancing_requests.txt' _learnerSendModelFile = 'send_model.txt' def __init__(self, path: str, id, level='NORMAL'): ''' Initializes logging level and the path to the logging files Logging level defines for example if all the averaged models are saved. Parameters ---------- path to the logging files id defines the folder that the files will be saved inside the path for example 'coordinator', 'worker0', etc. level of the logging ''' self._logLevel = level self._path = path self._id = str(id) self._logpath = os.path.join(self._path, self._id) if not os.path.isdir(self._logpath): os.mkdir(self._logpath) self._logModel_path_before = os.path.join(self._logpath, 'before') if not os.path.isdir(self._logModel_path_before): os.mkdir(self._logModel_path_before) self._logModel_path_after = os.path.join(self._logpath, 'after') if not os.path.isdir(self._logModel_path_after): os.mkdir(self._logModel_path_after) def logLearnerLoss(self, lossValue: float): ''' Logs loss suffered by a worker Parameters ---------- lossValue ''' header = "Time\t\t\tLossValue \n" logFilePath = os.path.join(self._logpath, self._learnerLossFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%.8f\n' % (time.time(), lossValue)) def logPredictionsLabels(self, predictions: list, labels: list): ''' Logs predictions of the worker along with the labels Allows to calculate accuracy afterwards. Parameters ---------- predictions - list of predictions made by a worker labels - true labels corresponding to predictions ''' header = "Time\t\t\t Predictions\t\t\t\t\t\t\t\t\t\t\t\t\t Label\n" logFilePath = os.path.join(self._logpath, self._learnerPredLabelFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) for i in range(len(predictions)): if isinstance(labels[i], int) and isinstance(predictions[i], int): output.write('%.3f\t\t%s\t\t%s\n' % (time.time(), str(predictions[i]), str(labels[i]))) elif isinstance(labels[i], float) and isinstance(predictions[i], float): output.write('%.3f\t\t%s\t\t%s\n' % (time.time(), str(predictions[i]), str(labels[i]))) elif isinstance(labels[i], int) and not isinstance(predictions[i], int): output.write('%.3f\t\t%s\t\t%s\n' % (time.time(), ','.join(map(str, predictions[i])), str(labels[i]))) else: output.write('%.3f\t\t%s\t\t%s\n' % (time.time(), ','.join(map(str, predictions[i])), ','.join(map(str, labels[i])))) def logViolation(self, localConditionMsg: str, localConditionHolds: bool): ''' Logs violations along with local condition message Parameters ---------- localConditionMsg localConditionHolds - defines if violation is observed, opposite value is logged for intuitive understandability of the logs if log level is DEBUG all the checks are written otherwise only violation (i.e. 
when it is False) is logged ''' header = "Time\t\t\t Condition\t\t\t Message \n" logFilePath = os.path.join(self._logpath, self._learnerViolationsFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) if self._logLevel == 'DEBUG' or not localConditionHolds: output.write('%.3f\t\t%i\t\t%s\n' % (time.time(), not localConditionHolds, localConditionMsg)) def logBalancing(self, flags: dict, violationNodes: list, balancingSet: list): ''' Logs balancing that happened on the coordinator side The nodes that reported a violation and the nodes that finally took part in the balancing process are written together with a flag if the synchronization was full. Parameters ---------- flags - returned flags from synchronization violationNodes - nodes in violation balancingSet - nodes that performed balancing ''' header = "Time\t\t\tSync\t\t\tViolationNode\t\tBalancingSet\n" logFilePath = os.path.join(self._logpath, self._learnerBalancingFile) if flags.get('setReference') is None: fullSync = False else: fullSync = flags['setReference'] with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%i\t\t\t%s\t\t\t%s\n' % (time.time(), fullSync, ','.join(map(str, violationNodes)), ','.join(map(str, balancingSet)))) def logAveragedModel(self, nodes : List[int], params: Parameters, flags:dict): ''' Logs averaged model, i.e., saves the parameters of an averaged model If loglevel is DEBUG the models are saved with timestamp, otherwise one and the same file is overwritten. Parameters ---------- params - weights of an averaged model ''' if "nosync" in flags: if self._logLevel == 'DEBUG': filename = 'averagedState_' + str(time.time()) + '_node_'+str(nodes[0]) self.logModel(filename = filename, params = params) else: filename = 'currentAveragedState_node_'+str(nodes[0]) self.logModel(filename = filename, params = params) else: if self._logLevel == 'DEBUG': filename = 'averagedState_' + str(time.time()) self.logModel(filename = filename, params = params) #np.save(os.path.join(self._logpath, modelName), params.get()) else: filename = 'currentAveragedState' self.logModel(filename = filename, params = params) #np.save(os.path.join(self._logpath, 'currentAveragedWeights'), params.get()) def logModel(self, filename : str, params: Parameters): ''' Logs a model, i.e., saves the parameters of a model Parameters ---------- filename - what name contains the model parameters params - weights of an averaged model ''' pickle.dump(params, open(os.path.join(self._logpath, filename), 'wb')) def logIntermediateModel(self, param: Parameters, checks): ''' Logs the intermediate model, i.e., saves the parameters of a model in DEBUG mode Parameters ---------- param - weights of the model ''' if self._logLevel == 'DEBUG': if checks == 0: filename = 'IntermediateState_before_' +str(time.time()) np.save(os.path.join(self._logModel_path_before, filename), param.get()) else: filename = 'IntermediateState_after_' + str(time.time()) np.save(os.path.join(self._logModel_path_after, filename), param.get()) ''' All the messages are logged with exchange used, topic used, identifier of the node if applicable and direction - was it sent or received. Ideally each send log line will correspond to one receive log line.
''' def logViolationMessage(self, exchange: str, topic: str, identifier, message_size: int, direction: str): ''' Logs violation message Parameters ---------- exchange topic identifier of the worker that sent the violation size of the message direction ''' header = "Time\t\t\tExchange\t\tTopic\t\tIdentifier\t\tMsg_size\tDirection\n" logFilePath = os.path.join(self._logpath, self._learnerViolationsFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n' % (time.time(), exchange, topic, str(identifier), str(message_size), direction)) def logRegistrationMessage(self, exchange: str, topic: str, identifier, message_size: int, direction: str): ''' Logs registration message Parameters ---------- exchange topic identifier of the worker that is registered direction ''' header = "Time\t\t\tExchange\t\tTopic\t\tIdentifier\t\tMsg_size\tDirection\n" logFilePath = os.path.join(self._logpath, self._learnerRegistrationsFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(identifier), str(message_size), direction)) def logDeregistrationMessage(self, exchange: str, topic: str, identifier, message_size: int, direction: str): ''' Logs deregistration message Parameters ---------- exchange topic identifier of the worker that is deregistered direction ''' header = "Time\t\t\tExchange\t\t\tTopic\t\tIdentifier\t\tMsg_size\tDirection\n" logFilePath = os.path.join(self._logpath, self._learnerRegistrationsFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(identifier), str(message_size), direction)) def logBalancingMessage(self, exchange: str, topic: str, identifier, message_size: int, direction: str): ''' Logs balancing message Parameters ---------- exchange topic identifier of the worker that is sending the parameters for balancing process direction ''' header = "Time\t\t\tExchange\t\t\tTopic\t\tIdentifier\t\tMsg_size\t\tDirection\n" logFilePath = os.path.join(self._logpath, self._learnerBalancingFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(identifier), str(message_size), direction)) def logBalancingRequestMessage(self, exchange: str, topic: str, message_size: int, direction: str, workerId = None): ''' Logs balancing request message Parameters ---------- exchange topic workerId of the worker that is requested. 
When the message is received we log the workerId, otherwise it is in the topic direction ''' header = "Time\t\t\tExchange\t\t\tTopic\t\tMsg_size\t\tDirection\t\tWorkerID\n" logFilePath = os.path.join(self._logpath, self._learnerBalancingRequestFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) if workerId is None: output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(message_size), direction,"-")) else: output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(message_size), direction, workerId)) def logSendModelMessage(self, exchange: str, topic: str, message_size: int, direction: str, workerId = None): ''' Logs message of sending averaged model after balancing Parameters ---------- exchange topic workerId of the worker that is getting the model direction ''' header = "Time\t\t\tExchange\t\tTopic\t\tMsg_size\tDirection\tWorkerID\n" logFilePath = os.path.join(self._logpath, self._learnerSendModelFile) with open(logFilePath, 'a') as output: if(os.path.getsize(logFilePath)== 0): output.write(header) if workerId is None: output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(message_size), direction,"-")) else: output.write('%.3f\t\t%s\t\t%s\t\t%s\t\t%s\t\t\t%s\n' % (time.time(), exchange, topic, str(message_size), direction, workerId))
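A minimal usage sketch of the logger above (my illustration, not part of the original module; the directory, worker id and logged values are made up):

# Minimal usage sketch for LearningLogger (illustrative values only).
import tempfile

logdir = tempfile.mkdtemp()                          # any writable directory works
logger = LearningLogger(path=logdir, id='worker0', level='DEBUG')
logger.logLearnerLoss(0.4823)                        # appends a timestamped row to worker0/losses.txt
logger.logPredictionsLabels([1, 0, 1], [1, 1, 1])    # appends to worker0/predictions.txt
logger.logViolation('local condition violated', localConditionHolds=False)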
\chapter{List of TSFI}\label{appendix.tsfi} \input{../common/tsfi} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End:
Formal statement is: lemma fixes f::"complex \<Rightarrow> complex" and z::complex assumes f_iso:"isolated_singularity_at f z" and f_ness:"not_essential f z" and f_nconst:"\<exists>\<^sub>Fw in (at z). f w\<noteq>0" shows zorder_inverse: "zorder (\<lambda>w. inverse (f w)) z = - zorder f z" and zor_poly_inverse: "\<forall>\<^sub>Fw in (at z). zor_poly (\<lambda>w. inverse (f w)) z w = inverse (zor_poly f z w)" Informal statement is: Suppose $f$ is a complex-valued function with an isolated singularity at $z$ that is not essential, and $f$ is not eventually zero near $z$. Then the order of $w \mapsto 1/f(w)$ at $z$ is the negative of the order of $f$ at $z$, and eventually near $z$ the zor_poly of $w \mapsto 1/f(w)$ equals the inverse of the zor_poly of $f$.
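A concrete instance of the statement (my illustration, not part of the original text): if $f(w) = (w - z)^n g(w)$ with $g$ holomorphic near $z$ and $g(z) \neq 0$, then $f$ has order $n$ at $z$ with zor_poly equal to $g$; since $1/f(w) = (w - z)^{-n} \cdot (1/g(w))$ and $1/g$ is holomorphic and non-vanishing at $z$, the inverse has order $-n$ at $z$ and its zor_poly is $1/g$ near $z$.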
/- Copyright (c) 2020 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro, Floris van Doorn, Yury Kudryashov -/ import topology.instances.nnreal import topology.algebra.ordered.monotone_continuity /-! # Square root of a real number In this file we define * `nnreal.sqrt` to be the square root of a nonnegative real number. * `real.sqrt` to be the square root of a real number, defined to be zero on negative numbers. Then we prove some basic properties of these functions. ## Implementation notes We define `nnreal.sqrt` as the noncomputable inverse to the function `x ↦ x * x`. We use general theory of inverses of strictly monotone functions to prove that `nnreal.sqrt x` exists. As a side effect, `nnreal.sqrt` is a bundled `order_iso`, so for `nnreal` numbers we get continuity as well as theorems like `sqrt x ≤ y ↔ x * x ≤ y` for free. Then we define `real.sqrt x` to be `nnreal.sqrt (real.to_nnreal x)`. We also define a Cauchy sequence `real.sqrt_aux (f : cau_seq ℚ abs)` which converges to `sqrt (mk f)` but do not prove (yet) that this sequence actually converges to `sqrt (mk f)`. ## Tags square root -/ open set filter open_locale filter nnreal topological_space namespace nnreal variables {x y : ℝ≥0} /-- Square root of a nonnegative real number. -/ @[pp_nodot] noncomputable def sqrt : ℝ≥0 ≃o ℝ≥0 := order_iso.symm $ strict_mono.order_iso_of_surjective (λ x, x * x) (λ x y h, mul_self_lt_mul_self x.2 h) $ (continuous_id.mul continuous_id).surjective tendsto_mul_self_at_top $ by simp [order_bot.at_bot_eq] lemma sqrt_le_sqrt_iff : sqrt x ≤ sqrt y ↔ x ≤ y := sqrt.le_iff_le lemma sqrt_lt_sqrt_iff : sqrt x < sqrt y ↔ x < y := sqrt.lt_iff_lt lemma sqrt_eq_iff_sq_eq : sqrt x = y ↔ y * y = x := sqrt.to_equiv.apply_eq_iff_eq_symm_apply.trans eq_comm lemma sqrt_le_iff : sqrt x ≤ y ↔ x ≤ y * y := sqrt.to_galois_connection _ _ lemma le_sqrt_iff : x ≤ sqrt y ↔ x * x ≤ y := (sqrt.symm.to_galois_connection _ _).symm @[simp] lemma sqrt_eq_zero : sqrt x = 0 ↔ x = 0 := sqrt_eq_iff_sq_eq.trans $ by rw [eq_comm, zero_mul] @[simp] lemma sqrt_zero : sqrt 0 = 0 := sqrt_eq_zero.2 rfl @[simp] lemma sqrt_one : sqrt 1 = 1 := sqrt_eq_iff_sq_eq.2 $ mul_one 1 @[simp] lemma mul_self_sqrt (x : ℝ≥0) : sqrt x * sqrt x = x := sqrt.symm_apply_apply x @[simp] lemma sqrt_mul_self (x : ℝ≥0) : sqrt (x * x) = x := sqrt.apply_symm_apply x @[simp] lemma sq_sqrt (x : ℝ≥0) : (sqrt x)^2 = x := by rw [sq, mul_self_sqrt x] @[simp] lemma sqrt_sq (x : ℝ≥0) : sqrt (x^2) = x := by rw [sq, sqrt_mul_self x] lemma sqrt_mul (x y : ℝ≥0) : sqrt (x * y) = sqrt x * sqrt y := by rw [sqrt_eq_iff_sq_eq, mul_mul_mul_comm, mul_self_sqrt, mul_self_sqrt] /-- `nnreal.sqrt` as a `monoid_with_zero_hom`. -/ noncomputable def sqrt_hom : monoid_with_zero_hom ℝ≥0 ℝ≥0 := ⟨sqrt, sqrt_zero, sqrt_one, sqrt_mul⟩ lemma sqrt_inv (x : ℝ≥0) : sqrt (x⁻¹) = (sqrt x)⁻¹ := sqrt_hom.map_inv x lemma sqrt_div (x y : ℝ≥0) : sqrt (x / y) = sqrt x / sqrt y := sqrt_hom.map_div x y lemma continuous_sqrt : continuous sqrt := sqrt.continuous end nnreal namespace real /-- An auxiliary sequence of rational numbers that converges to `real.sqrt (mk f)`. Currently this sequence is not used in `mathlib`. 
-/ def sqrt_aux (f : cau_seq ℚ abs) : ℕ → ℚ | 0 := rat.mk_nat (f 0).num.to_nat.sqrt (f 0).denom.sqrt | (n + 1) := let s := sqrt_aux n in max 0 $ (s + f (n+1) / s) / 2 theorem sqrt_aux_nonneg (f : cau_seq ℚ abs) : ∀ i : ℕ, 0 ≤ sqrt_aux f i | 0 := by rw [sqrt_aux, rat.mk_nat_eq, rat.mk_eq_div]; apply div_nonneg; exact int.cast_nonneg.2 (int.of_nat_nonneg _) | (n + 1) := le_max_left _ _ /- TODO(Mario): finish the proof theorem sqrt_aux_converges (f : cau_seq ℚ abs) : ∃ h x, 0 ≤ x ∧ x * x = max 0 (mk f) ∧ mk ⟨sqrt_aux f, h⟩ = x := begin rcases sqrt_exists (le_max_left 0 (mk f)) with ⟨x, x0, hx⟩, suffices : ∃ h, mk ⟨sqrt_aux f, h⟩ = x, { exact this.imp (λ h e, ⟨x, x0, hx, e⟩) }, apply of_near, suffices : ∃ δ > 0, ∀ i, abs (↑(sqrt_aux f i) - x) < δ / 2 ^ i, { rcases this with ⟨δ, δ0, hδ⟩, intros } end -/ /-- The square root of a real number. This returns 0 for negative inputs. -/ @[pp_nodot] noncomputable def sqrt (x : ℝ) : ℝ := nnreal.sqrt (real.to_nnreal x) /-quotient.lift_on x (λ f, mk ⟨sqrt_aux f, (sqrt_aux_converges f).fst⟩) (λ f g e, begin rcases sqrt_aux_converges f with ⟨hf, x, x0, xf, xs⟩, rcases sqrt_aux_converges g with ⟨hg, y, y0, yg, ys⟩, refine xs.trans (eq.trans _ ys.symm), rw [← @mul_self_inj_of_nonneg ℝ _ x y x0 y0, xf, yg], congr' 1, exact quotient.sound e end)-/ variables {x y : ℝ} @[simp, norm_cast] lemma coe_sqrt {x : ℝ≥0} : (nnreal.sqrt x : ℝ) = real.sqrt x := by rw [real.sqrt, real.to_nnreal_coe] @[continuity] lemma continuous_sqrt : continuous sqrt := nnreal.continuous_coe.comp $ nnreal.sqrt.continuous.comp nnreal.continuous_of_real theorem sqrt_eq_zero_of_nonpos (h : x ≤ 0) : sqrt x = 0 := by simp [sqrt, real.to_nnreal_eq_zero.2 h] theorem sqrt_nonneg (x : ℝ) : 0 ≤ sqrt x := nnreal.coe_nonneg _ @[simp] theorem mul_self_sqrt (h : 0 ≤ x) : sqrt x * sqrt x = x := by rw [sqrt, ← nnreal.coe_mul, nnreal.mul_self_sqrt, real.coe_to_nnreal _ h] @[simp] theorem sqrt_mul_self (h : 0 ≤ x) : sqrt (x * x) = x := (mul_self_inj_of_nonneg (sqrt_nonneg _) h).1 (mul_self_sqrt (mul_self_nonneg _)) theorem sqrt_eq_iff_mul_self_eq (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x = y ↔ y * y = x := ⟨λ h, by rw [← h, mul_self_sqrt hx], λ h, by rw [← h, sqrt_mul_self hy]⟩ @[simp] theorem sq_sqrt (h : 0 ≤ x) : (sqrt x)^2 = x := by rw [sq, mul_self_sqrt h] @[simp] theorem sqrt_sq (h : 0 ≤ x) : sqrt (x ^ 2) = x := by rw [sq, sqrt_mul_self h] theorem sqrt_eq_iff_sq_eq (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x = y ↔ y ^ 2 = x := by rw [sq, sqrt_eq_iff_mul_self_eq hx hy] theorem sqrt_mul_self_eq_abs (x : ℝ) : sqrt (x * x) = |x| := by rw [← abs_mul_abs_self x, sqrt_mul_self (abs_nonneg _)] theorem sqrt_sq_eq_abs (x : ℝ) : sqrt (x ^ 2) = |x| := by rw [sq, sqrt_mul_self_eq_abs] @[simp] theorem sqrt_zero : sqrt 0 = 0 := by simp [sqrt] @[simp] theorem sqrt_one : sqrt 1 = 1 := by simp [sqrt] @[simp] theorem sqrt_le_sqrt_iff (hy : 0 ≤ y) : sqrt x ≤ sqrt y ↔ x ≤ y := by rw [sqrt, sqrt, nnreal.coe_le_coe, nnreal.sqrt_le_sqrt_iff, real.to_nnreal_le_to_nnreal_iff hy] @[simp] theorem sqrt_lt_sqrt_iff (hx : 0 ≤ x) : sqrt x < sqrt y ↔ x < y := lt_iff_lt_of_le_iff_le (sqrt_le_sqrt_iff hx) theorem sqrt_lt_sqrt_iff_of_pos (hy : 0 < y) : sqrt x < sqrt y ↔ x < y := by rw [sqrt, sqrt, nnreal.coe_lt_coe, nnreal.sqrt_lt_sqrt_iff, to_nnreal_lt_to_nnreal_iff hy] theorem sqrt_le_sqrt (h : x ≤ y) : sqrt x ≤ sqrt y := by { rw [sqrt, sqrt, nnreal.coe_le_coe, nnreal.sqrt_le_sqrt_iff], exact to_nnreal_le_to_nnreal h } theorem sqrt_lt_sqrt (hx : 0 ≤ x) (h : x < y) : sqrt x < sqrt y := (sqrt_lt_sqrt_iff hx).2 h theorem sqrt_le_iff : sqrt x ≤ y ↔ 0 ≤ y ∧ 
x ≤ y ^ 2 := begin rw [← and_iff_right_of_imp (λ h, (sqrt_nonneg x).trans h), and.congr_right_iff], exact sqrt_le_left end /- note: if you want to conclude `x ≤ sqrt y`, then use `le_sqrt_of_sq_le`. if you have `x > 0`, consider using `le_sqrt'` -/ theorem le_sqrt (hx : 0 ≤ x) (hy : 0 ≤ y) : x ≤ sqrt y ↔ x ^ 2 ≤ y := by rw [mul_self_le_mul_self_iff hx (sqrt_nonneg _), sq, mul_self_sqrt hy] theorem le_sqrt' (hx : 0 < x) : x ≤ sqrt y ↔ x ^ 2 ≤ y := by { rw [sqrt, ← nnreal.coe_mk x hx.le, nnreal.coe_le_coe, nnreal.le_sqrt_iff, real.le_to_nnreal_iff_coe_le', sq, nnreal.coe_mul], exact mul_pos hx hx } theorem abs_le_sqrt (h : x^2 ≤ y) : |x| ≤ sqrt y := by rw ← sqrt_sq_eq_abs; exact sqrt_le_sqrt h theorem sq_le (h : 0 ≤ y) : x^2 ≤ y ↔ -sqrt y ≤ x ∧ x ≤ sqrt y := begin split, { simpa only [abs_le] using abs_le_sqrt }, { rw [← abs_le, ← sq_abs], exact (le_sqrt (abs_nonneg x) h).mp }, end theorem neg_sqrt_le_of_sq_le (h : x^2 ≤ y) : -sqrt y ≤ x := ((sq_le ((sq_nonneg x).trans h)).mp h).1 theorem le_sqrt_of_sq_le (h : x^2 ≤ y) : x ≤ sqrt y := ((sq_le ((sq_nonneg x).trans h)).mp h).2 @[simp] theorem sqrt_inj (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x = sqrt y ↔ x = y := by simp [le_antisymm_iff, hx, hy] @[simp] theorem sqrt_eq_zero (h : 0 ≤ x) : sqrt x = 0 ↔ x = 0 := by simpa using sqrt_inj h (le_refl _) theorem sqrt_eq_zero' : sqrt x = 0 ↔ x ≤ 0 := by rw [sqrt, nnreal.coe_eq_zero, nnreal.sqrt_eq_zero, real.to_nnreal_eq_zero] theorem sqrt_ne_zero (h : 0 ≤ x) : sqrt x ≠ 0 ↔ x ≠ 0 := by rw [not_iff_not, sqrt_eq_zero h] theorem sqrt_ne_zero' : sqrt x ≠ 0 ↔ 0 < x := by rw [← not_le, not_iff_not, sqrt_eq_zero'] @[simp] theorem sqrt_pos : 0 < sqrt x ↔ 0 < x := lt_iff_lt_of_le_iff_le (iff.trans (by simp [le_antisymm_iff, sqrt_nonneg]) sqrt_eq_zero') @[simp] theorem sqrt_mul (hx : 0 ≤ x) (y : ℝ) : sqrt (x * y) = sqrt x * sqrt y := by simp_rw [sqrt, ← nnreal.coe_mul, nnreal.coe_eq, real.to_nnreal_mul hx, nnreal.sqrt_mul] @[simp] theorem sqrt_mul' (x) {y : ℝ} (hy : 0 ≤ y) : sqrt (x * y) = sqrt x * sqrt y := by rw [mul_comm, sqrt_mul hy, mul_comm] @[simp] theorem sqrt_inv (x : ℝ) : sqrt x⁻¹ = (sqrt x)⁻¹ := by rw [sqrt, real.to_nnreal_inv, nnreal.sqrt_inv, nnreal.coe_inv, sqrt] @[simp] theorem sqrt_div (hx : 0 ≤ x) (y : ℝ) : sqrt (x / y) = sqrt x / sqrt y := by rw [division_def, sqrt_mul hx, sqrt_inv, division_def] @[simp] theorem div_sqrt : x / sqrt x = sqrt x := begin cases le_or_lt x 0, { rw [sqrt_eq_zero'.mpr h, div_zero] }, { rw [div_eq_iff (sqrt_ne_zero'.mpr h), mul_self_sqrt h.le] }, end theorem sqrt_div_self' : sqrt x / x = 1 / sqrt x := by rw [←div_sqrt, one_div_div, div_sqrt] theorem sqrt_div_self : sqrt x / x = (sqrt x)⁻¹ := by rw [sqrt_div_self', one_div] theorem lt_sqrt (hx : 0 ≤ x) (hy : 0 ≤ y) : x < sqrt y ↔ x ^ 2 < y := by rw [mul_self_lt_mul_self_iff hx (sqrt_nonneg y), sq, mul_self_sqrt hy] theorem sq_lt : x^2 < y ↔ -sqrt y < x ∧ x < sqrt y := begin split, { simpa only [← sqrt_lt_sqrt_iff (sq_nonneg x), sqrt_sq_eq_abs] using abs_lt.mp }, { rw [← abs_lt, ← sq_abs], exact λ h, (lt_sqrt (abs_nonneg x) (sqrt_pos.mp (lt_of_le_of_lt (abs_nonneg x) h)).le).mp h }, end theorem neg_sqrt_lt_of_sq_lt (h : x^2 < y) : -sqrt y < x := (sq_lt.mp h).1 theorem lt_sqrt_of_sq_lt (h : x^2 < y) : x < sqrt y := (sq_lt.mp h).2 end real open real variables {α : Type*} lemma filter.tendsto.sqrt {f : α → ℝ} {l : filter α} {x : ℝ} (h : tendsto f l (𝓝 x)) : tendsto (λ x, sqrt (f x)) l (𝓝 (sqrt x)) := (continuous_sqrt.tendsto _).comp h variables [topological_space α] {f : α → ℝ} {s : set α} {x : α} lemma continuous_within_at.sqrt 
(h : continuous_within_at f s x) : continuous_within_at (λ x, sqrt (f x)) s x := h.sqrt lemma continuous_at.sqrt (h : continuous_at f x) : continuous_at (λ x, sqrt (f x)) x := h.sqrt lemma continuous_on.sqrt (h : continuous_on f s) : continuous_on (λ x, sqrt (f x)) s := λ x hx, (h x hx).sqrt @[continuity] lemma continuous.sqrt (h : continuous f) : continuous (λ x, sqrt (f x)) := continuous_sqrt.comp h
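A note on the recurrence behind `sqrt_aux` (a standard fact recorded here for orientation; the file itself leaves the convergence proof as a TODO): the step $s \mapsto (s + a/s)/2$ is Heron's (Newton's) method for $x^2 = a$. For $a > 0$ and $x_k > 0$ one has $$x_{k+1} - \sqrt{a} = \frac{(x_k - \sqrt{a})^2}{2x_k},$$ so the error is squared at each step and the iterates converge quadratically to $\sqrt{a}$; the `max 0` guard simply keeps the sequence nonnegative when the Cauchy sequence `f` takes small or negative values.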
#include <ceed.h> #include <petsc.h> #include "../problems/mooney-rivlin.h" // Build libCEED context object PetscErrorCode PhysicsContext_MR(MPI_Comm comm, Ceed ceed, Units *units, CeedQFunctionContext *ctx) { PetscErrorCode ierr; Physics_MR phys; PetscFunctionBegin; ierr = PetscMalloc1(1, units); CHKERRQ(ierr); ierr = PetscMalloc1(1, &phys); CHKERRQ(ierr); ierr = ProcessPhysics_MR(comm, phys, *units); CHKERRQ(ierr); CeedQFunctionContextCreate(ceed, ctx); CeedQFunctionContextSetData(*ctx, CEED_MEM_HOST, CEED_COPY_VALUES, sizeof(*phys), phys); ierr = PetscFree(phys); CHKERRQ(ierr); PetscFunctionReturn(0); } // Build libCEED smoother context object PetscErrorCode PhysicsSmootherContext_MR(MPI_Comm comm, Ceed ceed, CeedQFunctionContext ctx, CeedQFunctionContext *ctx_smoother) { PetscErrorCode ierr; PetscScalar nu_smoother = 0; PetscBool nu_flag = PETSC_FALSE; Physics_MR phys, phys_smoother; PetscFunctionBegin; ierr = PetscOptionsBegin(comm, NULL, "Mooney Rivlin physical parameters for smoother", NULL); CHKERRQ(ierr); ierr = PetscOptionsScalar("-nu_smoother", "Poisson's ratio for smoother", NULL, nu_smoother, &nu_smoother, &nu_flag); CHKERRQ(ierr); if (nu_smoother < 0 || nu_smoother >= 0.5) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Mooney-Rivlin model requires Poisson ratio -nu option in [0, .5)"); ierr = PetscOptionsEnd(); CHKERRQ(ierr); // End of setting Physics if (nu_flag) { // Copy context CeedQFunctionContextGetData(ctx, CEED_MEM_HOST, &phys); ierr = PetscMalloc1(1, &phys_smoother); CHKERRQ(ierr); ierr = PetscMemcpy(phys_smoother, phys, sizeof(*phys)); CHKERRQ(ierr); CeedQFunctionContextRestoreData(ctx, &phys); // Create smoother context CeedQFunctionContextCreate(ceed, ctx_smoother); phys_smoother->lambda = 2 * (phys_smoother->mu_1 + phys_smoother->mu_2) * nu_smoother / (1 - 2*nu_smoother); CeedQFunctionContextSetData(*ctx_smoother, CEED_MEM_HOST, CEED_COPY_VALUES, sizeof(*phys_smoother), phys_smoother); ierr = PetscFree(phys_smoother); CHKERRQ(ierr); } else { *ctx_smoother = NULL; } PetscFunctionReturn(0); } // Process physics options - Mooney-Rivlin PetscErrorCode ProcessPhysics_MR(MPI_Comm comm, Physics_MR phys, Units units) { PetscErrorCode ierr; PetscReal nu = -1; phys->mu_1 = -1; phys->mu_2 = -1; phys->lambda = -1; units->meter = 1; // 1 meter in scaled length units units->second = 1; // 1 second in scaled time units units->kilogram = 1; // 1 kilogram in scaled mass units PetscFunctionBeginUser; ierr = PetscOptionsBegin(comm, NULL, "Mooney Rivlin physical parameters", NULL); CHKERRQ(ierr); ierr = PetscOptionsScalar("-mu_1", "Material Property mu_1", NULL, phys->mu_1, &phys->mu_1, NULL); CHKERRQ(ierr); if (phys->mu_1 < 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Mooney-Rivlin model requires non-negative -mu_1 option (Pa)"); ierr = PetscOptionsScalar("-mu_2", "Material Property mu_2", NULL, phys->mu_2, &phys->mu_2, NULL); CHKERRQ(ierr); if (phys->mu_2 < 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Mooney-Rivlin model requires non-negative -mu_2 option (Pa)"); ierr = PetscOptionsScalar("-nu", "Poisson ratio", NULL, nu, &nu, NULL); CHKERRQ(ierr); if (nu < 0 || nu >= 0.5) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Mooney-Rivlin model requires Poisson ratio -nu option in [0, .5)"); phys->lambda = 2 * (phys->mu_1 + phys->mu_2) * nu / (1 - 2*nu); ierr = PetscOptionsScalar("-units_meter", "1 meter in scaled length units", NULL, units->meter, &units->meter, NULL); CHKERRQ(ierr); units->meter = fabs(units->meter); ierr = PetscOptionsScalar("-units_second", "1 second in scaled time units", NULL, 
units->second, &units->second, NULL); CHKERRQ(ierr); units->second = fabs(units->second); ierr = PetscOptionsScalar("-units_kilogram", "1 kilogram in scaled mass units", NULL, units->kilogram, &units->kilogram, NULL); CHKERRQ(ierr); units->kilogram = fabs(units->kilogram); ierr = PetscOptionsEnd(); CHKERRQ(ierr); // End of setting Physics // Define derived units units->Pascal = units->kilogram / (units->meter * PetscSqr(units->second)); // Scale material parameters based on units of Pa phys->mu_1 *= units->Pascal; phys->mu_2 *= units->Pascal; phys->lambda *= units->Pascal; PetscFunctionReturn(0); };
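For orientation, the Lamé parameter set in the code above satisfies $\lambda = \dfrac{2(\mu_1 + \mu_2)\,\nu}{1 - 2\nu}$. A worked number (hypothetical inputs, my illustration): with $\mu_1 = \mu_2 = 1\,\mathrm{MPa}$ and $\nu = 0.3$, $\lambda = 2 \cdot 2 \cdot 0.3 / 0.4 = 3\,\mathrm{MPa}$. As $\nu \to 0.5$ the denominator tends to $0$ and $\lambda \to \infty$ (the incompressible limit), which is why both option handlers reject $\nu \ge 0.5$.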
(** Use some very basic facilities of mathcomp library *) From mathcomp Require Import ssreflect. Module My. (** We introduce these definitions inside a new module to avoid name clashes with the standard library later *) Inductive bool : Type := | true | false. Check true : bool. Check true. (** Definitions (not part of Coq's type theory, by the way, it's a meta-linguistic feature) *) Definition idb := fun b : bool => b. Check (fun b : bool => b). Check idb. (** Pattern-matching *) Definition negb (b : bool) := match b with | true => false | false => true end. Compute idb true. Compute idb false. Compute negb true. Compute negb false. Variable c : bool. Compute idb c. Compute negb c. Definition andb (b c : bool) : bool := match b with | true => c | false => false end. (** Symbolic computations *) Compute andb c true. Compute andb c false. Compute andb false c. (** Peano numbers -- the first truly inductive type *) Inductive nat : Type := | S of nat | O. Print nat. Check O. Check S. Check (S O). Check S (S O). Check S (S (S O)). (** Incrementing function *) Definition inc := S. (** [Definition inc (n : nat) := S n.] is the same definition, only eta-expanded *) (** eta-expansion is baked into Coq' notion of definitional equality, i.e (fun x => f x) and f are definitionally equal but extensionally equal functions are not necessarily equal, i.e. (forall x, f x = g x -> f = g) does not have to hold. *) Print inc. Compute inc (S (S O)). Definition inc2 (n : nat) := S (S n). Compute inc2 (S (S O)). (** predecessor function *) Definition pred (n : nat) : nat := match n with | S n' => n' | O => O (* truncation! Coq's functions are total *) end. (** Some options to go about implementing [pred] function: pred : nat -> nat (* way to go *) pred : nat -> option nat pred : forall (n : nat), (n <> 0) -> nat *) (** Addition of natural numbers *) (** [{struct n}] means structural recursion on parameter [n]. Coq can infer the [struct] annotation in this case. *) Fixpoint addn (n m : nat) {struct n} : nat := match n with | S n' => S (addn n' m) | O => m end. Compute addn (S (S O)) (S (S O)). (* 2 + 2 is 4 *) (** Alternative implementation by recursion on the second parameter *) Fixpoint addn' (n m : nat) : nat := match m with | S m' => S (addn' n m') | O => n end. Print addn'. Fixpoint muln (n m : nat) : nat := match n with | S n' => addn m (muln n' m) | O => O end. Compute muln (S (S (S O))) (S (S O)). (* 3 * 2 is 6 *) Compute muln (S (S O)) O. (* 2 * 0 is 0 *) Definition square (n : nat) : nat := muln n n. Definition two := (S (S O)). Definition apply2 (f : nat -> nat) (n : nat) := f (f n). Eval hnf in (apply2 (apply2 square) two). (** Various reduction strategies: - Eval cbn in ... call-by-name - Eval lazy in ... call-by-need - Eval cbv in ... call-by-value - Eval compute in ... call-by-value (cbv's synonym) - Eval vm_compute in ... call-by-value using a bytecode virtual machine [Compute] is synonym for [Eval vm_compute in] - Eval native_compute in ... compile to OCaml and evaluate *) End My. (** After closing a module, the identifiers defined in it, get prefixed with the module's name *) Check My.apply2. From mathcomp Require Import ssrfun ssrbool ssrnat. (** Some interactive queries *) About nat. About S. Locate ".+1". (* Notation "n .+1" := (S n). *) (** Apply [n] times a function [f] on natural numbers to an input [x] *) Definition applyn (f : nat -> nat) := fix rec (n : nat) (x : nat) := if n is n'.+1 then rec n' (f x) else x. (* a test *) Compute applyn S 5 42. 
(** An alternative implementation whose behavior is different when we evaluate it symbolically *) Definition applyn' := fix rec (f : nat -> nat) (n : nat) (x : nat) := if n is n'.+1 then rec f n' (f x) else x. Axiom fun_ext : forall (A B : Type) (f g : A -> B), (forall x, f x = g x) -> f = g. (** A way of defining [applyn] using Coq's section mechanism *) Section Applyn. Variable f : nat -> nat. Fixpoint applyn'' (n : nat) (x : nat) : nat := if n is n'.+1 then applyn'' n' (f x) else x. Variable n : nat. Compute applyn'' (S n) 0. Print applyn''. End Applyn. Print applyn''. (* compare the output to the one inside the above section *) (** A proposition which can never be constructed (in the empty context) *) Inductive False : Prop := . (* no constructors! *) Print False. (** Let's illustrate why "in the empty context" is important *) Check (fun f : False => f (* here we construct a value of type [False], but the context is not empty, since the function parameter becomes part of context here *) ). (** Since it's impossible to construct a value of an empty type, it's impossible to call a function of type False -> SomeType *) (** ... provided we are talking about empty contexts again: *) Check (fun g : False => (fun f : False => f) g (* call [fun f : False => f] with [g] as the argument *) ). (** This is why Coq does not allow non-terminating functions *) Fail Fixpoint loop (n : nat) : False := loop n. (* loop : nat -> False *) (** If this were allowed, we could construct a value of an empty type in the empty context like so: *) Fail Check (loop O : False). (** And this would preclude Coq from being a logic *) (** To be continued... *)
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: work_gga_x *) (* prefix: gga_k_dk_params *params; assert(p->params != NULL); params = (gga_k_dk_params * )(p->params); *) f := x -> add(1*params_a_aa[i]*x^(2*(i-1)), i=1..5) / add(1*params_a_bb[i]*x^(2*(i-1)), i=1..5):
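In formula form (reconstructed from the Maple above), the factor is a ratio of even polynomials in $x$: $f(x) = \dfrac{\sum_{i=1}^{5} a_i\, x^{2(i-1)}}{\sum_{i=1}^{5} b_i\, x^{2(i-1)}}$, where $a_i$ and $b_i$ are the entries of params_a_aa and params_a_bb.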
% This program is free software; you can redistribute it and/or modify % it under the terms of the GNU General Public License as published by % the Free Software Foundation; either version 2 of the License, or % (at your option) any later version. % % This program is distributed in the hope that it will be useful, % but WITHOUT ANY WARRANTY; without even the implied warranty of % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the % GNU General Public License for more details. % % You should have received a copy of the GNU General Public License % along with this program; if not, write to the Free Software % Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA %function gdpowert %GDPOWERT Unit test for the function GDPOWER. % O. Lemoine - August 1996. N=256; % K=1 : constant group delay [sig,gd]=gdpower(N,1); t0=N; t=(1:N); if any(gd~=t0), error('gdpower test 1 failed'); end if any(sig(t~=t0)>sqrt(eps)), error('gdpower test 2 failed'); end % K=2 : linear group delay [sig,gd]=gdpower(N,2); ifl=instfreq(sig); [s,iflaw]=fmlin(N); if any(abs(ifl(3:N-2)-iflaw(3:N-2))>5e-2), error('gdpower test 3 failed'); end % K=1/2 [sig,gpd]=gdpower(N,1/2); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 4 failed'); end; % K=0 [sig,gpd]=gdpower(N,0); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 5 failed'); end; % K=-1 [sig,gpd]=gdpower(N,-1); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 6 failed'); end; N=221; % K=1 : constant group delay [sig,gd]=gdpower(N,1); t0=N; t=(1:N); if any(gd~=t0), error('gdpower test 7 failed'); end if any(sig(t~=t0)>sqrt(eps)), error('gdpower test 8 failed'); end % K=2 : linear group delay [sig,gd]=gdpower(N,2); ifl=instfreq(sig); [s,iflaw]=fmlin(N); if any(abs(ifl(5:N-2)-iflaw(5:N-2))>5e-2), error('gdpower test 9 failed'); end % K=1/2 [sig,gpd]=gdpower(N,1/2); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 10 failed'); end; % K=0 [sig,gpd]=gdpower(N,0); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 11 failed'); end; % K=-1 [sig,gpd]=gdpower(N,-1); fnorm=linspace(0.01,0.45,108); fnorm=fnorm(2:108); gd=sgrpdlay(sig,fnorm); if any(abs(gd-gpd(4:110)')/N>5e-2), error('gdpower test 12 failed'); end;
module Tactic.Reflection.DeBruijn where open import Prelude hiding (abs) open import Builtin.Reflection open import Container.Traversable record DeBruijn {a} (A : Set a) : Set a where field strengthenFrom : (from n : Nat) → A → Maybe A weakenFrom : (from n : Nat) → A → A strengthen : Nat → A → Maybe A strengthen 0 = just strengthen n = strengthenFrom 0 n weaken : Nat → A → A weaken zero = id weaken n = weakenFrom 0 n open DeBruijn {{...}} public patternBindings : List (Arg Pattern) → Nat patternBindings = binds where binds : List (Arg Pattern) → Nat bind : Pattern → Nat binds [] = 0 binds (arg _ a ∷ as) = bind a + binds as bind (con c ps) = binds ps bind dot = 1 bind (var _) = 1 bind (lit l) = 0 bind (proj x) = 0 bind absurd = 0 private Str : Set → Set Str A = Nat → Nat → A → Maybe A strVar : Str Nat strVar lo n x = if x <? lo then just x else if x <? lo + n then nothing else just (x - n) strArgs : Str (List (Arg Term)) strArg : Str (Arg Term) strSort : Str Sort strClauses : Str (List Clause) strClause : Str Clause strAbsTerm : Str (Abs Term) strAbsType : Str (Abs Type) strTerm : Str Term strTerm lo n (var x args) = var <$> strVar lo n x <*> strArgs lo n args strTerm lo n (con c args) = con c <$> strArgs lo n args strTerm lo n (def f args) = def f <$> strArgs lo n args strTerm lo n (meta x args) = meta x <$> strArgs lo n args strTerm lo n (lam v t) = lam v <$> strAbsTerm lo n t strTerm lo n (pi a b) = pi <$> strArg lo n a <*> strAbsType lo n b strTerm lo n (agda-sort s) = agda-sort <$> strSort lo n s strTerm lo n (lit l) = just (lit l) strTerm lo n (pat-lam _ _) = just unknown -- todo strTerm lo n unknown = just unknown strAbsTerm lo n (abs s t) = abs s <$> strTerm (suc lo) n t strAbsType lo n (abs s t) = abs s <$> strTerm (suc lo) n t strArgs lo n [] = just [] strArgs lo n (x ∷ args) = _∷_ <$> strArg lo n x <*> strArgs lo n args strArg lo n (arg i v) = arg i <$> strTerm lo n v strSort lo n (set t) = set <$> strTerm lo n t strSort lo n (lit l) = just (lit l) strSort lo n unknown = just unknown strClauses lo k [] = just [] strClauses lo k (c ∷ cs) = _∷_ <$> strClause lo k c <*> strClauses lo k cs strClause lo k (clause ps b) = clause ps <$> strTerm (lo + patternBindings ps) k b strClause lo k (absurd-clause ps) = just (absurd-clause ps) private Wk : Set → Set Wk A = Nat → Nat → A → A wkVar : Wk Nat wkVar lo k x = if x <? 
lo then x else x + k wkArgs : Wk (List (Arg Term)) wkArg : Wk (Arg Term) wkSort : Wk Sort wkClauses : Wk (List Clause) wkClause : Wk Clause wkAbsTerm : Wk (Abs Term) wk : Wk Term wk lo k (var x args) = var (wkVar lo k x) (wkArgs lo k args) wk lo k (con c args) = con c (wkArgs lo k args) wk lo k (def f args) = def f (wkArgs lo k args) wk lo k (meta x args) = meta x (wkArgs lo k args) wk lo k (lam v t) = lam v (wkAbsTerm lo k t) wk lo k (pi a b) = pi (wkArg lo k a) (wkAbsTerm lo k b) wk lo k (agda-sort s) = agda-sort (wkSort lo k s) wk lo k (lit l) = lit l wk lo k (pat-lam cs args) = pat-lam (wkClauses lo k cs) (wkArgs lo k args) wk lo k unknown = unknown wkAbsTerm lo k (abs s t) = abs s (wk (suc lo) k t) wkArgs lo k [] = [] wkArgs lo k (x ∷ args) = wkArg lo k x ∷ wkArgs lo k args wkArg lo k (arg i v) = arg i (wk lo k v) wkSort lo k (set t) = set (wk lo k t) wkSort lo k (lit n) = lit n wkSort lo k unknown = unknown wkClauses lo k [] = [] wkClauses lo k (c ∷ cs) = wkClause lo k c ∷ wkClauses lo k cs wkClause lo k (clause ps b) = clause ps (wk (lo + patternBindings ps) k b) wkClause lo k (absurd-clause ps) = absurd-clause ps -- Instances -- DeBruijnTraversable : ∀ {a} {F : Set a → Set a} {{_ : Traversable F}} {A : Set a} {{_ : DeBruijn A}} → DeBruijn (F A) strengthenFrom {{DeBruijnTraversable}} lo k = traverse (strengthenFrom lo k) weakenFrom {{DeBruijnTraversable}} lo k = fmap (weakenFrom lo k) instance DeBruijnNat : DeBruijn Nat strengthenFrom {{DeBruijnNat}} = strVar weakenFrom {{DeBruijnNat}} = wkVar DeBruijnTerm : DeBruijn Term strengthenFrom {{DeBruijnTerm}} = strTerm weakenFrom {{DeBruijnTerm}} = wk DeBruijnList : ∀ {a} {A : Set a} {{_ : DeBruijn A}} → DeBruijn (List A) DeBruijnList = DeBruijnTraversable DeBruijnVec : ∀ {a} {A : Set a} {{_ : DeBruijn A}} {n : Nat} → DeBruijn (Vec A n) DeBruijnVec = DeBruijnTraversable DeBruijnArg : {A : Set} {{_ : DeBruijn A}} → DeBruijn (Arg A) DeBruijnArg = DeBruijnTraversable DeBruijnMaybe : {A : Set} {{_ : DeBruijn A}} → DeBruijn (Maybe A) DeBruijnMaybe = DeBruijnTraversable -- Strip bound names (to ensure _==_ checks α-equality) -- Doesn't touch pattern variables in pattern lambdas. mutual stripBoundNames : Term → Term stripBoundNames (var x args) = var x (stripArgs args) stripBoundNames (con c args) = con c (stripArgs args) stripBoundNames (def f args) = def f (stripArgs args) stripBoundNames (lam v t) = lam v (stripAbs t) stripBoundNames (pat-lam cs args) = pat-lam (stripClauses cs) (stripArgs args) stripBoundNames (pi a b) = pi (stripArg a) (stripAbs b) stripBoundNames (agda-sort s) = agda-sort (stripSort s) stripBoundNames (lit l) = lit l stripBoundNames (meta x args) = meta x (stripArgs args) stripBoundNames unknown = unknown private stripArgs : List (Arg Term) → List (Arg Term) stripArgs [] = [] stripArgs (x ∷ xs) = stripArg x ∷ stripArgs xs stripArg : Arg Term → Arg Term stripArg (arg i t) = arg i (stripBoundNames t) stripAbs : Abs Term → Abs Term stripAbs (abs _ t) = abs "" (stripBoundNames t) stripClauses : List Clause → List Clause stripClauses [] = [] stripClauses (x ∷ xs) = stripClause x ∷ stripClauses xs stripClause : Clause → Clause stripClause (clause ps t) = clause ps (stripBoundNames t) stripClause (absurd-clause ps) = absurd-clause ps stripSort : Sort → Sort stripSort (set t) = set (stripBoundNames t) stripSort (lit n) = lit n stripSort unknown = unknown
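A small worked example of the index arithmetic above (my illustration): `wkVar lo k` leaves variables bound under the first `lo` binders untouched and shifts the remaining free ones up by `k`, so `wkVar 1 2 0 = 0` while `wkVar 1 2 1 = 3`; dually, `strVar 1 2 3 = just 1` but `strVar 1 2 1 = nothing`, since index 1 falls in the range `[lo, lo + n)` being stripped away.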
library(shiny) library(datasets) library(topicmodels) ## load data ## todo: generalize and clean up data = list() path = "/Users/mnelimar/Dropbox/projects/2016-topicmodelkritiikki/papereittain" files = list.files( path ) for( f in files ) { p <- paste( path , f, sep = '/' ) load( p ) data[[ model@k ]] <- model } ## start server shinyServer(function(input, output) { model <- reactive({ data[[ input$k ]] }) wordcount <- reactive({ input$n }) output$view <- renderTable({ terms( model(), wordcount() ) }) })
\subsection{Rational numbers} \subsubsection{Defining rational numbers} We previously defined integers in terms of natural numbers. Similarly we can define rational numbers in terms of integers. Division is not always possible within the integers; it is not true that \(\forall a,b \in \mathbb{I} (\neg (b=0)\rightarrow \exists c (b\cdot c=a))\) so we introduce rational numbers to provide quotients. A rational is an ordered pair of integers \((a,b)\) with \(b\neq 0\), encoded as a Kuratowski pair: \(\{\{a\},\{a,b\}\}\) So that: \(\{\{a\},\{a,b\}\}=\dfrac{a}{b}\) \subsubsection{Converting integers to rational numbers} An integer \(i\) can be shown as the rational number \((i,1)\), that is: \(i=\dfrac{i}{1}\) In what follows we write rationals as: \(a=\dfrac{a_1}{a_2}\) \(b=\dfrac{b_1}{b_2}\) \(c=\dfrac{c_1}{c_2}\) \subsubsection{Equivalence classes of rationals} There are an infinite number of ways to write any rational number, as with integers. \(\dfrac{1}{2}\) can be written as \(\dfrac{2}{4}\), \(\dfrac{-2}{-4}\) etc. The class of these terms forms an equivalence class: \(\dfrac{a}{b}\) and \(\dfrac{c}{d}\) denote the same rational exactly when \(a\cdot d=b\cdot c\). In particular, for any non-zero integer \(c\): \(\dfrac{a}{b}=\dfrac{ca}{cb}\) since \(a\cdot (cb)=b\cdot (ca)\).
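As a concrete check of the equivalence (my illustration): \(\dfrac{1}{2}\) and \(\dfrac{-2}{-4}\) name the same rational because \(1\cdot (-4)=2\cdot (-2)=-4\).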
-- | -- This module defines the @Density@ type, which is the result of -- doing a kernel density estimate on a sample of data. The idea -- is that the distribution of metrics can be recorded using the @kde@ -- function, and then sampled in the model using @sampleDensity@. {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveAnyClass #-} module Hypersphere.Density where import Control.DeepSeq import Control.Monad.Bayes.Class import Data.Aeson (ToJSON, FromJSON) import Data.Foldable import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as NonEmpty import qualified Data.Vector.Unboxed as UV import Data.Monoid (Any) import qualified Diagrams.Core as D import GHC.Generics import Graphics.Rendering.Chart.Backend.Diagrams import Graphics.Rendering.Chart.Easy import Linear.V2 (V2) import qualified Linear.V2 as V2 import qualified Statistics.Sample.KernelDensity as S -- | Represents a Kernel Density Estimate. -- -- TODO: Should we store the cumulative density to prevent recalculating -- it all the time when sampling? data Density = Density { dMesh :: !(UV.Vector Double) , dDensity :: !(UV.Vector Double) } deriving (Eq, Ord, Show, Generic, NFData) instance ToJSON Density instance FromJSON Density -- | Find the area under the curve of the probability density. Should -- be pretty close to 1. -- -- Useful for debugging. integrate :: Density -> Double integrate d@Density{..} = let spacing = UV.foldl1 subtract $ UV.take 2 dMesh zoids = getTrapezoids d in UV.sum $ UV.map (uncurry $ trapezoidalArea spacing) zoids -- | The area of a trapezoid. -- `trapezoidalArea width height1 height2` trapezoidalArea :: Double -> Double -> Double -> Double trapezoidalArea w h1 h2 = (h1 + h2) * 0.5 * w -- | Get the trapezoids that make up the area under the density curve. getTrapezoids :: Density -> UV.Vector (Double,Double) getTrapezoids Density{..} = UV.zip dDensity $ UV.tail dDensity -- | Cumulative area under the density curve. scanIntegral :: Density -> UV.Vector Double scanIntegral d@Density{..} = let spacing = UV.foldl1 subtract $ UV.take 2 dMesh zoids = getTrapezoids d in UV.scanl (\area trap -> area + uncurry (trapezoidalArea spacing) trap) 0 zoids -- | Create a density from a series of observations. There must be at least -- one observation. kde :: NonEmpty Double -> Density kde = uncurry Density . S.kde 512 . UV.fromList . toList -- | Plot the @Density@ to a file with the given name and title. plotDensity :: FilePath -> String -> Density -> IO () plotDensity file title Density{..} = toFile def file $ do let maxDensity = 1.1 * UV.maximum dDensity layout_title .= title layout_y_axis . laxis_generate .= scaledAxis def (0,maxDensity) plot $ line "" [UV.toList $ UV.zip dMesh dDensity] -- | Sample a value from the @Density@. sampleDensity :: MonadSample m => Density -> m Double sampleDensity d@Density{..} = do s <- random let mi = UV.findIndex (>=s) $ scanIntegral d o = maybe UV.last (\i -> (UV.! i)) mi dMesh return o
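In equations (reconstructed from the code above): with uniform mesh spacing $h$, `integrate` is the trapezoidal rule $\int \rho\,dx \approx \sum_i \tfrac{h}{2}(\rho_i + \rho_{i+1})$, `scanIntegral` is its running prefix sum (a discrete CDF), and `sampleDensity` performs inverse-CDF sampling: draw $u$ uniformly in $[0,1)$ and return the first mesh point whose cumulative area reaches $u$, falling back to the last mesh point if none does.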
lemma lborel_distr_plus: fixes c :: "'a::euclidean_space" shows "distr lborel borel ((+) c) = lborel"
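Informal statement is: The pushforward of Lebesgue measure under translation by a constant $c$ (i.e., under the map $x \mapsto c + x$) is Lebesgue measure again; in other words, Lebesgue measure on a Euclidean space is translation invariant.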
[GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E σ τ : Gal p h : ∀ (x : SplittingField p), x ∈ rootSet p (SplittingField p) → ↑σ x = ↑τ x ⊢ σ = τ [PROOFSTEP] refine' AlgEquiv.ext fun x => (AlgHom.mem_equalizer σ.toAlgHom τ.toAlgHom x).mp ((SetLike.ext_iff.mp _ x).mpr Algebra.mem_top) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E σ τ : Gal p h : ∀ (x : SplittingField p), x ∈ rootSet p (SplittingField p) → ↑σ x = ↑τ x x : SplittingField p ⊢ AlgHom.equalizer ↑σ ↑τ = ⊤ [PROOFSTEP] rwa [eq_top_iff, ← SplittingField.adjoin_rootSet, Algebra.adjoin_le_iff] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Splits (RingHom.id F) p f : Gal p x : SplittingField p ⊢ ↑f x = ↑default x [PROOFSTEP] obtain ⟨y, rfl⟩ := Algebra.mem_bot.mp ((SetLike.ext_iff.mp ((IsSplittingField.splits_iff _ p).mp h) x).mp Algebra.mem_top) [GOAL] case intro F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Splits (RingHom.id F) p f : Gal p y : F ⊢ ↑f (↑(algebraMap F (SplittingField p)) y) = ↑default (↑(algebraMap F (SplittingField p)) y) [PROOFSTEP] rw [AlgEquiv.commutes, AlgEquiv.commutes] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) ⊢ Function.Bijective (mapRoots p E) [PROOFSTEP] constructor [GOAL] case left F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) ⊢ Function.Injective (mapRoots p E) [PROOFSTEP] exact fun _ _ h => Subtype.ext (RingHom.injective _ (Subtype.ext_iff.mp h)) [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) ⊢ Function.Surjective (mapRoots p E) [PROOFSTEP] intro y [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] have key := roots_map (IsScalarTower.toAlgHom F p.SplittingField E : p.SplittingField →+* E) ((splits_id_iff_splits _).mpr (IsSplittingField.splits p.SplittingField p)) [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) key : roots (map (↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (map (algebraMap F (SplittingField p)) p)) = Multiset.map (↑↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (roots (map (algebraMap F (SplittingField p)) p)) ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] rw [map_map, AlgHom.comp_algebraMap] at key [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) key : roots (map (algebraMap F E) p) = Multiset.map (↑↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (roots (map (algebraMap F (SplittingField p)) p)) ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] have hy := Subtype.mem y [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) key : roots (map (algebraMap F E) p) = Multiset.map (↑↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (roots (map (algebraMap F (SplittingField p)) p)) hy : ↑y ∈ rootSet p E ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] simp only 
[rootSet, Finset.mem_coe, (Multiset.mem_toFinset), key, Multiset.mem_map] at hy [GOAL] case right F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) key : roots (map (algebraMap F E) p) = Multiset.map (↑↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (roots (map (algebraMap F (SplittingField p)) p)) hy : ∃ a, a ∈ roots (map (algebraMap F (SplittingField p)) p) ∧ ↑↑(IsScalarTower.toAlgHom F (SplittingField p) E) a = ↑y ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] rcases hy with ⟨x, hx1, hx2⟩ [GOAL] case right.intro.intro F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E h : Fact (Splits (algebraMap F E) p) y : ↑(rootSet p E) key : roots (map (algebraMap F E) p) = Multiset.map (↑↑(IsScalarTower.toAlgHom F (SplittingField p) E)) (roots (map (algebraMap F (SplittingField p)) p)) x : SplittingField p hx1 : x ∈ roots (map (algebraMap F (SplittingField p)) p) hx2 : ↑↑(IsScalarTower.toAlgHom F (SplittingField p) E) x = ↑y ⊢ ∃ a, mapRoots p E a = y [PROOFSTEP] exact ⟨⟨x, (@Multiset.mem_toFinset _ (Classical.decEq _) _ _).mpr hx1⟩, Subtype.ext hx2⟩ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E x✝ : ↑(rootSet p (SplittingField p)) ⊢ 1 • x✝ = x✝ [PROOFSTEP] ext [GOAL] case a F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E x✝ : ↑(rootSet p (SplittingField p)) ⊢ ↑(1 • x✝) = ↑x✝ [PROOFSTEP] rfl [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E x✝² x✝¹ : Gal p x✝ : ↑(rootSet p (SplittingField p)) ⊢ (x✝² * x✝¹) • x✝ = x✝² • x✝¹ • x✝ [PROOFSTEP] ext [GOAL] case a F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E x✝² x✝¹ : Gal p x✝ : ↑(rootSet p (SplittingField p)) ⊢ ↑((x✝² * x✝¹) • x✝) = ↑(x✝² • x✝¹ • x✝) [PROOFSTEP] rfl [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) x✝ : ↑(rootSet p E) ⊢ 1 • x✝ = x✝ [PROOFSTEP] simp only [smul_def, Equiv.apply_symm_apply, one_smul] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) x✝² x✝¹ : Gal p x✝ : ↑(rootSet p E) ⊢ (x✝² * x✝¹) • x✝ = x✝² • x✝¹ • x✝ [PROOFSTEP] simp only [smul_def, Equiv.apply_symm_apply, Equiv.symm_apply_apply, mul_smul] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : E ≃ₐ[F] E x : ↑(rootSet p E) ⊢ ↑(↑(restrict p E) ϕ • x) = ↑ϕ ↑x [PROOFSTEP] let ψ := AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F p.SplittingField E) [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : E ≃ₐ[F] E x : ↑(rootSet p E) ψ : SplittingField p ≃ₐ[F] { x // x ∈ AlgHom.range (IsScalarTower.toAlgHom F (SplittingField p) E) } := AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F (SplittingField p) E) ⊢ ↑(↑(restrict p E) ϕ • x) = ↑ϕ ↑x [PROOFSTEP] change ↑(ψ (ψ.symm _)) = ϕ x [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : E ≃ₐ[F] E x : ↑(rootSet p E) ψ : SplittingField p ≃ₐ[F] { x // x ∈ AlgHom.range (IsScalarTower.toAlgHom F (SplittingField p) E) } := AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F 
(SplittingField p) E) ⊢ ↑(↑ψ (↑(AlgEquiv.symm ψ) (↑↑(AlgHom.restrictNormalAux (↑ϕ) (SplittingField p)) (↑↑↑(AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F (SplittingField p) E)) ↑(↑(rootsEquivRoots p E).symm x))))) = ↑ϕ ↑x [PROOFSTEP] rw [AlgEquiv.apply_symm_apply ψ] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : E ≃ₐ[F] E x : ↑(rootSet p E) ψ : SplittingField p ≃ₐ[F] { x // x ∈ AlgHom.range (IsScalarTower.toAlgHom F (SplittingField p) E) } := AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F (SplittingField p) E) ⊢ ↑(↑↑(AlgHom.restrictNormalAux (↑ϕ) (SplittingField p)) (↑↑↑(AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F (SplittingField p) E)) ↑(↑(rootsEquivRoots p E).symm x))) = ↑ϕ ↑x [PROOFSTEP] change ϕ (rootsEquivRoots p E ((rootsEquivRoots p E).symm x)) = ϕ x [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : E ≃ₐ[F] E x : ↑(rootSet p E) ψ : SplittingField p ≃ₐ[F] { x // x ∈ AlgHom.range (IsScalarTower.toAlgHom F (SplittingField p) E) } := AlgEquiv.ofInjectiveField (IsScalarTower.toAlgHom F (SplittingField p) E) ⊢ ↑ϕ ↑(↑(rootsEquivRoots p E) (↑(rootsEquivRoots p E).symm x)) = ↑ϕ ↑x [PROOFSTEP] rw [Equiv.apply_symm_apply (rootsEquivRoots p E)] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ⊢ Function.Injective ↑(galActionHom p E) [PROOFSTEP] rw [injective_iff_map_eq_one] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ⊢ ∀ (a : Gal p), ↑(galActionHom p E) a = 1 → a = 1 [PROOFSTEP] intro ϕ hϕ [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : Gal p hϕ : ↑(galActionHom p E) ϕ = 1 ⊢ ϕ = 1 [PROOFSTEP] ext (x hx) [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : Gal p hϕ : ↑(galActionHom p E) ϕ = 1 x : SplittingField p hx : x ∈ rootSet p (SplittingField p) ⊢ ↑ϕ x = ↑1 x [PROOFSTEP] have key := Equiv.Perm.ext_iff.mp hϕ (rootsEquivRoots p E ⟨x, hx⟩) [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : Gal p hϕ : ↑(galActionHom p E) ϕ = 1 x : SplittingField p hx : x ∈ rootSet p (SplittingField p) key : ↑(↑(galActionHom p E) ϕ) (↑(rootsEquivRoots p E) { val := x, property := hx }) = ↑1 (↑(rootsEquivRoots p E) { val := x, property := hx }) ⊢ ↑ϕ x = ↑1 x [PROOFSTEP] change rootsEquivRoots p E (ϕ • (rootsEquivRoots p E).symm (rootsEquivRoots p E ⟨x, hx⟩)) = rootsEquivRoots p E ⟨x, hx⟩ at key [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : Gal p hϕ : ↑(galActionHom p E) ϕ = 1 x : SplittingField p hx : x ∈ rootSet p (SplittingField p) key : ↑(rootsEquivRoots p E) (ϕ • ↑(rootsEquivRoots p E).symm (↑(rootsEquivRoots p E) { val := x, property := hx })) = ↑(rootsEquivRoots p E) { val := x, property := hx } ⊢ ↑ϕ x = ↑1 x [PROOFSTEP] rw [Equiv.symm_apply_apply] at key [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Fact (Splits (algebraMap F E) p) ϕ : Gal p hϕ : 
↑(galActionHom p E) ϕ = 1 x : SplittingField p hx : x ∈ rootSet p (SplittingField p) key : ↑(rootsEquivRoots p E) (ϕ • { val := x, property := hx }) = ↑(rootsEquivRoots p E) { val := x, property := hx } ⊢ ↑ϕ x = ↑1 x [PROOFSTEP] exact Subtype.ext_iff.mp (Equiv.injective (rootsEquivRoots p E) key) [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Decidable (q = 0) hpq : p ∣ q ⊢ restrictDvd hpq = if hq : q = 0 then 1 else restrict p (SplittingField q) [PROOFSTEP] unfold restrictDvd [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : Decidable (q = 0) hpq : p ∣ q ⊢ (if hq : q = 0 then 1 else restrict p (SplittingField q)) = if hq : q = 0 then 1 else restrict p (SplittingField q) [PROOFSTEP] convert rfl [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p ∣ q hq : q ≠ 0 ⊢ Function.Surjective ↑(restrictDvd hpq) [PROOFSTEP] classical -- Porting note: was `simp only [restrictDvd_def, dif_neg hq, restrict_surjective]` haveI := Fact.mk <| splits_of_splits_of_dvd (algebraMap F q.SplittingField) hq (SplittingField.splits q) hpq simp only [restrictDvd_def, dif_neg hq] exact restrict_surjective _ _ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p ∣ q hq : q ≠ 0 ⊢ Function.Surjective ↑(restrictDvd hpq) [PROOFSTEP] haveI := Fact.mk <| splits_of_splits_of_dvd (algebraMap F q.SplittingField) hq (SplittingField.splits q) hpq [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p ∣ q hq : q ≠ 0 this : Fact (Splits (algebraMap F (SplittingField q)) p) ⊢ Function.Surjective ↑(restrictDvd hpq) [PROOFSTEP] simp only [restrictDvd_def, dif_neg hq] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p ∣ q hq : q ≠ 0 this : Fact (Splits (algebraMap F (SplittingField q)) p) ⊢ Function.Surjective ↑(restrict p (SplittingField q)) [PROOFSTEP] exact restrict_surjective _ _ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E ⊢ Function.Injective ↑(restrictProd p q) [PROOFSTEP] by_cases hpq : p * q = 0 [GOAL] case pos F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p * q = 0 ⊢ Function.Injective ↑(restrictProd p q) [PROOFSTEP] have : Unique (p * q).Gal := by rw [hpq]; infer_instance [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p * q = 0 ⊢ Unique (Gal (p * q)) [PROOFSTEP] rw [hpq] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p * q = 0 ⊢ Unique (Gal 0) [PROOFSTEP] infer_instance [GOAL] case pos F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : p * q = 0 this : Unique (Gal (p * q)) ⊢ Function.Injective ↑(restrictProd p q) [PROOFSTEP] exact fun f g _ => Eq.trans (Unique.eq_default f) (Unique.eq_default g).symm [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 ⊢ Function.Injective ↑(restrictProd p q) [PROOFSTEP] intro f g hfg [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrictProd p q) f = ↑(restrictProd p q) g ⊢ f = g [PROOFSTEP] classical simp only [restrictProd, 
restrictDvd_def] at hfg simp only [dif_neg hpq, MonoidHom.prod_apply, Prod.mk.inj_iff] at hfg ext (x hx) rw [rootSet_def, Polynomial.map_mul, Polynomial.roots_mul] at hx cases' Multiset.mem_add.mp (Multiset.mem_toFinset.mp hx) with h h · haveI : Fact (p.Splits (algebraMap F (p * q).SplittingField)) := ⟨splits_of_splits_of_dvd _ hpq (SplittingField.splits (p * q)) (dvd_mul_right p q)⟩ have key : x = algebraMap p.SplittingField (p * q).SplittingField ((rootsEquivRoots p _).invFun ⟨x, (@Multiset.mem_toFinset _ (Classical.decEq _) _ _).mpr h⟩) := Subtype.ext_iff.mp (Equiv.apply_symm_apply (rootsEquivRoots p _) ⟨x, _⟩).symm rw [key, ← AlgEquiv.restrictNormal_commutes, ← AlgEquiv.restrictNormal_commutes] exact congr_arg _ (AlgEquiv.ext_iff.mp hfg.1 _) · haveI : Fact (q.Splits (algebraMap F (p * q).SplittingField)) := ⟨splits_of_splits_of_dvd _ hpq (SplittingField.splits (p * q)) (dvd_mul_left q p)⟩ have key : x = algebraMap q.SplittingField (p * q).SplittingField ((rootsEquivRoots q _).invFun ⟨x, (@Multiset.mem_toFinset _ (Classical.decEq _) _ _).mpr h⟩) := Subtype.ext_iff.mp (Equiv.apply_symm_apply (rootsEquivRoots q _) ⟨x, _⟩).symm rw [key, ← AlgEquiv.restrictNormal_commutes, ← AlgEquiv.restrictNormal_commutes] exact congr_arg _ (AlgEquiv.ext_iff.mp hfg.2 _) · rwa [Ne.def, mul_eq_zero, map_eq_zero, map_eq_zero, ← mul_eq_zero] [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrictProd p q) f = ↑(restrictProd p q) g ⊢ f = g [PROOFSTEP] simp only [restrictProd, restrictDvd_def] at hfg [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(MonoidHom.prod (if hq : p * q = 0 then 1 else restrict p (SplittingField (p * q))) (if hq : p * q = 0 then 1 else restrict q (SplittingField (p * q)))) f = ↑(MonoidHom.prod (if hq : p * q = 0 then 1 else restrict p (SplittingField (p * q))) (if hq : p * q = 0 then 1 else restrict q (SplittingField (p * q)))) g ⊢ f = g [PROOFSTEP] simp only [dif_neg hpq, MonoidHom.prod_apply, Prod.mk.inj_iff] at hfg [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g ⊢ f = g [PROOFSTEP] ext (x hx) [GOAL] case neg.h F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ rootSet (p * q) (SplittingField (p * q)) ⊢ ↑f x = ↑g x [PROOFSTEP] rw [rootSet_def, Polynomial.map_mul, Polynomial.roots_mul] at hx [GOAL] case neg.h F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) ⊢ ↑f x = ↑g x case neg.h F : Type u_1 inst✝² : Field F p q : F[X] E : 
Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p * map (algebraMap F (SplittingField (p * q))) q))) ⊢ map (algebraMap F (SplittingField (p * q))) p * map (algebraMap F (SplittingField (p * q))) q ≠ 0 [PROOFSTEP] cases' Multiset.mem_add.mp (Multiset.mem_toFinset.mp hx) with h h [GOAL] case neg.h.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) p) ⊢ ↑f x = ↑g x [PROOFSTEP] haveI : Fact (p.Splits (algebraMap F (p * q).SplittingField)) := ⟨splits_of_splits_of_dvd _ hpq (SplittingField.splits (p * q)) (dvd_mul_right p q)⟩ [GOAL] case neg.h.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) p) this : Fact (Splits (algebraMap F (SplittingField (p * q))) p) ⊢ ↑f x = ↑g x [PROOFSTEP] have key : x = algebraMap p.SplittingField (p * q).SplittingField ((rootsEquivRoots p _).invFun ⟨x, (@Multiset.mem_toFinset _ (Classical.decEq _) _ _).mpr h⟩) := Subtype.ext_iff.mp (Equiv.apply_symm_apply (rootsEquivRoots p _) ⟨x, _⟩).symm [GOAL] case neg.h.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) p) this : Fact (Splits (algebraMap F (SplittingField (p * q))) p) key : x = ↑(algebraMap (SplittingField p) (SplittingField (p * q))) ↑(Equiv.invFun (rootsEquivRoots p (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p))) }) ⊢ ↑f x = ↑g x [PROOFSTEP] rw [key, ← AlgEquiv.restrictNormal_commutes, ← AlgEquiv.restrictNormal_commutes] [GOAL] case neg.h.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset 
(roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) p) this : Fact (Splits (algebraMap F (SplittingField (p * q))) p) key : x = ↑(algebraMap (SplittingField p) (SplittingField (p * q))) ↑(Equiv.invFun (rootsEquivRoots p (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p))) }) ⊢ ↑(algebraMap (SplittingField p) (SplittingField (p * q))) (↑(AlgEquiv.restrictNormal f (SplittingField p)) ↑(Equiv.invFun (rootsEquivRoots p (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p))) })) = ↑(algebraMap (SplittingField p) (SplittingField (p * q))) (↑(AlgEquiv.restrictNormal g (SplittingField p)) ↑(Equiv.invFun (rootsEquivRoots p (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p))) })) [PROOFSTEP] exact congr_arg _ (AlgEquiv.ext_iff.mp hfg.1 _) [GOAL] case neg.h.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) q) ⊢ ↑f x = ↑g x [PROOFSTEP] haveI : Fact (q.Splits (algebraMap F (p * q).SplittingField)) := ⟨splits_of_splits_of_dvd _ hpq (SplittingField.splits (p * q)) (dvd_mul_left q p)⟩ [GOAL] case neg.h.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) q) this : Fact (Splits (algebraMap F (SplittingField (p * q))) q) ⊢ ↑f x = ↑g x [PROOFSTEP] have key : x = algebraMap q.SplittingField (p * q).SplittingField ((rootsEquivRoots q _).invFun ⟨x, (@Multiset.mem_toFinset _ (Classical.decEq _) _ _).mpr h⟩) := Subtype.ext_iff.mp (Equiv.apply_symm_apply (rootsEquivRoots q _) ⟨x, _⟩).symm [GOAL] case neg.h.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) q) this : Fact (Splits (algebraMap F (SplittingField (p * q))) q) key : x = ↑(algebraMap (SplittingField q) (SplittingField (p * q))) ↑(Equiv.invFun (rootsEquivRoots q (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p 
* q))) q))) }) ⊢ ↑f x = ↑g x [PROOFSTEP] rw [key, ← AlgEquiv.restrictNormal_commutes, ← AlgEquiv.restrictNormal_commutes] [GOAL] case neg.h.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p) + roots (map (algebraMap F (SplittingField (p * q))) q))) h : x ∈ roots (map (algebraMap F (SplittingField (p * q))) q) this : Fact (Splits (algebraMap F (SplittingField (p * q))) q) key : x = ↑(algebraMap (SplittingField q) (SplittingField (p * q))) ↑(Equiv.invFun (rootsEquivRoots q (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) q))) }) ⊢ ↑(algebraMap (SplittingField q) (SplittingField (p * q))) (↑(AlgEquiv.restrictNormal f (SplittingField q)) ↑(Equiv.invFun (rootsEquivRoots q (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) q))) })) = ↑(algebraMap (SplittingField q) (SplittingField (p * q))) (↑(AlgEquiv.restrictNormal g (SplittingField q)) ↑(Equiv.invFun (rootsEquivRoots q (SplittingField (p * q))) { val := x, property := (_ : x ∈ Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) q))) })) [PROOFSTEP] exact congr_arg _ (AlgEquiv.ext_iff.mp hfg.2 _) [GOAL] case neg.h F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hpq : ¬p * q = 0 f g : Gal (p * q) hfg : ↑(restrict p (SplittingField (p * q))) f = ↑(restrict p (SplittingField (p * q))) g ∧ ↑(restrict q (SplittingField (p * q))) f = ↑(restrict q (SplittingField (p * q))) g x : SplittingField (p * q) hx : x ∈ ↑(Multiset.toFinset (roots (map (algebraMap F (SplittingField (p * q))) p * map (algebraMap F (SplittingField (p * q))) q))) ⊢ map (algebraMap F (SplittingField (p * q))) p * map (algebraMap F (SplittingField (p * q))) q ≠ 0 [PROOFSTEP] rwa [Ne.def, mul_eq_zero, map_eq_zero, map_eq_zero, ← mul_eq_zero] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p₁ q₁ p₂ q₂ : F[X] hq₁ : q₁ ≠ 0 hq₂ : q₂ ≠ 0 h₁ : Splits (algebraMap F (SplittingField q₁)) p₁ h₂ : Splits (algebraMap F (SplittingField q₂)) p₂ ⊢ Splits (algebraMap F (SplittingField (q₁ * q₂))) (p₁ * p₂) [PROOFSTEP] apply splits_mul [GOAL] case hf F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p₁ q₁ p₂ q₂ : F[X] hq₁ : q₁ ≠ 0 hq₂ : q₂ ≠ 0 h₁ : Splits (algebraMap F (SplittingField q₁)) p₁ h₂ : Splits (algebraMap F (SplittingField q₂)) p₂ ⊢ Splits (algebraMap F (SplittingField (q₁ * q₂))) p₁ [PROOFSTEP] rw [← (SplittingField.lift q₁ (splits_of_splits_of_dvd (algebraMap F (q₁ * q₂).SplittingField) (mul_ne_zero hq₁ hq₂) (SplittingField.splits _) (dvd_mul_right q₁ q₂))).comp_algebraMap] [GOAL] case hf F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p₁ q₁ p₂ q₂ : F[X] hq₁ : q₁ ≠ 0 hq₂ : q₂ ≠ 0 h₁ : Splits (algebraMap F (SplittingField q₁)) p₁ h₂ : Splits (algebraMap F (SplittingField q₂)) p₂ ⊢ Splits (RingHom.comp (↑(SplittingField.lift q₁ (_ : Splits (algebraMap F (SplittingField (q₁ * q₂))) q₁))) (algebraMap F (SplittingField q₁))) p₁ [PROOFSTEP] exact splits_comp_of_splits _ _ h₁ [GOAL] case 
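/- The trace above completes the injectivity argument for `restrictProd`, and the
   proof now in progress is `mul_splits_in_splittingField_of_mul`. For reference,
   minimal Lean sketches of the two statements, assuming Mathlib's
   `Polynomial.Gal` API; the `sorry`s stand in for the tactic blocks recorded in
   this trace. -/
import Mathlib.FieldTheory.PolynomialGaloisGroup

open Polynomial Polynomial.Gal

-- `restrictProd p q : Gal (p * q) →* Gal p × Gal q` is injective.
example {F : Type*} [Field F] (p q : F[X]) :
    Function.Injective (restrictProd p q) :=
  sorry

-- If `p₁` splits over `SplittingField q₁` and `p₂` over `SplittingField q₂`,
-- then `p₁ * p₂` splits over `SplittingField (q₁ * q₂)`.
example {F : Type*} [Field F] {p₁ q₁ p₂ q₂ : F[X]} (hq₁ : q₁ ≠ 0) (hq₂ : q₂ ≠ 0)
    (h₁ : p₁.Splits (algebraMap F q₁.SplittingField))
    (h₂ : p₂.Splits (algebraMap F q₂.SplittingField)) :
    (p₁ * p₂).Splits (algebraMap F (q₁ * q₂).SplittingField) :=
  sorry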
hg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p₁ q₁ p₂ q₂ : F[X] hq₁ : q₁ ≠ 0 hq₂ : q₂ ≠ 0 h₁ : Splits (algebraMap F (SplittingField q₁)) p₁ h₂ : Splits (algebraMap F (SplittingField q₂)) p₂ ⊢ Splits (algebraMap F (SplittingField (q₁ * q₂))) p₂ [PROOFSTEP] rw [← (SplittingField.lift q₂ (splits_of_splits_of_dvd (algebraMap F (q₁ * q₂).SplittingField) (mul_ne_zero hq₁ hq₂) (SplittingField.splits _) (dvd_mul_left q₂ q₁))).comp_algebraMap] [GOAL] case hg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p₁ q₁ p₂ q₂ : F[X] hq₁ : q₁ ≠ 0 hq₂ : q₂ ≠ 0 h₁ : Splits (algebraMap F (SplittingField q₁)) p₁ h₂ : Splits (algebraMap F (SplittingField q₂)) p₂ ⊢ Splits (RingHom.comp (↑(SplittingField.lift q₂ (_ : Splits (algebraMap F (SplittingField (q₁ * q₂))) q₂))) (algebraMap F (SplittingField q₂))) p₂ [PROOFSTEP] exact splits_comp_of_splits _ _ h₂ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 ⊢ Splits (algebraMap F (SplittingField (comp p q))) p [PROOFSTEP] let P : F[X] → Prop := fun r => r.Splits (algebraMap F (r.comp q).SplittingField) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r ⊢ Splits (algebraMap F (SplittingField (comp p q))) p [PROOFSTEP] have key1 : ∀ {r : F[X]}, Irreducible r → P r := by intro r hr by_cases hr' : natDegree r = 0 · exact splits_of_natDegree_le_one _ (le_trans (le_of_eq hr') zero_le_one) obtain ⟨x, hx⟩ := exists_root_of_splits _ (SplittingField.splits (r.comp q)) fun h => hr' ((mul_eq_zero.mp (natDegree_comp.symm.trans (natDegree_eq_of_degree_eq_some h))).resolve_right hq) rw [← aeval_def, aeval_comp] at hx have h_normal : Normal F (r.comp q).SplittingField := SplittingField.instNormal (r.comp q) have qx_int := Normal.isIntegral h_normal (aeval x q) exact splits_of_splits_of_dvd _ (minpoly.ne_zero qx_int) (Normal.splits h_normal _) ((minpoly.irreducible qx_int).dvd_symm hr (minpoly.dvd F _ hx)) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r ⊢ ∀ {r : F[X]}, Irreducible r → P r [PROOFSTEP] intro r hr [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r ⊢ P r [PROOFSTEP] by_cases hr' : natDegree r = 0 [GOAL] case pos F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : natDegree r = 0 ⊢ P r [PROOFSTEP] exact splits_of_natDegree_le_one _ (le_trans (le_of_eq hr') zero_le_one) [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : ¬natDegree r = 0 ⊢ P r [PROOFSTEP] obtain ⟨x, hx⟩ := exists_root_of_splits _ (SplittingField.splits (r.comp q)) fun h => hr' ((mul_eq_zero.mp (natDegree_comp.symm.trans (natDegree_eq_of_degree_eq_some h))).resolve_right hq) [GOAL] case neg.intro F : Type u_1 inst✝² : Field F p 
q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : ¬natDegree r = 0 x : SplittingField (comp r q) hx : eval₂ (algebraMap F (SplittingField (comp r q))) x (comp r q) = 0 ⊢ P r [PROOFSTEP] rw [← aeval_def, aeval_comp] at hx [GOAL] case neg.intro F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : ¬natDegree r = 0 x : SplittingField (comp r q) hx : ↑(aeval (↑(aeval x) q)) r = 0 ⊢ P r [PROOFSTEP] have h_normal : Normal F (r.comp q).SplittingField := SplittingField.instNormal (r.comp q) [GOAL] case neg.intro F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : ¬natDegree r = 0 x : SplittingField (comp r q) hx : ↑(aeval (↑(aeval x) q)) r = 0 h_normal : Normal F (SplittingField (comp r q)) ⊢ P r [PROOFSTEP] have qx_int := Normal.isIntegral h_normal (aeval x q) [GOAL] case neg.intro F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r r : F[X] hr : Irreducible r hr' : ¬natDegree r = 0 x : SplittingField (comp r q) hx : ↑(aeval (↑(aeval x) q)) r = 0 h_normal : Normal F (SplittingField (comp r q)) qx_int : IsIntegral F (↑(aeval x) q) ⊢ P r [PROOFSTEP] exact splits_of_splits_of_dvd _ (minpoly.ne_zero qx_int) (Normal.splits h_normal _) ((minpoly.irreducible qx_int).dvd_symm hr (minpoly.dvd F _ hx)) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r ⊢ Splits (algebraMap F (SplittingField (comp p q))) p [PROOFSTEP] have key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) := by intro p₁ p₂ hp₁ hp₂ by_cases h₁ : p₁.comp q = 0 · cases' comp_eq_zero_iff.mp h₁ with h h · rw [h, zero_mul] exact splits_zero _ · exact False.elim (hq (by rw [h.2, natDegree_C])) by_cases h₂ : p₂.comp q = 0 · cases' comp_eq_zero_iff.mp h₂ with h h · rw [h, mul_zero] exact splits_zero _ · exact False.elim (hq (by rw [h.2, natDegree_C])) have key := mul_splits_in_splittingField_of_mul h₁ h₂ hp₁ hp₂ rwa [← mul_comp] at key [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r ⊢ ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) [PROOFSTEP] intro p₁ p₂ hp₁ hp₂ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ ⊢ P (p₁ * p₂) [PROOFSTEP] by_cases h₁ : p₁.comp q = 0 [GOAL] case pos F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P 
p₂ h₁ : comp p₁ q = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] cases' comp_eq_zero_iff.mp h₁ with h h [GOAL] case pos.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : comp p₁ q = 0 h : p₁ = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] rw [h, zero_mul] [GOAL] case pos.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : comp p₁ q = 0 h : p₁ = 0 ⊢ P 0 [PROOFSTEP] exact splits_zero _ [GOAL] case pos.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : comp p₁ q = 0 h : eval (coeff q 0) p₁ = 0 ∧ q = ↑C (coeff q 0) ⊢ P (p₁ * p₂) [PROOFSTEP] exact False.elim (hq (by rw [h.2, natDegree_C])) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : comp p₁ q = 0 h : eval (coeff q 0) p₁ = 0 ∧ q = ↑C (coeff q 0) ⊢ natDegree q = 0 [PROOFSTEP] rw [h.2, natDegree_C] [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] by_cases h₂ : p₂.comp q = 0 [GOAL] case pos F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : comp p₂ q = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] cases' comp_eq_zero_iff.mp h₂ with h h [GOAL] case pos.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : comp p₂ q = 0 h : p₂ = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] rw [h, mul_zero] [GOAL] case pos.inl F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : comp p₂ q = 0 h : p₂ = 0 ⊢ P 0 [PROOFSTEP] exact splits_zero _ [GOAL] case pos.inr F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : comp p₂ q = 0 h : eval (coeff q 0) p₂ = 0 ∧ q = ↑C (coeff q 0) ⊢ P (p₁ * p₂) [PROOFSTEP] exact False.elim 
(hq (by rw [h.2, natDegree_C])) [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : comp p₂ q = 0 h : eval (coeff q 0) p₂ = 0 ∧ q = ↑C (coeff q 0) ⊢ natDegree q = 0 [PROOFSTEP] rw [h.2, natDegree_C] [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : ¬comp p₂ q = 0 ⊢ P (p₁ * p₂) [PROOFSTEP] have key := mul_splits_in_splittingField_of_mul h₁ h₂ hp₁ hp₂ [GOAL] case neg F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r p₁ p₂ : F[X] hp₁ : P p₁ hp₂ : P p₂ h₁ : ¬comp p₁ q = 0 h₂ : ¬comp p₂ q = 0 key : Splits (algebraMap F (SplittingField (comp p₁ q * comp p₂ q))) (p₁ * p₂) ⊢ P (p₁ * p₂) [PROOFSTEP] rwa [← mul_comp] at key [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) ⊢ Splits (algebraMap F (SplittingField (comp p q))) p [PROOFSTEP] induction p using WfDvdMonoid.induction_on_irreducible with | h0 => exact splits_zero _ | hu u hu => exact splits_of_isUnit (algebraMap F (SplittingField (comp u q))) hu | hi p₁ p₂ _ hp₂ hp₁ => apply key2 (key1 hp₂) hp₁ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) ⊢ Splits (algebraMap F (SplittingField (comp p q))) p [PROOFSTEP] induction p using WfDvdMonoid.induction_on_irreducible with | h0 => exact splits_zero _ | hu u hu => exact splits_of_isUnit (algebraMap F (SplittingField (comp u q))) hu | hi p₁ p₂ _ hp₂ hp₁ => apply key2 (key1 hp₂) hp₁ [GOAL] case h0 F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) ⊢ Splits (algebraMap F (SplittingField (comp 0 q))) 0 [PROOFSTEP] | h0 => exact splits_zero _ [GOAL] case h0 F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) ⊢ Splits (algebraMap F (SplittingField (comp 0 q))) 0 [PROOFSTEP] exact splits_zero _ [GOAL] case hu F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) u : F[X] hu 
: IsUnit u ⊢ Splits (algebraMap F (SplittingField (comp u q))) u [PROOFSTEP] | hu u hu => exact splits_of_isUnit (algebraMap F (SplittingField (comp u q))) hu [GOAL] case hu F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) u : F[X] hu : IsUnit u ⊢ Splits (algebraMap F (SplittingField (comp u q))) u [PROOFSTEP] exact splits_of_isUnit (algebraMap F (SplittingField (comp u q))) hu [GOAL] case hi F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) p₁ p₂ : F[X] a✝ : p₁ ≠ 0 hp₂ : Irreducible p₂ hp₁ : Splits (algebraMap F (SplittingField (comp p₁ q))) p₁ ⊢ Splits (algebraMap F (SplittingField (comp (p₂ * p₁) q))) (p₂ * p₁) [PROOFSTEP] | hi p₁ p₂ _ hp₂ hp₁ => apply key2 (key1 hp₂) hp₁ [GOAL] case hi F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 P : F[X] → Prop := fun r => Splits (algebraMap F (SplittingField (comp r q))) r key1 : ∀ {r : F[X]}, Irreducible r → P r key2 : ∀ {p₁ p₂ : F[X]}, P p₁ → P p₂ → P (p₁ * p₂) p₁ p₂ : F[X] a✝ : p₁ ≠ 0 hp₂ : Irreducible p₂ hp₁ : Splits (algebraMap F (SplittingField (comp p₁ q))) p₁ ⊢ Splits (algebraMap F (SplittingField (comp (p₂ * p₁) q))) (p₂ * p₁) [PROOFSTEP] apply key2 (key1 hp₂) hp₁ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 ⊢ Function.Surjective ↑(restrictComp p q hq) [PROOFSTEP] haveI : Fact (Splits (algebraMap F (SplittingField (comp p q))) p) := ⟨splits_in_splittingField_of_comp p q hq⟩ [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 this : Fact (Splits (algebraMap F (SplittingField (comp p q))) p) ⊢ Function.Surjective ↑(restrictComp p q hq) [PROOFSTEP] rw [restrictComp] [GOAL] F : Type u_1 inst✝² : Field F p q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E hq : natDegree q ≠ 0 this : Fact (Splits (algebraMap F (SplittingField (comp p q))) p) ⊢ Function.Surjective ↑(let h := (_ : Fact (Splits (algebraMap F (SplittingField (comp p q))) p)); restrict p (SplittingField (comp p q))) [PROOFSTEP] exact restrict_surjective _ _ [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) ⊢ natDegree p ∣ Fintype.card (Gal p) [PROOFSTEP] rw [Gal.card_of_separable p_irr.separable] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) ⊢ natDegree p ∣ finrank F (SplittingField p) [PROOFSTEP] have hp : p.degree ≠ 0 := fun h => Nat.Prime.ne_zero p_deg (natDegree_eq_zero_iff_degree_le_zero.mpr (le_of_eq h)) [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 ⊢ natDegree p ∣ finrank F (SplittingField p) [PROOFSTEP] let α : p.SplittingField := rootOfSplits (algebraMap F p.SplittingField) (SplittingField.splits p) hp [GOAL] F : Type 
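/- The induction above closes `splits_in_splittingField_of_comp`, after which
   `restrictComp_surjective` is discharged and the proof of
   `prime_degree_dvd_card` begins. Hedged Lean sketches of the three statements,
   with Mathlib's names assumed and `sorry` in place of the recorded tactics. -/
import Mathlib.FieldTheory.PolynomialGaloisGroup

open Polynomial Polynomial.Gal

-- `p` splits over the splitting field of `p.comp q` when `q` is nonconstant.
example {F : Type*} [Field F] (p q : F[X]) (hq : q.natDegree ≠ 0) :
    p.Splits (algebraMap F (p.comp q).SplittingField) :=
  sorry

-- The restriction map `Gal (p.comp q) →* Gal p` is surjective.
example {F : Type*} [Field F] (p q : F[X]) (hq : q.natDegree ≠ 0) :
    Function.Surjective (restrictComp p q hq) :=
  sorry

-- In characteristic zero, the prime degree of an irreducible `p` divides `|Gal p|`.
example {F : Type*} [Field F] [CharZero F] {p : F[X]}
    (p_irr : Irreducible p) (p_deg : p.natDegree.Prime) :
    p.natDegree ∣ Fintype.card p.Gal :=
  sorry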
u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp ⊢ natDegree p ∣ finrank F (SplittingField p) [PROOFSTEP] have hα : IsIntegral F α := Algebra.isIntegral_of_finite _ _ α [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α ⊢ natDegree p ∣ finrank F (SplittingField p) [PROOFSTEP] use FiniteDimensional.finrank F⟮α⟯ p.SplittingField [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α ⊢ finrank F (SplittingField p) = natDegree p * finrank { x // x ∈ F⟮α⟯ } (SplittingField p) [PROOFSTEP] suffices (minpoly F α).natDegree = p.natDegree by rw [← FiniteDimensional.finrank_mul_finrank F F⟮α⟯ p.SplittingField, IntermediateField.adjoin.finrank hα, this] [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α this : natDegree (minpoly F α) = natDegree p ⊢ finrank F (SplittingField p) = natDegree p * finrank { x // x ∈ F⟮α⟯ } (SplittingField p) [PROOFSTEP] rw [← FiniteDimensional.finrank_mul_finrank F F⟮α⟯ p.SplittingField, IntermediateField.adjoin.finrank hα, this] [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α ⊢ natDegree (minpoly F α) = natDegree p [PROOFSTEP] suffices minpoly F α ∣ p by have key := (minpoly.irreducible hα).dvd_symm p_irr this apply le_antisymm · exact natDegree_le_of_dvd this p_irr.ne_zero · exact natDegree_le_of_dvd key (minpoly.ne_zero hα) [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α this : minpoly F α ∣ p ⊢ natDegree (minpoly F α) = natDegree p [PROOFSTEP] have key := (minpoly.irreducible hα).dvd_symm p_irr this [GOAL] F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α this : minpoly F α ∣ p key : p ∣ minpoly F α ⊢ natDegree (minpoly F α) = natDegree p [PROOFSTEP] apply 
le_antisymm [GOAL] case a F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α this : minpoly F α ∣ p key : p ∣ minpoly F α ⊢ natDegree (minpoly F α) ≤ natDegree p [PROOFSTEP] exact natDegree_le_of_dvd this p_irr.ne_zero [GOAL] case a F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α this : minpoly F α ∣ p key : p ∣ minpoly F α ⊢ natDegree p ≤ natDegree (minpoly F α) [PROOFSTEP] exact natDegree_le_of_dvd key (minpoly.ne_zero hα) [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α ⊢ minpoly F α ∣ p [PROOFSTEP] apply minpoly.dvd F α [GOAL] case h F : Type u_1 inst✝³ : Field F p q : F[X] E : Type u_2 inst✝² : Field E inst✝¹ : Algebra F E inst✝ : CharZero F p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) hp : degree p ≠ 0 α : SplittingField p := rootOfSplits (algebraMap F (SplittingField p)) (_ : Splits (algebraMap F (SplittingField p)) p) hp hα : IsIntegral F α ⊢ ↑(aeval α) p = 0 [PROOFSTEP] rw [aeval_def, map_rootOfSplits _ (SplittingField.splits p) hp] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) [PROOFSTEP] by_cases hp : p = 0 [GOAL] case pos F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : p = 0 ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) [PROOFSTEP] haveI : IsEmpty (p.rootSet ℂ) := by rw [hp, rootSet_zero]; infer_instance [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : p = 0 ⊢ IsEmpty ↑(rootSet p ℂ) [PROOFSTEP] rw [hp, rootSet_zero] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : p = 0 ⊢ IsEmpty ↑∅ [PROOFSTEP] infer_instance [GOAL] case pos F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : p = 0 this : IsEmpty ↑(rootSet p ℂ) ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) [PROOFSTEP] simp_rw [(galActionHom p ℂ _).support.eq_empty_of_isEmpty, hp, rootSet_zero, Set.toFinset_empty, Finset.card_empty] [GOAL] case neg F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 ⊢ Finset.card 
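/- The rewrite above finishes `prime_degree_dvd_card`. The remainder of the trace
   proves the root-counting identity
   `card_complex_roots_eq_card_real_add_card_not_gal_inv`: the number of complex
   roots of `p : ℚ[X]` equals the number of real roots plus the size of the
   support of complex conjugation acting on the roots. A sketch of the statement,
   transcribed from the goal below (Mathlib names assumed; `sorry` replaces the
   recorded case analysis on `p = 0` and the root-set bookkeeping): -/
import Mathlib.FieldTheory.PolynomialGaloisGroup

open Polynomial Polynomial.Gal

example (p : ℚ[X]) :
    (p.rootSet ℂ).toFinset.card =
      (p.rootSet ℝ).toFinset.card +
        (galActionHom p ℂ (restrict p ℂ (Complex.conjAe.restrictScalars ℚ))).support.card :=
  sorry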
(Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) [PROOFSTEP] have inj : Function.Injective (IsScalarTower.toAlgHom ℚ ℝ ℂ) := (algebraMap ℝ ℂ).injective [GOAL] case neg F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) [PROOFSTEP] rw [← Finset.card_image_of_injective _ Subtype.coe_injective, ← Finset.card_image_of_injective _ inj] [GOAL] case neg F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ))) + Finset.card (Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))))) [PROOFSTEP] let a : Finset ℂ := ?_ [GOAL] case neg.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ))) + Finset.card (Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))))) case neg.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) ⊢ Finset ℂ [PROOFSTEP] let b : Finset ℂ := ?_ [GOAL] case neg.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ b : Finset ℂ := ?neg.refine_2.refine_1✝ ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ))) + Finset.card (Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))))) case neg.refine_2.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ ⊢ Finset ℂ case neg.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) ⊢ Finset ℂ [PROOFSTEP] let c : Finset ℂ := ?_ -- Porting note: was -- change a.card = b.card + c.card [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ b : Finset ℂ := ?neg.refine_2.refine_1✝ c : Finset ℂ := ?neg.refine_2.refine_2.refine_1✝ ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Finset.image 
(↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ))) + Finset.card (Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))))) case neg.refine_2.refine_2.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ b : Finset ℂ := ?neg.refine_2.refine_1✝ ⊢ Finset ℂ case neg.refine_2.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ ⊢ Finset ℂ case neg.refine_1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) ⊢ Finset ℂ [PROOFSTEP] suffices a.card = b.card + c.card by exact this [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := ?neg.refine_1✝ b : Finset ℂ := ?neg.refine_2.refine_1✝ c : Finset ℂ := ?neg.refine_2.refine_2.refine_1✝ this : Finset.card a = Finset.card b + Finset.card c ⊢ Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ))) + Finset.card (Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))))) [PROOFSTEP] exact this [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Finset.card a = Finset.card b + Finset.card c [PROOFSTEP] have ha : ∀ z : ℂ, z ∈ a ↔ aeval z p = 0 := by intro z; rw [Set.mem_toFinset, mem_rootSet_of_ne hp] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 [PROOFSTEP] intro z [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) z : ℂ ⊢ z ∈ a ↔ ↑(aeval z) p = 0 [PROOFSTEP] rw [Set.mem_toFinset, mem_rootSet_of_ne hp] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : 
Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 ⊢ Finset.card a = Finset.card b + Finset.card c [PROOFSTEP] have hb : ∀ z : ℂ, z ∈ b ↔ aeval z p = 0 ∧ z.im = 0 := by intro z simp_rw [Finset.mem_image, Set.mem_toFinset, mem_rootSet_of_ne hp] constructor · rintro ⟨w, hw, rfl⟩ exact ⟨by rw [aeval_algHom_apply, hw, AlgHom.map_zero], rfl⟩ · rintro ⟨hz1, hz2⟩ have key : IsScalarTower.toAlgHom ℚ ℝ ℂ z.re = z := by ext; rfl; rw [hz2]; rfl exact ⟨z.re, inj (by rwa [← aeval_algHom_apply, key, AlgHom.map_zero]), key⟩ [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 ⊢ ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 [PROOFSTEP] intro z [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ ⊢ z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 [PROOFSTEP] simp_rw [Finset.mem_image, Set.mem_toFinset, mem_rootSet_of_ne hp] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ ⊢ (∃ a, ↑(aeval a) p = 0 ∧ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a = z) ↔ ↑(aeval z) p = 0 ∧ z.im = 0 [PROOFSTEP] constructor [GOAL] case mp F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ ⊢ (∃ a, ↑(aeval a) p = 0 ∧ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a = z) → ↑(aeval z) p = 0 ∧ z.im = 0 [PROOFSTEP] rintro ⟨w, hw, rfl⟩ [GOAL] case mp.intro.intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset 
ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 w : ℝ hw : ↑(aeval w) p = 0 ⊢ ↑(aeval (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) w)) p = 0 ∧ (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) w).im = 0 [PROOFSTEP] exact ⟨by rw [aeval_algHom_apply, hw, AlgHom.map_zero], rfl⟩ [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 w : ℝ hw : ↑(aeval w) p = 0 ⊢ ↑(aeval (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) w)) p = 0 [PROOFSTEP] rw [aeval_algHom_apply, hw, AlgHom.map_zero] [GOAL] case mpr F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ ⊢ ↑(aeval z) p = 0 ∧ z.im = 0 → ∃ a, ↑(aeval a) p = 0 ∧ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a = z [PROOFSTEP] rintro ⟨hz1, hz2⟩ [GOAL] case mpr.intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ ∃ a, ↑(aeval a) p = 0 ∧ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a = z [PROOFSTEP] have key : IsScalarTower.toAlgHom ℚ ℝ ℂ z.re = z := by ext; rfl; rw [hz2]; rfl [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re = z [PROOFSTEP] ext [GOAL] case a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ 
a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re).re = z.re case a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re).im = z.im [PROOFSTEP] rfl [GOAL] case a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re).im = z.im [PROOFSTEP] rw [hz2] [GOAL] case a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 ⊢ (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re).im = 0 [PROOFSTEP] rfl [GOAL] case mpr.intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 key : ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re = z ⊢ ∃ a, ↑(aeval a) p = 0 ∧ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a = z [PROOFSTEP] exact ⟨z.re, inj (by rwa [← aeval_algHom_apply, key, AlgHom.map_zero]), key⟩ [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im = 0 key : ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) z.re = z ⊢ ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) (↑(aeval z.re) p) = ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) 0 [PROOFSTEP] rwa [← aeval_algHom_apply, key, AlgHom.map_zero] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] 
E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 ⊢ Finset.card a = Finset.card b + Finset.card c [PROOFSTEP] have hc0 : ∀ w : p.rootSet ℂ, galActionHom p ℂ (restrict p ℂ (Complex.conjAe.restrictScalars ℚ)) w = w ↔ w.val.im = 0 := by intro w rw [Subtype.ext_iff, galActionHom_restrict] exact Complex.conj_eq_iff_im [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 ⊢ ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 [PROOFSTEP] intro w [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 w : ↑(rootSet p ℂ) ⊢ ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 [PROOFSTEP] rw [Subtype.ext_iff, galActionHom_restrict] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 w : ↑(rootSet p ℂ) ⊢ ↑(AlgEquiv.restrictScalars ℚ Complex.conjAe) ↑w = ↑w ↔ (↑w).im = 0 [PROOFSTEP] exact Complex.conj_eq_iff_im [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) 
(AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 ⊢ Finset.card a = Finset.card b + Finset.card c [PROOFSTEP] have hc : ∀ z : ℂ, z ∈ c ↔ aeval z p = 0 ∧ z.im ≠ 0 := by intro z simp_rw [Finset.mem_image] constructor · rintro ⟨w, hw, rfl⟩ exact ⟨(mem_rootSet.mp w.2).2, mt (hc0 w).mpr (Equiv.Perm.mem_support.mp hw)⟩ · rintro ⟨hz1, hz2⟩ exact ⟨⟨z, mem_rootSet.mpr ⟨hp, hz1⟩⟩, Equiv.Perm.mem_support.mpr (mt (hc0 _).mp hz2), rfl⟩ [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 ⊢ ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 [PROOFSTEP] intro z [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 z : ℂ ⊢ z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 [PROOFSTEP] simp_rw [Finset.mem_image] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 z : ℂ ⊢ (∃ a, a ∈ Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) ∧ ↑a = z) ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 [PROOFSTEP] constructor [GOAL] case mp F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 z : ℂ ⊢ (∃ a, 
a ∈ Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) ∧ ↑a = z) → ↑(aeval z) p = 0 ∧ z.im ≠ 0 [PROOFSTEP] rintro ⟨w, hw, rfl⟩ [GOAL] case mp.intro.intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 w : { x // x ∈ rootSet p ℂ } hw : w ∈ Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) ⊢ ↑(aeval ↑w) p = 0 ∧ (↑w).im ≠ 0 [PROOFSTEP] exact ⟨(mem_rootSet.mp w.2).2, mt (hc0 w).mpr (Equiv.Perm.mem_support.mp hw)⟩ [GOAL] case mpr F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 z : ℂ ⊢ ↑(aeval z) p = 0 ∧ z.im ≠ 0 → ∃ a, a ∈ Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) ∧ ↑a = z [PROOFSTEP] rintro ⟨hz1, hz2⟩ [GOAL] case mpr.intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 z : ℂ hz1 : ↑(aeval z) p = 0 hz2 : z.im ≠ 0 ⊢ ∃ a, a ∈ Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) ∧ ↑a = z [PROOFSTEP] exact ⟨⟨z, mem_rootSet.mpr ⟨hp, hz1⟩⟩, Equiv.Perm.mem_support.mpr (mt (hc0 _).mp hz2), rfl⟩ [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : 
∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ Finset.card a = Finset.card b + Finset.card c [PROOFSTEP] rw [← Finset.card_disjoint_union] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ Finset.card a = Finset.card (b ∪ c) [PROOFSTEP] apply congr_arg Finset.card [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ a = b ∪ c [PROOFSTEP] simp_rw [Finset.ext_iff, Finset.mem_union, ha, hb, hc] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ ∀ (a : ℂ), ↑(aeval a) p = 0 ↔ ↑(aeval a) p = 0 ∧ a.im = 0 ∨ ↑(aeval a) p = 0 ∧ a.im ≠ 0 [PROOFSTEP] tauto [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), 
↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ Disjoint b c [PROOFSTEP] rw [Finset.disjoint_left] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 ⊢ ∀ ⦃a : ℂ⦄, a ∈ b → ¬a ∈ c [PROOFSTEP] intro z [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 z : ℂ ⊢ z ∈ b → ¬z ∈ c [PROOFSTEP] rw [hb, hc] [GOAL] case neg.refine_2.refine_2.refine_2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] hp : ¬p = 0 inj : Function.Injective ↑(IsScalarTower.toAlgHom ℚ ℝ ℂ) a : Finset ℂ := Set.toFinset (rootSet p ℂ) b : Finset ℂ := Finset.image (↑(IsScalarTower.toAlgHom ℚ ℝ ℂ)) (Set.toFinset (rootSet p ℝ)) c : Finset ℂ := Finset.image (fun a => ↑a) (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ha : ∀ (z : ℂ), z ∈ a ↔ ↑(aeval z) p = 0 hb : ∀ (z : ℂ), z ∈ b ↔ ↑(aeval z) p = 0 ∧ z.im = 0 hc0 : ∀ (w : ↑(rootSet p ℂ)), ↑(↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe))) w = w ↔ (↑w).im = 0 hc : ∀ (z : ℂ), z ∈ c ↔ ↑(aeval z) p = 0 ∧ z.im ≠ 0 z : ℂ ⊢ ↑(aeval z) p = 0 ∧ z.im = 0 → ¬(↑(aeval z) p = 0 ∧ z.im ≠ 0) [PROOFSTEP] tauto [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] classical have h1 : Fintype.card (p.rootSet ℂ) = p.natDegree := by simp_rw [rootSet_def, Finset.coe_sort_coe, Fintype.card_coe] rw [Multiset.toFinset_card_of_nodup, ← natDegree_eq_card_roots] · exact IsAlgClosed.splits_codomain p · exact nodup_roots ((separable_map (algebraMap ℚ ℂ)).mpr p_irr.separable) have h2 : Fintype.card p.Gal = Fintype.card (galActionHom p ℂ).range := Fintype.card_congr (MonoidHom.ofInjective (galActionHom_injective p ℂ)).toEquiv let conj := restrict p ℂ (Complex.conjAe.restrictScalars ℚ) refine' 
⟨galActionHom_injective p ℂ, fun x => (congr_arg (Membership.mem x) (show (galActionHom p ℂ).range = ⊤ from _)).mpr (Subgroup.mem_top x)⟩ apply Equiv.Perm.subgroup_eq_top_of_swap_mem · rwa [h1] · rw [h1] convert prime_degree_dvd_card p_irr p_deg using 1 convert h2.symm · exact ⟨conj, rfl⟩ · rw [← Equiv.Perm.card_support_eq_two] apply Nat.add_left_cancel rw [← p_roots, ← Set.toFinset_card (rootSet p ℝ), ← Set.toFinset_card (rootSet p ℂ)] exact (card_complex_roots_eq_card_real_add_card_not_gal_inv p).symm [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] have h1 : Fintype.card (p.rootSet ℂ) = p.natDegree := by simp_rw [rootSet_def, Finset.coe_sort_coe, Fintype.card_coe] rw [Multiset.toFinset_card_of_nodup, ← natDegree_eq_card_roots] · exact IsAlgClosed.splits_codomain p · exact nodup_roots ((separable_map (algebraMap ℚ ℂ)).mpr p_irr.separable) [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Fintype.card ↑(rootSet p ℂ) = natDegree p [PROOFSTEP] simp_rw [rootSet_def, Finset.coe_sort_coe, Fintype.card_coe] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Finset.card (Multiset.toFinset (roots (map (algebraMap ℚ ℂ) p))) = natDegree p [PROOFSTEP] rw [Multiset.toFinset_card_of_nodup, ← natDegree_eq_card_roots] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Splits (algebraMap ℚ ℂ) p [PROOFSTEP] exact IsAlgClosed.splits_codomain p [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 ⊢ Multiset.Nodup (roots (map (algebraMap ℚ ℂ) p)) [PROOFSTEP] exact nodup_roots ((separable_map (algebraMap ℚ ℂ)).mpr p_irr.separable) [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] have h2 : Fintype.card p.Gal = Fintype.card (galActionHom p ℂ).range := Fintype.card_congr (MonoidHom.ofInjective (galActionHom_injective p ℂ)).toEquiv [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] let conj := restrict p ℂ (Complex.conjAe.restrictScalars ℚ) [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : 
Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] refine' ⟨galActionHom_injective p ℂ, fun x => (congr_arg (Membership.mem x) (show (galActionHom p ℂ).range = ⊤ from _)).mpr (Subgroup.mem_top x)⟩ [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ MonoidHom.range (galActionHom p ℂ) = ⊤ [PROOFSTEP] apply Equiv.Perm.subgroup_eq_top_of_swap_mem [GOAL] case h0 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Nat.Prime (Fintype.card ↑(rootSet p ℂ)) [PROOFSTEP] rwa [h1] [GOAL] case h1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Fintype.card ↑(rootSet p ℂ) ∣ Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } [PROOFSTEP] rw [h1] [GOAL] case h1 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ natDegree p ∣ Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } [PROOFSTEP] convert prime_degree_dvd_card p_irr p_deg using 1 [GOAL] case h.e'_4 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range 
(galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } = Fintype.card (Gal p) [PROOFSTEP] convert h2.symm [GOAL] case h2 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ ?τ ∈ MonoidHom.range (galActionHom p ℂ) [PROOFSTEP] exact ⟨conj, rfl⟩ [GOAL] case h3 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Equiv.Perm.IsSwap (↑(galActionHom p ℂ) conj) [PROOFSTEP] rw [← Equiv.Perm.card_support_eq_two] [GOAL] case h3 F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) conj)) = 2 [PROOFSTEP] apply Nat.add_left_cancel [GOAL] case h3.a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ ?h3.n + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) conj)) = ?h3.n + 2 case h3.n F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ ℕ [PROOFSTEP] rw [← p_roots, ← Set.toFinset_card (rootSet p ℝ), ← Set.toFinset_card (rootSet p ℂ)] [GOAL] case h3.a F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p 
: ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 h1 : Fintype.card ↑(rootSet p ℂ) = natDegree p h2 : Fintype.card (Gal p) = Fintype.card { x // x ∈ MonoidHom.range (galActionHom p ℂ) } conj : (fun x => Gal p) (AlgEquiv.restrictScalars ℚ Complex.conjAe) := ↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe) x : Equiv.Perm ↑(rootSet p ℂ) ⊢ Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) conj)) = Finset.card (Set.toFinset (rootSet p ℂ)) [PROOFSTEP] exact (card_complex_roots_eq_card_real_add_card_not_gal_inv p).symm [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 ⊢ Function.Bijective ↑(galActionHom p ℂ) [PROOFSTEP] apply galActionHom_bijective_of_prime_degree p_irr p_deg [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] let n := (galActionHom p ℂ (restrict p ℂ (Complex.conjAe.restrictScalars ℚ))).support.card [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] have hn : 2 ∣ n := Equiv.Perm.two_dvd_card_support (by rw [← MonoidHom.map_pow, ← MonoidHom.map_pow, show AlgEquiv.restrictScalars ℚ Complex.conjAe ^ 2 = 1 from AlgEquiv.ext Complex.conj_conj, MonoidHom.map_one, MonoidHom.map_one]) [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ ↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)) ^ 2 = 1 [PROOFSTEP] rw [← MonoidHom.map_pow, ← MonoidHom.map_pow, show AlgEquiv.restrictScalars ℚ Complex.conjAe ^ 2 = 1 from AlgEquiv.ext Complex.conj_conj, MonoidHom.map_one, MonoidHom.map_one] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] have key := 
card_complex_roots_eq_card_real_add_card_not_gal_inv p [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Finset.card (Set.toFinset (rootSet p ℂ)) = Finset.card (Set.toFinset (rootSet p ℝ)) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] simp_rw [Set.toFinset_card] at key [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : Fintype.card ↑(rootSet p ℝ) + 1 ≤ Fintype.card ↑(rootSet p ℂ) p_roots2 : Fintype.card ↑(rootSet p ℂ) ≤ Fintype.card ↑(rootSet p ℝ) + 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] rw [key, add_le_add_iff_left] at p_roots1 p_roots2 [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : 1 ≤ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) p_roots2 : Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ≤ 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + 2 [PROOFSTEP] rw [key, add_right_inj] [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : 1 ≤ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) p_roots2 : Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ≤ 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) = 2 [PROOFSTEP] suffices ∀ m : ℕ, 2 ∣ m → 1 ≤ m → m ≤ 3 → m = 2 by exact this n hn p_roots1 p_roots2 [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) 
p_roots1 : 1 ≤ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) p_roots2 : Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ≤ 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) this : ∀ (m : ℕ), 2 ∣ m → 1 ≤ m → m ≤ 3 → m = 2 ⊢ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) = 2 [PROOFSTEP] exact this n hn p_roots1 p_roots2 [GOAL] F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : 1 ≤ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) p_roots2 : Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ≤ 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ⊢ ∀ (m : ℕ), 2 ∣ m → 1 ≤ m → m ≤ 3 → m = 2 [PROOFSTEP] rintro m ⟨k, rfl⟩ h2 h3 [GOAL] case intro F : Type u_1 inst✝² : Field F p✝ q : F[X] E : Type u_2 inst✝¹ : Field E inst✝ : Algebra F E p : ℚ[X] p_irr : Irreducible p p_deg : Nat.Prime (natDegree p) p_roots1 : 1 ≤ Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) p_roots2 : Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) ≤ 3 n : ℕ := Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) hn : 2 ∣ n key : Fintype.card ↑(rootSet p ℂ) = Fintype.card ↑(rootSet p ℝ) + Finset.card (Equiv.Perm.support (↑(galActionHom p ℂ) (↑(restrict p ℂ) (AlgEquiv.restrictScalars ℚ Complex.conjAe)))) k : ℕ h2 : 1 ≤ 2 * k h3 : 2 * k ≤ 3 ⊢ 2 * k = 2 [PROOFSTEP] exact le_antisymm (Nat.lt_succ_iff.mp (lt_of_le_of_ne h3 (show 2 * k ≠ 2 * 1 + 1 from Nat.two_mul_ne_two_mul_add_one))) (Nat.succ_le_iff.mpr (lt_of_le_of_ne h2 (show 2 * 0 + 1 ≠ 2 * k from Nat.two_mul_ne_two_mul_add_one.symm)))
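/- A minimal usage sketch, assuming the Mathlib import path and the lemma name
   visible in the trace above (`Polynomial.Gal.galActionHom_bijective_of_prime_degree`)
   match the Mathlib version this trace was extracted from: an irreducible
   rational polynomial of prime degree with exactly two non-real roots has
   Galois group acting on its complex roots as the full symmetric group. -/
import Mathlib.FieldTheory.PolynomialGaloisGroup

open Polynomial Polynomial.Gal

example (p : ℚ[X]) (p_irr : Irreducible p) (p_deg : Nat.Prime p.natDegree)
    (p_roots : Fintype.card (p.rootSet ℂ) = Fintype.card (p.rootSet ℝ) + 2) :
    Function.Bijective (galActionHom p ℂ) :=
  -- direct application of the lemma whose proof trace appears above
  galActionHom_bijective_of_prime_degree p_irr p_deg p_roots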
module CornerSpaceRenorm

using QuantumOptics

# Lattice and system types (square and N-dimensional lattices, graph queries)
include("types.jl")
export AbstractLattice, Lattice, SquareLattice, NdLattice, pbc_from_obc, obc_from_pbc
export eltype, vertices, edges, nv, ne, has_edge, has_vertex, inneighbors, outneighbors, is_directed
export AbstractSystem, SquareSystem, NdSystem
export gplot, plot_system
export CornerBasis

# Construction of Hamiltonians and dissipative (jump) operators
include("operators.jl")
export hamiltonian, dissipators

# Corner-space subspace selection and merging of subsystems
include("corner.jl")
export corner_subspace, cornerize, vmerge, hmerge, hermitianize, hermitianize!

# Steady-state solvers based on stabilized biconjugate gradients
include("steadystate.jl")
export steadystate_bicg, steadystate_bicg!

end # module
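# A hypothetical usage sketch for the module above. The constructor call
# `SquareLattice(2, 2)` and the workflow described in the comments are
# illustrative guesses from the exported names, not verified against the
# package; only the QuantumOptics.jl calls (`FockBasis`, `destroy`) are
# standard API.
using CornerSpaceRenorm
using QuantumOptics

lb = FockBasis(1)         # local bosonic basis with Fock cutoff 1 (QuantumOptics.jl)
a  = destroy(lb)          # local annihilation operator (QuantumOptics.jl)
L  = SquareLattice(2, 2)  # assumed signature: a 2×2 square lattice
# Assumed workflow: build a system on `L` (e.g. via the exported `SquareSystem`),
# obtain its generators with `hamiltonian`/`dissipators`, merge corners with
# `vmerge`/`hmerge`, and solve for the steady state with `steadystate_bicg`.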
(* Title: HOL/Auth/n_flash_lemma_on_inv__6.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_flash Protocol Case Study*} theory n_flash_lemma_on_inv__6 imports n_flash_base begin section{*All lemmas on causal relation between inv__6 and some rule r*} lemma n_PI_Remote_PutXVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_PI_Remote_PutX dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_PI_Remote_ReplaceVsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Replace src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Put_DirtyVsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Local'')) (Const true))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Local'')) (Const true))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_PutVsinv__6: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_Put_HomeVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_1Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_2Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_3Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_4Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_5Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_6Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__6: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_HomeVsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8Vsinv__6: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__6: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10_HomeVsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10Vsinv__6: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_11Vsinv__6: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_PutXVsinv__6: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_PutX_HomeVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "((formEval (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''InvMarked'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''InvMarked'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''InvMarked'')) (Const true)) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''InvMarked'')) (Const true))) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutXVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_InvVsinv__6: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Inv dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__6: assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__0 N )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P1 s" proof(cut_tac a1 a2 , auto) qed then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__6: assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__1 N )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P1 s" proof(cut_tac a1 a2 , auto) qed then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_PI_Local_GetX_PutX__part__0Vsinv__6: assumes a1: "(r=n_PI_Local_GetX_PutX__part__0 )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P1 s" proof(cut_tac a1 a2 , auto) qed then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_PI_Local_GetX_PutX__part__1Vsinv__6: assumes a1: "(r=n_PI_Local_GetX_PutX__part__1 )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P1 s" proof(cut_tac a1 a2 , auto) qed then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_PI_Local_PutXVsinv__6: assumes a1: "(r=n_PI_Local_PutX )" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true)) s))" have "?P3 s" apply (cut_tac a1 a2 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeProc'') ''CacheState'')) (Const CACHE_E))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const true))) s))" have "?P3 s" apply (cut_tac a1 a2 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeProc'') ''CacheState'')) (Const CACHE_E))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_PutVsinv__6: assumes a1: "(r=n_NI_Local_Put )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P3 s" apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Put))))" in exI, auto) done then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_NI_WbVsinv__6: assumes a1: "(r=n_NI_Wb )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P3 s" apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb))))" in exI, auto) done then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_NI_ShWbVsinv__6: assumes a1: "(r=n_NI_ShWb N )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done have "?P3 s" apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_ShWb))))" in exI, auto) done then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_NI_Local_Get_Get__part__1Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Remote_GetVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_StoreVsinv__6: assumes a1: "\<exists> src data. src\<le>N\<and>data\<le>N\<and>r=n_Store src data" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_GetX_GetX__part__1Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_3Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_1Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__1Vsinv__6: assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__0Vsinv__6: assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_Store_HomeVsinv__6: assumes a1: "\<exists> data. data\<le>N\<and>r=n_Store_Home data" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_ReplaceVsinv__6: assumes a1: "r=n_PI_Local_Replace " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_GetX_Nak__part__1Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_Nak__part__1Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_Get__part__0Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_existsVsinv__6: assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_GetX_Nak__part__2Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_Put_HeadVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_Nak__part__2Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_GetX_GetX__part__0Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_PutVsinv__6: assumes a1: "r=n_PI_Local_Get_Put " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_ReplaceVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_GetX_Nak_HomeVsinv__6: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_PutXAcksDoneVsinv__6: assumes a1: "r=n_NI_Local_PutXAcksDone " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_GetX_NakVsinv__6: assumes a1: "\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_NakVsinv__6: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Remote_GetXVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_Nak_HomeVsinv__6: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_PutVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_GetX_Nak__part__0Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_exists_HomeVsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Replace_HomeVsinv__6: assumes a1: "r=n_NI_Replace_Home " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_NakVsinv__6: assumes a1: "\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_ClearVsinv__6: assumes a1: "r=n_NI_Nak_Clear " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_Get_Nak__part__0Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_GetVsinv__6: assumes a1: "r=n_PI_Local_Get_Get " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_HomeVsinv__6: assumes a1: "r=n_NI_Nak_Home " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_2Vsinv__6: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_FAckVsinv__6: assumes a1: "r=n_NI_FAck " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
import data.fintype.basic import tactic.derive_fintype section basics variable Sigma : Type @[reducible] def word : Type := list Sigma @[reducible] def lang : Type := word Sigma → Prop end basics section dfa variable {Sigma : Type} structure dfa(Sigma : Type*) := (Q : Type*) [finQ : fintype Q] [decQ : decidable_eq Q] (init : Q) (final : Q → Prop) [decF : decidable_pred final] (δ : Q → Sigma → Q) open dfa def dfa_δ_star (A : dfa Sigma) : A.Q → word Sigma → A.Q | q [] := q | q (x :: w) := dfa_δ_star (A.δ q x) w def dfa_lang (A : dfa Sigma) : lang Sigma := λ w , A.final (dfa_δ_star A A.init w) end dfa section nfa variables {Sigma : Type} structure nfa(Sigma : Type*) := (Q : Type*) [finQ : fintype Q] [decQ : decidable_eq Q] (inits : Q → Prop) [decI : decidable_pred inits] (final : Q → Prop) [decF : decidable_pred final] (δ : Q → Sigma → Q → Prop) [decD : decidable_pred (sigma.uncurry (sigma.uncurry δ))] open nfa def nfa_δ_star : Π A : nfa Sigma , A.Q → word Sigma → A.Q → Prop | A q0 [] q1 := q0 = q1 | A q0 (x :: w) q1 := ∃ q2 : A.Q, A.δ q0 x q2 ∧ nfa_δ_star A q2 w q1 def nfa_lang (A : nfa Sigma) : lang Sigma := λ w , ∃ q0 q1 : A.Q, A.inits q0 ∧ nfa_δ_star A q0 w q1 ∧ A.final q1 def empty_nfa {Sigma : Type*} : nfa Sigma := { Q := fin 1, finQ := by apply_instance, decQ := by apply_instance, inits := λ _ , true, decI := by apply_instance, final := λ _ , false, decF := by apply_instance, δ := λ _ _ _ , false, decD := λ _, by {dsimp[sigma.uncurry], apply_instance,}, } def epsilon_nfa {Sigma : Type*} : nfa Sigma := { Q := fin 1, finQ := by apply_instance, decQ := by apply_instance, inits := λ _ , true, decI := by apply_instance, final := λ _ , true, decF := by apply_instance, δ := λ _ _ _ , false, decD := λ _, by {dsimp[sigma.uncurry], apply_instance,}, } def single_nfa {Sigma : Type*} [decidable_eq Sigma] (lit : Sigma) : nfa Sigma := { Q := fin 2, finQ := by apply_instance, decQ := by apply_instance, inits := λ x , x.val = 0, decI := by apply_instance, final := λ x , x.val = 1, decF := by apply_instance, δ := λ q0 x q1 , q0.val = 0 ∧ x = lit ∧ q1.val = 1, decD := begin assume x, dsimp [sigma.uncurry], apply_instance, end } end nfa section ε_nfa variables {Sigma : Type} [decidable_eq Sigma] structure ε_nfa(Sigma : Type) := (Q : Type*) [finQ : fintype Q] [decQ : decidable_eq Q] (inits : Q → Prop) [decI : decidable_pred inits] (final : Q → Prop) [decF : decidable_pred final] (δ : Q → option Sigma → Q → Prop) [decD : decidable_pred (sigma.uncurry (sigma.uncurry δ))] open ε_nfa inductive ε_closure{A : ε_nfa Sigma} : (A.Q → Prop) → A.Q → Prop | base : ∀ x : A.Q → Prop, ∀ q : A.Q, x q → ε_closure x q | step : ∀ x : A.Q → Prop, ∀ q q' : A.Q, ε_closure x q → A.δ q none q' → ε_closure x q' /- def ε_nfa_δ_star : Π A : ε_nfa Sigma , A.Q → word Sigma → A.Q → Prop | A q0 [] q1 := ε_closure (λ q, q = q0) q1 | A q0 (x :: w) q1 := ∃ q2 : A.Q, (A.δ q0 (some x) q2 ∨ A.δ q0 none q2) ∧ ε_nfa_δ_star A q2 w q1 -/ inductive ε_nfa_δ_star (A : ε_nfa Sigma) : A.Q → word Sigma → A.Q → Prop | empty : ∀ q : A.Q , ε_nfa_δ_star q [] q | step : ∀ q0 q1 q2 : A.Q, ∀ x : Sigma, ∀ w : word Sigma, A.δ q0 (some x) q1 → ε_nfa_δ_star q1 w q2 → ε_nfa_δ_star q0 (x :: w) q2 | epsilon : ∀ q0 q1 q2 : A.Q, ∀ w : word Sigma, A.δ q0 none q1 → ε_nfa_δ_star q1 w q2 → ε_nfa_δ_star q0 w q2 def ε_nfa_lang (A : ε_nfa Sigma) : lang Sigma := λ w , ∃ q0 q1 : A.Q, A.inits q0 ∧ ε_nfa_δ_star A q0 w q1 ∧ A.final q1 theorem inversion : ∀ A : ε_nfa Sigma, ∀ q q' : A.Q, ∀ w : word Sigma, ε_nfa_δ_star A q w q' → (w = [] ∧ q' = q) ∨ (∃ x : Sigma, ∃ w' : word 
Sigma, ∃ q'' : A.Q , w = x :: w' ∧ A.δ q (some x) q'' ∧ ε_nfa_δ_star A q'' w' q') ∨ (∃ q'' : A.Q , A.δ q none q'' ∧ ε_nfa_δ_star A q'' w q') := begin assume A q q' w, assume h, cases h, left, constructor, refl, refl, right, left, existsi h_x, existsi h_w, existsi h_q1, constructor, refl, constructor, exact h_ᾰ, exact h_ᾰ_1, right, right, existsi h_q1, constructor, exact h_ᾰ, exact h_ᾰ_1, end end ε_nfa section dfa2nfa variables {Sigma : Type} def dfa2nfa(A : dfa Sigma) : nfa Sigma := { Q := A.Q, finQ := A.finQ, decQ := A.decQ, inits := λ q : A.Q, q = A.init, decI := λ q, A.decQ q A.init, final := A.final, decF := A.decF, δ := λ q0 x q1 , q1 = A.δ q0 x, decD := λ q , A.decQ q.snd (A.δ q.fst.fst q.fst.snd), } lemma nfaδ2dfaδ : ∀ A : dfa Sigma, ∀ w : word Sigma, ∀ q0 q1 : A.Q, dfa_δ_star A q0 w = q1 ↔ nfa_δ_star (dfa2nfa A) q0 w q1 := begin assume A w, induction w, { assume q0 q1, constructor, dsimp [dfa_δ_star, nfa_δ_star], assume h, exact h, dsimp [dfa_δ_star, nfa_δ_star], assume h, exact h, }, { assume q0 q1, constructor, { assume h, dsimp [nfa_δ_star], existsi A.δ q0 w_hd, constructor, dsimp [dfa2nfa], reflexivity, apply (iff.mp (w_ih (A.δ q0 w_hd) q1)), exact h, }, { dsimp [dfa_δ_star, nfa_δ_star], assume g, apply (iff.mpr (w_ih (A.δ q0 w_hd) q1)), cases g with q00 gg, have eq: A.δ q0 w_hd = q00, dsimp [dfa2nfa] at gg, exact (eq.symm (and.elim_left gg)), rewrite eq, exact (and.elim_right gg), } } end lemma emb11 : ∀ A : dfa Sigma, ∀ w : word Sigma, dfa_lang A w → nfa_lang (dfa2nfa A) w := begin assume A w, dsimp [dfa_lang, nfa_lang], induction w, { dsimp [dfa_δ_star], assume h, existsi A.init, existsi A.init, constructor, dsimp [dfa2nfa], reflexivity, constructor, dsimp [nfa_δ_star], reflexivity, dsimp [dfa2nfa], exact h, }, { assume h, existsi A.init, existsi (dfa_δ_star A A.init (w_hd :: w_tl)), constructor, dsimp [dfa2nfa], reflexivity, constructor, dsimp [nfa_δ_star], existsi A.δ A.init w_hd, constructor, dsimp [dfa2nfa], reflexivity, dsimp [dfa_δ_star], apply iff.mp (nfaδ2dfaδ A w_tl (A.δ A.init w_hd) (dfa_δ_star A (A.δ A.init w_hd) w_tl)), reflexivity, dsimp [dfa2nfa], exact h, } end lemma emb12 : ∀ A : dfa Sigma, ∀ w : word Sigma, nfa_lang (dfa2nfa A) w → dfa_lang A w := begin assume A w, dsimp [nfa_lang, dfa_lang], assume h, induction w, { dsimp [dfa_δ_star] at *, cases h with q0 h2, cases h2 with q1 h3, dsimp [nfa_δ_star, dfa2nfa] at h3, rewrite← (and.elim_left h3), rewrite (and.elim_left (and.elim_right h3)), exact (and.elim_right (and.elim_right h3)), }, { dsimp [dfa_δ_star] at *, cases h with q0 h2, cases h2 with q1 h3, have eq: q0 = A.init, dsimp [dfa2nfa] at h3, exact and.elim_left h3, have g: dfa_δ_star A (A.δ A.init w_hd) w_tl = q1, rewrite← eq, change dfa_δ_star A q0 (w_hd :: w_tl) = q1, apply (iff.mpr (nfaδ2dfaδ A (w_hd :: w_tl) q0 q1)), exact and.elim_left (and.elim_right h3), rewrite g, exact and.elim_right (and.elim_right h3), } end lemma emb1 : ∀ A : dfa Sigma, ∀ w : word Sigma, dfa_lang A w ↔ nfa_lang (dfa2nfa A) w := begin assume A w, constructor, exact (emb11 A w), exact (emb12 A w), end end dfa2nfa section nfa2dfa variables {Sigma : Type} @[reducible] def decPow (A : Type*) := Σ (p : A → Prop), decidable_pred p -- https://leanprover.zulipchat.com/#narrow/stream/113488-general/topic/fintype.20for.20functions/near/291226330 def equiv.sigma_decidable_pred {α : Type*} : (Σ (p : α → Prop), decidable_pred p) ≃ (α → bool) := { to_fun := λ A i, @to_bool (A.1 i) (A.2 i), inv_fun := λ p, ⟨λ a, p a, by apply_instance⟩, left_inv := λ p, sigma.ext (by simp) $ 
subsingleton.helim (by simp) _ _, right_inv := λ p, funext $ λ i, bool.to_bool_coe _ } instance finpow {A : Type*} (fin : fintype A) (dec : decidable_eq A) : fintype (decPow A) := fintype.of_equiv (A → bool) $ equiv.symm equiv.sigma_decidable_pred instance decpow {A : Type*} (fin : fintype A) (dec : decidable_eq A) : decidable_eq (decPow A) := equiv.decidable_eq equiv.sigma_decidable_pred def nfa2dfa (A : nfa Sigma) : dfa Sigma := { Q := decPow A.Q, finQ := finpow A.finQ A.decQ, decQ := decpow A.finQ A.decQ, init := sigma.mk (A.inits) A.decI, final := λ p, ∃ q, p.1 q ∧ A.final q, decF := begin assume p, casesI p, letI decF: decidable_pred A.final := A.decF, letI finQ: fintype A.Q := A.finQ, simp at *, apply fintype.decidable_exists_fintype, end, δ := λ p x, ⟨(λ q1, ∃ q0 : A.Q, p.1 q0 ∧ A.δ q0 x q1), λ q1, begin casesI p, letI finQ: fintype A.Q := A.finQ, simp at *, letI decD: decidable_pred (λ q0, p_fst q0 ∧ A.δ q0 x q1), { assume q0, simp at *, casesI p_snd q0, have f: ¬ (p_fst q0 ∧ A.δ q0 x q1), { assume h2, apply h, exact (and.elim_left h2), }, exact is_false f, casesI A.decD ⟨⟨q0, x⟩, q1⟩ with no yes, have f: ¬ (p_fst q0 ∧ A.δ q0 x q1), { assume h2, apply no, dsimp [sigma.uncurry], exact (and.elim_right h2), }, exact is_false f, dsimp [sigma.uncurry] at yes, exact is_true (and.intro h yes), }, apply fintype.decidable_exists_fintype, end⟩, } lemma dfaδ2nfaδ : ∀ A : nfa Sigma, ∀ w : word Sigma, ∀ q1 : A.Q, ∀ p : (nfa2dfa A).Q, (∃ q0 : A.Q, p.1 q0 ∧ nfa_δ_star A q0 w q1) ↔ (dfa_δ_star (nfa2dfa A) p w).1 q1 := begin assume A w, induction w, { assume q1 p, dsimp [nfa_δ_star, dfa_δ_star], constructor, { assume h, cases h with q0 h2, rewrite← (and.elim_right h2), exact (and.elim_left h2), }, { assume h, existsi q1, exact (and.intro h rfl), }, }, { assume q1 p, dsimp [nfa_δ_star, dfa_δ_star], constructor, { assume h, cases h with q0 h2, cases (and.elim_right h2) with q2 h3, have g: ((nfa2dfa A).δ p w_hd).1 q2, { dsimp [nfa2dfa], existsi q0, exact (and.intro (and.elim_left h2) (and.elim_left h3)), }, apply (iff.mp (w_ih q1 ((nfa2dfa A).δ p w_hd))), existsi q2, exact (and.intro g (and.elim_right h3)), }, { assume h, cases iff.mpr (w_ih q1 ((nfa2dfa A).δ p w_hd)) h with q2 h2, dsimp [nfa2dfa] at h2, cases (and.elim_left h2) with q0 h3, existsi q0, constructor, exact (and.elim_left h3), existsi q2, exact (and.intro (and.elim_right h3) (and.elim_right h2)), }, } end lemma emb21 : ∀ A : nfa Sigma, ∀ w : word Sigma, nfa_lang A w → dfa_lang (nfa2dfa A) w := begin assume A w, dsimp [nfa_lang, dfa_lang], induction w, { dsimp [nfa_δ_star, dfa_δ_star], assume h, dsimp [nfa2dfa], cases h with q0 h2, cases h2 with q1 h3, existsi q0, constructor, exact (and.elim_left h3), rewrite (and.elim_left (and.elim_right h3)), exact (and.elim_right (and.elim_right h3)), }, { dsimp [nfa_δ_star, dfa_δ_star], assume h, cases h with q0 h2, cases h2 with q1 h3, have g: (dfa_δ_star (nfa2dfa A) ((nfa2dfa A).δ (nfa2dfa A).init w_hd) w_tl).1 q1, { apply iff.mp (dfaδ2nfaδ A w_tl q1 ((nfa2dfa A).δ (nfa2dfa A).init w_hd)), cases (and.elim_left (and.elim_right h3)) with q2 h4, existsi q2, dsimp [nfa2dfa], constructor, existsi q0, exact (and.intro (and.elim_left h3) (and.elim_left h4)), exact (and.elim_right h4), }, existsi q1, exact (and.intro g (and.elim_right (and.elim_right h3))), } end lemma emb22 : ∀ A : nfa Sigma, ∀ w : word Sigma, dfa_lang (nfa2dfa A) w → nfa_lang A w := begin assume A w, dsimp [nfa_lang, dfa_lang], induction w, { dsimp [nfa_δ_star, dfa_δ_star, nfa2dfa], assume h, cases h with q0 h2, existsi q0, existsi q0, 
simp, exact h2, }, { assume h, cases h with q1 h2, have g: ∃ q0 : A.Q, (nfa2dfa A).init.1 q0 ∧ nfa_δ_star A q0 (w_hd :: w_tl) q1, { apply iff.mpr (dfaδ2nfaδ A (w_hd :: w_tl) q1 (nfa2dfa A).init), exact (and.elim_left h2), }, cases g with q0 gg, existsi q0, existsi q1, constructor, exact (and.elim_left gg), exact (and.intro (and.elim_right gg) (and.elim_right h2)), } end lemma emb2 : ∀ A : nfa Sigma, ∀ w : word Sigma, nfa_lang A w ↔ dfa_lang (nfa2dfa A) w := begin assume A w, constructor, exact emb21 A w, exact emb22 A w, end end nfa2dfa section nfa2ε_nfa variables {Sigma : Type} [decidable_eq Sigma] def nfa2ε_nfa(A : nfa Sigma) : ε_nfa Sigma := { Q := A.Q, finQ := A.finQ, decQ := A.decQ, inits := A.inits, decI := A.decI, final := A.final, decF := A.decF, δ := λ q0 x q1, x.cases_on' false (λ x, A.δ q0 x q1), decD := λ q, begin dsimp[sigma.uncurry], cases q with q0x q1, cases q0x with q0 x, cases x with x empty, { dsimp[option.cases_on'], letI g:= A.decQ, apply_instance, }, { dsimp[option.cases_on'], exact (A.decD ⟨⟨q0, x⟩, q1⟩), } end, } lemma nfaδ2ε_nfaδ : ∀ A : nfa Sigma, ∀ w : word Sigma, ∀ q0 q1 : A.Q, (nfa_δ_star A q0 w q1) ↔ ε_nfa_δ_star (nfa2ε_nfa A) q0 w q1 := begin assume A w, induction w, { assume q0 q1, dsimp [nfa_δ_star], constructor, assume eq, rewrite eq, fconstructor, assume h, cases h, { refl, }, { dsimp [nfa2ε_nfa] at *, cases h_ᾰ, }, }, { assume q0 q1, dsimp [nfa_δ_star], constructor, { assume h, cases h with q2 h2, dsimp [nfa2ε_nfa], cases h2, fconstructor, exact q2, exact h2_left, apply iff.mp (w_ih q2 q1), exact h2_right, }, { assume h, cases h, { existsi h_q1, constructor, exact h_ᾰ, apply iff.mpr (w_ih h_q1 q1), exact h_ᾰ_1, }, { dsimp [nfa2ε_nfa] at h_ᾰ, cases h_ᾰ, }, }, } end lemma emb3 : ∀ A : nfa Sigma, ∀ w : word Sigma, nfa_lang A w ↔ ε_nfa_lang (nfa2ε_nfa A) w := begin assume A w, dsimp [nfa_lang, ε_nfa_lang], constructor, { assume h, cases h with q0 h2, cases h2 with q1 h3, existsi q0, existsi q1, constructor, exact (and.elim_left h3), constructor, { cases h3, apply iff.mp (nfaδ2ε_nfaδ A w q0 q1), exact and.elim_left h3_right, }, exact (and.elim_right (and.elim_right h3)), }, { assume h, cases h with q0 h2, cases h2 with q1 h3, cases h3 with h4 h5, cases h5 with h6 h7, existsi q0, existsi q1, constructor, exact h4, constructor, apply iff.mpr (nfaδ2ε_nfaδ A w q0 q1), exact h6, exact h7, } end end nfa2ε_nfa
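The `nfa2dfa` construction above is the classical subset construction, phrased with decidable predicates (`decPow`) so that the powerset state type stays a `fintype`. As a quick illustration of the idea (of the construction, not of the Lean code itself), here is a minimal Python sketch of the same construction on a concrete finite automaton; the names `nfa_delta`, `dfa_step`, and `dfa_accepts` are ours, not from the formalization.

```python
# A tiny NFA over {'a', 'b'} accepting words that contain "ab";
# nfa_delta maps (state, symbol) -> set of successor states.
nfa_inits = {0}
nfa_finals = {2}
nfa_delta = {
    (0, 'a'): {0, 1}, (0, 'b'): {0},
    (1, 'a'): set(),  (1, 'b'): {2},
    (2, 'a'): {2},    (2, 'b'): {2},
}

def dfa_step(subset, x):
    # nfa2dfa's delta: the successor of a set of NFA states is the
    # union of the NFA successors of its members.
    return frozenset(q1 for q0 in subset for q1 in nfa_delta[(q0, x)])

def dfa_accepts(word):
    subset = frozenset(nfa_inits)      # nfa2dfa's init: the set of all inits
    for x in word:
        subset = dfa_step(subset, x)
    return bool(subset & nfa_finals)   # final iff it contains a final state

assert dfa_accepts("aab") and not dfa_accepts("ba")
```

The lemmas `emb21`/`emb22` correspond to checking that this deterministic run accepts exactly the words the NFA accepts.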
lemma tendsto_zero_mult_left_iff [simp]:
  fixes c :: "'a::{topological_semigroup_mult,field}"
  assumes "c \<noteq> 0"
  shows "(\<lambda>n. c * a n) \<longlonglongrightarrow> 0 \<longleftrightarrow> a \<longlonglongrightarrow> 0"
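A proof sketch in ordinary notation (ours; the Isabelle proof script is not included above): if $a_n \to 0$ then $c\,a_n \to c \cdot 0 = 0$ by continuity of multiplication (the `topological_semigroup_mult` assumption); conversely, since $c \neq 0$ in a field,
$$a_n \;=\; c^{-1}\,(c\,a_n) \;\longrightarrow\; c^{-1} \cdot 0 \;=\; 0,$$
again by continuity of multiplication by the fixed element $c^{-1}$.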
# Author: Alan Ruttenberg
# Project: OHD
# Date: 2015-04-24
#
# WIP survival analysis
# Querying for pairs of events for survival analysis
# Todo - bring in correlates - age, gender, tooth type

# collect all known resin restoration failures - cases where another restoration or procedure indicates failure
# ?patient - Label of patient
# ?proci1 - The initial resin restoration
# ?date1 - The date of the initial restoration
# ?proci2 - The restoration that indicates failure
# ?soonest_date2 - The date of the restoration that indicates failure
# ?previous_visit_date - The date of the visit just prior to date of the
#   restoration that indicates failure. For later use with interval censoring.

# The query strategy is slightly odd. The job of the inner select is
# to find pairs of restoration and subsequent restoration. However, we
# only want the soonest one, so we aggregate to get date2, which is
# the least date.
# However, if we projected ?date2 or ?proci2 out of the inner select,
# the grouping wouldn't have anything to aggregate over. So we bind proci2
# and date2 again given the results.

# surface_restoration_pattern() defines the constraints for the initial restoration
# surface_restoration_failure_pattern() defines the constraints for considering it a failure

collect_restoration_failures <- function ()
{ queryc("select distinct ?patienti ?proci1 ?date1 ?birthdate ?proci2 ?soonest_date2",
         " (max(?one_before_date) as ?previous_visit_date)",
         " (coalesce(?is_male,?is_female,\"unrecorded\") as ?gender)",
         " (coalesce(?is_anterior,?is_posterior,\"dunno\") as ?tooth_type)",
         "where {",
         "{select distinct ?patienti ?proci1 ?date1 ?toothi ?surfacei (min(?date2) as ?soonest_date2)",
         "where {",
         patient_tooth_surface_pattern(),
         surface_restoration_pattern(proci="?proci1",date="?date1",procedure_type="resin_filling_restoration:"),
         "?proci1 later_encounter: ?proci2.",
         surface_restoration_failure_pattern(proci="?proci2",date="?date2"),
         "} group by ?patienti ?toothi ?surfacei ?proci1 ?date1",
         "}",
         surface_restoration_failure_pattern(proci="?proci2",date="?soonest_date2"),
         "?proc_minus_1 next_encounter: ?proci2.",
         "?proc_minus_1 occurrence_date: ?one_before_date.",
         gender_pattern(personi="?patienti"),
         posterior_anterior_pattern(),
         "optional {?patienti birth_date: ?birthdate.}",
         "} group by ?patienti ?proci1 ?date1 ?birthdate ?proci2 ?soonest_date2 ?is_male ?is_female ?is_anterior ?is_posterior")
}

# Note: pairs of procedure and surface are not unique (since one procedure can be multi-surface)

# For each of the initial restorations, find the latest date that there was a visit.
# ?proci1 - The initial resin restoration
# ?date1 - The date of the initial restoration
# ?latest_date2 - The date of the last visit of any kind the patient had

# The structure of the query is very similar to the above, except we
# omit the constraint on the second visit.
collect_all_restorations_and_latest_encounter_after <- function ()
{ queryc("select distinct ?proci1 ?date1 ?birthdate ?latest_date2 ",
         " (coalesce(?is_male,?is_female,\"unrecorded\") as ?gender)",
         " (coalesce(?is_anterior,?is_posterior) as ?tooth_type)",
         " ?patienti",
         "where{",
         " {select distinct ?patienti ?proci1 ?date1 ?toothi ?surfacei (max(?date2) as ?latest_date2) ",
         " where {",
         patient_tooth_surface_pattern(),
         surface_restoration_pattern(proci="?proci1",date="?date1",procedure_type="resin_filling_restoration:"),
         " optional",
         " { ?proci1 later_encounter: ?proci2.",
         " ?proci2 occurrence_date: ?date2 }",
         " }",
         " group by ?proci1 ?patienti ?date1 ?toothi ?surfacei ",
         " }",
         posterior_anterior_pattern(),
         gender_pattern(personi="?patienti"),
         "optional{?patienti birth_date: ?birthdate.}",
         "} ",
         "order by ?date1")
}

collect_restorations_followed_by_endodontic_procedure <- function ()
{ queryc("select distinct ?restoration ?restoration_date ?toothi ?root_canal ?root_canal_date ?restoration_type",
         " (max(?one_before_date) as ?previous_visit_date)",
         " (coalesce(?is_male,?is_female,\"unrecorded\") as ?gender)",
         " (coalesce(?is_anterior,?is_posterior,\"dunno\") as ?tooth_type)",
         " where{",
         "{select distinct ?patienti ?restoration ?restoration_type ?toothi (min(?date2) as ?root_canal_date)",
         "where {",
         patient_tooth_pattern(),
         tooth_restoration_pattern(proci="?restoration",date="?restoration_date",procedure_type="tooth_restoration_procedure:"),
         "?restoration asserted_type: ?restoration_type.",
         "?restoration later_encounter: ?root_canal.",
         root_canal_failure_pattern(proci="?root_canal",date="?date2"),
         "} group by ?patienti ?toothi ?surfacei ?root_canal ?restoration_date ?restoration ?restoration_type",
         "}",
         root_canal_failure_pattern(proci="?root_canal",date="?root_canal_date"),
         "?proc_minus_1 next_encounter: ?root_canal.",
         "?proc_minus_1 occurrence_date: ?one_before_date.",
         gender_pattern(personi="?patienti"),
         "optional {?patienti birth_date: ?birthdate.}",
         "} group by ?patienti ?restoration ?restoration_date ?toothi ?root_canal ?root_canal_date ?restoration_type ?birthdate ?is_male ?is_female ?is_anterior ?is_posterior")
}

## The issue here is that the restoration that precedes a root canal is
## not always single-valued. A simple case is that there are two single
## surface restorations, and the next thing that happens to the tooth
## is a root canal.
## This makes it tricky to count. Does one give split credit to the two
## surface restorations? Before I realized this I was trying to get a
## single row per endo, and then count by restoration type. That
## doesn't work.
## So what is desirable?
## - In the unique cases, the restoration, specified to the surfaces that were restored?
## - Perhaps the characterization of the state of the tooth just before - something about every surface.
## - Perhaps, when there is more than one restoration choose randomly (with sparql sample) ## - count_restorations_followed_by_endodontic_procedure <- function() { queryc("select distinct ?restoration_type (count(?restoration_type) as ?count)", " where{", "{select distinct ?patienti (max(?restoration_date) as ?latest_restoration_date) ?restoration_type ?toothi (min(?date2) as ?root_canal_date)", "where {", patient_tooth_pattern(), tooth_restoration_pattern(proci="?restoration",date="?restoration_date",procedure_type="tooth_restoration_procedure:"), "?restoration asserted_type: ?restoration_typec.", "?restoration_typec rdfs:label ?restoration_type.", "?restoration later_encounter: ?root_canal.", "?restoration a tooth_restoration_procedure:.", root_canal_failure_pattern(proci="?root_canal",date="?date2"), "} group by ?patienti ?toothi ?surfacei ?root_canal ?restoration_date ?restoration_type", "}", "?restoration occurrence_date: ?latest_restoration_date.", "?restoration a tooth_restoration_procedure:.", "?restoration has_participant: ?toothi.", root_canal_failure_pattern(proci="?root_canal",date="?root_canal_date"), "?proc_minus_1 next_encounter: ?root_canal.", "?proc_minus_1 occurrence_date: ?one_before_date.", gender_pattern(personi="?patienti"), "optional {?patienti birth_date: ?birthdate.}", "} group by ?restoration_type order by desc(?count)" ) } role_inheres_realizes_pattern <- function (...) #role_type, bearer, procedure) { print("hello") bgp = tb( " ?proci a proc_type: .", " _:role a role_type: .", " _:role inheres_in: ?bearer .", " ?proci realizes: _:role .") sparql_interpolate(bgp); } surface_restoration_pattern <- function (...) { sparql_interpolate( "?proci a procedure_type:.", "_:role a tooth_to_be_restored_role:.", "_:role inheres_in: ?toothi.", "?proci realizes: _:role.", "?proci occurrence_date: ?date.", "?proci has_participant: ?surfacei." ); } tooth_restoration_pattern <- function (...) { sparql_interpolate( "?proci a procedure_type:.", "_:role a tooth_to_be_restored_role:.", "_:role inheres_in: ?toothi.", "?proci realizes: _:role.", "?proci occurrence_date: ?date.", "?proci has_participant: ?toothi." ); } surface_restoration_failure_pattern <- function (...) { sparql_interpolate( sparql_union( tb( sparql_union("?proci a tooth_restoration_procedure:.", "?proci a inlay_restoration:." ), "_:role a tooth_to_be_restored_role: .", "_:role inheres_in: ?toothi.", "?proci realizes: _:role .", "?proci occurrence_date: ?date .", "?proci has_participant: ?surfacei ."), tb(sparql_union("?proci a crown_restoration: .", "?proci a tooth_extraction: .", "?proci a endodontic_procedure: ." ), "_:role1 a target_of_tooth_procedure: .", "_:role1 inheres_in: ?toothi.", "?proci realizes: _:role1 .", "?proci occurrence_date: ?date."))) # ,missing_tooth_pattern(exam="?proci",tooth="toothi",date,patient) } patient_tooth_surface_pattern <- function (...) { sparql_interpolate( "?patienti a homo_sapiens: .", "?toothi rdf:type tooth: .", "?toothi is_part_of: ?patienti .", "?surfacei rdf:type tooth_surface: .", "?surfacei is_part_of: ?toothi .") } patient_tooth_pattern <- function (...) { sparql_interpolate( "?patienti a homo_sapiens: .", "?toothi rdf:type tooth: .", "?toothi is_part_of: ?patienti ." ) } root_canal_failure_pattern <- function(...) 
{ sparql_interpolate( "?proci a endodontic_procedure: .", "_:role1 a tooth_to_undergo_endodontic_procedure_role:.", "_:role1 inheres_in: ?toothi.", "?proci realizes: _:role1 .", "?proci has_participant: ?toothi.", "?proci occurrence_date: ?date.") } #constrained: ?patient,?tooth (which used to exist), #constrains: ?proci,?date (when it doesn't exist) missing_tooth_pattern <- function(...){ sparql_interpolate( "?patienti participates_in: ?exam.", "?exam a oral_evaluation:.", "?exam occurrence_date: ?date.", "?exam has_specified_output: _:finding", "_:finding is_about: ?patient.", "_:finding a missing_tooth_finding:.", "?tooth_number is_part_of: _:finding.", "?toothi a _:tooth_class.", "?tooth_number is_about: _:tooth_class." ) } posterior_anterior_pattern <- function(...) { sparql_interpolate( "optional { BIND(\"posterior\" as ?is_posterior). {{?toothi a pre_molar:.} UNION {?toothi a molar:}} }", "optional { BIND(\"anterior\" as ?is_anterior). {{?toothi a canine:.} UNION {?toothi a incisor:}} }" ) } #"BIND(coalesce(?is_anterior,?is_posterior) as ?tooth_type)}" gender_pattern <- function(...) { sparql_interpolate( "optional { BIND(\"male\" as ?is_male). ?personi a male:.}", "optional { BIND(\"female\" as ?is_female). ?personi a female:.}"); } #"BIND(coalesce(?is_male,?is_female,\"unrecorded\") as ?gender)}"
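To make the "inner select" strategy in the header comments concrete, here is the skeleton of the query that collect_restoration_failures() builds, written out as a plain string (a paraphrase for illustration, not the exact generated text; the predicate names such as later_encounter: and occurrence_date: are the ones interpolated by the patterns above).

```python
# Skeleton of the failure-pair query: the inner SELECT picks, per initial
# restoration, the *earliest* later failure (min(?date2)); the outer query
# then re-binds ?proci2 at that date, because projecting ?proci2 out of the
# inner select would leave nothing to aggregate over.
failure_pairs_skeleton = """
SELECT DISTINCT ?patienti ?proci1 ?date1 ?proci2 ?soonest_date2
WHERE {
  { SELECT DISTINCT ?patienti ?proci1 ?date1 ?toothi ?surfacei
           (MIN(?date2) AS ?soonest_date2)
    WHERE {
      # ... patient/tooth/surface and initial-restoration patterns ...
      ?proci1 later_encounter: ?proci2 .
      # ... failure pattern binding ?proci2 and ?date2 ...
    }
    GROUP BY ?patienti ?toothi ?surfacei ?proci1 ?date1
  }
  # Re-bind the failing procedure at the minimal date:
  # ... failure pattern with its date constrained to ?soonest_date2 ...
}
"""
```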
module Slice where open import Logic.Relations open import Logic.Equivalence open import Logic.Base open import Category module SliceCat (ℂ : Cat)(Γ : Category.Obj ℂ) where open module CC = Category.Category ℂ record SlObj : Set1 where field dom : Obj arr : dom ─→ Γ record _Sl→_ (f f' : SlObj) : Set where field h : (SlObj.dom f) ─→ (SlObj.dom f') π : (SlObj.arr f') ∘ h == (SlObj.arr f) SlId : {f : SlObj} -> f Sl→ f SlId = record { h = id ; π = idRight } _o_ : {f f' f'' : SlObj} -> f' Sl→ f'' -> f Sl→ f' -> f Sl→ f'' _o_ {F} {F'} {F''} F₁ F₂ = let f = SlObj.arr F in let f' = SlObj.arr F' in let f'' = SlObj.arr F'' in let h' = _Sl→_.h F₁ in let h = _Sl→_.h F₂ in record { h = (_Sl→_.h F₁) ∘ (_Sl→_.h F₂) -- Proof of f'' ∘ (h' ∘ h) == f ; π = trans (trans (sym assoc) (congL (_Sl→_.π F₁))) (_Sl→_.π F₂) } SlRel : {A B : SlObj} -> Rel (A Sl→ B) SlRel f f' = (_Sl→_.h f) == (_Sl→_.h f') SlRefl : {A B : SlObj} -> Reflexive {A Sl→ B} SlRel SlRefl = refl SlSym : {A B : SlObj} -> Symmetric {A Sl→ B} SlRel SlSym = sym SlTrans : {A B : SlObj} -> Transitive {A Sl→ B} SlRel SlTrans = trans SlEq : {A B : SlObj} -> Equivalence (A Sl→ B) SlEq {A} {B} = record { _==_ = SlRel {A} {B} ; refl = \{f : A Sl→ B} -> SlRefl {A}{B}{f} ; sym = \{f g : A Sl→ B} -> SlSym {A}{B}{f}{g} ; trans = \{f g h : A Sl→ B} -> SlTrans {A}{B}{f}{g}{h} } SlCong : {A B C : SlObj}{f f' : B Sl→ C}{g g' : A Sl→ B} -> SlRel f f' -> SlRel g g' -> SlRel (f o g) (f' o g') SlCong = cong SlIdLeft : {A B : SlObj}{f : A Sl→ B} -> SlRel (SlId o f) f SlIdLeft = idLeft SlIdRight : {A B : SlObj}{f : A Sl→ B} -> SlRel (f o SlId) f SlIdRight = idRight SlAssoc : {A B C D : SlObj}{f : C Sl→ D}{g : B Sl→ C}{h : A Sl→ B} -> SlRel ((f o g) o h) (f o (g o h)) SlAssoc = assoc Slice : Cat Slice = record { Obj = SlObj ; _─→_ = _Sl→_ ; id = SlId ; _∘_ = _o_ ; Eq = SlEq ; cong = \{A B C : SlObj}{f f' : B Sl→ C}{g g' : A Sl→ B} -> SlCong {A}{B}{C}{f}{f'}{g}{g'} ; idLeft = \{A B : SlObj}{f : A Sl→ B} -> SlIdLeft {A} {B} {f} ; idRight = \{A B : SlObj}{f : A Sl→ B} -> SlIdRight {A} {B} {f} ; assoc = \{A B C D : SlObj}{f : C Sl→ D}{g : B Sl→ C}{h : A Sl→ B} -> SlAssoc {A}{B}{C}{D}{f}{g}{h} }
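In textbook notation, the construction above is the slice category $\mathbb{C}/\Gamma$: an object is an arrow $f : A \to \Gamma$ of $\mathbb{C}$, and a morphism from $f$ to $f'$ is an arrow $h : A \to A'$ with $f' \circ h = f$ (the field `π` of `_Sl→_`). Composition is well defined because the commuting triangles paste together, which is exactly the proof carried by `π` in `_o_`:
$$f'' \circ (h' \circ h) \;=\; (f'' \circ h') \circ h \;=\; f' \circ h \;=\; f.$$
Identity, congruence, and associativity are then inherited directly from $\mathbb{C}$, as the definitions `SlId`, `SlCong`, and `SlAssoc` show.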
```python
# Initialize OK
from client.api.notebook import Notebook
ok = Notebook('hw6.ok')
```

# Homework 6: Exploring fairness through Cook County’s property assessments

## Due Date: 11:59pm Monday, April 6

### Collaboration Policy

Data science is a collaborative activity. While you may talk with others about the homework, we ask that you **write your solutions individually**. If you do discuss the assignments with others, please **include their names** in the collaborators cell below.

**Collaborators:** *write names here*

## Introduction

This assignment will continue from where we left off in Homework 5. Recall that the linear model that you created failed to produce accurate estimates of the observed housing prices because the model was too simple. The goal of this homework is to guide you through the iterative process of specifying, fitting, and analyzing the performance of more complex linear models used to predict prices of houses in Ames, Iowa. Additionally, you will have the opportunity to choose your own features and create your own regression model!

By the end of this homework, you should feel comfortable:

1. Identifying informative variables through EDA
2. Feature engineering categorical variables
3. Using sklearn to build more complex linear models

Additionally, as a continuation of the last homework, we’ll explore the dynamics of the CCAO’s appraisal system with more depth as you continue developing your housing prediction model. Alongside our discussion on implicit bias, however, we’ll tackle another central facet of the CCAO’s work: transparency. As you work through this assignment, consider the balance of power between the CCAO and its constituents - how might transparency redistribute this balance? And what are the limits of transparency as a solution for systemic inequity?

## Learning Outcomes

Through the completion of this homework, students will be able to:

* Understand the relationship between bias and fairness.
* Analyze the technical and performative functions of transparency initiatives.
* Recognize the social aspects of transparency in regard to the redistribution of power between different stakeholders.
* Weigh the effectiveness and limitations of transparency as a means of arriving at fair algorithmic systems in order to reimagine equitable practices in data science.

## Score Breakdown

*To be determined by course staff*

```python
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from sklearn.feature_extraction import DictVectorizer

%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
```

```python
# Plot settings
plt.rcParams['figure.figsize'] = (12, 9)
plt.rcParams['font.size'] = 12
```

# The Data (to be edited)

As a reminder, the [Ames dataset](http://jse.amstat.org/v19n3/decock.pdf) consists of 2930 records taken from the Ames, Iowa, Assessor’s Office describing houses sold in Ames from 2006 to 2010. The data set has 23 nominal, 23 ordinal, 14 discrete, and 20 continuous variables (and 2 additional observation identifiers) --- 82 features in total. An explanation of each variable can be found in the included `codebook.txt` file. The information was used in computing assessed values for individual residential properties sold in Ames, Iowa from 2006 to 2010.

The raw data are split into training and test sets with 2000 and 930 observations, respectively. To save some time, we've used a slightly modified data cleaning pipeline from last week's assignment to prepare the training data.
This data is stored in `ames_train_cleaned.csv`. It consists of 1998 observations and 83 features (we added TotalBathrooms from Homework 5).

```python
training_data = pd.read_csv("ames_train_cleaned.csv")
```

### Bias and Fairness

When the use of algorithms and statistical modeling has real-world consequences, we often refer to the idea of fairness as a measurement of how socially-responsible our work is. Does our algorithm make similar predictions across race and gender identities? Does our model adequately represent reality? In the case of the Cook County Assessor’s Office, fair property tax rates are contingent on whether property values are assessed accurately - that they’re valued at what they’re worth, relative to properties with similar characteristics. This implies that having a more accurate model results in fairer assessments.

The goal of creating a property assessment model, then, is to be as accurate as possible, which is typically approached by attempting to minimize human-related biases in the data collection and modeling process. In our previous examination of bias through Homework 1, however, we established that, because of human involvement and historical/institutional contexts, bias is impossible to eradicate. This adds a new dimension to our understanding of fairness in home assessments: having “accurate” (and therefore fair) assessments requires us to constantly reflect on the decisions we make throughout the data lifecycle, as well as the contexts in which we’re working. Keep this relationship between bias and fairness in mind as we continue building our model!

## HCE: Question -1

Based on your work in the previous homework, what would you define as a fair property assessment? There isn’t one right answer! Share your thoughts in 1-2 sentences.

`TODO`: *Write your answer here*

## HCE: Question 0

Does removing human judgment from the assessment process make assessments fairer? In what ways? How might it be (still) unfair?

`TODO`: *Write your answer here*

# Part 4: More Feature Selection and Engineering

Before we make changes to our basic linear model, let’s go back to the data. In this section, we’ll identify two more features of the dataset that will increase our linear regression model's accuracy. Additionally, we will implement one-hot encoding so we can include binary and categorical variables in our improved model. We’ll start by first diving into the hallmark of the CCAO’s existing model: mass appraisal.

### Mass Appraisal

A unique technique employed by the current assessor’s office is the idea of mass appraisal. Rather than assessing homes one by one, mass appraisal evaluates value by looking to the real estate market for local trends based on location and property characteristics. This differs from the classic system, where human evaluation was a more significant factor in evaluating housing prices. The CCAO’s website states that [“mass appraisal is a way to put fairness into the assessment system.”](https://www.cookcountyassessor.com/index.php/about-cook-county-assessors-office) The dataset we’re currently working with is the same one used for mass appraisal. Let’s examine the column `Neighborhood Code` and see how the location of homes might influence the fairness of mass appraisal.
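Before turning to the plots in Question 1, a quick tabular look at neighborhood-level prices can make the fairness question concrete. The snippet below is a sketch using the Ames `Neighborhood` column (our stand-in in this dataset for the CCAO's `Neighborhood Code`):

```python
# Per-neighborhood price summary: median sale price and sample size.
# Small sample sizes in some neighborhoods foreshadow the binning in Question 1b.
nbhd_summary = (
    training_data
    .groupby('Neighborhood')['SalePrice']
    .agg(median_price='median', n_sales='count')
    .sort_values('median_price', ascending=False)
)
nbhd_summary.head()
```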
## HCE: Question 2 How does mass appraisal appeal to fairness? Consider how it might both benefit and hurt homeowners. `TODO:` *Write your answer here* ## Question 1: Neighborhood vs Sale Price First, let's take a look at the relationship between neighborhood and sale prices of the houses in our data set. ```python fig, axs = plt.subplots(nrows=2) sns.boxplot( x='Neighborhood', y='SalePrice', data=training_data.sort_values('Neighborhood'), ax=axs[0] ) sns.countplot( x='Neighborhood', data=training_data.sort_values('Neighborhood'), ax=axs[1] ) # Draw median price axs[0].axhline( y=training_data['SalePrice'].median(), color='red', linestyle='dotted' ) # Label the bars with counts for patch in axs[1].patches: x = patch.get_bbox().get_points()[:, 0] y = patch.get_bbox().get_points()[1, 1] axs[1].annotate(f'{int(y)}', (x.mean(), y), ha='center', va='bottom') # Format x-axes axs[1].set_xticklabels(axs[1].xaxis.get_majorticklabels(), rotation=90) axs[0].xaxis.set_visible(False) # Narrow the gap between the plots plt.subplots_adjust(hspace=0.01) ``` ### Question 1a <a name="q1a"></a> Based on the plot above, what can be said about the relationship between the houses' sale prices and their neighborhoods? <!-- BEGIN QUESTION name: q1a points: 1 manual: True --> <!-- EXPORT TO PDF --> *Write your answer here, replacing this text.* ### Question 1b <a name="q1b"></a> One way we can deal with the lack of data from some neighborhoods is to create a new feature that bins neighborhoods together. Let's categorize our neighborhoods in a crude way: we'll take the top 3 neighborhoods measured by median `SalePrice` and identify them as "rich neighborhoods"; the other neighborhoods are not marked. Write a function that returns list of the top n most pricy neighborhoods as measured by our choice of aggregating function. For example, in the setup above, we would want to call `find_rich_neighborhoods(training_data, 3, np.median)` to find the top 3 neighborhoods measured by median `SalePrice`. *The provided tests check that you answered correctly, so that future analyses are not corrupted by a mistake.* <!-- BEGIN QUESTION name: q1b points: 1 --> ```python def find_rich_neighborhoods(data, n=3, metric=np.median): """ Input: data (data frame): should contain at least a string-valued Neighborhood and a numeric SalePrice column n (int): the number of top values desired metric (function): function used for aggregating the data in each neighborhood. for example, np.median for median prices Output: a list of the top n richest neighborhoods as measured by the metric function """ neighborhoods = ... return neighborhoods rich_neighborhoods = find_rich_neighborhoods(training_data, 3, np.median) rich_neighborhoods ``` ```python ok.grade("q1b"); ``` ### Question 1c <a name="q1c"></a> We now have a list of neighborhoods we've deemed as richer than others. Let's use that information to make a new variable `in_rich_neighborhood`. Write a function `add_rich_neighborhood` that adds an indicator variable which takes on the value 1 if the house is part of `rich_neighborhoods` and the value 0 otherwise. **Hint:** [`pd.Series.astype`](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.Series.astype.html) may be useful for converting True/False values to integers. 
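For concreteness, a one-line body consistent with the hint (a sketch, not necessarily the intended staff solution):

```python
# Sketch: flag membership in the rich-neighborhood list, casting bool to int.
data['in_rich_neighborhood'] = data['Neighborhood'].isin(neighborhoods).astype(int)
```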
*The provided tests check that you answered correctly, so that future analyses are not corrupted by a mistake.* <!-- BEGIN QUESTION name: q1c points: 1 --> ```python def add_in_rich_neighborhood(data, neighborhoods): """ Input: data (data frame): a data frame containing a 'Neighborhood' column with values found in the codebook neighborhoods (list of strings): strings should be the names of neighborhoods pre-identified as rich Output: data frame identical to the input with the addition of a binary in_rich_neighborhood column """ data['in_rich_neighborhood'] = ... return data rich_neighborhoods = find_rich_neighborhoods(training_data, 3, np.median) training_data = add_in_rich_neighborhood(training_data, rich_neighborhoods) ``` ```python ok.grade("q1c"); ``` Having identified rich neighborhoods, let’s now look toward the other end of the spectrum: lower-valued properties. According to the CCAO, their assessment system struggles with accurately predicting the values of properties that are worth less than 150k because they lack data for those properties. This ultimately diminishes the usefulness of mass appraisal for these properties. ## HCE: Question 2.5 In what situation does mass appraisal fail? How might mass appraisal work unfairly in this way? `TODO:` *Write your answer here* ## Question 2: Floodplain In 2019, the Cook County Assessor’s Office added the Federal Emergency Management Agency’s [floodplain data](https://msc.fema.gov/portal/home) to its assessment models. As described in their [Medium article](https://medium.com/@AssessorCook/why-and-how-floodplain-data-is-used-in-cook-county-property-assessments-6269d75189d7), “a floodplain is an area near a body of water that has a high risk of flooding.” A value of 0 indicates that a property is not on a floodplain, while a value of 1 indicates that a property is on a floodplain. There are, however, missing values in the dataset. ### Question 2a <a name="q2a"></a> Let's see if our data set has any missing values. Create a Series object containing the counts of missing values in each of the columns of our data set, sorted from greatest to least. The Series should be indexed by the variable names. For example, `missing_counts['Fireplace_Qu']` should return 975. **Hint:** [`pandas.DataFrame.isnull`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.isnull.html) may help here. *The provided tests check that you answered correctly, so that future analyses are not corrupted by a mistake.* <!-- BEGIN QUESTION name: q2a points: 1 --> ```python missing_counts = ... missing_counts ``` ```python ok.grade("q2a"); ``` It turns out that if we look at the codebook carefully, some of these "missing values" aren't missing at all! The Assessor's Office just used `NA` to denote a special value or that the information was truly not applicable for one reason or another. One such example is the `Fireplace_Qu` variable. ``` FireplaceQu (Ordinal): Fireplace quality Ex Excellent - Exceptional Masonry Fireplace Gd Good - Masonry Fireplace in main level TA Average - Prefabricated Fireplace in main living area or Masonry Fireplace inbasement Fa Fair - Prefabricated Fireplace in basement Po Poor - Ben Franklin Stove NA No Fireplace ``` ### Question 2b <a name="q2b"></a> An `NA` here actually means that the house had no fireplace to rate. Let's fix this in our data set. Write a function that replaces the missing values in `Fireplace_Qu` with `'No Fireplace'`. In addition, it should replace each abbreviated condition with its full word. 
For example, `'TA'` should be changed to `'Average'`. Hint: the [DataFrame.replace](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html) method may be useful here. *The provided tests check that part of your answer is correct, but they are not fully comprehensive.* <!-- BEGIN QUESTION name: q2b points: 2 --> ```python def fix_fireplace_qu(data): """ Input: data (data frame): a data frame containing a Fireplace_Qu column. Its values should be limited to those found in the codebook Output: data frame identical to the input except with a refactored Fireplace_Qu column """ ... return data training_data = fix_fireplace_qu(training_data) ``` ```python ok.grade("q2b"); ``` ```python training_data['Fireplace_Qu'] ``` ### An Important Note on One Hot Encoding <a name="important_note"></a> Unfortunately, simply fixing these missing values isn't sufficient for using `Fireplace_Qu` in our model. Since `Fireplace_Qu` is a categorical variable, we will have to one-hot-encode the data using `DictVectorizer` from Lab 6. Note that we dropped the first one-hot-encoded column. For more information on categorical data in pandas, refer to this [link](https://pandas-docs.github.io/pandas-docs-travis/categorical.html). ```python def ohe_fireplace_qu(data): """ One-hot-encodes fireplace quality. New columns are of the form Fireplace_Qu=QUALITY """ vec_enc = DictVectorizer() vec_enc.fit(data[['Fireplace_Qu']].to_dict(orient='records')) fireplace_qu_data = vec_enc.transform(data[['Fireplace_Qu']].to_dict(orient='records')).toarray() fireplace_qu_cats = vec_enc.get_feature_names() fireplace_qu = pd.DataFrame(fireplace_qu_data, columns=fireplace_qu_cats) data = pd.concat([data, fireplace_qu], axis=1) data = data.drop(columns=fireplace_qu_cats[0]) return data ``` ```python training_data = ohe_fireplace_qu(training_data) training_data.filter(regex='Fireplace_Qu').head(10) ``` # Part 5: Improved Linear Models In this section, we will create linear models that produce more accurate estimates of the housing prices in Ames than the model created in Homework 5, but at the expense of increased complexity. ## Question 3: Adding Covariates to our Model It's finally time to fit our updated linear regression model using the ordinary least squares estimator! Our new model consists of the linear model from Homework 5, with the addition of the our newly created `in_rich_neighborhood` variable and our one-hot-encoded fireplace quality variables: $$\begin{align} \text{SalePrice} & = \theta_0 + \theta_1 \cdot \text{Gr_Liv_Area} + \theta_2 \cdot \text{Garage_Area} + \theta_3 \cdot \text{TotalBathrooms} + \theta_4 \cdot \text{in_rich_neighborhood} + \\ & \quad \: \theta_5 \cdot \text{Fireplace_Qu=Excellent} + \theta_6 \cdot \text{Fireplace_Qu=Fair} + \theta_7 \cdot \text{Fireplace_Qu=Good} + \\ & \quad \: \theta_8 \cdot \text{Fireplace_Qu=No Fireplace} + \theta_9 \cdot \text{Fireplace_Qu=Poor} \end{align}$$ ### Question 3a <a name="q3a"></a> Although the fireplace quality variable that we explored in Question 2 has six categories, only five of these categories' indicator variables are included in our model. Is this a mistake, or is it done intentionally? Why? <!-- BEGIN QUESTION name: q3a points: 1 manual: True --> <!-- EXPORT TO PDF --> *Write your answer here, replacing this text.* ### Question 3b <a name="q3b"></a> We still have a little bit of work to do prior to esimating our linear regression model's coefficients. 
Instead of having you go through the process of selecting the pertinent covariates and creating a [`sklearn.linear_model.LinearRegression`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) object for our linear model again, we will provide the necessary code from Homework 5. However, we will now use cross validation to help validate our model instead of explicitly splitting the data into a training and testing set.

First, we will re-import the data.

```python
training_data = pd.read_csv("ames_train_cleaned.csv")
```

Next, we will implement a reusable pipeline that selects the required variables in our data and splits our covariates and response variable into a matrix and a vector, respectively.

```python
def select_columns(data, *columns):
    """Select only columns passed as arguments."""
    return data.loc[:, columns]

def process_data_gm(data):
    """Process the data for a guided model."""
    # One-hot-encode fireplace quality feature
    data = fix_fireplace_qu(data)
    data = ohe_fireplace_qu(data)

    # Use rich_neighborhoods computed earlier to add the in_rich_neighborhood feature
    data = add_in_rich_neighborhood(data, rich_neighborhoods)

    # Transform Data, Select Features
    data = select_columns(data,
                          'SalePrice',
                          'Gr_Liv_Area',
                          'Garage_Area',
                          'TotalBathrooms',
                          'in_rich_neighborhood',
                          'Fireplace_Qu=Excellent',
                          'Fireplace_Qu=Fair',
                          'Fireplace_Qu=Good',
                          'Fireplace_Qu=No Fireplace',
                          'Fireplace_Qu=Poor'
                         )

    # Return predictors and response variables separately
    X = data.drop(['SalePrice'], axis = 1)
    y = data.loc[:, 'SalePrice']

    return X, y
```

We then run the training data through our data cleaning pipeline.

```python
# Pre-process the training data
# Our functions make this very easy!
X_train, y_train = process_data_gm(training_data)
X_train.head()
```

Finally, we initialize a [`sklearn.linear_model.LinearRegression`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) object as our linear model. We set `fit_intercept=True` so that the model includes an intercept term.

```python
from sklearn import linear_model as lm

linear_model = lm.LinearRegression(fit_intercept=True)
```

After a little bit of work, it's finally time to fit our updated linear regression model. Use the cell below to estimate the model, and then use it to compute the fitted value of `SalePrice` over the training data.

*The provided tests check that you answered correctly, so that future analyses are not corrupted by a mistake.*

<!-- BEGIN QUESTION name: q3b points: 2 -->

```python
# Fit the model below

# Compute the fitted and predicted values of SalePrice
y_fitted = ...
```

```python
ok.grade("q3b");
```

You can see that, as we consider more features in our model, its computational complexity grows. It isn’t just computational, however - increased complexity requires greater expertise and understanding of data science.

### Question 3c <a name="q3c"></a>

Let's assess the performance of our new linear regression model using the Root Mean Squared Error function that we created in Homework 5.

$$RMSE = \sqrt{\dfrac{\sum_{\text{houses}}(\text{actual price for house} - \text{predicted price for house})^2}{\text{# of houses}}}$$

The function is provided below.
```python
def rmse(predicted, actual):
    """
    Calculates RMSE from actual and predicted values
    Input:
      predicted (1D array): vector of predicted/fitted values
      actual (1D array): vector of actual values
    Output:
      a float, the root-mean square error
    """
    return np.sqrt(np.mean((actual - predicted)**2))
```

Please compute the training error using the `rmse` function above.

*The provided tests for this question do not confirm that you have answered correctly; only that you have assigned each variable to a non-negative number.*

<!-- BEGIN QUESTION name: q3c points: 1 -->

```python
training_error = ...
print("Training RMSE: {}".format(training_error))
```

```python
ok.grade("q3c");
```

A slightly modified version of the `cross_validate_rmse` function from Lecture 18 is also provided below.

```python
from sklearn.model_selection import KFold
from sklearn.base import clone

def cross_validate_rmse(model, X, y):
    model = clone(model)
    five_fold = KFold(n_splits=5)
    rmse_values = []
    for tr_ind, va_ind in five_fold.split(X):
        model.fit(X.iloc[tr_ind,:], y.iloc[tr_ind])
        rmse_values.append(rmse(y.iloc[va_ind], model.predict(X.iloc[va_ind,:])))
    return np.mean(rmse_values)
```

Now use the `cross_validate_rmse` function to calculate the cross validation error in the cell below.

*The provided tests for this question do not confirm that you have answered correctly; only that you have assigned each variable to a non-negative number.*

<!-- BEGIN QUESTION name: q3d points: 1 -->

```python
cv_error = ...
print("Cross Validation RMSE: {}".format(cv_error))
```

```python
ok.grade("q3d");
```

# Part 6: Open-Response

The following part is purposefully left nearly open-ended. The Ames data in your possession comes from a larger data set. Your goal is to provide a linear regression model that accurately predicts the prices of the held-out homes, measured by root mean square error.

$$RMSE = \sqrt{\dfrac{\sum_{\text{houses in public test set}}(\text{actual price for house} - \text{predicted price for house})^2}{\text{# of houses}}}$$

Perfect prediction of house prices would have a score of 0, so you want your score to be as low as possible!

### Grading Scheme

Your grade for Question 4 will be based on your training RMSE and test RMSE. The thresholds are as follows:

Points | 3 | 2 | 1 | 0
--- | --- | --- | --- | ---
Training RMSE | Less than 36k | 36k - 38k | 38k - 40k | More than 40k

Points | 3 | 2 | 1 | 0
--- | --- | --- | --- | ---
Test RMSE | Less than 37k | 37k - 40k | 40k - 43k | More than 43k

### One Hot Encoding

If you choose to include more categorical features in your model, you'll need to one-hot-encode each one. Remember that if a categorical variable has a unique value that is present in the training set but not in the test set, one-hot-encoding this variable will result in different outputs for the training and test sets (different numbers of one-hot columns). Watch out for this!

Feel free to look back at how we [one-hot-encoded `Fireplace_Qu`](#important_note).

To generate all possible categories for a categorical variable, we suggest reading through `codebook.txt` or finding the values programmatically across both the training and test datasets (the sketch after Question 4's introduction shows one way to do this).

## Question 4: Your Own Linear Model <a name="q4"></a>

Just as in the guided model above, you should encapsulate as much of your workflow into functions as possible. Below, we have initialized `final_model` for you. Your job is to select better features and define your own feature engineering pipeline in `process_data_fm`. We recommend using cross validation to help inform your feature selection process; one possible workflow is sketched below.
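As a sketch under assumptions - the feature list, the `fillna(0)` imputation choice, and the helper names here are hypothetical, not a recommended final model - you might compare candidate pipelines like this: gather categories across both datasets so the one-hot layouts match, impute missing values, and score each candidate with `cross_validate_rmse`.

```python
# Sketch only: illustrative feature choices, not the graded solution.
import pandas as pd

test_data = pd.read_csv('ames_test_cleaned.csv')

# Gather every category across BOTH datasets so the one-hot layouts match.
fireplace_categories = sorted(
    set(fix_fireplace_qu(training_data.copy())['Fireplace_Qu'])
    | set(fix_fireplace_qu(test_data.copy())['Fireplace_Qu'])
)

def ohe_fixed(data, column, categories):
    """One-hot-encode `column` against a fixed category list, so the same
    columns come out of the training and test sets."""
    cat = pd.Categorical(data[column], categories=categories)
    dummies = pd.get_dummies(cat, prefix=column, prefix_sep='=')
    return pd.concat([data, dummies.iloc[:, 1:]], axis=1)  # drop first column

def process_data_candidate(data):
    data = fix_fireplace_qu(data.copy())
    data = ohe_fixed(data, 'Fireplace_Qu', fireplace_categories)
    # The test set may have missing values where the training set has none.
    data['Garage_Area'] = data['Garage_Area'].fillna(0)
    feature_cols = (['Gr_Liv_Area', 'Garage_Area', 'TotalBathrooms']
                    + [c for c in data.columns if c.startswith('Fireplace_Qu=')])
    return data[feature_cols], data.loc[:, 'SalePrice']

# Score the candidate before committing to it in process_data_fm.
X_cand, y_cand = process_data_candidate(training_data)
print(cross_validate_rmse(lm.LinearRegression(), X_cand, y_cand))
```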
To evaluate your model, we will process training data using your `process_data_fm`, fit `final_model` with this training data, and compute the training RMSE. Then, we will process the test data with your `process_data_fm`, use `final_model` to predict sale prices for the test data, and compute the test RMSE. See below for an example of the code we will run to grade your model:

```
training_data = pd.read_csv('ames_train_cleaned.csv')
test_data = pd.read_csv('ames_test_cleaned.csv')

X_train, y_train = process_data_fm(training_data)
X_test, y_test = process_data_fm(test_data)

final_model.fit(X_train, y_train)
y_predicted_train = final_model.predict(X_train)
y_predicted_test = final_model.predict(X_test)

training_rmse = rmse(y_predicted_train, y_train)
test_rmse = rmse(y_predicted_test, y_test)
```

**Note:** It is your duty to make sure that all of your feature engineering and selection happens in `process_data_fm`, and that the function performs as expected without errors. We will **NOT** accept regrade requests that require us to go back and fix typos or bugs in your code.

**Hint:** Some features may have missing values in the test set but not in the training set. Make sure `process_data_fm` handles missing values appropriately for each feature!

<!-- BEGIN QUESTION name: q4 points: 6 -->

```python
final_model = lm.LinearRegression(fit_intercept=True) # No need to change this!

def process_data_fm(data):
    ...

    # Return predictors and response variables separately
    X = data.drop(['SalePrice'], axis = 1)
    y = data.loc[:, 'SalePrice']

    return X, y
```

```python
ok.grade("q4");
```

## Question 5: EDA for Feature Selection

In the following question, explain a choice you made in designing your custom linear model in Question 4. First, make a plot to show something interesting about the data. Then explain your findings from the plot, and describe how these findings motivated a change to your model.

### Question 5a <a name="q5a"></a>

In the cell below, create a visualization that shows something interesting about the dataset.

<!-- BEGIN QUESTION name: q5a points: 2 manual: True -->
<!-- EXPORT TO PDF -->

```python
# Code for visualization goes here
...
```

### Question 5b <a name="q5b"></a>

Explain any conclusions you draw from the plot above, and describe how these conclusions affected the design of your model. After creating the plot, did you add/remove certain features from your model, or did you perform some other type of feature engineering? How significantly did these changes affect your RMSE?

<!-- BEGIN QUESTION name: q5b points: 2 manual: True -->
<!-- EXPORT TO PDF -->

*Write your answer here, replacing this text.*

### Bias in Modeling

As the previous question highlighted, a series of decisions are built into your model: when the goal is to minimize RMSE (and receive full homework points!), it makes sense to use features that carry a lot of weight. These choices, however, create a representation of reality - intentional or not - and dictate what a model considers valuable when generating any type of prediction.

Having worked through a structured data analysis/modeling process and built your own model, we’ll now take a step back and look at how your work fits into the real world. Because civic data initiatives have multiple stakeholders, it’s imperative to understand how different groups interact with particular aspects of the data.
For a student, it’s important to minimize a model’s error for the sake of completing this assignment and learning basic data science principles. For the CCAO, it’s important to project a vision of fairness for housing assessments in order to maintain the trust of Cook County’s constituents. An assessment model has the potential to reach stakeholders beyond the assessor’s office, so let’s explore this further!

## HCE: Question 3

Above, we included `Floodplain` in our model to predict property values. This column could be useful to organizations outside of the CCAO - insurance companies, for example, calculate risk and insure houses based on their characteristics. If an insurance company wanted to assign an insurance rate to a house, why might they use `Floodplain` from the county assessor as part of their calculation?

`TODO:` *Write your answer here*

## HCE: Question 4

This dataset, and many like it around the country, are available free or for sale to any business. What kinds of businesses and industries would be interested in the feature(s) you identified? How could a business or industry use the feature(s) to make a decision that would help them?

`TODO:` *Write your answer here*

## HCE: Question 5

In the cell below, generate a visualization of the feature(s) you selected which could help the business or industry use this data to make a decision.

`TODO:` *Write your answer here*

## HCE: Question 6

While you have argued that this feature could help a given business make an informed decision, is there a potential for the feature(s) to mislead them? How so?

*Hint:* If you’re stuck, consider the limitations of bias in data collection.

`TODO:` *Write your answer here*

As your work through these questions demonstrates, your features and modeling process have relevance to a diversity of fields. You may have noticed, however, that the quality of this information is contingent on its explainability, i.e. what a particular variable would mean or represent to different industries. This need for context and clarity not only reveals the underlying biases of our work but also characterizes another common approach to fairness: transparency.

## HCE: Question 7

What would you consider a transparent and fair process in regard to home assessment, and how might you implement these ideas in your modeling process?

`TODO:` *Write your answer here*

# Part 7: Approaching Fairness through Transparency

In this homework, the validity of your model’s assessments is determined by how closely your test set’s results align with the CCAO’s residential sales dataset. The CCAO, however, does not have an autograder to check its work; its predictions, after all, draw on this dataset to re-establish the standard for fair and accurate property values throughout the triennial assessment period. Instead, the Office champions transparency as a guiding principle in regard to fairness.

### Transparency and the CCAO

After a lawsuit was filed against the CCAO for producing [“racially discriminatory assessments and taxes,"](https://harris.uchicago.edu/news-events/news/prof-chris-berry-testifies-institutional-racism-cook-county-property-taxes) the Office decided to tackle these inequities by committing to transparency initiatives. The hope was that, by exposing the inner workings of the CCAO’s property valuation process, their assessment results could be publicly verified as accurate and therefore trusted to be fair.
These transparency initiatives include publishing all of the CCAO’s work on [GitLab](https://gitlab.com/ccao-data-science---modeling). By allowing the public to access any updates to the system in real-time, the Office argues that they increase accessibility to a process that had previously been blackboxed - obscured and hidden - from the public. Empowered by transparency, the citizens of Cook County would ideally hold the Assessor’s Office accountable for their work, redistributing the balance of power between the CCAO and its constituents. In this scenario, transparency would thus contribute to the legitimacy and fairness of the Office’s property assessments.

Additionally, these measures were, in part, developed to push back against the inequities of the tax lawyer industry. Because hiring a tax lawyer to negotiate for lower valuations (and therefore taxes) is limited to the wealthy, property owners with a lower socioeconomic status paid a disproportionate amount of tax. However, because the CCAO’s assessment process is now public, tax lawyers can only contest the CCAO’s work through technical means - in other words, tax lawyers must abide by the rules set by the CCAO. In this way, the transparency initiatives aim to shift the balance of power in property assessment and taxes away from the tax lawyer industry.

## HCE: Question 8

How do the CCAO’s transparency initiatives aim to redistribute power between the tax lawyer industry, the CCAO, and the constituents of Cook County?

`TODO:` *Write your answer here*

With these balances of power in mind, the next step is to critically examine the effectiveness and reach of the CCAO’s transparency initiatives. We can assess them in three ways:

* Accessibility - Are there barriers or limits to participating in the transparency initiatives implemented by the CCAO? More specifically, who can and cannot interact with and understand the CCAO’s published code on GitLab?
* Explainability - What efforts has the CCAO made to effectively communicate information about their assessment process? To what extent do they elaborate on the documentation of their GitLab repository?
* Accountability - In what ways can the CCAO be held accountable through their transparency initiatives? Are there barriers to who can hold them accountable?

As you may have noticed, these terms are closely linked to one another. We’ll now examine these standards for transparency by diving deeper into the CCAO’s assessment process.

According to the CCAO’s [Progress Report on Implementation of 100 Day Objectives](https://gitlab.com/ccao-data-science---modeling/ccao_sf_cama_dev/-/blob/master/documentation/Progress%20Report%20on%20Implementation%20of%20100%20Day%20Objectives.pdf), their assessment system operates beyond a single model or series of models: It allows “any number of models to be specified and runs all of them (...) Each model is subjected to a battery of tests as outlined by the International Association of Assessing Officers. The algorithm then recommends a model based on its performance on these tests.”

This complex modeling, coupled with real estate-based data collection practices, enables the CCAO to leverage their data scientists’ expertise in their final assessments. It’s important to acknowledge this power dynamic because it directly interacts with the notion of transparency in the assessment system. Let’s start with accessibility.
To be frank, the CCAO’s algorithmic modeling system is almost completely inaccessible to the everyday person, despite its viewability on GitLab. Understanding code - much less algorithms and statistical models written in code - is a high barrier to entry, severely restricting accessibility on the basis of expertise. This is where the next metric for transparency, explainability, comes in.

## HCE: Question 9

Take a look at the Residential Automated Valuation Model files under the Models subgroup in the CCAO’s [GitLab](https://gitlab.com/ccao-data-science---modeling). Without directly looking at any code, does the documentation sufficiently explain how the residential valuation model works?

`TODO:` *Write your answer here*

With the barrier of expertise, explanations are perhaps the only way that the CCAO can bridge the technical gaps throughout the assessment process. However, as you might’ve noticed, these explanations are also lacking.

That leaves the final piece of the puzzle - the ideal product of transparency, accountability. Without any measures to expand the accessibility and explainability of their algorithmic modeling, the prospects of holding the CCAO accountable for their assessments are dim. The level of expertise needed to check the CCAO’s systems excludes participatory voices from the community. Even community members who have valuable insights into measures of inequity - itself another form of expertise - cannot engage with technical expertise in data and modeling.

With all that said, it’s imperative to acknowledge that the CCAO is a government institution. Although their work relies on technical expertise, anyone who wishes to critique - or even just interact with - the assessment pipeline should have the means to; all of Cook County’s constituents are affected by property assessments, and they should not be required to have a technical background in order to participate in a technical space.

## HCE: Question 10

In what ways does the CCAO’s implementation of transparency fail? What aspects of it are inaccessible and to whom? Consider the concepts of expertise and power.

`TODO:` *Write your answer here*

So what role does transparency really play in relation to fairness? Given its limitations in regard to accessibility, the CCAO’s transparency initiatives can come across as merely performative. That said, they certainly still demonstrate the Office’s commitment to fairness and equity, performing the social function of instilling faith in its work. And while there is validity in fostering trust with the local community as a governing institution, it’s nonetheless vital to understand what this faith is built on: The CCAO operates its assessment model by leveraging its expertise, and transparency is then used to reinforce the legitimacy of the CCAO’s work by exemplifying a gesture of good will. Through this process, the CCAO’s power to determine property assessments and taxes is maintained.

## HCE: Question 11

How does the CCAO maintain its power in housing assessments and its open-data initiative?

`TODO:` *Write your answer here*

Fairness, in the end, is as difficult to define as it is to implement. What you make of this process - from the ingrained bias throughout the data lifecycle to the role of transparency in the CCAO’s work - is for your consideration. Regardless, it raises several challenging but pressing questions: How can we envision fair and equitable data science beyond transparency?
How can we go one step further and incorporate justice in data science?

The Cook County Assessor’s Office is just one case study. Although it is unique in being the first Assessor’s Office to publicly publish its assessment model and data, it is not exempt from the same scrutiny and critique that other public and private institutions receive. Consider how expertise serves as the baseline and standard for fairness - and consider how data science becomes authoritative despite (or, perhaps, because of) its many exceptions, parameters, and metrics. Continue to reflect on these concepts in your data science work as you learn more and more.

## Before You Submit

Make sure that if you run Kernel > Restart & Run All, your notebook produces the expected outputs for each cell. Congratulations on finishing the assignment!

# Submit

Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. **Please save before submitting!**

<!-- EXPECT 4 EXPORTED QUESTIONS -->

```python
# Save your notebook first, then run this cell to submit.
import jassign.to_pdf
jassign.to_pdf.generate_pdf('hw6.ipynb', 'hw6.pdf')
ok.submit()
```
rm(list=ls())

library(raster)
library(tidyverse)
library(readxl)

######
# FUNCTIONS

extractXYZ <- function(raster, nodata = FALSE, addCellID = TRUE){

  vals <- raster::extract(raster, 1:ncell(raster))   # specify raster:: otherwise dplyr's extract is used
  xys <- rowColFromCell(raster, 1:ncell(raster))
  combine <- cbind(xys, vals)

  if(addCellID){
    combine <- cbind(1:length(combine[,1]), combine)
  }

  if(!nodata){
    combine <- combine[!rowSums(!is.finite(combine)),]   # from https://stackoverflow.com/a/15773560
  }

  return(combine)
}

getLCs <- function(data) {
  # calculates proportion of each LC in the muni (ignoring NAs, help from https://stackoverflow.com/a/44290753)
  data %>%
    group_by(muniID) %>%
    dplyr::summarise(LC1 = round(sum(lc == 1, na.rm = T) / sum(!is.na(lc)), 3),
                     LC2 = round(sum(lc == 2, na.rm = T) / sum(!is.na(lc)), 3),
                     LC3 = round(sum(lc == 3, na.rm = T) / sum(!is.na(lc)), 3),
                     LC4 = round(sum(lc == 4, na.rm = T) / sum(!is.na(lc)), 3),
                     LC5 = round(sum(lc == 5, na.rm = T) / sum(!is.na(lc)), 3),
                     NonNAs = sum(!is.na(lc)),
                     NAs = sum(is.na(lc))
    ) -> LCs

  return(LCs)
}

######

years <- seq(2001, 2018, 1)

input_path <- "C:/Users/k1076631/Google Drive/Shared/Crafty Telecoupling/Data/"

# load the rasters
munis.r <- raster(paste0(input_path, "CRAFTYInput/Data/sim10_BRmunis_latlon_5km.asc"))

lcname <- paste0(input_path, "CRAFTYInput/Data/ObservedLCmaps/LandCover2015_PastureB_Disagg.asc")
lc <- raster(lcname)
lc.t <- extractXYZ(lc, addCellID = F)

state_weights <- read_excel(paste0(input_path, "CRAFTYInput/Data/HumanDev/StateWeightedCaps.xlsx"), sheet = "Human", range = "A1:U11", col_names = T)
state_weights <- state_weights %>% mutate_if(is.numeric, round, digits = 3)

munis.t <- extractXYZ(munis.r, addCellID = F)
munis.t <- as.data.frame(munis.t)
munis.t <- plyr::rename(munis.t, c("vals" = "muniID"))

# set NA in both rasters
lc[is.na(munis.r)] <- NA
munis.r[is.na(lc)] <- NA

lc_munis <- left_join(as.data.frame(munis.t), as.data.frame(lc.t), by = c("row" = "row", "col" = "col"))

# add state ID and label
lc_munis <- lc_munis %>%
  mutate(state = (muniID %/% 100000)) %>%
  mutate(state = if_else(state == 17, "TO",
                 if_else(state == 29, "BA",
                 if_else(state == 31, "MG",
                 if_else(state == 35, "SP",
                 if_else(state == 41, "PR",
                 if_else(state == 42, "SC",
                 if_else(state == 43, "RS",
                 if_else(state == 50, "MS",
                 if_else(state == 51, "MT",
                 if_else(state == 52, "GO", "NA"
                 )))))))))))

new_munis <- left_join(lc_munis, state_weights, by = c("state" = "state"))

# set up an empty raster with the same extent as the inputs (with help from
# https://gis.stackexchange.com/questions/250149/assign-values-to-a-subset-of-cells-of-a-raster)
empty.r <- raster(munis.r)
empty.r[] <- NA_real_

cells <- cellFromRowCol(empty.r, new_munis$row, new_munis$col)

for(year in years){
  year_dat <- new_munis %>% select(paste0(year))   # this year's column, named e.g. "2001"
  final.r <- empty.r
  final.r[cells] <- year_dat[[1]]
  # plot(final.r, main = paste0(year))
  writeRaster(final.r, paste0(input_path, "CRAFTYInput/Data/HumanDev/HumanCapital", year, ".asc"), format = 'ascii', overwrite = T)
}
// Copyright 2020-2022 The Defold Foundation // Copyright 2014-2020 King // Copyright 2009-2014 Ragnar Svensson, Christian Murray // Licensed under the Defold License version 1.0 (the "License"); you may not use // this file except in compliance with the License. // // You may obtain a copy of the License, together with FAQs at // https://www.defold.com/license // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "render_script.h" #include <string.h> #include <new> #include <dlib/dstrings.h> #include <dlib/log.h> #include <dlib/hash.h> #include <dlib/message.h> #include <dlib/profile.h> #include <script/script.h> #include <script/lua_source_ddf.h> #include "font_renderer.h" #include "render/render_ddf.h" namespace dmRender { /*# Rendering API documentation * * Rendering functions, messages and constants. The "render" namespace is * accessible only from render scripts. * * The rendering API was originally built on top of OpenGL ES 2.0, and it uses a subset of the * OpenGL computer graphics rendering API for rendering 2D and 3D computer * graphics. Our current target is OpenGLES 3.0 with fallbacks to 2.0 on some platforms. * * [icon:attention] It is possible to create materials and write shaders that * require features not in OpenGL ES 2.0, but those will not work cross platform. * * @document * @name Render * @namespace render */ #define RENDER_SCRIPT_INSTANCE "RenderScriptInstance" #define RENDER_SCRIPT "RenderScript" #define RENDER_SCRIPT_CONSTANTBUFFER "RenderScriptConstantBuffer" #define RENDER_SCRIPT_PREDICATE "RenderScriptPredicate" #define RENDER_SCRIPT_LIB_NAME "render" #define RENDER_SCRIPT_FORMAT_NAME "format" #define RENDER_SCRIPT_WIDTH_NAME "width" #define RENDER_SCRIPT_HEIGHT_NAME "height" #define RENDER_SCRIPT_MIN_FILTER_NAME "min_filter" #define RENDER_SCRIPT_MAG_FILTER_NAME "mag_filter" #define RENDER_SCRIPT_U_WRAP_NAME "u_wrap" #define RENDER_SCRIPT_V_WRAP_NAME "v_wrap" static uint32_t RENDER_SCRIPT_TYPE_HASH = 0; static uint32_t RENDER_SCRIPT_INSTANCE_TYPE_HASH = 0; static uint32_t RENDER_SCRIPT_CONSTANTBUFFER_TYPE_HASH = 0; static uint32_t RENDER_SCRIPT_PREDICATE_TYPE_HASH = 0; const char* RENDER_SCRIPT_FUNCTION_NAMES[MAX_RENDER_SCRIPT_FUNCTION_COUNT] = { "init", "update", "on_message", "on_reload" }; static HNamedConstantBuffer* RenderScriptConstantBuffer_Check(lua_State *L, int index) { return (HNamedConstantBuffer*)dmScript::CheckUserType(L, index, RENDER_SCRIPT_CONSTANTBUFFER_TYPE_HASH, "Expected a constant buffer (acquired from a render.* function)"); } static int RenderScriptConstantBuffer_gc (lua_State *L) { HNamedConstantBuffer* cb = (HNamedConstantBuffer*)lua_touserdata(L, 1); DeleteNamedConstantBuffer(*cb); *cb = 0; return 0; } static int RenderScriptConstantBuffer_tostring (lua_State *L) { lua_pushfstring(L, "ConstantBuffer: %p", lua_touserdata(L, 1)); return 1; } static int RenderScriptConstantBuffer_index(lua_State *L) { HNamedConstantBuffer* cb = (HNamedConstantBuffer*)lua_touserdata(L, 1); assert(cb); const char* name = luaL_checkstring(L, 2); dmhash_t name_hash = dmHashString64(name); dmVMath::Vector4* values; uint32_t num_values = 0; if (GetNamedConstant(*cb, name_hash, &values, &num_values)) { dmScript::PushVector4(L, values[0]); return 1; } else { return luaL_error(L, "Constant %s 
not set.", dmHashReverseSafe64(name_hash)); } return 0; } static int RenderScriptConstantBuffer_newindex(lua_State *L) { int top = lua_gettop(L); HNamedConstantBuffer* cb = (HNamedConstantBuffer*)lua_touserdata(L, 1); assert(cb); const char* name = luaL_checkstring(L, 2); dmhash_t name_hash = dmHashString64(name); dmVMath::Vector4* value = dmScript::CheckVector4(L, 3); SetNamedConstant(*cb, name_hash, value, 1); assert(top == lua_gettop(L)); return 0; } static const luaL_reg RenderScriptConstantBuffer_methods[] = { {0,0} }; static const luaL_reg RenderScriptConstantBuffer_meta[] = { {"__gc", RenderScriptConstantBuffer_gc}, {"__tostring", RenderScriptConstantBuffer_tostring}, {"__index", RenderScriptConstantBuffer_index}, {"__newindex", RenderScriptConstantBuffer_newindex}, {0, 0} }; static HPredicate* RenderScriptPredicate_Check(lua_State *L, int index) { return (HPredicate*)dmScript::CheckUserType(L, index, RENDER_SCRIPT_PREDICATE_TYPE_HASH, "Expected a render predicate (acquired from the render.predicate function)"); } static int RenderScriptPredicate_gc (lua_State *L) { HPredicate* p = (HPredicate*)lua_touserdata(L, 1); DeletePredicate(*p); *p = 0; return 0; } static int RenderScriptPredicate_tostring (lua_State *L) { lua_pushfstring(L, "Predicate: %p", lua_touserdata(L, 1)); return 1; } static const luaL_reg RenderScriptPredicate_methods[] = { {0,0} }; static const luaL_reg RenderScriptPredicate_meta[] = { {"__gc", RenderScriptPredicate_gc}, {"__tostring", RenderScriptPredicate_tostring}, {0, 0} }; /*# create a new constant buffer. * * Constant buffers are used to set shader program variables and are optionally passed to the `render.draw()` function. The buffer's constant elements can be indexed like an ordinary Lua table, but you can't iterate over them with pairs() or ipairs(). 
* * @name render.constant_buffer * @return buffer [type:constant_buffer] new constant buffer * @examples * * Set a "tint" constant in a constant buffer in the render script: * * ```lua * local constants = render.constant_buffer() * constants.tint = vmath.vector4(1, 1, 1, 1) * ``` * * Then use the constant buffer when drawing a predicate: * * ```lua * render.draw(self.my_pred, constants) * ``` */ int RenderScript_ConstantBuffer(lua_State* L) { int top = lua_gettop(L); (void) top; HNamedConstantBuffer* p_buffer = (HNamedConstantBuffer*) lua_newuserdata(L, sizeof(HNamedConstantBuffer*)); *p_buffer = NewNamedConstantBuffer(); luaL_getmetatable(L, RENDER_SCRIPT_CONSTANTBUFFER); lua_setmetatable(L, -2); assert(top + 1 == lua_gettop(L)); return 1; } static int RenderScriptGetURL(lua_State* L) { RenderScript* script = (RenderScript*)lua_touserdata(L, 1); dmMessage::URL url; dmMessage::ResetURL(&url); url.m_Socket = script->m_RenderContext->m_Socket; dmScript::PushURL(L, url); return 1; } static int RenderScriptResolvePath(lua_State* L) { dmScript::PushHash(L, dmHashString64(luaL_checkstring(L, 2))); return 1; } static int RenderScriptIsValid(lua_State* L) { RenderScript* script = (RenderScript*)lua_touserdata(L, 1); lua_pushboolean(L, script != 0x0); return 1; } static const luaL_reg RenderScript_methods[] = { {0,0} }; static const luaL_reg RenderScript_meta[] = { {dmScript::META_TABLE_GET_URL, RenderScriptGetURL}, {dmScript::META_TABLE_RESOLVE_PATH, RenderScriptResolvePath}, {dmScript::META_TABLE_IS_VALID, RenderScriptIsValid}, {0, 0} }; static RenderScriptInstance* RenderScriptInstance_Check(lua_State *L, int index) { return (RenderScriptInstance*)dmScript::CheckUserType(L, index, RENDER_SCRIPT_INSTANCE_TYPE_HASH, "You can only access render.* functions and values from a render script instance (.render_script file)"); } static RenderScriptInstance* RenderScriptInstance_Check(lua_State *L) { int top = lua_gettop(L); (void) top; dmScript::GetInstance(L); RenderScriptInstance* i = RenderScriptInstance_Check(L, -1); lua_pop(L, 1); assert(top == lua_gettop(L)); return i; } static int RenderScriptInstance_tostring (lua_State *L) { lua_pushfstring(L, "RenderScript: %p", lua_touserdata(L, 1)); return 1; } static int RenderScriptInstance_index(lua_State *L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L, 1); assert(i); // Try to find value in instance data lua_rawgeti(L, LUA_REGISTRYINDEX, i->m_RenderScriptDataReference); lua_pushvalue(L, 2); lua_gettable(L, -2); lua_remove(L, 3); assert(top + 1 == lua_gettop(L)); return 1; } static int RenderScriptInstance_newindex(lua_State *L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L, 1); assert(i); lua_rawgeti(L, LUA_REGISTRYINDEX, i->m_RenderScriptDataReference); lua_pushvalue(L, 2); lua_pushvalue(L, 3); lua_settable(L, -3); lua_pop(L, 1); assert(top == lua_gettop(L)); return 0; } static int RenderScriptInstanceGetURL(lua_State* L) { RenderScriptInstance* i = (RenderScriptInstance*)lua_touserdata(L, 1); dmMessage::URL url; dmMessage::ResetURL(&url); url.m_Socket = i->m_RenderContext->m_Socket; dmScript::PushURL(L, url); return 1; } static int RenderScriptInstanceResolvePath(lua_State* L) { dmScript::PushHash(L, dmHashString64(luaL_checkstring(L, 2))); return 1; } static int RenderScriptInstanceIsValid(lua_State* L) { RenderScriptInstance* i = (RenderScriptInstance*)lua_touserdata(L, 1); lua_pushboolean(L, i != 0x0 && i->m_RenderContext != 0x0); return 1; } static int 
RenderScriptGetInstanceContextTableRef(lua_State* L) { DM_LUA_STACK_CHECK(L, 1); const int self_index = 1; RenderScriptInstance* i = (RenderScriptInstance*)lua_touserdata(L, self_index); lua_pushnumber(L, i ? i->m_ContextTableReference : LUA_NOREF); return 1; } static const luaL_reg RenderScriptInstance_methods[] = { {0,0} }; static const luaL_reg RenderScriptInstance_meta[] = { {"__tostring", RenderScriptInstance_tostring}, {"__index", RenderScriptInstance_index}, {"__newindex", RenderScriptInstance_newindex}, {dmScript::META_TABLE_GET_URL, RenderScriptInstanceGetURL}, {dmScript::META_TABLE_RESOLVE_PATH, RenderScriptInstanceResolvePath}, {dmScript::META_TABLE_IS_VALID, RenderScriptInstanceIsValid}, {dmScript::META_GET_INSTANCE_CONTEXT_TABLE_REF, RenderScriptGetInstanceContextTableRef}, {0, 0} }; bool InsertCommand(RenderScriptInstance* i, const Command& command) { if (i->m_CommandBuffer.Full()) return false; else i->m_CommandBuffer.Push(command); return true; } /*# * @name render.STATE_DEPTH_TEST * @variable */ /*# * @name render.STATE_STENCIL_TEST * @variable */ /*# * @name render.STATE_BLEND * @variable */ /*# * @name render.STATE_CULL_FACE * @variable */ /*# * @name render.STATE_POLYGON_OFFSET_FILL * @variable */ /*# enables a render state * * Enables a particular render state. The state will be enabled until disabled. * * @name render.enable_state * @param state [type:constant] state to enable * * - `render.STATE_DEPTH_TEST` * - `render.STATE_STENCIL_TEST` * - `render.STATE_BLEND` * - `render.STATE_ALPHA_TEST` ([icon:iOS][icon:android] not available on iOS and Android) * - `render.STATE_CULL_FACE` * - `render.STATE_POLYGON_OFFSET_FILL` * * @examples * * Enable stencil test when drawing the gui predicate, then disable it: * * ```lua * render.enable_state(render.STATE_STENCIL_TEST) * render.draw(self.gui_pred) * render.disable_state(render.STATE_STENCIL_TEST) * ``` */ int RenderScript_EnableState(lua_State* L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t state = luaL_checknumber(L, 1); if (state != dmGraphics::STATE_DEPTH_TEST && state != dmGraphics::STATE_STENCIL_TEST && state != dmGraphics::STATE_ALPHA_TEST && state != dmGraphics::STATE_BLEND && state != dmGraphics::STATE_CULL_FACE && state != dmGraphics::STATE_POLYGON_OFFSET_FILL) { return luaL_error(L, "Invalid state: %s.enable_state(%d).", RENDER_SCRIPT_LIB_NAME, state); } if (InsertCommand(i, Command(COMMAND_TYPE_ENABLE_STATE, state))) { assert(top == lua_gettop(L)); return 0; } else { return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } } /*# disables a render state * * Disables a render state. 
* * @name render.disable_state * @param state [type:constant] state to disable * * - `render.STATE_DEPTH_TEST` * - `render.STATE_STENCIL_TEST` * - `render.STATE_BLEND` * - `render.STATE_ALPHA_TEST` ([icon:iOS][icon:android] not available on iOS and Android) * - `render.STATE_CULL_FACE` * - `render.STATE_POLYGON_OFFSET_FILL` * * @examples * Disable face culling when drawing the tile predicate: * * ```lua * render.disable_state(render.STATE_CULL_FACE) * render.draw(self.tile_pred) * ``` */ int RenderScript_DisableState(lua_State* L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t state = luaL_checknumber(L, 1); if (state != dmGraphics::STATE_DEPTH_TEST && state != dmGraphics::STATE_STENCIL_TEST && state != dmGraphics::STATE_ALPHA_TEST && state != dmGraphics::STATE_BLEND && state != dmGraphics::STATE_CULL_FACE && state != dmGraphics::STATE_POLYGON_OFFSET_FILL) { return luaL_error(L, "Invalid state: %s.disable_state(%d).", RENDER_SCRIPT_LIB_NAME, state); } if (InsertCommand(i, Command(COMMAND_TYPE_DISABLE_STATE, state))) { assert(top == lua_gettop(L)); return 0; } else { return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } } /*# sets the render viewport * * Set the render viewport to the specified rectangle. * * @name render.set_viewport * @param x [type:number] left corner * @param y [type:number] bottom corner * @param width [type:number] viewport width * @param height [type:number] viewport height * @examples * * ```lua * -- Set the viewport to the window dimensions. * render.set_viewport(0, 0, render.get_window_width(), render.get_window_height()) * ``` */ int RenderScript_SetViewport(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); int32_t x = luaL_checknumber(L, 1); int32_t y = luaL_checknumber(L, 2); int32_t width = luaL_checknumber(L, 3); int32_t height = luaL_checknumber(L, 4); if (InsertCommand(i, Command(COMMAND_TYPE_SET_VIEWPORT, x, y, width, height))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# * @name render.FORMAT_LUMINANCE * @variable */ /*# * @name render.FORMAT_RGB * @variable */ /*# * @name render.FORMAT_RGBA * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RGB16F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RGB32F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RGBA16F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RGBA32F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_R16F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RG16F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_R32F * @variable */ /*# * May be nil if the format isn't supported * @name render.FORMAT_RG32F * @variable */ /*# * @name render.FORMAT_DEPTH * @variable */ /*# * @name render.FORMAT_STENCIL * @variable */ /*# * @name render.FILTER_LINEAR * @variable */ /*# * @name render.FILTER_NEAREST * @variable */ /*# * @name render.WRAP_CLAMP_TO_BORDER * @variable */ /*# * @name render.WRAP_CLAMP_TO_EDGE * @variable */ /*# * @name render.WRAP_MIRRORED_REPEAT * @variable */ /*# * @name render.WRAP_REPEAT * @variable */ /*# creates a new render target * Creates a new render target according to the supplied * specification table. 
* * The table should contain keys specifying which buffers should be created * with what parameters. Each buffer key should have a table value consisting * of parameters. The following parameter keys are available: * * Key | Values * ------------ | ---------------------------- * `format` | `render.FORMAT_LUMINANCE`<br/>`render.FORMAT_RGB`<br/>`render.FORMAT_RGBA`<br/>`render.FORMAT_DEPTH`<br/>`render.FORMAT_STENCIL`<br/>`render.FORMAT_RGBA32F`<br/>`render.FORMAT_RGBA16F`<br/> * `width` | number * `height` | number * `min_filter` | `render.FILTER_LINEAR`<br/>`render.FILTER_NEAREST` * `mag_filter` | `render.FILTER_LINEAR`<br/>`render.FILTER_NEAREST` * `u_wrap` | `render.WRAP_CLAMP_TO_BORDER`<br/>`render.WRAP_CLAMP_TO_EDGE`<br/>`render.WRAP_MIRRORED_REPEAT`<br/>`render.WRAP_REPEAT`<br/> * `v_wrap` | `render.WRAP_CLAMP_TO_BORDER`<br/>`render.WRAP_CLAMP_TO_EDGE`<br/>`render.WRAP_MIRRORED_REPEAT`<br/>`render.WRAP_REPEAT` * * @name render.render_target * @param name [type:string] render target name * @param parameters [type:table] table of buffer parameters, see the description for available keys and values * @return render_target [type:render_target] new render target * @examples * * How to create a new render target and draw to it: * * ```lua * function init(self) * -- render target buffer parameters * local color_params = { format = render.FORMAT_RGBA, * width = render.get_window_width(), * height = render.get_window_height(), * min_filter = render.FILTER_LINEAR, * mag_filter = render.FILTER_LINEAR, * u_wrap = render.WRAP_CLAMP_TO_EDGE, * v_wrap = render.WRAP_CLAMP_TO_EDGE } * local depth_params = { format = render.FORMAT_DEPTH, * width = render.get_window_width(), * height = render.get_window_height(), * u_wrap = render.WRAP_CLAMP_TO_EDGE, * v_wrap = render.WRAP_CLAMP_TO_EDGE } * self.my_render_target = render.render_target({[render.BUFFER_COLOR_BIT] = color_params, [render.BUFFER_DEPTH_BIT] = depth_params }) * end * * function update(self, dt) * -- enable target so all drawing is done to it * render.enable_render_target(self.my_render_target) * * -- draw a predicate to the render target * render.draw(self.my_pred) * end * ``` * */ int RenderScript_RenderTarget(lua_State* L) { int top = lua_gettop(L); (void)top; RenderScriptInstance* i = RenderScriptInstance_Check(L); // Legacy support int table_index = 2; if (lua_istable(L, 1)) { table_index = 1; } const char* required_keys[] = { "format", "width", "height" }; uint32_t buffer_type_flags = 0; uint32_t max_tex_size = dmGraphics::GetMaxTextureSize(i->m_RenderContext->m_GraphicsContext); luaL_checktype(L, table_index, LUA_TTABLE); dmGraphics::TextureCreationParams creation_params[dmGraphics::MAX_BUFFER_TYPE_COUNT]; dmGraphics::TextureParams params[dmGraphics::MAX_BUFFER_TYPE_COUNT]; lua_pushnil(L); while (lua_next(L, table_index)) { bool required_found[] = { false, false, false }; uint32_t buffer_type = (uint32_t)luaL_checknumber(L, -2); buffer_type_flags |= buffer_type; uint32_t index = dmGraphics::GetBufferTypeIndex((dmGraphics::BufferType)buffer_type); dmGraphics::TextureParams* p = &params[index]; dmGraphics::TextureCreationParams* cp = &creation_params[index]; luaL_checktype(L, -1, LUA_TTABLE); lua_pushnil(L); // Verify that required keys are supplied while (lua_next(L, -2)) { const char* key = luaL_checkstring(L, -2); for (uint32_t i = 0; i < sizeof(required_found) / sizeof(required_found[0]); ++i) { if (strncmp(key, required_keys[i], strlen(required_keys[i])) == 0) { required_found[i] = true; } } lua_pop(L, 1); } for (uint32_t i = 0; i < 
sizeof(required_found) / sizeof(required_found[0]); ++i) { if (!required_found[i]) { return luaL_error(L, "Required parameter key not found: '%s'", required_keys[i]); } } lua_pushnil(L); while (lua_next(L, -2)) { const char* key = luaL_checkstring(L, -2); if (lua_isnil(L, -1) != 0) { return luaL_error(L, "nil value supplied to %s.render_target: %s.", RENDER_SCRIPT_LIB_NAME, key); } if (strncmp(key, RENDER_SCRIPT_FORMAT_NAME, strlen(RENDER_SCRIPT_FORMAT_NAME)) == 0) { p->m_Format = (dmGraphics::TextureFormat)(int)luaL_checknumber(L, -1); if(buffer_type == dmGraphics::BUFFER_TYPE_DEPTH_BIT) { if(p->m_Format != dmGraphics::TEXTURE_FORMAT_DEPTH) { return luaL_error(L, "The only valid format for depth buffers is FORMAT_DEPTH."); } } if(buffer_type == dmGraphics::BUFFER_TYPE_STENCIL_BIT) { if(p->m_Format != dmGraphics::TEXTURE_FORMAT_STENCIL) { return luaL_error(L, "The only valid format for stencil buffers is FORMAT_STENCIL."); } } } else if (strncmp(key, RENDER_SCRIPT_WIDTH_NAME, strlen(RENDER_SCRIPT_WIDTH_NAME)) == 0) { p->m_Width = luaL_checknumber(L, -1); cp->m_Width = p->m_Width; } else if (strncmp(key, RENDER_SCRIPT_HEIGHT_NAME, strlen(RENDER_SCRIPT_HEIGHT_NAME)) == 0) { p->m_Height = luaL_checknumber(L, -1); cp->m_Height = p->m_Height; } else if (strncmp(key, RENDER_SCRIPT_MIN_FILTER_NAME, strlen(RENDER_SCRIPT_MIN_FILTER_NAME)) == 0) { p->m_MinFilter = (dmGraphics::TextureFilter)(int)luaL_checknumber(L, -1); } else if (strncmp(key, RENDER_SCRIPT_MAG_FILTER_NAME, strlen(RENDER_SCRIPT_MAG_FILTER_NAME)) == 0) { p->m_MagFilter = (dmGraphics::TextureFilter)(int)luaL_checknumber(L, -1); } else if (strncmp(key, RENDER_SCRIPT_U_WRAP_NAME, strlen(RENDER_SCRIPT_U_WRAP_NAME)) == 0) { p->m_UWrap = (dmGraphics::TextureWrap)(int)luaL_checknumber(L, -1); } else if (strncmp(key, RENDER_SCRIPT_V_WRAP_NAME, strlen(RENDER_SCRIPT_V_WRAP_NAME)) == 0) { p->m_VWrap = (dmGraphics::TextureWrap)(int)luaL_checknumber(L, -1); } else { lua_pop(L, 2); assert(top == lua_gettop(L)); return luaL_error(L, "Unknown key supplied to %s.rendertarget: %s. Available keys are: %s, %s, %s, %s, %s, %s, %s.", RENDER_SCRIPT_LIB_NAME, key, RENDER_SCRIPT_FORMAT_NAME, RENDER_SCRIPT_WIDTH_NAME, RENDER_SCRIPT_HEIGHT_NAME, RENDER_SCRIPT_MIN_FILTER_NAME, RENDER_SCRIPT_MAG_FILTER_NAME, RENDER_SCRIPT_U_WRAP_NAME, RENDER_SCRIPT_V_WRAP_NAME); } lua_pop(L, 1); } lua_pop(L, 1); if (creation_params[index].m_Width > max_tex_size || creation_params[index].m_Height > max_tex_size) { lua_pop(L, 1); assert(top == lua_gettop(L)); return luaL_error(L, "Render target (type %s) of width %d and height %d is greater than max supported texture size %d for this platform.", dmGraphics::GetBufferTypeLiteral((dmGraphics::BufferType)buffer_type), creation_params[index].m_Width, creation_params[index].m_Height, max_tex_size); } } dmGraphics::HRenderTarget render_target = dmGraphics::NewRenderTarget(i->m_RenderContext->m_GraphicsContext, buffer_type_flags, creation_params, params); lua_pushlightuserdata(L, (void*)render_target); assert(top + 1 == lua_gettop(L)); return 1; } /*# deletes a render target * * Deletes a previously created render target. 
* * @name render.delete_render_target * @param render_target [type:render_target] render target to delete * @examples * * How to delete a render target: * * ```lua * render.delete_render_target(self.my_render_target) * ``` */ int RenderScript_DeleteRenderTarget(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; dmGraphics::HRenderTarget render_target = 0x0; if (lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); } if (render_target == 0x0) return luaL_error(L, "Invalid render target (nil) supplied to %s.delete_render_target.", RENDER_SCRIPT_LIB_NAME); dmGraphics::DeleteRenderTarget(render_target); return 0; } /*# * @name render.RENDER_TARGET_DEFAULT * @variable */ /*# sets a render target * * Sets a render target. Subsequent draw operations will be to the * render target until it is replaced by a subsequent call to set_render_target. * * @name render.set_render_target * @param render_target [type:render_target] render target to set. render.RENDER_TARGET_DEFAULT to set the default render target * @param [options] [type:table] optional table with behaviour parameters * * `transient` * : [type:table] Transient frame buffer types are only valid while the render target is active, i.e. they become undefined when a new target is set by a subsequent call to set_render_target. * Default is all non-transient. Be aware that some hardware uses a combined depth stencil buffer and when this is the case both are considered non-transient if exclusively selected! * A buffer type defined that doesn't exist in the render target is silently ignored. * * - `render.BUFFER_COLOR_BIT` * - `render.BUFFER_DEPTH_BIT` * - `render.BUFFER_STENCIL_BIT` * * @examples * * How to set a render target, draw to it, and then switch back to the default render target. * The render target defines the depth/stencil buffers as transient; when set_render_target is called the next time, the buffers may be invalidated, allowing for optimisations depending on driver support. * * ```lua * function update(self, dt) * -- set render target so all drawing is done to it * render.set_render_target(self.my_render_target, { transient = { render.BUFFER_DEPTH_BIT, render.BUFFER_STENCIL_BIT } } ) * * -- draw a predicate to the render target * render.draw(self.my_pred) * * -- set default render target. 
This also invalidates the depth and stencil buffers of the current target (self.my_render_target) * -- which can be an optimisation on some hardware * render.set_render_target(render.RENDER_TARGET_DEFAULT) * * end * ``` */ int RenderScript_SetRenderTarget(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmGraphics::HRenderTarget render_target = 0x0; DM_LUA_STACK_CHECK(L, 0); if (lua_gettop(L) > 0) { if(lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); } else { if(!lua_isnil(L, 1) && luaL_checkint(L, 1) != 0) { return luaL_error(L, "Invalid render target supplied to %s.set_render_target.", RENDER_SCRIPT_LIB_NAME); } } } uint32_t transient_buffer_types = 0; if (lua_gettop(L) > 1) { luaL_checktype(L, 2, LUA_TTABLE); lua_pushvalue(L, 2); lua_getfield(L, -1, "transient"); if(!lua_isnil(L, -1)) { lua_pushnil(L); while (lua_next(L, -2)) { transient_buffer_types |= luaL_checkint(L, -1); lua_pop(L, 1); } } lua_pop(L, 2); } if (InsertCommand(i, Command(COMMAND_TYPE_SET_RENDER_TARGET, (uintptr_t)render_target, transient_buffer_types))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /* DEPRECATED. NO API DOC GENERATED. * enables a render target * * Enables a render target. Subsequent draw operations will be to the enabled render target until * a subsequent call to render.enable_render_target, render.disable_render_target or render.set_render_target. * * @name render.enable_render_target * @param render_target [type:render_target] render target to enable * * @deprecated Use render.set_render_target() instead * * @examples * * How to enable a render target and draw to it: * * ```lua * function update(self, dt) * -- enable target so all drawing is done to it * render.enable_render_target(self.my_render_target) * * -- draw a predicate to the render target * render.draw(self.my_pred) * end * ``` */ int RenderScript_EnableRenderTarget(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmGraphics::HRenderTarget render_target = 0x0; DM_LUA_STACK_CHECK(L, 0); if (lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); } if (render_target == 0x0) return luaL_error(L, "Invalid render target (nil) supplied to %s.enable_render_target.", RENDER_SCRIPT_LIB_NAME); if (InsertCommand(i, Command(COMMAND_TYPE_SET_RENDER_TARGET, (uintptr_t)render_target, 0))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /* DEPRECATED. NO API DOC GENERATED. * disables a render target * * Disables a previously enabled render target. Subsequent draw operations * will be drawn to the default frame buffer unless another render target is * enabled. 
* * @name render.disable_render_target * @param render_target [type:render_target] render target to disable * * @deprecated Use render.set_render_target() instead * * @examples * * How to disable a render target so we can draw to the screen: * * ```lua * function update(self, dt) * -- enable target so all drawing is done to it * render.enable_render_target(self.my_render_target) * * -- draw a predicate to the render target * render.draw(self.my_pred) * * -- disable target * render.disable_render_target(self.my_render_target) * * -- draw a predicate to the screen * render.draw(self.my_other_pred) * end * ``` */ int RenderScript_DisableRenderTarget(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); DM_LUA_STACK_CHECK(L, 0); if (InsertCommand(i, Command(COMMAND_TYPE_SET_RENDER_TARGET, (uintptr_t)0x0, 0))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# sets the render target size * * @name render.set_render_target_size * @param render_target [type:render_target] render target to set size for * @param width [type:number] new render target width * @param height [type:number] new render target height * @examples * * Set the render target size to the window size: * * ```lua * render.set_render_target_size(self.my_render_target, render.get_window_width(), render.get_window_height()) * ``` */ int RenderScript_SetRenderTargetSize(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; dmGraphics::HRenderTarget render_target = 0x0; if (lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); uint32_t width = luaL_checknumber(L, 2); uint32_t height = luaL_checknumber(L, 3); dmGraphics::SetRenderTargetSize(render_target, width, height); return 0; } else { return luaL_error(L, "Expected render target as the first argument to %s.set_render_target_size.", RENDER_SCRIPT_LIB_NAME); } } /*# enables a texture for a render target * * Sets the specified buffer of a render target to be used as a texture with the given unit. * A material shader can then use the texture to sample from. * * @name render.enable_texture * @param unit [type:number] texture unit to enable texture for * @param render_target [type:render_target] render target from which to enable the specified texture unit * @param buffer_type [type:constant] buffer type from which to enable the texture * * - `render.BUFFER_COLOR_BIT` * - `render.BUFFER_DEPTH_BIT` * - `render.BUFFER_STENCIL_BIT` * * @examples * * ```lua * function update(self, dt) * -- enable target so all drawing is done to it * render.enable_render_target(self.my_render_target) * * -- draw a predicate to the render target * render.draw(self.my_pred) * * -- disable target * render.disable_render_target(self.my_render_target) * * render.enable_texture(0, self.my_render_target, render.BUFFER_COLOR_BIT) * -- draw a predicate with the render target available as texture 0 in the predicate * -- material shader. 
* render.draw(self.my_pred) * end * ``` */ int RenderScript_EnableTexture(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmGraphics::HRenderTarget render_target = 0x0; uint32_t unit = luaL_checknumber(L, 1); if (lua_islightuserdata(L, 2)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 2); dmGraphics::BufferType buffer_type = (dmGraphics::BufferType)(int)luaL_checknumber(L, 3); dmGraphics::HTexture texture = dmGraphics::GetRenderTargetTexture(render_target, buffer_type); if(texture != 0) { if (InsertCommand(i, Command(COMMAND_TYPE_ENABLE_TEXTURE, unit, (uintptr_t)texture))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } return luaL_error(L, "Render target does not have a texture for the specified buffer type."); } else { return luaL_error(L, "%s.enable_texture(unit, render_target, buffer_type) called with illegal parameters.", RENDER_SCRIPT_LIB_NAME); } } /*# disables a texture for a render target * * Disables a texture unit for a render target that has previously been enabled. * * @name render.disable_texture * @param unit [type:number] texture unit to disable * @examples * * ```lua * function update(self, dt) * render.enable_texture(0, self.my_render_target, render.BUFFER_COLOR_BIT) * -- draw a predicate with the render target available as texture 0 in the predicate * -- material shader. * render.draw(self.my_pred) * -- done, disable the texture * render.disable_texture(0) * end * ``` */ int RenderScript_DisableTexture(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t unit = luaL_checknumber(L, 1); if (InsertCommand(i, Command(COMMAND_TYPE_DISABLE_TEXTURE, unit))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# retrieve the buffer width from a render target * * Returns the specified buffer width from a render target. 
* * @name render.get_render_target_width * @param render_target [type:render_target] render target from which to retrieve the buffer width * @param buffer_type [type:constant] which type of buffer to retrieve the width from * * - `render.BUFFER_COLOR_BIT` * - `render.BUFFER_DEPTH_BIT` * - `render.BUFFER_STENCIL_BIT` * * @return width [type:number] the width of the render target buffer texture * @examples * * ```lua * -- get the width of the render target color buffer * local w = render.get_render_target_width(self.target_right, render.BUFFER_COLOR_BIT) * ``` */ int RenderScript_GetRenderTargetWidth(lua_State* L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; dmGraphics::HRenderTarget render_target = 0x0; if (lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); } else { return luaL_error(L, "Expected render target as the first argument to %s.get_render_target_width.", RENDER_SCRIPT_LIB_NAME); } uint32_t buffer_type = (uint32_t)luaL_checknumber(L, 2); if (buffer_type != dmGraphics::BUFFER_TYPE_COLOR_BIT && buffer_type != dmGraphics::BUFFER_TYPE_DEPTH_BIT && buffer_type != dmGraphics::BUFFER_TYPE_STENCIL_BIT) { return luaL_error(L, "Unknown buffer type supplied to %s.get_render_target_width.", RENDER_SCRIPT_LIB_NAME); } uint32_t width, height; dmGraphics::GetRenderTargetSize(render_target, (dmGraphics::BufferType)buffer_type, width, height); lua_pushnumber(L, width); assert(top + 1 == lua_gettop(L)); return 1; } /*# retrieve a buffer height from a render target * * Returns the specified buffer height from a render target. * * @name render.get_render_target_height * @param render_target [type:render_target] render target from which to retrieve the buffer height * @param buffer_type [type:constant] which type of buffer to retrieve the height from * * - `render.BUFFER_COLOR_BIT` * - `render.BUFFER_DEPTH_BIT` * - `render.BUFFER_STENCIL_BIT` * * @return height [type:number] the height of the render target buffer texture * @examples * * ```lua * -- get the height of the render target color buffer * local h = render.get_render_target_height(self.target_right, render.BUFFER_COLOR_BIT) * ``` */ int RenderScript_GetRenderTargetHeight(lua_State* L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; dmGraphics::HRenderTarget render_target = 0x0; if (lua_islightuserdata(L, 1)) { render_target = (dmGraphics::HRenderTarget)lua_touserdata(L, 1); } else { return luaL_error(L, "Expected render target as the first argument to %s.get_render_target_height.", RENDER_SCRIPT_LIB_NAME); } uint32_t buffer_type = (uint32_t)luaL_checknumber(L, 2); if (buffer_type != dmGraphics::BUFFER_TYPE_COLOR_BIT && buffer_type != dmGraphics::BUFFER_TYPE_DEPTH_BIT && buffer_type != dmGraphics::BUFFER_TYPE_STENCIL_BIT) { return luaL_error(L, "Unknown buffer type supplied to %s.get_render_target_height.", RENDER_SCRIPT_LIB_NAME); } uint32_t width, height; dmGraphics::GetRenderTargetSize(render_target, (dmGraphics::BufferType)buffer_type, width, height); lua_pushnumber(L, height); assert(top + 1 == lua_gettop(L)); return 1; } /*# * @name render.BUFFER_COLOR_BIT * @variable */ /*# * @name render.BUFFER_DEPTH_BIT * @variable */ /*# * @name render.BUFFER_STENCIL_BIT * @variable */ /*# clears the active render target * Clear buffers in the currently enabled render target with specified value. 
* * @name render.clear * @param buffers [type:table] table with keys specifying which buffers to clear and values set to clear values. Available keys are: * * - `render.BUFFER_COLOR_BIT` * - `render.BUFFER_DEPTH_BIT` * - `render.BUFFER_STENCIL_BIT` * * @examples * * Clear the color buffer and the depth buffer. * * ```lua * render.clear({[render.BUFFER_COLOR_BIT] = vmath.vector4(0, 0, 0, 0), [render.BUFFER_DEPTH_BIT] = 1}) * ``` */ int RenderScript_Clear(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); luaL_checktype(L, 1, LUA_TTABLE); int top = lua_gettop(L); (void)top; uint32_t flags = 0; dmVMath::Vector4 color(0.0f, 0.0f, 0.0f, 0.0f); float depth = 0.0f; uint32_t stencil = 0; lua_pushnil(L); while (lua_next(L, 1)) { uint32_t buffer_type = luaL_checknumber(L, -2); flags |= buffer_type; if (buffer_type == dmGraphics::BUFFER_TYPE_COLOR_BIT) { color = *dmScript::CheckVector4(L, -1); } else if (buffer_type == dmGraphics::BUFFER_TYPE_DEPTH_BIT) { depth = (float)luaL_checknumber(L, -1); } else if (buffer_type == dmGraphics::BUFFER_TYPE_STENCIL_BIT) { stencil = (uint32_t)luaL_checknumber(L, -1); } else { lua_pop(L, 2); assert(top == lua_gettop(L)); return luaL_error(L, "Unknown buffer type supplied to %s.clear.", RENDER_SCRIPT_LIB_NAME); } lua_pop(L, 1); } assert(top == lua_gettop(L)); uint32_t clear_color = 0; clear_color |= ((uint8_t)(color.getX() * 255.0f)) << 0; clear_color |= ((uint8_t)(color.getY() * 255.0f)) << 8; clear_color |= ((uint8_t)(color.getZ() * 255.0f)) << 16; clear_color |= ((uint8_t)(color.getW() * 255.0f)) << 24; union float_to_uint32_t {float f; uint32_t i;}; float_to_uint32_t ftoi; ftoi.f = depth; if (InsertCommand(i, Command(COMMAND_TYPE_CLEAR, flags, clear_color, ftoi.i, stencil))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# draws all objects matching a predicate * Draws all objects that match a specified predicate. An optional constant buffer can be * provided to override the default constants. If no constants buffer is provided, a default * system constants buffer is used containing constants as defined in materials and set through * [ref:go.set] (or [ref:particlefx.set_constant]) on visual components. * * @name render.draw * @param predicate [type:predicate] predicate to draw for * @param [options] [type:table] optional table with properties: * * `frustum` * : [type:vmath.matrix4] A frustum matrix used to cull renderable items. (E.g. `local frustum = proj * view`). May be nil. 
* * `constants` * : [type:constant_buffer] optional constants to use while rendering * * @examples * * ```lua * function init(self) * -- define a predicate matching anything with material tag "my_tag" * self.my_pred = render.predicate({hash("my_tag")}) * end * * function update(self, dt) * -- draw everything in the my_pred predicate * render.draw(self.my_pred) * end * ``` * * Draw predicate with constants: * * ```lua * local constants = render.constant_buffer() * constants.tint = vmath.vector4(1, 1, 1, 1) * render.draw(self.my_pred, {constants = constants}) * ``` * Draw with predicate and frustum culling: * * ```lua * local frustum = self.proj * self.view * render.draw(self.my_pred, {frustum = frustum, constants = constants}) * ``` */ int RenderScript_Draw(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); HPredicate predicate = 0x0; if (lua_isuserdata(L, 1)) { HPredicate* tmp = RenderScriptPredicate_Check(L, 1); predicate = *tmp; } else { return luaL_error(L, "No render predicate specified."); } dmVMath::Matrix4* frustum_matrix = 0; HNamedConstantBuffer constant_buffer = 0; if (lua_istable(L, 2)) { luaL_checktype(L, 2, LUA_TTABLE); lua_pushvalue(L, 2); lua_getfield(L, -1, "frustum"); frustum_matrix = lua_isnil(L, -1) ? 0 : dmScript::CheckMatrix4(L, -1); lua_pop(L, 1); lua_getfield(L, -1, "constants"); constant_buffer = lua_isnil(L, -1) ? 0 : *RenderScriptConstantBuffer_Check(L, -1); lua_pop(L, 1); lua_pop(L, 1); } else if (lua_isuserdata(L, 2)) // Deprecated { dmLogOnceWarning("This interface for render.draw() is deprecated. Please see documentation at https://defold.com/ref/stable/render/#render.draw:predicate-[constants]") HNamedConstantBuffer* tmp = RenderScriptConstantBuffer_Check(L, 2); constant_buffer = *tmp; } if (frustum_matrix) { // we need to pass ownership to the command queue dmVMath::Matrix4* copy = new dmVMath::Matrix4; *copy = *frustum_matrix; frustum_matrix = copy; } if (InsertCommand(i, Command(COMMAND_TYPE_DRAW, (uintptr_t)predicate, (uintptr_t) constant_buffer, (uintptr_t) frustum_matrix))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# draws all 3d debug graphics * Draws all 3d debug graphics such as lines drawn with "draw_line" messages and physics visualization. * @name render.draw_debug3d * @param [options] [type:table] optional table with properties: * * `frustum` * : [type:vmath.matrix4] A frustum matrix used to cull renderable items. (E.g. `local frustum = proj * view`). May be nil. * * @replaces render.draw_debug2d * @examples * * ```lua * function update(self, dt) * -- draw debug visualization * render.draw_debug3d() * end * ``` */ int RenderScript_DrawDebug3d(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmVMath::Matrix4* frustum_matrix = 0; if (lua_istable(L, 1)) { luaL_checktype(L, 1, LUA_TTABLE); lua_pushvalue(L, 1); lua_getfield(L, -1, "frustum"); frustum_matrix = lua_isnil(L, -1) ? 0 : dmScript::CheckMatrix4(L, -1); lua_pop(L, 1); lua_pop(L, 1); } if (frustum_matrix) { // we need to pass ownership to the command queue dmVMath::Matrix4* copy = new dmVMath::Matrix4; *copy = *frustum_matrix; frustum_matrix = copy; } if (InsertCommand(i, Command(COMMAND_TYPE_DRAW_DEBUG3D, (uintptr_t)frustum_matrix))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /* DEPRECATED. NO API DOC GENERATED. 
* draws all 2d debug graphics (Deprecated) * * @name render.draw_debug2d * @deprecated Use render.draw_debug3d() to draw visual debug info. */ int RenderScript_DrawDebug2d(lua_State* L) { RenderScriptInstance_Check(L); dmLogOnceWarning("render.draw_debug2d is deprecated and will be removed in future versions, please use render.draw_debug3d instead."); return 0; } /*# sets the view matrix * * Sets the view matrix to use when rendering. * * @name render.set_view * @param matrix [type:matrix4] view matrix to set * @examples * * How to set the view and projection matrices according to * the values supplied by a camera. * * ```lua * function init(self) * self.view = vmath.matrix4() * self.projection = vmath.matrix4() * end * * function update(self, dt) * -- set the view to the stored view value * render.set_view(self.view) * -- now we can draw with this view * end * * function on_message(self, message_id, message) * if message_id == hash("set_view_projection") then * -- camera view and projection arrives here. * self.view = message.view * self.projection = message.projection * end * end * ``` */ int RenderScript_SetView(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmVMath::Matrix4 view = *dmScript::CheckMatrix4(L, 1); dmVMath::Matrix4* matrix = new dmVMath::Matrix4; *matrix = view; if (InsertCommand(i, Command(COMMAND_TYPE_SET_VIEW, (uintptr_t)matrix))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# sets the projection matrix * Sets the projection matrix to use when rendering. * * @name render.set_projection * @param matrix [type:matrix4] projection matrix * @examples * * How to set the projection to orthographic with world origo at lower left, * width and height as set in project settings and depth (z) between -1 and 1: * * ```lua * render.set_projection(vmath.matrix4_orthographic(0, render.get_width(), 0, render.get_height(), -1, 1)) * ``` */ int RenderScript_SetProjection(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); dmVMath::Matrix4 projection = *dmScript::CheckMatrix4(L, 1); dmVMath::Matrix4* matrix = new dmVMath::Matrix4; *matrix = projection; if (InsertCommand(i, Command(COMMAND_TYPE_SET_PROJECTION, (uintptr_t)matrix))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# * @name render.BLEND_ZERO * @variable */ /*# * @name render.BLEND_ONE * @variable */ /*# * @name render.BLEND_SRC_COLOR * @variable */ /*# * @name render.BLEND_ONE_MINUS_SRC_COLOR * @variable */ /*# * @name render.BLEND_DST_COLOR * @variable */ /*# * @name render.BLEND_ONE_MINUS_DST_COLOR * @variable */ /*# * @name render.BLEND_SRC_ALPHA * @variable */ /*# * @name render.BLEND_ONE_MINUS_SRC_ALPHA * @variable */ /*# * @name render.BLEND_DST_ALPHA * @variable */ /*# * @name render.BLEND_ONE_MINUS_DST_ALPHA * @variable */ /*# * @name render.BLEND_SRC_ALPHA_SATURATE * @variable */ /*# * @name render.BLEND_CONSTANT_COLOR * @variable */ /*# * @name render.BLEND_ONE_MINUS_CONSTANT_COLOR * @variable */ /*# * @name render.BLEND_CONSTANT_ALPHA * @variable */ /*# * @name render.BLEND_ONE_MINUS_CONSTANT_ALPHA * @variable */ /*# sets the blending function * * Specifies the arithmetic used when computing pixel values that are written to the frame * buffer. In RGBA mode, pixels can be drawn using a function that blends the source RGBA * pixel values with the destination pixel values already in the frame buffer. * Blending is initially disabled. 
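* Since blending is off by default, enable the blend state before setting the function; a minimal sketch: * * ```lua * -- the factors here are an illustrative premultiplied-alpha setup * render.enable_state(render.STATE_BLEND) * render.set_blend_func(render.BLEND_ONE, render.BLEND_ONE_MINUS_SRC_ALPHA) * ``` *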
* * `source_factor` specifies which method is used to scale the source color components. * `destination_factor` specifies which method is used to scale the destination color * components. * * Source color components are referred to as (R<sub>s</sub>,G<sub>s</sub>,B<sub>s</sub>,A<sub>s</sub>). * Destination color components are referred to as (R<sub>d</sub>,G<sub>d</sub>,B<sub>d</sub>,A<sub>d</sub>). * The color specified by setting the blendcolor is referred to as (R<sub>c</sub>,G<sub>c</sub>,B<sub>c</sub>,A<sub>c</sub>). * * The source scale factor is referred to as (s<sub>R</sub>,s<sub>G</sub>,s<sub>B</sub>,s<sub>A</sub>). * The destination scale factor is referred to as (d<sub>R</sub>,d<sub>G</sub>,d<sub>B</sub>,d<sub>A</sub>). * * The color values have integer values between 0 and (k<sub>R</sub>,k<sub>G</sub>,k<sub>B</sub>,k<sub>A</sub>), where k<sub>c</sub> = 2<sup>m<sub>c</sub></sup> - 1 and m<sub>c</sub> is the number of bitplanes for that color. I.e for 8 bit color depth, color values are between `0` and `255`. * Available factor constants and corresponding scale factors: * * Factor constant | Scale factor (f<sub>R</sub>,f<sub>G</sub>,f<sub>B</sub>,f<sub>A</sub>) * --------------------------------------- | ----------------------- * `render.BLEND_ZERO` | (0,0,0,0) * `render.BLEND_ONE` | (1,1,1,1) * `render.BLEND_SRC_COLOR` | (R<sub>s</sub>/k<sub>R</sub>,G<sub>s</sub>/k<sub>G</sub>,B<sub>s</sub>/k<sub>B</sub>,A<sub>s</sub>/k<sub>A</sub>) * `render.BLEND_ONE_MINUS_SRC_COLOR` | (1,1,1,1) - (R<sub>s</sub>/k<sub>R</sub>,G<sub>s</sub>/k<sub>G</sub>,B<sub>s</sub>/k<sub>B</sub>,A<sub>s</sub>/k<sub>A</sub>) * `render.BLEND_DST_COLOR` | (R<sub>d</sub>/k<sub>R</sub>,G<sub>d</sub>/k<sub>G</sub>,B<sub>d</sub>/k<sub>B</sub>,A<sub>d</sub>/k<sub>A</sub>) * `render.BLEND_ONE_MINUS_DST_COLOR` | (1,1,1,1) - (R<sub>d</sub>/k<sub>R</sub>,G<sub>d</sub>/k<sub>G</sub>,B<sub>d</sub>/k<sub>B</sub>,A<sub>d</sub>/k<sub>A</sub>) * `render.BLEND_SRC_ALPHA` | (A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>) * `render.BLEND_ONE_MINUS_SRC_ALPHA` | (1,1,1,1) - (A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>,A<sub>s</sub>/k<sub>A</sub>) * `render.BLEND_DST_ALPHA` | (A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>) * `render.BLEND_ONE_MINUS_DST_ALPHA` | (1,1,1,1) - (A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>,A<sub>d</sub>/k<sub>A</sub>) * `render.BLEND_CONSTANT_COLOR` | (R<sub>c</sub>,G<sub>c</sub>,B<sub>c</sub>,A<sub>c</sub>) * `render.BLEND_ONE_MINUS_CONSTANT_COLOR` | (1,1,1,1) - (R<sub>c</sub>,G<sub>c</sub>,B<sub>c</sub>,A<sub>c</sub>) * `render.BLEND_CONSTANT_ALPHA` | (A<sub>c</sub>,A<sub>c</sub>,A<sub>c</sub>,A<sub>c</sub>) * `render.BLEND_ONE_MINUS_CONSTANT_ALPHA` | (1,1,1,1) - (A<sub>c</sub>,A<sub>c</sub>,A<sub>c</sub>,A<sub>c</sub>) * `render.BLEND_SRC_ALPHA_SATURATE` | (i,i,i,1) where i = min(A<sub>s</sub>, k<sub>A</sub> - A<sub>d</sub>) /k<sub>A</sub> * * The blended RGBA values of a pixel comes from the following equations: * * - R<sub>d</sub> = min(k<sub>R</sub>, R<sub>s</sub> * s<sub>R</sub> + R<sub>d</sub> * d<sub>R</sub>) * - G<sub>d</sub> = min(k<sub>G</sub>, G<sub>s</sub> * s<sub>G</sub> + G<sub>d</sub> * d<sub>G</sub>) * - B<sub>d</sub> = min(k<sub>B</sub>, B<sub>s</sub> * s<sub>B</sub> + B<sub>d</sub> * d<sub>B</sub>) * - A<sub>d</sub> = min(k<sub>A</sub>, A<sub>s</sub> * s<sub>A</sub> + 
A<sub>d</sub> * d<sub>A</sub>) * * Blend function `(render.BLEND_SRC_ALPHA, render.BLEND_ONE_MINUS_SRC_ALPHA)` is useful for * drawing with transparency when the drawn objects are sorted from farthest to nearest. * It is also useful for drawing antialiased points and lines in arbitrary order. * * @name render.set_blend_func * @param source_factor [type:constant] source factor * @param destination_factor [type:constant] destination factor * @examples * * Set the blend func to the most common one: * * ```lua * render.set_blend_func(render.BLEND_SRC_ALPHA, render.BLEND_ONE_MINUS_SRC_ALPHA) * ``` */ int RenderScript_SetBlendFunc(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t factors[2]; for (uint32_t i = 0; i < 2; ++i) { factors[i] = luaL_checknumber(L, 1+i); } for (uint32_t i = 0; i < 2; ++i) { if (factors[i] != dmGraphics::BLEND_FACTOR_ZERO && factors[i] != dmGraphics::BLEND_FACTOR_ONE && factors[i] != dmGraphics::BLEND_FACTOR_SRC_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_SRC_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_DST_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_DST_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_SRC_ALPHA && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_SRC_ALPHA && factors[i] != dmGraphics::BLEND_FACTOR_DST_ALPHA && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_DST_ALPHA && factors[i] != dmGraphics::BLEND_FACTOR_SRC_ALPHA_SATURATE && factors[i] != dmGraphics::BLEND_FACTOR_CONSTANT_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR && factors[i] != dmGraphics::BLEND_FACTOR_CONSTANT_ALPHA && factors[i] != dmGraphics::BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) { return luaL_error(L, "Invalid blend types: %s.set_blend_func(self, %d, %d)", RENDER_SCRIPT_LIB_NAME, factors[0], factors[1]); } } if (InsertCommand(i, Command(COMMAND_TYPE_SET_BLEND_FUNC, factors[0], factors[1]))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# sets the color mask * * Specifies whether the individual color components in the frame buffer are enabled for writing (`true`) or disabled (`false`). For example, if `blue` is `false`, nothing is written to the blue component of any pixel in any of the color buffers, regardless of the drawing operation attempted. Note that writes are either enabled or disabled for entire color components, not the individual bits of a component. * * The component masks are all initially `true`.
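* * A common pattern (a sketch) is to disable all color writes while laying down depth or stencil information, then restore them: * * ```lua * render.set_color_mask(false, false, false, false) * -- draw occluder/mask geometry here; only depth/stencil is affected * render.set_color_mask(true, true, true, true) * ```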
* * @name render.set_color_mask * @param red [type:boolean] red mask * @param green [type:boolean] green mask * @param blue [type:boolean] blue mask * @param alpha [type:boolean] alpha mask * @examples * * ```lua * -- alpha cannot be written to frame buffer * render.set_color_mask(true, true, true, false) * ``` */ int RenderScript_SetColorMask(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); if (lua_isboolean(L, 1) && lua_isboolean(L, 2) && lua_isboolean(L, 3) && lua_isboolean(L, 4)) { bool red = lua_toboolean(L, 1) != 0; bool green = lua_toboolean(L, 2) != 0; bool blue = lua_toboolean(L, 3) != 0; bool alpha = lua_toboolean(L, 4) != 0; if (!InsertCommand(i, Command(COMMAND_TYPE_SET_COLOR_MASK, (uintptr_t)red, (uintptr_t)green, (uintptr_t)blue, (uintptr_t)alpha))) return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } else { return luaL_error(L, "Expected booleans but got %s, %s, %s, %s.", lua_typename(L, lua_type(L, 1)), lua_typename(L, lua_type(L, 2)), lua_typename(L, lua_type(L, 3)), lua_typename(L, lua_type(L, 4))); } return 0; } /*# sets the depth mask * * Specifies whether the depth buffer is enabled for writing. The supplied mask governs * if depth buffer writing is enabled (`true`) or disabled (`false`). * * The mask is initially `true`. * * @name render.set_depth_mask * @param depth [type:boolean] depth mask * @examples * * How to turn off writing to the depth buffer: * * ```lua * render.set_depth_mask(false) * ``` */ int RenderScript_SetDepthMask(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); if (lua_isboolean(L, 1)) { bool mask = lua_toboolean(L, 1) != 0; if (!InsertCommand(i, Command(COMMAND_TYPE_SET_DEPTH_MASK, (uintptr_t)mask))) return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } else { return luaL_error(L, "Expected boolean but got %s.", lua_typename(L, lua_type(L, 1))); } return 0; } /*# sets the stencil mask * * The stencil mask controls the writing of individual bits in the stencil buffer. * The least significant `n` bits of the parameter `mask`, where `n` is the number of * bits in the stencil buffer, specify the mask. * * Where a `1` bit appears in the mask, the corresponding * bit in the stencil buffer can be written. Where a `0` bit appears in the mask, * the corresponding bit in the stencil buffer is never written. * * The mask is initially all `1`'s. * * @name render.set_stencil_mask * @param mask [type:number] stencil mask * * @examples * * ```lua * -- set the stencil mask to all 1's * render.set_stencil_mask(0xff) * ``` */ int RenderScript_SetStencilMask(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t mask = (uint32_t)luaL_checknumber(L, 1); if (InsertCommand(i, Command(COMMAND_TYPE_SET_STENCIL_MASK, mask))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# * @name render.COMPARE_FUNC_NEVER * @variable */ /*# * @name render.COMPARE_FUNC_LESS * @variable */ /*# * @name render.COMPARE_FUNC_LEQUAL * @variable */ /*# * @name render.COMPARE_FUNC_GREATER * @variable */ /*# * @name render.COMPARE_FUNC_GEQUAL * @variable */ /*# * @name render.COMPARE_FUNC_EQUAL * @variable */ /*# * @name render.COMPARE_FUNC_NOTEQUAL * @variable */ /*# * @name render.COMPARE_FUNC_ALWAYS * @variable */ /*# sets the depth test function * * Specifies the function that should be used to compare each incoming pixel * depth value with the value present in the depth buffer.
* The comparison is performed only if depth testing is enabled and specifies * the conditions under which a pixel will be drawn. * * Function constants: * * - `render.COMPARE_FUNC_NEVER` (never passes) * - `render.COMPARE_FUNC_LESS` (passes if the incoming depth value is less than the stored value) * - `render.COMPARE_FUNC_LEQUAL` (passes if the incoming depth value is less than or equal to the stored value) * - `render.COMPARE_FUNC_GREATER` (passes if the incoming depth value is greater than the stored value) * - `render.COMPARE_FUNC_GEQUAL` (passes if the incoming depth value is greater than or equal to the stored value) * - `render.COMPARE_FUNC_EQUAL` (passes if the incoming depth value is equal to the stored value) * - `render.COMPARE_FUNC_NOTEQUAL` (passes if the incoming depth value is not equal to the stored value) * - `render.COMPARE_FUNC_ALWAYS` (always passes) * * The depth function is initially set to `render.COMPARE_FUNC_LESS`. * * @name render.set_depth_func * @param func [type:constant] depth test function, see the description for available values * @examples * * Enable depth test and set the depth test function to "not equal". * * ```lua * render.enable_state(render.STATE_DEPTH_TEST) * render.set_depth_func(render.COMPARE_FUNC_NOTEQUAL) * ``` */ int RenderScript_SetDepthFunc(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t func = luaL_checknumber(L, 1); if (func != dmGraphics::COMPARE_FUNC_NEVER && func != dmGraphics::COMPARE_FUNC_LESS && func != dmGraphics::COMPARE_FUNC_LEQUAL && func != dmGraphics::COMPARE_FUNC_GREATER && func != dmGraphics::COMPARE_FUNC_GEQUAL && func != dmGraphics::COMPARE_FUNC_EQUAL && func != dmGraphics::COMPARE_FUNC_NOTEQUAL && func != dmGraphics::COMPARE_FUNC_ALWAYS) { return luaL_error(L, "Invalid depth func: %s.set_depth_func(self, %d)", RENDER_SCRIPT_LIB_NAME, func); } if (InsertCommand(i, Command(COMMAND_TYPE_SET_DEPTH_FUNC, func))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# sets the stencil test function * * Stenciling is similar to depth-buffering as it enables and disables drawing on a * per-pixel basis. First, GL drawing primitives are drawn into the stencil planes. * Second, geometry and images are rendered but using the stencil planes to mask out * where to draw. * * The stencil test discards a pixel based on the outcome of a comparison between the * reference value `ref` and the corresponding value in the stencil buffer. * * `func` specifies the comparison function. See the table below for values. * The initial value is `render.COMPARE_FUNC_ALWAYS`. * * `ref` specifies the reference value for the stencil test. The value is clamped to * the range [0, 2<sup>n</sup>-1], where n is the number of bitplanes in the stencil buffer. * The initial value is `0`. * * `mask` is ANDed with both the reference value and the stored stencil value when the test * is done. The initial value is all `1`'s. 
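* * The stencil test must be enabled for the function to have any effect; a minimal sketch: * * ```lua * render.enable_state(render.STATE_STENCIL_TEST) * -- pass only where the stored stencil value equals 1 * render.set_stencil_func(render.COMPARE_FUNC_EQUAL, 1, 0xff) * ```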
* * Function constant: * * - `render.COMPARE_FUNC_NEVER` (never passes) * - `render.COMPARE_FUNC_LESS` (passes if (ref & mask) < (stencil & mask)) * - `render.COMPARE_FUNC_LEQUAL` (passes if (ref & mask) <= (stencil & mask)) * - `render.COMPARE_FUNC_GREATER` (passes if (ref & mask) > (stencil & mask)) * - `render.COMPARE_FUNC_GEQUAL` (passes if (ref & mask) >= (stencil & mask)) * - `render.COMPARE_FUNC_EQUAL` (passes if (ref & mask) = (stencil & mask)) * - `render.COMPARE_FUNC_NOTEQUAL` (passes if (ref & mask) != (stencil & mask)) * - `render.COMPARE_FUNC_ALWAYS` (always passes) * * @name render.set_stencil_func * @param func [type:constant] stencil test function, see the description for available values * @param ref [type:number] reference value for the stencil test * @param mask [type:number] mask that is ANDed with both the reference value and the stored stencil value when the test is done * @examples * * ```lua * -- let only 0's pass the stencil test * render.set_stencil_func(render.COMPARE_FUNC_EQUAL, 0, 1) * ``` * */ int RenderScript_SetStencilFunc(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t func = luaL_checknumber(L, 1); if (func != dmGraphics::COMPARE_FUNC_NEVER && func != dmGraphics::COMPARE_FUNC_LESS && func != dmGraphics::COMPARE_FUNC_LEQUAL && func != dmGraphics::COMPARE_FUNC_GREATER && func != dmGraphics::COMPARE_FUNC_GEQUAL && func != dmGraphics::COMPARE_FUNC_EQUAL && func != dmGraphics::COMPARE_FUNC_NOTEQUAL && func != dmGraphics::COMPARE_FUNC_ALWAYS) { return luaL_error(L, "Invalid stencil func: %s.set_stencil_func(self, %d)", RENDER_SCRIPT_LIB_NAME, func); } uint32_t ref = luaL_checknumber(L, 2); uint32_t mask = luaL_checknumber(L, 3); if (InsertCommand(i, Command(COMMAND_TYPE_SET_STENCIL_FUNC, func, ref, mask))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# * @name render.STENCIL_OP_KEEP * @variable */ /*# * @name render.STENCIL_OP_ZERO * @variable */ /*# * @name render.STENCIL_OP_REPLACE * @variable */ /*# * @name render.STENCIL_OP_INCR * @variable */ /*# * @name render.STENCIL_OP_INCR_WRAP * @variable */ /*# * @name render.STENCIL_OP_DECR * @variable */ /*# * @name render.STENCIL_OP_DECR_WRAP * @variable */ /*# * @name render.STENCIL_OP_INVERT * @variable */ /*# sets the stencil operator * * The stencil test discards a pixel based on the outcome of a comparison between the * reference value `ref` and the corresponding value in the stencil buffer. * To control the test, call [ref:render.set_stencil_func]. * * This function takes three arguments that control what happens to the stored stencil * value while stenciling is enabled. If the stencil test fails, no change is made to the * pixel's color or depth buffers, and `sfail` specifies what happens to the stencil buffer * contents. 
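* * Using the operator constants listed below, a typical two-pass masking sketch (the predicate names are illustrative) first writes 1's into the stencil buffer, then draws only where the stencil equals 1: * * ```lua * render.enable_state(render.STATE_STENCIL_TEST) * -- pass 1: fill the stencil buffer where the mask geometry is drawn * render.set_stencil_func(render.COMPARE_FUNC_ALWAYS, 1, 0xff) * render.set_stencil_op(render.STENCIL_OP_REPLACE, render.STENCIL_OP_REPLACE, render.STENCIL_OP_REPLACE) * render.draw(self.mask_pred) * -- pass 2: draw the scene only inside the mask * render.set_stencil_func(render.COMPARE_FUNC_EQUAL, 1, 0xff) * render.set_stencil_op(render.STENCIL_OP_KEEP, render.STENCIL_OP_KEEP, render.STENCIL_OP_KEEP) * render.draw(self.scene_pred) * ```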
* * Operator constants: * * - `render.STENCIL_OP_KEEP` (keeps the current value) * - `render.STENCIL_OP_ZERO` (sets the stencil buffer value to 0) * - `render.STENCIL_OP_REPLACE` (sets the stencil buffer value to `ref`, as specified by [ref:render.set_stencil_func]) * - `render.STENCIL_OP_INCR` (increments the stencil buffer value and clamp to the maximum representable unsigned value) * - `render.STENCIL_OP_INCR_WRAP` (increments the stencil buffer value and wrap to zero when incrementing the maximum representable unsigned value) * - `render.STENCIL_OP_DECR` (decrements the current stencil buffer value and clamp to 0) * - `render.STENCIL_OP_DECR_WRAP` (decrements the current stencil buffer value and wrap to the maximum representable unsigned value when decrementing zero) * - `render.STENCIL_OP_INVERT` (bitwise inverts the current stencil buffer value) * * `dppass` and `dpfail` specify the stencil buffer actions depending on whether subsequent * depth buffer tests succeed (dppass) or fail (dpfail). * * The initial value for all operators is `render.STENCIL_OP_KEEP`. * * @name render.set_stencil_op * @param sfail [type:constant] action to take when the stencil test fails * @param dpfail [type:constant] the stencil action when the stencil test passes * @param dppass [type:constant] the stencil action when both the stencil test and the depth test pass, or when the stencil test passes and either there is no depth buffer or depth testing is not enabled * @examples * * Set the stencil function to never pass and operator to always draw 1's * on test fail. * * ```lua * render.set_stencil_func(render.COMPARE_FUNC_NEVER, 1, 0xFF) * -- always draw 1's on test fail * render.set_stencil_op(render.STENCIL_OP_REPLACE, render.STENCIL_OP_KEEP, render.STENCIL_OP_KEEP) * ``` */ int RenderScript_SetStencilOp(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t ops[3]; for (uint32_t i = 0; i < 3; ++i) { ops[i] = luaL_checknumber(L, 1+i); } for (uint32_t i = 0; i < 3; ++i) { if (ops[i] != dmGraphics::STENCIL_OP_KEEP && ops[i] != dmGraphics::STENCIL_OP_ZERO && ops[i] != dmGraphics::STENCIL_OP_REPLACE && ops[i] != dmGraphics::STENCIL_OP_INCR && ops[i] != dmGraphics::STENCIL_OP_INCR_WRAP && ops[i] != dmGraphics::STENCIL_OP_DECR && ops[i] != dmGraphics::STENCIL_OP_DECR_WRAP && ops[i] != dmGraphics::STENCIL_OP_INVERT) { return luaL_error(L, "Invalid stencil ops: %s.set_stencil_op(self, %d, %d, %d)", RENDER_SCRIPT_LIB_NAME, ops[0], ops[1], ops[2]); } } if (InsertCommand(i, Command(COMMAND_TYPE_SET_STENCIL_OP, ops[0], ops[1], ops[2]))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# * @name render.FACE_FRONT * @variable */ /*# * @name render.FACE_BACK * @variable */ /*# * @name render.FACE_FRONT_AND_BACK * @variable */ /*# sets the cull face * * Specifies whether front- or back-facing polygons can be culled * when polygon culling is enabled. Polygon culling is initially disabled. * * If mode is `render.FACE_FRONT_AND_BACK`, no polygons are drawn, but other * primitives such as points and lines are drawn. The initial value for * `face_type` is `render.FACE_BACK`. 
* * @name render.set_cull_face * @param face_type [type:constant] face type * * - `render.FACE_FRONT` * - `render.FACE_BACK` * - `render.FACE_FRONT_AND_BACK` * * @examples * * How to enable polygon culling and set front face culling: * * ```lua * render.enable_state(render.STATE_CULL_FACE) * render.set_cull_face(render.FACE_FRONT) * ``` */ int RenderScript_SetCullFace(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); uint32_t face_type = luaL_checknumber(L, 1); if (face_type != dmGraphics::FACE_TYPE_FRONT && face_type != dmGraphics::FACE_TYPE_BACK && face_type != dmGraphics::FACE_TYPE_FRONT_AND_BACK) { return luaL_error(L, "Invalid face types: %s.set_cull_face(self, %d)", RENDER_SCRIPT_LIB_NAME, face_type); } if (InsertCommand(i, Command(COMMAND_TYPE_SET_CULL_FACE, (uintptr_t)face_type))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# sets the polygon offset * * Sets the scale and units used to calculate depth values. * If `render.STATE_POLYGON_OFFSET_FILL` is enabled, each fragment's depth value * is offset from its interpolated value (depending on the depth value of the * appropriate vertices). Polygon offset can be used when drawing decals, rendering * hidden-line images etc. * * `factor` specifies a scale factor that is used to create a variable depth * offset for each polygon. The initial value is `0`. * * `units` is multiplied by an implementation-specific value to create a * constant depth offset. The initial value is `0`. * * The value of the offset is computed as `factor` &times; `DZ` + `r` &times; `units` * * `DZ` is a measurement of the depth slope of the polygon which is the change in z (depth) * values divided by the change in either x or y coordinates, as you traverse a polygon. * The depth values are in window coordinates, clamped to the range [0, 1]. * * `r` is the smallest value that is guaranteed to produce a resolvable difference. * It's value is an implementation-specific constant. * * The offset is added before the depth test is performed and before the * value is written into the depth buffer. * * @name render.set_polygon_offset * @param factor [type:number] polygon offset factor * @param units [type:number] polygon offset units * @examples * * ```lua * render.enable_state(render.STATE_POLYGON_OFFSET_FILL) * render.set_polygon_offset(1.0, 1.0) * ``` */ int RenderScript_SetPolygonOffset(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); float factor = luaL_checknumber(L, 1); float units = luaL_checknumber(L, 2); if (InsertCommand(i, Command(COMMAND_TYPE_SET_POLYGON_OFFSET, (uintptr_t)factor, (uintptr_t)units))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } /*# gets the window width, as specified for the project * * Returns the logical window width that is set in the "game.project" settings. * Note that the actual window pixel size can change, either by device constraints * or user input. * * @name render.get_width * @return width [type:number] specified window width (number) * @examples * * Get the width of the window. * * ```lua * local w = render.get_width() * ``` */ int RenderScript_GetWidth(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; lua_pushnumber(L, dmGraphics::GetWidth(i->m_RenderContext->m_GraphicsContext)); return 1; } /*# gets the window height, as specified for the project * * Returns the logical window height that is set in the "game.project" settings. 
* Note that the actual window pixel size can change, either by device constraints * or user input. * * @name render.get_height * @return height [type:number] specified window height * @examples * * Get the height of the window * * ```lua * local h = render.get_height() * ``` */ int RenderScript_GetHeight(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; lua_pushnumber(L, dmGraphics::GetHeight(i->m_RenderContext->m_GraphicsContext)); return 1; } /*# gets the actual window width * * Returns the actual physical window width. * Note that this value might differ from the logical width that is set in the * "game.project" settings. * * @name render.get_window_width * @return width [type:number] actual window width * @examples * * Get the actual width of the window * * ```lua * local w = render.get_window_width() * ``` */ int RenderScript_GetWindowWidth(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; lua_pushnumber(L, dmGraphics::GetWindowWidth(i->m_RenderContext->m_GraphicsContext)); return 1; } /*# gets the actual window height * * Returns the actual physical window height. * Note that this value might differ from the logical height that is set in the * "game.project" settings. * * @name render.get_window_height * @return height [type:number] actual window height * @examples * * Get the actual height of the window * * ```lua * local h = render.get_window_height() * ``` */ int RenderScript_GetWindowHeight(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; lua_pushnumber(L, dmGraphics::GetWindowHeight(i->m_RenderContext->m_GraphicsContext)); return 1; } /*# creates a new render predicate * * This function returns a new render predicate for objects with materials matching * the provided material tags. The provided tags are combined into a bit mask * for the predicate. If multiple tags are provided, the predicate matches materials * with all tags ANDed together. * * The current limit to the number of tags that can be defined is `64`. * * @name render.predicate * @param tags [type:table] table of tags that the predicate should match. The tags can be of either hash or string type * @return predicate [type:predicate] new predicate * @examples * * Create a new render predicate containing all visual objects that * have a material with material tags "opaque" AND "smoke". * * ``` * local p = render.predicate({hash("opaque"), hash("smoke")}) * ``` */ int RenderScript_Predicate(lua_State* L) { int top = lua_gettop(L); (void) top; RenderScriptInstance* i = RenderScriptInstance_Check(L); (void)i; luaL_checktype(L, 1, LUA_TTABLE); HPredicate* p_predicate = (HPredicate*) lua_newuserdata(L, sizeof(HPredicate*)); *p_predicate = NewPredicate(); luaL_getmetatable(L, RENDER_SCRIPT_PREDICATE); lua_setmetatable(L, -2); lua_pushnil(L); /* first key */ while (lua_next(L, 1) != 0) { dmhash_t tag = dmScript::CheckHashOrString(L, -1); if (RESULT_OK != AddPredicateTag(*p_predicate, tag)) { dmLogWarning("Unable to add predicate tag. Max number of tags (%i) reached?", dmRender::Predicate::MAX_TAG_COUNT); } lua_pop(L, 1); } assert(top + 1 == lua_gettop(L)); return 1; } /*# enables a material * If another material was already enabled, it will be automatically disabled * and the specified material is used instead. * * The name of the material must be specified in the ".render" resource set * in the "game.project" setting. 
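* * The id may be given either as a string or as a hash; a sketch: * * ```lua * render.enable_material(hash("glow")) * ```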
* * @name render.enable_material * @param material_id [type:string|hash] material id to enable * @examples * * Enable material named "glow", then draw my_pred with it. * * ```lua * render.enable_material("glow") * render.draw(self.my_pred) * render.disable_material() * ``` */ int RenderScript_EnableMaterial(lua_State* L) { int top = lua_gettop(L); (void)top; RenderScriptInstance* i = RenderScriptInstance_Check(L); if (!lua_isnil(L, 1)) { dmhash_t material_id = dmScript::CheckHashOrString(L, 1); dmRender::HMaterial* mat = i->m_Materials.Get(material_id); if (mat == 0x0) { assert(top == lua_gettop(L)); char str[128]; char buffer[256]; dmSnPrintf(buffer, sizeof(buffer), "Could not find material '%s' %llu", dmScript::GetStringFromHashOrString(L, 1, str, sizeof(str)), (unsigned long long)material_id); // since lua doesn't support proper format arguments return luaL_error(L, "%s", buffer); } else { HMaterial material = *mat; if (InsertCommand(i, Command(COMMAND_TYPE_ENABLE_MATERIAL, (uintptr_t)material))) { assert(top == lua_gettop(L)); return 0; } else { assert(top == lua_gettop(L)); return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } } } else { assert(top == lua_gettop(L)); return luaL_error(L, "%s.enable_material was supplied nil as material.", RENDER_SCRIPT_LIB_NAME); } } /*# disables the currently enabled material * If a material is currently enabled, disable it. * * The name of the material must be specified in the ".render" resource set * in the "game.project" setting. * * @name render.disable_material * @examples * * Enable material named "glow", then draw my_pred with it. * * ```lua * render.enable_material("glow") * render.draw(self.my_pred) * render.disable_material() * ``` */ int RenderScript_DisableMaterial(lua_State* L) { RenderScriptInstance* i = RenderScriptInstance_Check(L); if (InsertCommand(i, Command(COMMAND_TYPE_DISABLE_MATERIAL))) return 0; else return luaL_error(L, "Command buffer is full (%d).", i->m_CommandBuffer.Capacity()); } static const luaL_reg Render_methods[] = { {"enable_state", RenderScript_EnableState}, {"disable_state", RenderScript_DisableState}, {"render_target", RenderScript_RenderTarget}, {"delete_render_target", RenderScript_DeleteRenderTarget}, {"set_render_target", RenderScript_SetRenderTarget}, {"enable_render_target", RenderScript_EnableRenderTarget}, {"disable_render_target", RenderScript_DisableRenderTarget}, {"set_render_target_size", RenderScript_SetRenderTargetSize}, {"enable_texture", RenderScript_EnableTexture}, {"disable_texture", RenderScript_DisableTexture}, {"get_render_target_width", RenderScript_GetRenderTargetWidth}, {"get_render_target_height", RenderScript_GetRenderTargetHeight}, {"clear", RenderScript_Clear}, {"set_viewport", RenderScript_SetViewport}, {"set_view", RenderScript_SetView}, {"set_projection", RenderScript_SetProjection}, {"set_blend_func", RenderScript_SetBlendFunc}, {"set_color_mask", RenderScript_SetColorMask}, {"set_depth_mask", RenderScript_SetDepthMask}, {"set_depth_func", RenderScript_SetDepthFunc}, {"set_stencil_mask", RenderScript_SetStencilMask}, {"set_stencil_func", RenderScript_SetStencilFunc}, {"set_stencil_op", RenderScript_SetStencilOp}, {"set_cull_face", RenderScript_SetCullFace}, {"set_polygon_offset", RenderScript_SetPolygonOffset}, {"draw", RenderScript_Draw}, {"draw_debug3d", RenderScript_DrawDebug3d}, {"draw_debug2d", RenderScript_DrawDebug2d}, {"get_width", RenderScript_GetWidth}, {"get_height", RenderScript_GetHeight}, {"get_window_width", 
RenderScript_GetWindowWidth}, {"get_window_height", RenderScript_GetWindowHeight}, {"predicate", RenderScript_Predicate}, {"constant_buffer", RenderScript_ConstantBuffer}, {"enable_material", RenderScript_EnableMaterial}, {"disable_material", RenderScript_DisableMaterial}, {0, 0} }; void InitializeRenderScriptContext(RenderScriptContext& context, dmGraphics::HContext graphics_context, dmScript::HContext script_context, uint32_t command_buffer_size) { context.m_CommandBufferSize = command_buffer_size; lua_State *L = dmScript::GetLuaState(script_context); context.m_LuaState = L; int top = lua_gettop(L); (void)top; RENDER_SCRIPT_TYPE_HASH = dmScript::RegisterUserType(L, RENDER_SCRIPT, RenderScript_methods, RenderScript_meta); RENDER_SCRIPT_INSTANCE_TYPE_HASH = dmScript::RegisterUserType(L, RENDER_SCRIPT_INSTANCE, RenderScriptInstance_methods, RenderScriptInstance_meta); RENDER_SCRIPT_CONSTANTBUFFER_TYPE_HASH = dmScript::RegisterUserType(L, RENDER_SCRIPT_CONSTANTBUFFER, RenderScriptConstantBuffer_methods, RenderScriptConstantBuffer_meta); RENDER_SCRIPT_PREDICATE_TYPE_HASH = dmScript::RegisterUserType(L, RENDER_SCRIPT_PREDICATE, RenderScriptPredicate_methods, RenderScriptPredicate_meta); luaL_register(L, RENDER_SCRIPT_LIB_NAME, Render_methods); #define REGISTER_STATE_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::name); \ lua_setfield(L, -2, #name); REGISTER_STATE_CONSTANT(STATE_DEPTH_TEST); REGISTER_STATE_CONSTANT(STATE_STENCIL_TEST); REGISTER_STATE_CONSTANT(STATE_ALPHA_TEST); REGISTER_STATE_CONSTANT(STATE_BLEND); REGISTER_STATE_CONSTANT(STATE_CULL_FACE); REGISTER_STATE_CONSTANT(STATE_POLYGON_OFFSET_FILL); #undef REGISTER_STATE_CONSTANT #define REGISTER_FORMAT_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::TEXTURE_FORMAT_##name); \ lua_setfield(L, -2, "FORMAT_"#name); REGISTER_FORMAT_CONSTANT(LUMINANCE); REGISTER_FORMAT_CONSTANT(RGBA); REGISTER_FORMAT_CONSTANT(DEPTH); REGISTER_FORMAT_CONSTANT(STENCIL); #undef REGISTER_FORMAT_CONSTANT #define REGISTER_FORMAT_CONSTANT(name)\ if (dmGraphics::IsTextureFormatSupported(graphics_context, dmGraphics::TEXTURE_FORMAT_##name)) { \ lua_pushnumber(L, (lua_Number) dmGraphics::TEXTURE_FORMAT_##name); \ lua_setfield(L, -2, "FORMAT_"#name); \ } // These depend on driver support REGISTER_FORMAT_CONSTANT(RGB); REGISTER_FORMAT_CONSTANT(RGB16F); REGISTER_FORMAT_CONSTANT(RGB32F); REGISTER_FORMAT_CONSTANT(RGBA16F); REGISTER_FORMAT_CONSTANT(RGBA32F); REGISTER_FORMAT_CONSTANT(R16F); REGISTER_FORMAT_CONSTANT(RG16F); REGISTER_FORMAT_CONSTANT(R32F); REGISTER_FORMAT_CONSTANT(RG32F); #undef REGISTER_FORMAT_CONSTANT #define REGISTER_FILTER_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::TEXTURE_FILTER_##name); \ lua_setfield(L, -2, "FILTER_"#name); REGISTER_FILTER_CONSTANT(LINEAR); REGISTER_FILTER_CONSTANT(NEAREST); #undef REGISTER_FILTER_CONSTANT #define REGISTER_WRAP_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::TEXTURE_WRAP_##name); \ lua_setfield(L, -2, "WRAP_"#name); REGISTER_WRAP_CONSTANT(CLAMP_TO_BORDER); REGISTER_WRAP_CONSTANT(CLAMP_TO_EDGE); REGISTER_WRAP_CONSTANT(MIRRORED_REPEAT); REGISTER_WRAP_CONSTANT(REPEAT); #undef REGISTER_WRAP_CONSTANT #define REGISTER_BLEND_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::BLEND_FACTOR_##name); \ lua_setfield(L, -2, "BLEND_"#name); REGISTER_BLEND_CONSTANT(ZERO); REGISTER_BLEND_CONSTANT(ONE); REGISTER_BLEND_CONSTANT(SRC_COLOR); REGISTER_BLEND_CONSTANT(ONE_MINUS_SRC_COLOR); REGISTER_BLEND_CONSTANT(DST_COLOR); REGISTER_BLEND_CONSTANT(ONE_MINUS_DST_COLOR); 
REGISTER_BLEND_CONSTANT(SRC_ALPHA); REGISTER_BLEND_CONSTANT(ONE_MINUS_SRC_ALPHA); REGISTER_BLEND_CONSTANT(DST_ALPHA); REGISTER_BLEND_CONSTANT(ONE_MINUS_DST_ALPHA); REGISTER_BLEND_CONSTANT(SRC_ALPHA_SATURATE); REGISTER_BLEND_CONSTANT(CONSTANT_COLOR); REGISTER_BLEND_CONSTANT(ONE_MINUS_CONSTANT_COLOR); REGISTER_BLEND_CONSTANT(CONSTANT_ALPHA); REGISTER_BLEND_CONSTANT(ONE_MINUS_CONSTANT_ALPHA); #undef REGISTER_BLEND_CONSTANT #define REGISTER_COMPARE_FUNC_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::COMPARE_FUNC_##name); \ lua_setfield(L, -2, "COMPARE_FUNC_"#name); REGISTER_COMPARE_FUNC_CONSTANT(NEVER); REGISTER_COMPARE_FUNC_CONSTANT(LESS); REGISTER_COMPARE_FUNC_CONSTANT(LEQUAL); REGISTER_COMPARE_FUNC_CONSTANT(GREATER); REGISTER_COMPARE_FUNC_CONSTANT(GEQUAL); REGISTER_COMPARE_FUNC_CONSTANT(EQUAL); REGISTER_COMPARE_FUNC_CONSTANT(NOTEQUAL); REGISTER_COMPARE_FUNC_CONSTANT(ALWAYS); #undef REGISTER_COMPARE_FUNC_CONSTANT #define REGISTER_STENCIL_OP_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::STENCIL_OP_##name); \ lua_setfield(L, -2, "STENCIL_OP_"#name); REGISTER_STENCIL_OP_CONSTANT(KEEP); REGISTER_STENCIL_OP_CONSTANT(ZERO); REGISTER_STENCIL_OP_CONSTANT(REPLACE); REGISTER_STENCIL_OP_CONSTANT(INCR); REGISTER_STENCIL_OP_CONSTANT(INCR_WRAP); REGISTER_STENCIL_OP_CONSTANT(DECR); REGISTER_STENCIL_OP_CONSTANT(DECR_WRAP); REGISTER_STENCIL_OP_CONSTANT(INVERT); #undef REGISTER_STENCIL_OP_CONSTANT #define REGISTER_FACE_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::FACE_TYPE_##name); \ lua_setfield(L, -2, "FACE_"#name); REGISTER_FACE_CONSTANT(FRONT); REGISTER_FACE_CONSTANT(BACK); REGISTER_FACE_CONSTANT(FRONT_AND_BACK); #undef REGISTER_FACE_CONSTANT #define REGISTER_BUFFER_CONSTANT(name)\ lua_pushnumber(L, (lua_Number) dmGraphics::BUFFER_TYPE_##name); \ lua_setfield(L, -2, "BUFFER_"#name); REGISTER_BUFFER_CONSTANT(COLOR_BIT); REGISTER_BUFFER_CONSTANT(DEPTH_BIT); REGISTER_BUFFER_CONSTANT(STENCIL_BIT); #undef REGISTER_BUFFER_CONSTANT lua_pop(L, 1); assert(top == lua_gettop(L)); } void FinalizeRenderScriptContext(RenderScriptContext& context, dmScript::HContext script_context) { context.m_LuaState = 0; } static bool LoadRenderScript(lua_State* L, dmLuaDDF::LuaSource *source, RenderScript* script) { for (uint32_t i = 0; i < MAX_RENDER_SCRIPT_FUNCTION_COUNT; ++i) script->m_FunctionReferences[i] = LUA_NOREF; bool result = false; int top = lua_gettop(L); (void) top; int ret = dmScript::LuaLoad(L, source); if (ret == 0) { lua_rawgeti(L, LUA_REGISTRYINDEX, script->m_InstanceReference); dmScript::SetInstance(L); ret = dmScript::PCall(L, 0, 0); if (ret == 0) { for (uint32_t i = 0; i < MAX_RENDER_SCRIPT_FUNCTION_COUNT; ++i) { lua_getglobal(L, RENDER_SCRIPT_FUNCTION_NAMES[i]); if (lua_isnil(L, -1) == 0) { if (lua_type(L, -1) == LUA_TFUNCTION) { script->m_FunctionReferences[i] = dmScript::Ref(L, LUA_REGISTRYINDEX); } else { dmLogError("The global name '%s' in '%s' must be a function.", RENDER_SCRIPT_FUNCTION_NAMES[i], source->m_Filename); lua_pop(L, 1); goto bail; } } else { script->m_FunctionReferences[i] = LUA_NOREF; lua_pop(L, 1); } } result = true; // m_SourceFileName will be null if profiling is not enabled, this is fine // as m_SourceFileName will only be used if profiling is enabled script->m_SourceFileName = DM_INTERNALIZE(source->m_Filename); } lua_pushnil(L); dmScript::SetInstance(L); } else { dmLogError("Error running script: %s", lua_tostring(L,-1)); lua_pop(L, 1); } bail: for (uint32_t i = 0; i < MAX_RENDER_SCRIPT_FUNCTION_COUNT; ++i) { lua_pushnil(L); lua_setglobal(L, 
RENDER_SCRIPT_FUNCTION_NAMES[i]); } assert(top == lua_gettop(L)); return result; } static void ResetRenderScript(HRenderScript render_script) { memset(render_script, 0, sizeof(RenderScript)); render_script->m_InstanceReference = LUA_NOREF; for (uint32_t i = 0; i < MAX_RENDER_SCRIPT_FUNCTION_COUNT; ++i) { render_script->m_FunctionReferences[i] = LUA_NOREF; } } HRenderScript NewRenderScript(HRenderContext render_context, dmLuaDDF::LuaSource *source) { lua_State* L = render_context->m_RenderScriptContext.m_LuaState; int top = lua_gettop(L); (void)top; HRenderScript render_script = (HRenderScript)lua_newuserdata(L, sizeof(RenderScript)); ResetRenderScript(render_script); render_script->m_RenderContext = render_context; luaL_getmetatable(L, RENDER_SCRIPT); lua_setmetatable(L, -2); render_script->m_InstanceReference = dmScript::Ref(L, LUA_REGISTRYINDEX); if (LoadRenderScript(L, source, render_script)) { assert(top == lua_gettop(L)); return render_script; } else { DeleteRenderScript(render_context, render_script); assert(top == lua_gettop(L)); return 0; } } bool ReloadRenderScript(HRenderContext render_context, HRenderScript render_script, dmLuaDDF::LuaSource *source) { return LoadRenderScript(render_context->m_RenderScriptContext.m_LuaState, source, render_script); } void DeleteRenderScript(HRenderContext render_context, HRenderScript render_script) { lua_State* L = render_script->m_RenderContext->m_RenderScriptContext.m_LuaState; for (uint32_t i = 0; i < MAX_RENDER_SCRIPT_FUNCTION_COUNT; ++i) { if (render_script->m_FunctionReferences[i]) dmScript::Unref(L, LUA_REGISTRYINDEX, render_script->m_FunctionReferences[i]); } dmScript::Unref(L, LUA_REGISTRYINDEX, render_script->m_InstanceReference); render_script->~RenderScript(); ResetRenderScript(render_script); } static void ResetRenderScriptInstance(HRenderScriptInstance render_script_instance) { memset(render_script_instance, 0, sizeof(RenderScriptInstance)); render_script_instance->m_InstanceReference = LUA_NOREF; render_script_instance->m_RenderScriptDataReference = LUA_NOREF; render_script_instance->m_ContextTableReference = LUA_NOREF; } HRenderScriptInstance NewRenderScriptInstance(dmRender::HRenderContext render_context, HRenderScript render_script) { lua_State* L = render_context->m_RenderScriptContext.m_LuaState; int top = lua_gettop(L); (void) top; RenderScriptInstance* i = (RenderScriptInstance*)lua_newuserdata(L, sizeof(RenderScriptInstance)); ResetRenderScriptInstance(i); i->m_PredicateCount = 0; i->m_RenderScript = render_script; i->m_ScriptWorld = render_context->m_ScriptWorld; i->m_RenderContext = render_context; i->m_CommandBuffer.SetCapacity(render_context->m_RenderScriptContext.m_CommandBufferSize); i->m_Materials.SetCapacity(16, 8); lua_pushvalue(L, -1); i->m_InstanceReference = dmScript::Ref( L, LUA_REGISTRYINDEX ); lua_newtable(L); i->m_RenderScriptDataReference = dmScript::Ref( L, LUA_REGISTRYINDEX ); lua_newtable(L); i->m_ContextTableReference = dmScript::Ref( L, LUA_REGISTRYINDEX ); luaL_getmetatable(L, RENDER_SCRIPT_INSTANCE); lua_setmetatable(L, -2); dmScript::SetInstance(L); dmScript::InitializeInstance(i->m_ScriptWorld); lua_pushnil(L); dmScript::SetInstance(L); assert(top == lua_gettop(L)); return i; } void DeleteRenderScriptInstance(HRenderScriptInstance render_script_instance) { lua_State* L = render_script_instance->m_RenderContext->m_RenderScriptContext.m_LuaState; int top = lua_gettop(L); (void) top; lua_rawgeti(L, LUA_REGISTRYINDEX, render_script_instance->m_InstanceReference); dmScript::SetInstance(L); 
dmScript::FinalizeInstance(render_script_instance->m_ScriptWorld); lua_pushnil(L); dmScript::SetInstance(L); dmScript::Unref(L, LUA_REGISTRYINDEX, render_script_instance->m_InstanceReference); dmScript::Unref(L, LUA_REGISTRYINDEX, render_script_instance->m_RenderScriptDataReference); dmScript::Unref(L, LUA_REGISTRYINDEX, render_script_instance->m_ContextTableReference); assert(top == lua_gettop(L)); for (uint32_t i = 0; i < render_script_instance->m_PredicateCount; ++i) { delete render_script_instance->m_Predicates[i]; } render_script_instance->~RenderScriptInstance(); ResetRenderScriptInstance(render_script_instance); } void SetRenderScriptInstanceRenderScript(HRenderScriptInstance render_script_instance, HRenderScript render_script) { render_script_instance->m_RenderScript = render_script; } void AddRenderScriptInstanceMaterial(HRenderScriptInstance render_script_instance, const char* material_name, dmRender::HMaterial material) { if (render_script_instance->m_Materials.Full()) { uint32_t new_capacity = 2 * render_script_instance->m_Materials.Capacity(); render_script_instance->m_Materials.SetCapacity(2 * new_capacity, new_capacity); } render_script_instance->m_Materials.Put(dmHashString64(material_name), material); } void ClearRenderScriptInstanceMaterials(HRenderScriptInstance render_script_instance) { render_script_instance->m_Materials.Clear(); } RenderScriptResult RunScript(HRenderScriptInstance script_instance, RenderScriptFunction script_function, void* args) { DM_PROFILE(Script, "RenderScript"); RenderScriptResult result = RENDER_SCRIPT_RESULT_OK; HRenderScript script = script_instance->m_RenderScript; if (script->m_FunctionReferences[script_function] != LUA_NOREF) { lua_State* L = script_instance->m_RenderContext->m_RenderScriptContext.m_LuaState; int top = lua_gettop(L); (void) top; lua_rawgeti(L, LUA_REGISTRYINDEX, script_instance->m_InstanceReference); dmScript::SetInstance(L); lua_rawgeti(L, LUA_REGISTRYINDEX, script->m_FunctionReferences[script_function]); lua_rawgeti(L, LUA_REGISTRYINDEX, script_instance->m_InstanceReference); int arg_count = 1; const char* message_name = 0; if (script_function == RENDER_SCRIPT_FUNCTION_ONMESSAGE) { arg_count = 4; dmMessage::Message* message = (dmMessage::Message*)args; dmScript::PushHash(L, message->m_Id); if (message->m_Descriptor != 0) { dmDDF::Descriptor* descriptor = (dmDDF::Descriptor*)message->m_Descriptor; // TODO: setjmp/longjmp here... how to handle?!!! We are not running "from lua" here // lua_cpcall? 
message_name = descriptor->m_Name; dmScript::PushDDF(L, descriptor, (const char*)message->m_Data, true); } else { if (dmProfile::g_IsInitialized) { // Try to find the message name via id and reverse hash message_name = (const char*)dmHashReverse64(message->m_Id, 0); } if (message->m_DataSize > 0) { dmScript::PushTable(L, (const char*)message->m_Data, message->m_DataSize); } else { lua_newtable(L); } } dmScript::PushURL(L, message->m_Sender); } else if (script_function == RENDER_SCRIPT_FUNCTION_UPDATE) { float* dt = (float*)args; lua_pushnumber(L, (lua_Number) *dt); arg_count += 1; } { uint32_t profiler_hash = 0; const char* profiler_string = dmScript::GetProfilerString(L, 0, script->m_SourceFileName, RENDER_SCRIPT_FUNCTION_NAMES[script_function], message_name, &profiler_hash); DM_PROFILE_DYN(Script, profiler_string, profiler_hash); if (dmScript::PCall(L, arg_count, 0) != 0) { assert(top == lua_gettop(L)); result = RENDER_SCRIPT_RESULT_FAILED; } } lua_pushnil(L); dmScript::SetInstance(L); assert(top == lua_gettop(L)); } return result; } RenderScriptResult InitRenderScriptInstance(HRenderScriptInstance instance) { return RunScript(instance, RENDER_SCRIPT_FUNCTION_INIT, 0x0); } struct DispatchContext { HRenderScriptInstance m_Instance; RenderScriptResult m_Result; }; void DispatchCallback(dmMessage::Message *message, void* user_ptr) { DispatchContext* context = (DispatchContext*)user_ptr; HRenderScriptInstance instance = context->m_Instance; if (message->m_Descriptor != 0) { dmDDF::Descriptor* descriptor = (dmDDF::Descriptor*)message->m_Descriptor; if (descriptor == dmRenderDDF::DrawText::m_DDFDescriptor || descriptor == dmRenderDDF::DrawDebugText::m_DDFDescriptor) { if (instance->m_RenderContext->m_SystemFontMap == 0) { dmLogWarning("The text can not be rendered since the system font is not set."); context->m_Result = RENDER_SCRIPT_RESULT_FAILED; return; } DrawTextParams params; if (descriptor == dmRenderDDF::DrawText::m_DDFDescriptor) { dmRenderDDF::DrawText* dt = (dmRenderDDF::DrawText*)message->m_Data; const char* text = (const char*) ((uintptr_t) dt + (uintptr_t) dt->m_Text); params.m_Text = text; params.m_WorldTransform.setTranslation(dmVMath::Vector3(dt->m_Position)); params.m_FaceColor = dmVMath::Vector4(0.0f, 0.0f, 1.0f, 1.0f); } else { dmRenderDDF::DrawDebugText* dt = (dmRenderDDF::DrawDebugText*)message->m_Data; const char* text = (const char*) ((uintptr_t) dt + (uintptr_t) dt->m_Text); params.m_Text = text; params.m_WorldTransform.setTranslation(dmVMath::Vector3(dt->m_Position)); params.m_FaceColor = dt->m_Color; } DrawText(instance->m_RenderContext, instance->m_RenderContext->m_SystemFontMap, 0, 0, params); return; } else if (descriptor == dmRenderDDF::DrawLine::m_DDFDescriptor) { dmRenderDDF::DrawLine* dl = (dmRenderDDF::DrawLine*)message->m_Data; Line3D(instance->m_RenderContext, dl->m_StartPoint, dl->m_EndPoint, dl->m_Color, dl->m_Color); return; } else if (descriptor == dmRenderDDF::Resize::m_DDFDescriptor) { dmRenderDDF::Resize* resize_msg = (dmRenderDDF::Resize*)message->m_Data; dmGraphics::ResizeWindow(instance->m_RenderContext->m_GraphicsContext, resize_msg->m_Width, resize_msg->m_Height); return; } } context->m_Result = RunScript(instance, RENDER_SCRIPT_FUNCTION_ONMESSAGE, message); } RenderScriptResult DispatchRenderScriptInstance(HRenderScriptInstance instance) { DM_PROFILE(RenderScript, "DispatchRSI"); DispatchContext context; context.m_Instance = instance; context.m_Result = RENDER_SCRIPT_RESULT_OK; dmMessage::Dispatch(instance->m_RenderContext->m_Socket, 
DispatchCallback, (void*)&context); return context.m_Result; } RenderScriptResult UpdateRenderScriptInstance(HRenderScriptInstance instance, float dt) { DM_PROFILE(RenderScript, "UpdateRSI"); instance->m_CommandBuffer.SetSize(0); dmScript::UpdateScriptWorld(instance->m_ScriptWorld, dt); RenderScriptResult result = RunScript(instance, RENDER_SCRIPT_FUNCTION_UPDATE, (void*)&dt); if (instance->m_CommandBuffer.Size() > 0) ParseCommands(instance->m_RenderContext, &instance->m_CommandBuffer.Front(), instance->m_CommandBuffer.Size()); return result; } void OnReloadRenderScriptInstance(HRenderScriptInstance render_script_instance) { RunScript(render_script_instance, RENDER_SCRIPT_FUNCTION_ONRELOAD, 0x0); } }
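For reference, a minimal render script exercising the API implemented above might look as follows. This is a sketch, not engine-provided code: the predicate tag, clear color, and message name are illustrative conventions rather than requirements.

```lua
function init(self)
    -- "tile" is an illustrative material tag
    self.tile_pred = render.predicate({hash("tile")})
    self.view = vmath.matrix4()
end

function update(self, dt)
    render.set_viewport(0, 0, render.get_window_width(), render.get_window_height())
    render.set_view(self.view)
    -- orthographic projection over the logical window size
    render.set_projection(vmath.matrix4_orthographic(0, render.get_width(), 0, render.get_height(), -1, 1))
    render.clear({[render.BUFFER_COLOR_BIT] = vmath.vector4(0.2, 0.2, 0.2, 1), [render.BUFFER_DEPTH_BIT] = 1})
    render.enable_state(render.STATE_BLEND)
    render.set_blend_func(render.BLEND_SRC_ALPHA, render.BLEND_ONE_MINUS_SRC_ALPHA)
    render.draw(self.tile_pred)
    render.draw_debug3d()
end

function on_message(self, message_id, message)
    -- camera scripts conventionally send their matrices this way
    if message_id == hash("set_view_projection") then
        self.view = message.view
    end
end
```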
using BlockArrays using ControlSystems using LinearAlgebra using MatrixEquations using PredictiveControl using Test using SafeTestsets # Some utilities for testing include( "../src/utilities.jl" ) include( "testUtils.jl" ) @testset "PredictiveControl" begin @testset "Problem Types" begin @safetestset "LTI CLQR" begin include( "typeTests.jl" ) end end @testset "Fully condensed problem" begin @safetestset "Initial Propagation Matrix" begin include( "./matrices/FullCondensed_InitialPropagation.jl" ) end @safetestset "Prediction Matrix" begin include( "./matrices/FullCondensed_Prediction.jl" ) end @safetestset "Constraint Matrix" begin include( "./matrices/FullCondensed_Constraints.jl" ) end end @testset "Analysis" begin @safetestset "Condition number bound" begin include( "analysis/conditioning_bound.jl" ) end end @testset "Solvers" begin @testset "Iteration Utilities" begin @safetestset "apply" begin include( "solvers/iterationUtils/apply.jl" ) end @safetestset "stopping" begin include( "solvers/iterationUtils/stopping.jl" ) end end @testset "Fast Gradient Method" begin @safetestset "FGM Algorithm" begin include( "solvers/fastGradientMethod/fgm_algorithm.jl" ) end @safetestset "FGM Δ - Cold Start" begin include( "solvers/fastGradientMethod/fgm_colddelta.jl" ) end @safetestset "FGM upper iter bound" begin include( "solvers/fastGradientMethod/fgm_iterationbound.jl" ) end end end end
Releases of the third North American single, "Say It Right", and the third European single, "All Good Things (Come to an End)", took place in November and December, and the third Latin American single, "Promiscuous", was released in January 2007. "Say It Right" went to number one in the US and on the Nielsen BDS airplay chart in Canada (where it was not given a commercial release), and it reached the top five in Australia. "All Good Things (Come to an End)" reached number one on the pan-European singles chart and the top five in the UK, and it was the album's most successful single in Germany, where it topped the chart, and in France, where it became a top-ten hit. After the release of "Say It Right" in Europe in March 2007, the single reached the top five in Germany and the top ten in the UK, where it was a download-only release. The video for "All Good Things (Come to an End)" was released in North America during this period. "All Good Things (Come to an End)" peaked in the top five in Canada and in the top twenty in Australia, though it only reached the lower half of the US Hot 100.
function varargout = figure_spectrum( varargin ) % FIGURE_SPECTRUM: Creation and callbacks for power spectral density figures (x-axis = frequency). % % USAGE: hFig = figure_spectrum('CreateFigure', FigureId) % @============================================================================= % This function is part of the Brainstorm software: % https://neuroimage.usc.edu/brainstorm % % Copyright (c) University of Southern California & McGill University % This software is distributed under the terms of the GNU General Public License % as published by the Free Software Foundation. Further details on the GPLv3 % license can be found at http://www.gnu.org/copyleft/gpl.html. % % FOR RESEARCH PURPOSES ONLY. THE SOFTWARE IS PROVIDED "AS IS," AND THE % UNIVERSITY OF SOUTHERN CALIFORNIA AND ITS COLLABORATORS DO NOT MAKE ANY % WARRANTY, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF % MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, NOR DO THEY ASSUME ANY % LIABILITY OR RESPONSIBILITY FOR THE USE OF THIS SOFTWARE. % % For more information type "brainstorm license" at command prompt. % =============================================================================@ % % Authors: Francois Tadel, 2012-2023 % Martin Cousineau, 2017 % Marc Lalancette, 2020 eval(macro_method); end %% ===== CREATE FIGURE ===== function hFig = CreateFigure(FigureId) %#ok<DEFNU> import org.brainstorm.icon.*; MatlabVersion = bst_get('MatlabVersion'); % Get renderer name if (MatlabVersion <= 803) % zbuffer was removed in Matlab 2014b rendererName = 'zbuffer'; elseif (bst_get('DisableOpenGL') == 1) rendererName = 'painters'; else rendererName = 'opengl'; end % Create new figure hFig = figure('Visible', 'off', ... 'NumberTitle', 'off', ... 'IntegerHandle', 'off', ... 'MenuBar', 'none', ... 'Toolbar', 'none', ... 'DockControls', 'on', ... 'Units', 'pixels', ... 'Interruptible', 'off', ... 'BusyAction', 'queue', ... 'Tag', FigureId.Type, ... 'Renderer', rendererName, ... 'Color', [.8 .8 .8], ... 'Pointer', 'arrow', ... 'CloseRequestFcn', @(h,ev)bst_figures('DeleteFigure',h,ev), ... 'KeyPressFcn', @FigureKeyPressedCallback, ... 'WindowButtonDownFcn', @FigureMouseDownCallback, ... 'WindowButtonUpFcn', @FigureMouseUpCallback, ...
bst_get('ResizeFunction'), @(h,ev)figure_timeseries('ResizeCallback',h,ev)); % Define Mouse wheel callback separately (not supported by old versions of Matlab) if isprop(hFig, 'WindowScrollWheelFcn') set(hFig, 'WindowScrollWheelFcn', @FigureMouseWheelCallback); end % Disable automatic legends (after 2017a) if (MatlabVersion >= 902) set(hFig, 'defaultLegendAutoUpdate', 'off'); end % Prepare figure appdata setappdata(hFig, 'FigureId', FigureId); setappdata(hFig, 'hasMoved', 0); setappdata(hFig, 'isPlotEditToolbar', 0); setappdata(hFig, 'isSensorsOnly', 0); setappdata(hFig, 'GraphSelection', []); setappdata(hFig, 'isStatic', 0); setappdata(hFig, 'isStaticFreq', 1); setappdata(hFig, 'Colormap', db_template('ColormapInfo')); % Time-freq specific appdata TfInfo = db_template('TfInfo'); setappdata(hFig, 'Timefreq', TfInfo); end %% =========================================================================== % ===== FIGURE CALLBACKS ==================================================== % =========================================================================== %% ===== CURRENT TIME CHANGED ===== function CurrentTimeChangedCallback(hFig) %#ok<DEFNU> % If no time in this figure if getappdata(hFig, 'isStatic') return; end TfInfo = getappdata(hFig, 'Timefreq'); switch (TfInfo.DisplayMode) % Spectrum: redraw everything case 'Spectrum' UpdateFigurePlot(hFig, 1); % Time series: Move cursor case 'TimeSeries' hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); if ~isempty(hAxes) PlotCursor(hFig, hAxes); end end end %% ===== CURRENT FREQ CHANGED ===== function CurrentFreqChangedCallback(hFig) %#ok<DEFNU> global GlobalData; % If no frequencies for this figure if getappdata(hFig, 'isStaticFreq') return; end TfInfo = getappdata(hFig, 'Timefreq'); switch (TfInfo.DisplayMode) % Spectrum: Move cursor case 'Spectrum' hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); if ~isempty(hAxes) PlotCursor(hFig, hAxes); end % Time series: redraw everything case 'TimeSeries' TfInfo.iFreqs = GlobalData.UserFrequencies.iCurrentFreq; setappdata(hFig, 'Timefreq', TfInfo); UpdateFigurePlot(hFig, 1); end end %% ===== DISPLAY OPTIONS CHANGED ===== function DisplayOptionsChangedCallback(hFig) %#ok<DEFNU> % Restore initial view %ResetView(hFig); % Update display UpdateFigurePlot(hFig, 1); end %% ===== SELECTED ROW CHANGED ===== function SelectedRowChangedCallback(iDS, iFig) global GlobalData; % Get figure appdata hFig = GlobalData.DataSet(iDS).Figure(iFig).hFigure; % Get current selection for the figure curSelRows = figure_timeseries('GetFigSelectedRows', hFig); % Get all the rows available in this figure allFigRows = GlobalData.DataSet(iDS).Figure(iFig).Handles.LinesLabels; % Remove spaces allFigRows = cellfun(@(c)strrep(c,' ',''), allFigRows, 'UniformOutput', 0); % Get new selection that the figure should show (keep only the ones available for this figure) newSelRows = intersect(GlobalData.DataViewer.SelectedRows, allFigRows); % Sensors to select rowsToSel = setdiff(newSelRows, curSelRows); if ~isempty(rowsToSel) figure_timeseries('SetFigSelectedRows', hFig, rowsToSel, 1); end % Sensors to unselect rowsToUnsel = setdiff(curSelRows, newSelRows); if ~isempty(rowsToUnsel) figure_timeseries('SetFigSelectedRows', hFig, rowsToUnsel, 0); end end %% =========================================================================== % ===== KEYBOARD AND MOUSE CALLBACKS ======================================== % =========================================================================== %%
===== FIGURE MOUSE DOWN ===== function FigureMouseDownCallback(hFig, ev) % Get selected object in this figure hObj = get(hFig,'CurrentObject'); if isempty(hObj) return; end % Get object tag objTag = get(hObj, 'Tag'); % Re-select main axes drawnow; hAxes = findobj(hFig, '-depth', 1, 'tag', 'AxesGraph'); set(hFig,'CurrentObject', hAxes(1), 'CurrentAxes', hAxes(1)); % Get figure properties MouseStatus = get(hFig, 'SelectionType'); % Switch between available graphic objects switch (objTag) case 'Spectrum' % Figure: Keep the main axes as clicked object hAxes = hAxes(1); case 'AxesGraph' % Axes: selected axes = the one that was clicked hAxes = hObj; case 'DataLine' % Time series lines: select if (~strcmpi(MouseStatus, 'alt') || (get(hObj, 'LineWidth') > 1)) LineClickedCallback(hObj); return; end case 'SelectionPatch' % Shift+click: zoom into selection (otherwise, regular click) if strcmpi(MouseStatus, 'extend') ZoomSelection(hFig); return; else hAxes = get(hObj, 'Parent'); end case {'Cursor', 'TextCursor'} hAxes = get(hObj, 'Parent'); case 'legend' legendButtonDownFcn = get(hObj, 'ButtonDownFcn'); if ~isempty(legendButtonDownFcn) if iscell(legendButtonDownFcn) legendButtonDownFcn{1}(hObj, ev, legendButtonDownFcn{2}); else % After Matlab 2014b.... end end return otherwise % Any other object: consider as a click on the main axes end % ===== PROCESS CLICKS ON MAIN TS AXES ===== % Start an action (Move time cursor, pan) switch(MouseStatus) % Left click case 'normal' clickAction = 'selection'; % Initialize time selection [Xcur, iXcur, Xvector] = GetMouseX(hFig, hAxes); if (length(Xvector) > 1) setappdata(hFig, 'GraphSelection', [Xcur, Inf]); else setappdata(hFig, 'GraphSelection', []); end % CTRL+Mouse, or Mouse right case 'alt' clickAction = 'gzoom'; set(hFig, 'Pointer', 'top'); % SHIFT+Mouse case 'extend' clickAction = 'pan'; set(hFig, 'Pointer', 'fleur'); % DOUBLE CLICK case 'open' ResetView(hFig); return; % OTHER : nothing to do otherwise return end % Reset the motion flag setappdata(hFig, 'hasMoved', 0); % Record mouse location in the figure coordinates system setappdata(hFig, 'clickPositionFigure', get(hFig, 'CurrentPoint')); % Record action to perform when the mouse is moved setappdata(hFig, 'clickAction', clickAction); % Record axes object that was clicked (useful when more than one axes object in figure) setappdata(hFig, 'clickSource', hAxes); % Register MouseMoved callbacks for current figure set(hFig, 'WindowButtonMotionFcn', @FigureMouseMoveCallback); end %% ===== FIGURE MOUSE MOVE ===== function FigureMouseMoveCallback(hFig, ev) % Get current mouse action clickAction = getappdata(hFig, 'clickAction'); hAxes = getappdata(hFig, 'clickSource'); if isempty(clickAction) || isempty(hAxes) return end % Set the motion flag setappdata(hFig, 'hasMoved', 1); % Get current mouse location curptFigure = get(hFig, 'CurrentPoint'); motionFigure = (curptFigure - getappdata(hFig, 'clickPositionFigure')) / 100; % Update click point location setappdata(hFig, 'clickPositionFigure', curptFigure); % Switch between different actions (Pan, Rotate, Contrast) switch(clickAction) case 'pan' % maybe could use figure_timeseries.FigurePan % Get initial XLim and YLim XLimInit = getappdata(hAxes, 'XLimInit'); YLimInit = getappdata(hAxes, 'YLimInit'); % Move view along X axis XLim = get(hAxes, 'XLim'); isXLog = strcmpi(get(hAxes, 'XScale'), 'log'); if isXLog XLim = log10(XLim); XLimInit = log10(XLimInit); end XLim = XLim - (XLim(2) - XLim(1)) * motionFigure(1); XLim = bst_saturate(XLim, XLimInit, 1); if isXLog XLim =
10.^XLim; end set(hAxes, 'XLim', XLim); % Move view along Y axis YLim = get(hAxes, 'YLim'); isYLog = strcmpi(get(hAxes, 'YScale'), 'log'); if isYLog YLim = log10(YLim); YLimInit = log10(YLimInit); end YLim = YLim - (YLim(2) - YLim(1)) * motionFigure(2); YLim = bst_saturate(YLim, YLimInit, 1); if isYLog YLim = 10.^YLim; end set(hAxes, 'YLim', YLim); % Set the cursor height hCursor = findobj(hAxes, '-depth', 1, 'Tag', 'Cursor'); set(hCursor, 'YData', YLim); % Set the selection rectangle dimensions hSelectionPatch = findobj(hAxes, '-depth', 1, 'Tag', 'SelectionPatch'); if ~isempty(hSelectionPatch) set(hSelectionPatch, 'YData', [YLim(1), YLim(1), YLim(2), YLim(2)]); end case 'selection' % Get time selection GraphSelection = getappdata(hFig, 'GraphSelection'); % Time selection if ~isempty(GraphSelection) % Update time selection Xcur = GetMouseX(hFig, hAxes); GraphSelection(2) = Xcur; setappdata(hFig, 'GraphSelection', GraphSelection); % Redraw time selection DrawSelection(hFig); end case 'gzoom' % Gain zoom ScrollCount = -motionFigure(2) * 10; figure_timeseries('FigureScroll', hFig, ScrollCount, 'gzoom'); % Apply same limits as when panning YLimInit = getappdata(hAxes, 'YLimInit'); YLim = get(hAxes, 'YLim'); isYLog = strcmpi(get(hAxes, 'YScale'), 'log'); if isYLog YLim = log10(YLim); YLimInit = log10(YLimInit); end YLim = bst_saturate(YLim, YLimInit, 1); if isYLog YLim = 10.^YLim; end set(hAxes, 'YLim', YLim); end end %% ===== FIGURE MOUSE UP ===== function FigureMouseUpCallback(hFig, event) % Get mouse state hasMoved = getappdata(hFig, 'hasMoved'); MouseStatus = get(hFig, 'SelectionType'); % Reset figure mouse fields setappdata(hFig, 'clickAction', ''); setappdata(hFig, 'hasMoved', 0); % Restore mouse pointer set(hFig, 'Pointer', 'arrow'); drawnow; % Get axes handles hAxes = getappdata(hFig, 'clickSource'); if isempty(hAxes) || ~ishandle(hAxes) return end % If mouse has not moved: popup or time change Xmode = 'unknown'; if ~hasMoved && ~isempty(MouseStatus) if strcmpi(MouseStatus, 'normal') % Get current frequency [Xcur, iXcur, Xvector, Xmode] = GetMouseX(hFig, hAxes); % Update plot if ~isempty(Xcur) % Move time cursor to new time hCursor = findobj(hAxes, '-depth', 1, 'Tag', 'Cursor'); set(hCursor, 'XData', Xcur.*[1 1]); drawnow; % Update the current time in the whole application switch(Xmode) case 'Spectrum' panel_freq('SetCurrentFreq', iXcur); case 'TimeSeries' panel_time('SetCurrentTime', Xcur); end % Remove previous time selection patch setappdata(hFig, 'GraphSelection', []); DrawSelection(hFig); end else % Popup DisplayFigurePopup(hFig); end end % Reset MouseMove callbacks for current figure set(hFig, 'WindowButtonMotionFcn', []); % Remove mouse callbacks appdata setappdata(hFig, 'clickSource', []); setappdata(hFig, 'clickAction', []); % Update figure selection bst_figures('SetCurrentFigure', hFig, 'TF'); bst_figures('SetCurrentFigure', hFig, '2D'); end %% ===== GET MOUSE X ===== function [Xcur, iXcur, Xvector, Xmode] = GetMouseX(hFig, hAxes) % Get current point in axes Xcur = get(hAxes, 'CurrentPoint'); XLim = get(hAxes, 'XLim'); TfInfo = getappdata(hFig, 'Timefreq'); % Check whether cursor is out of display time bounds Xcur= bst_saturate(Xcur(1,1), XLim); % Get the X vector [Time, Freqs] = figure_timefreq('GetFigureData', hFig); switch (TfInfo.DisplayMode) case 'Spectrum', Xvector = Freqs; case 'TimeSeries', Xvector = Time; end Xmode = TfInfo.DisplayMode; % Bands (time or freq) if iscell(Xvector) CenterBand = mean(process_tf_bands('GetBounds', Freqs), 2); iXcur = bst_closest(Xcur, 
CenterBand); Xcur = CenterBand(iXcur); else iXcur = bst_closest(Xcur, Xvector); Xcur = Xvector(iXcur); end end %% ===== DRAW SELECTION ===== function DrawSelection(hFig) % Get axes (can have more than one) hAxesList = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); % Get time selection GraphSelection = getappdata(hFig, 'GraphSelection'); % Get display mode TfInfo = getappdata(hFig, 'Timefreq'); % Process all the axes for i = 1:length(hAxesList) hAxes = hAxesList(i); % Draw new time selection patch if ~isempty(GraphSelection) && ~isinf(GraphSelection(2)) % Get axes limits YLim = get(hAxes, 'YLim'); % Get previous patch hSelPatch = findobj(hAxes, '-depth', 1, 'Tag', 'SelectionPatch'); % Position of the square patch XData = [GraphSelection(1), GraphSelection(2), GraphSelection(2), GraphSelection(1)]; YData = [YLim(1), YLim(1), YLim(2), YLim(2)]; ZData = [0.01 0.01 0.01 0.01]; % If the patch does not exist yet: create it if isempty(hSelPatch) % EraseMode: Only for Matlab <= 2014a if (bst_get('MatlabVersion') <= 803) optErase = {'EraseMode', 'xor'}; % INCOMPATIBLE WITH OPENGL RENDERER (BUG), REMOVED IN MATLAB 2014b patchColor = [.3 .3 1]; else optErase = {}; patchColor = [.7 .7 1]; end % Draw patch hSelPatch = patch('XData', XData, ... 'YData', YData, ... 'ZData', ZData, ... 'LineWidth', 1, ... optErase{:}, ... 'FaceColor', patchColor, ... 'FaceAlpha', 1, ... 'EdgeColor', patchColor, ... 'EdgeAlpha', 1, ... 'Tag', 'SelectionPatch', ... 'Parent', hAxes); % Else, the patch already exists: update it else % Change patch limits set(hSelPatch, ... 'XData', XData, ... 'YData', YData, ... 'ZData', ZData, ... 'Visible', 'on'); end % === UPDATE X-LABEL === switch (TfInfo.DisplayMode) case 'Spectrum' strSelection = sprintf('Selection: [%1.2f Hz - %1.2f Hz]', min(GraphSelection), max(GraphSelection)); case 'TimeSeries' % Get current time units timeUnit = panel_time('GetTimeUnit'); % Update label according to the time units switch (timeUnit) case 'ms' strSelection = sprintf('Selection: [%1.2f ms - %1.2f ms]', min(GraphSelection)*1000, max(GraphSelection)*1000); case 's' strSelection = sprintf('Selection: [%1.4f s - %1.4f s]', min(GraphSelection), max(GraphSelection)); end strLength = sprintf(' Duration: [%d ms]', round(abs(GraphSelection(2) - GraphSelection(1)) * 1000)); strSelection = [strSelection, strLength]; end % Get selection label hTextTimeSel = findobj(hFig, '-depth', 1, 'Tag', 'TextTimeSel'); if ~isempty(hTextTimeSel) % Update label set(hTextTimeSel, 'Visible', 'on', 'String', strSelection); end else % Remove previous selection patch set(findobj(hAxes, '-depth', 1, 'Tag', 'SelectionPatch'), 'Visible', 'off'); set(findobj(hFig, '-depth', 1, 'Tag', 'TextTimeSel'), 'Visible', 'off'); end end end %% ===== SET FREQ SELECTION ===== % Define manually the freq selection for a given Spectrum figure % % USAGE: SetFreqSelection(hFig, Xsel) % SetFreqSelection(hFig) function SetFreqSelection(hFig, Xsel) % Get figure display mode TfInfo = getappdata(hFig, 'Timefreq'); % Get the X vector for this figure [Time, Freqs] = figure_timefreq('GetFigureData', hFig); switch (TfInfo.DisplayMode) case 'Spectrum' Xvector = Freqs; strUnits = 'Hz'; case 'TimeSeries' Xvector = Time; strUnits = 's'; end if (length(Xvector) <= 1) || iscell(Xvector) return; end % Ask for a frequency range if (nargin < 2) || isempty(Xsel) Xsel = panel_freq('InputSelectionWindow', Xvector([1,end]), 'Set frequency selection', strUnits); if isempty(Xsel) return end end % Select the closest point in the X vector Xsel = Xvector(bst_closest(Xsel, Xvector)); % Draw
new time selection setappdata(hFig, 'GraphSelection', Xsel); DrawSelection(hFig); end %% ===== ZOOM INTO SELECTION ===== function ZoomSelection(hFig) % Get time selection GraphSelection = getappdata(hFig, 'GraphSelection'); if isempty(GraphSelection) || isinf(GraphSelection(2)) return; end % Set axes bounds to selection hAxesList = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); set(hAxesList, 'XLim', [min(GraphSelection), max(GraphSelection)]); % Draw new time selection setappdata(hFig, 'GraphSelection', []); DrawSelection(hFig); end %% ===== FIGURE MOUSE WHEEL ===== function FigureMouseWheelCallback(hFig, event) if isempty(event) return; end % SHIFT + Scroll if ismember('shift', get(hFig,'CurrentModifier')) figure_timeseries('FigureScroll', hFig, event.VerticalScrollCount, 'gzoom'); % CTRL + Scroll elseif ismember('control', get(hFig,'CurrentModifier')) figure_timeseries('FigureScroll', hFig, event.VerticalScrollCount, 'vertical'); % Regular scroll else figure_timeseries('FigureScroll', hFig, event.VerticalScrollCount, 'horizontal'); end end %% ===== KEYBOARD CALLBACK ===== function FigureKeyPressedCallback(hFig, ev) global GlobalData; % Convert event to Matlab (in case it's coming from a java callback) [keyEvent, isControl, isShift] = gui_brainstorm('ConvertKeyEvent', ev); if isempty(keyEvent.Key) return end % Prevent multiple executions hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph')'; set([hFig hAxes], 'BusyAction', 'cancel'); % Get time freq information TfInfo = getappdata(hFig, 'Timefreq'); TfFile = TfInfo.FileName; if isempty(TfFile) return; end % Process event switch (keyEvent.Key) % Arrows case {'leftarrow', 'rightarrow', 'pageup', 'pagedown', 'home', 'end'} switch (TfInfo.DisplayMode) case 'Spectrum', panel_freq('FreqKeyCallback', keyEvent); case 'TimeSeries', panel_time('TimeKeyCallback', keyEvent); end case {'uparrow', 'downarrow'} % UP/DOWN: Change data row if ~isempty(TfInfo.RowName) && (ischar(TfInfo.RowName) || (length(TfInfo.RowName) == 1)) panel_display('SetSelectedRowName', hFig, keyEvent.Key); else switch (TfInfo.DisplayMode) case 'Spectrum', panel_freq('FreqKeyCallback', keyEvent); case 'TimeSeries', panel_time('TimeKeyCallback', keyEvent); end end % CTRL+D : Dock figure case 'd' if isControl isDocked = strcmpi(get(hFig, 'WindowStyle'), 'docked'); bst_figures('DockFigure', hFig, ~isDocked); end % CTRL+I : Save as image case 'i' if isControl out_figure_image(hFig); end % CTRL+J : Open as image case 'j' if isControl out_figure_image(hFig, 'Viewer'); end % CTRL+F : Open as figure case 'f' if isControl out_figure_image(hFig, 'Figure'); end % CTRL+R : Recordings time series case 'r' if isControl % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); % If there is an associated and available DataFile if ~isempty(GlobalData.DataSet(iDS).DataFile) view_timeseries(GlobalData.DataSet(iDS).DataFile, GlobalData.DataSet(iDS).Figure(iFig).Id.Modality); end end % CTRL+T : Default topography case 't' if isControl view_topography(TfFile, [], '2DSensorCap', [], 0); end % Y : Scale to fit Y axis case 'y' TsInfo = getappdata(hFig, 'TsInfo'); if strcmpi(TsInfo.DisplayMode, 'butterfly') figure_timeseries('ScaleToFitY', hFig, ev); end % RETURN: VIEW SELECTED CHANNELS case 'return' DisplaySelectedRows(hFig); % DELETE: SET CHANNELS AS BAD case {'delete', 'backspace'} % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); % Get selected rows SelChan = figure_timeseries('GetFigSelectedRows', hFig); % Only for PSD attached directly to a data file
if ~isempty(SelChan) && ~isempty(GlobalData.DataSet(iDS).DataFile) && ... (length(GlobalData.DataSet(iDS).Figure(iFig).SelectedChannels) ~= length(SelChan)) && ... ~isempty(strfind(TfFile, '_psd')) && ... strcmpi(file_gettype(GlobalData.DataSet(iDS).DataFile), 'data') AddParentBadChannels(hFig, SelChan); end % ESCAPE: CLEAR SELECTION case 'escape' bst_figures('SetSelectedRows', []); % OTHER otherwise % Not found: test based on the character that was generated if isfield(keyEvent, 'Character') && ~isempty(keyEvent.Character) switch(keyEvent.Character) % PLUS/MINUS: GAIN CONTROL case '+' figure_timeseries('UpdateTimeSeriesFactor', hFig, 1.1); case '-' figure_timeseries('UpdateTimeSeriesFactor', hFig, .9091); end end end % Restore events if ~isempty(hFig) && ishandle(hFig) hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph')'; set([hFig hAxes], 'BusyAction', 'queue'); end end %% ===== ADD BAD CHANNELS ===== function AddParentBadChannels(hFig, BadChan) global GlobalData; % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); if isempty(hFig) return; end % Get indices in the channel file iBad = []; for i = 1:length(BadChan) iBad = [iBad, find(strcmpi(BadChan{i}, {GlobalData.DataSet(iDS).Channel.Name}))]; end % Get selected rows if ~isempty(iBad) && strcmpi(file_gettype(GlobalData.DataSet(iDS).DataFile), 'data') && ~isempty(GlobalData.DataSet(iDS).DataFile) % Add new bad channels newChannelFlag = GlobalData.DataSet(iDS).Measures.ChannelFlag; newChannelFlag(iBad) = -1; % Update channel flag panel_channel_editor('UpdateChannelFlag', GlobalData.DataSet(iDS).DataFile, newChannelFlag); % Reset selection (done in UpdateChannelFlag) %bst_figures('SetSelectedRows', []); end end %% ===== GET DEFAULT FACTOR ===== function defaultFactor = GetDefaultFactor(Modality) global GlobalData if isempty(GlobalData.DataViewer.DefaultFactor) defaultFactor = 1; else iMod = find(cellfun(@(c)isequal(c,Modality), GlobalData.DataViewer.DefaultFactor(:,1))); if isempty(iMod) defaultFactor = 1; else defaultFactor = GlobalData.DataViewer.DefaultFactor{iMod,2}; end end end %% ===== LINE CLICKED ===== function LineClickedCallback(hLine, ev) global GlobalData; % Get figure handle hFig = get(hLine, 'Parent'); while ~strcmpi(get(hFig, 'Type'), 'figure') || isempty(hFig) hFig = get(hFig, 'Parent'); end if isempty(hFig) return; end hAxes = get(hLine, 'Parent'); setappdata(hFig, 'clickSource', []); % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); sFig = GlobalData.DataSet(iDS).Figure(iFig); % Get row index iRow = find(sFig.Handles.hLines == hLine); if isempty(iRow) return end RowName = sFig.Handles.LinesLabels{iRow}; % Get click type isRightClick = strcmpi(get(hFig, 'SelectionType'), 'alt'); % Right click : display popup menu if isRightClick setappdata(hFig, 'clickSource', hAxes); DisplayFigurePopup(hFig, RowName); setappdata(hFig, 'clickSource', []); % Left click: Select/unselect line else bst_figures('ToggleSelectedRow', RowName); end % Update figure selection bst_figures('SetCurrentFigure', hFig, '2D'); bst_figures('SetCurrentFigure', hFig, 'TF'); end %% ===== DISPLAY SELECTED CHANNELS ===== % USAGE: DisplaySelectedRows(hFig) function DisplaySelectedRows(hFig) % Get selected rows RowNames = figure_timeseries('GetFigSelectedRows', hFig); if isempty(RowNames) return; end % Reset selection bst_figures('SetSelectedRows', []); % Get figure info TfInfo = getappdata(hFig, 'Timefreq'); % Plot figure view_spectrum(TfInfo.FileName, TfInfo.DisplayMode, RowNames, 1); end %% ===== RESET VIEW
===== function ResetView(hFig) % Get list of axes in this figure hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); % Restore initial X and Y zooms XLim = getappdata(hAxes, 'XLimInit'); YLim = getappdata(hAxes, 'YLimInit'); set(hAxes, 'XLim', XLim); set(hAxes, 'YLim', YLim); % Set the time cursor height to the maximum of the display hCursor = findobj(hAxes, '-depth', 1, 'Tag', 'Cursor'); set(hCursor, 'YData', YLim); % Set the selection rectangle dimensions hSelectionPatch = findobj(hAxes, '-depth', 1, 'Tag', 'SelectionPatch'); if ~isempty(hSelectionPatch) set(hSelectionPatch, 'YData', [YLim(1), YLim(1), YLim(2), YLim(2)]); end end %% ===== HIDE/SHOW LEGENDS ===== function newPropVal = ToggleAxesProperty(hAxes, propName) switch get(hAxes(1), propName) case 'on' set(hAxes, propName, 'off'); newPropVal = 0; case 'off' set(hAxes, propName, 'on'); newPropVal = 1; end end function SetShowLegend(iDS, iFig, ShowLegend) global GlobalData; % Update TsInfo field hFig = GlobalData.DataSet(iDS).Figure(iFig).hFigure; TsInfo = getappdata(hFig, 'TsInfo'); TsInfo.ShowLegend = ShowLegend; setappdata(hFig, 'TsInfo', TsInfo); % Redraw figure UpdateFigurePlot(hFig, 1); end function ToggleGrid(hAxes, hFig, xy) isSel = ToggleAxesProperty(hAxes, [xy 'Grid']); ToggleAxesProperty(hAxes, [xy 'MinorGrid']); TsInfo = getappdata(hFig, 'TsInfo'); TsInfo = setfield(TsInfo, ['Show' xy 'Grid'], isSel); setappdata(hFig, 'TsInfo', TsInfo); RefreshGridBtnDisplay(hFig, TsInfo); end function ToggleLogScaleX(hAxes, hFig, loglin) set(hAxes, 'XScale', loglin); TsInfo = getappdata(hFig, 'TsInfo'); TsInfo.XScale = loglin; setappdata(hFig, 'TsInfo', TsInfo); RefreshLogScaleBtnDisplay(hFig, TsInfo); bst_set('XScale', loglin); end function RefreshLogScaleBtnDisplay(hFig, TsInfo) % Toggle selection of associated button if possible buttonContainer = findobj(hFig, '-depth', 1, 'Tag', 'ButtonSetScaleLog'); if ~isempty(buttonContainer) button = get(buttonContainer, 'UserData'); button.setSelected(strcmp(TsInfo.XScale, 'log')); end end function RefreshGridBtnDisplay(hFig, TsInfo) % Toggle selection of associated button if possible buttonContainer = findobj(hFig, '-depth', 1, 'Tag', 'ButtonShowGrids'); if ~isempty(buttonContainer) button = get(buttonContainer, 'UserData'); button.setSelected((TsInfo.ShowXGrid & TsInfo.ShowYGrid) || ... 
(strcmpi(TsInfo.DisplayMode, 'column') & TsInfo.ShowXGrid)); end end %% ===== POPUP MENU ===== function DisplayFigurePopup(hFig, menuTitle) import java.awt.event.KeyEvent; import javax.swing.KeyStroke; import org.brainstorm.icon.*; global GlobalData; % If menuTitle not specified if (nargin < 2) menuTitle = ''; end % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); % Get axes handles hAxes = getappdata(hFig, 'clickSource'); if isempty(hAxes) return end % Get time freq information TfInfo = getappdata(hFig, 'Timefreq'); TfFile = TfInfo.FileName; if isempty(TfFile) return; end TsInfo = getappdata(hFig, 'TsInfo'); % Get loaded information iTimefreq = bst_memory('GetTimefreqInDataSet', iDS, TfFile); % Create popup menu jPopup = java_create('javax.swing.JPopupMenu'); % Menu title if ~isempty(menuTitle) jTitle = gui_component('Label', jPopup, [], ['<HTML><B>' menuTitle '</B>']); jTitle.setBorder(javax.swing.BorderFactory.createEmptyBorder(5,35,0,0)); jPopup.addSeparator(); end % ==== DISPLAY OTHER FIGURES ==== % Only for MEG and EEG time series if strcmpi(GlobalData.DataSet(iDS).Timefreq(iTimefreq).DataType, 'data') % === View RECORDINGS === if ~isempty(GlobalData.DataSet(iDS).DataFile) jItem = gui_component('MenuItem', jPopup, [], 'Recordings', IconLoader.ICON_TS_DISPLAY, [], @(h,ev)view_timeseries(GlobalData.DataSet(iDS).DataFile, GlobalData.DataSet(iDS).Figure(iFig).Id.Modality)); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_R, KeyEvent.CTRL_MASK)); end % === View TOPOGRAPHY === if ~isempty(GlobalData.DataSet(iDS).Figure(iFig).Id.Modality) && ismember(GlobalData.DataSet(iDS).Figure(iFig).Id.Modality, {'MEG MAG','MEG GRAD','MEG','EEG'}) jItem = gui_component('MenuItem', jPopup, [], '2D Sensor cap', IconLoader.ICON_TOPOGRAPHY, [], @(h,ev)bst_call(@view_topography, TfFile, [], '2DSensorCap', [], 0)); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_T, KeyEvent.CTRL_MASK)); jPopup.addSeparator(); end end % === VIEW SELECTED === jItem = gui_component('MenuItem', jPopup, [], 'View selected', IconLoader.ICON_SPECTRUM, [], @(h,ev)DisplaySelectedRows(hFig)); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_ENTER, 0)); % ENTER % === SET SELECTED AS BAD CHANNELS === % Get selected rows SelChan = figure_timeseries('GetFigSelectedRows', hFig); % Only for PSD attached directly to a data file if ~isempty(SelChan) && ~isempty(GlobalData.DataSet(iDS).DataFile) && ... (length(GlobalData.DataSet(iDS).Figure(iFig).SelectedChannels) ~= length(SelChan)) && ... ~isempty(strfind(TfFile, '_psd')) && ... 
strcmpi(file_gettype(GlobalData.DataSet(iDS).DataFile), 'data') jItem = gui_component('MenuItem', jPopup, [], 'Mark selected as bad', IconLoader.ICON_BAD, [], @(h,ev)AddParentBadChannels(hFig, SelChan)); jItem.setAccelerator(KeyStroke.getKeyStroke(int32(KeyEvent.VK_DELETE), 0)); % DEL end % === RESET SELECTION === jItem = gui_component('MenuItem', jPopup, [], 'Reset selection', IconLoader.ICON_SURFACE, [], @(h,ev)bst_figures('SetSelectedRows',[])); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0)); % ESCAPE jPopup.addSeparator(); % ==== MENU: SELECTION ==== % No time/freq bands if ~iscell(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Freqs) && isempty(GlobalData.DataSet(iDS).Timefreq(iTimefreq).TimeBands) % Menu "Selection" switch(TfInfo.DisplayMode) case 'Spectrum', strMenu = 'Frequency selection'; case 'TimeSeries', strMenu = 'Time selection'; end jMenuSelection = gui_component('Menu', jPopup, [], strMenu, IconLoader.ICON_TS_SELECTION); % Set selection gui_component('MenuItem', jMenuSelection, [], 'Set selection manually...', IconLoader.ICON_TS_SELECTION, [], @(h,ev)SetFreqSelection(hFig)); % Get current time selection GraphSelection = getappdata(hFig, 'GraphSelection'); isSelection = ~isempty(GraphSelection) && ~any(isinf(GraphSelection(:))); if isSelection gui_component('MenuItem', jMenuSelection, [], 'Zoom into selection (Shift+click)', IconLoader.ICON_ZOOM_PLUS, [], @(h,ev)ZoomSelection(hFig)); jMenuSelection.addSeparator(); % === EXPORT TO DATABASE === if ~strcmpi(TfInfo.DisplayMode, 'TimeSeries') gui_component('MenuItem', jMenuSelection, [], 'Export to database', IconLoader.ICON_SPECTRUM, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, 'Database', 'Selection')); end % === EXPORT TO FILE === gui_component('MenuItem', jMenuSelection, [], 'Export to file', IconLoader.ICON_TS_EXPORT, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, [], 'Selection')); % === EXPORT TO MATLAB === gui_component('MenuItem', jMenuSelection, [], 'Export to Matlab', IconLoader.ICON_MATLAB_EXPORT, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, 'Variable', 'Selection')); end jPopup.addSeparator(); end % ==== MENU: SNAPSHOT ==== jMenuSave = gui_component('Menu', jPopup, [], 'Snapshots', IconLoader.ICON_SNAPSHOT); % === SAVE AS IMAGE === jItem = gui_component('MenuItem', jMenuSave, [], 'Save as image', IconLoader.ICON_SAVE, [], @(h,ev)bst_call(@out_figure_image, hFig)); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_I, KeyEvent.CTRL_MASK)); % === OPEN AS IMAGE === jItem = gui_component('MenuItem', jMenuSave, [], 'Open as image', IconLoader.ICON_IMAGE, [], @(h,ev)bst_call(@out_figure_image, hFig, 'Viewer')); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_J, KeyEvent.CTRL_MASK)); jItem = gui_component('MenuItem', jMenuSave, [], 'Open as figure', IconLoader.ICON_IMAGE, [], @(h,ev)bst_call(@out_figure_image, hFig, 'Figure')); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_F, KeyEvent.CTRL_MASK)); jMenuSave.addSeparator(); % === EXPORT TO DATABASE === gui_component('MenuItem', jMenuSave, [], 'Export to database (time-freq)', IconLoader.ICON_TIMEFREQ, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, 'Database')); gui_component('MenuItem', jMenuSave, [], 'Export to database (matrix)', IconLoader.ICON_MATRIX, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, 'Database', 'Matrix')); % === EXPORT TO FILE === gui_component('MenuItem', jMenuSave, [], 'Export to file', IconLoader.ICON_TS_EXPORT, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, [])); % === EXPORT TO MATLAB === 
gui_component('MenuItem', jMenuSave, [], 'Export to Matlab', IconLoader.ICON_MATLAB_EXPORT, [], @(h,ev)bst_call(@out_figure_timefreq, hFig, 'Variable')); % === EXPORT TO PLOTLY === gui_component('MenuItem', jMenuSave, [], 'Export to Plotly', IconLoader.ICON_PLOTLY, [], @(h,ev)bst_call(@out_figure_plotly, hFig)); % ==== MENU: FIGURE ==== jMenuFigure = gui_component('Menu', jPopup, [], 'Figure', IconLoader.ICON_LAYOUT_SHOWALL); % XScale isXLog = strcmpi(get(hAxes, 'XScale'), 'log'); if isXLog jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'X scale: linear', IconLoader.ICON_LOG, [], @(h,ev)ToggleLogScaleX(hAxes, hFig, 'linear')); else jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'X scale: log', IconLoader.ICON_LOG, [], @(h,ev)ToggleLogScaleX(hAxes, hFig, 'log')); end % YScale isYLog = strcmpi(get(hAxes, 'YScale'), 'log'); if isYLog jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Y scale: linear', IconLoader.ICON_LOG, [], @(h,ev)figure_timeseries('SetScaleModeY', hFig, 'linear')); else jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Y scale: log', IconLoader.ICON_LOG, [], @(h,ev)figure_timeseries('SetScaleModeY', hFig, 'log')); end jMenuFigure.addSeparator(); % Legend jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Show legend', IconLoader.ICON_LABELS, [], @(h,ev)SetShowLegend(iDS, iFig, ~TsInfo.ShowLegend)); jItem.setSelected(TsInfo.ShowLegend); % XGrid isXGrid = strcmpi(get(hAxes(1), 'XGrid'), 'on'); jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Show XGrid', IconLoader.ICON_GRID_X, [], @(h,ev)ToggleGrid(hAxes, hFig, 'X')); jItem.setSelected(isXGrid); % YGrid isYGrid = strcmpi(get(hAxes(1), 'YGrid'), 'on'); jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Show YGrid', IconLoader.ICON_GRID_Y, [], @(h,ev)ToggleGrid(hAxes, hFig, 'Y')); jItem.setSelected(isYGrid); % Change background color jMenuFigure.addSeparator(); gui_component('MenuItem', jMenuFigure, [], 'Change background color', IconLoader.ICON_COLOR_SELECTION, [], @(h,ev)bst_figures('SetBackgroundColor', hFig)); jMenuFigure.addSeparator(); % Show Matlab controls isMatlabCtrl = ~strcmpi(get(hFig, 'MenuBar'), 'none') && ~strcmpi(get(hFig, 'ToolBar'), 'none'); jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Matlab controls', IconLoader.ICON_MATLAB_CONTROLS, [], @(h,ev)bst_figures('ShowMatlabControls', hFig, ~isMatlabCtrl)); jItem.setSelected(isMatlabCtrl); % Show plot edit toolbar isPlotEditToolbar = getappdata(hFig, 'isPlotEditToolbar'); jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Plot edit toolbar', IconLoader.ICON_PLOTEDIT, [], @(h,ev)bst_figures('TogglePlotEditToolbar', hFig)); jItem.setSelected(isPlotEditToolbar); % Dock figure isDocked = strcmpi(get(hFig, 'WindowStyle'), 'docked'); jItem = gui_component('CheckBoxMenuItem', jMenuFigure, [], 'Dock figure', IconLoader.ICON_DOCK, [], @(h,ev)bst_figures('DockFigure', hFig, ~isDocked)); jItem.setSelected(isDocked); jItem.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_D, KeyEvent.CTRL_MASK)); % Clone figure jMenuFigure.addSeparator(); gui_component('MenuItem', jMenuFigure, [], 'Clone figure', IconLoader.ICON_COPY, [], @(h,ev)bst_figures('CloneFigure', hFig)); % Display Popup menu gui_popup(jPopup, hFig); end %% =========================================================================== % ===== PLOT FUNCTIONS ====================================================== % =========================================================================== %% ===== UPDATE FIGURE ===== function 
UpdateFigurePlot(hFig, isForced) if (nargin < 2) || isempty(isForced) isForced = 0; end % ===== GET DATA ===== % If spectrum: get current time only FigureId = getappdata(hFig, 'FigureId'); isSpectrum = strcmpi(FigureId.SubType, 'Spectrum'); if isSpectrum TimeDef = 'CurrentTimeIndex'; else TimeDef = []; end % Get data to plot [Time, Freqs, TfInfo, TF, RowNames, FullTimeVector, DataType, tmp, iTimefreq] = figure_timefreq('GetFigureData', hFig, TimeDef); if isempty(TF) return; end % Plot figure PlotFigure(hFig, isForced, isSpectrum, Time, Freqs, TfInfo, TF, RowNames, iTimefreq); end %% ===== PLOT FIGURE ===== function PlotFigure(hFig, isForced, isSpectrum, Time, Freqs, TfInfo, TF, RowNames, iTimefreq) global GlobalData; % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); sFig = GlobalData.DataSet(iDS).Figure(iFig); % Row names if ~isempty(RowNames) && ischar(RowNames) RowNames = {RowNames}; end % Exclude symmetric values (for producing simpler legends) if isfield(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options, 'isSymmetric') && GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.isSymmetric ... && (isequal(GlobalData.DataSet(iDS).Timefreq(iTimefreq).RefRowNames, GlobalData.DataSet(iDS).Timefreq(iTimefreq).RowNames) || ... isequal(GlobalData.DataSet(iDS).Timefreq(iTimefreq).RefRowNames, GlobalData.DataSet(iDS).Timefreq(iTimefreq).RowNames'))... && (sqrt(length(RowNames)) == length(GlobalData.DataSet(iDS).Timefreq(iTimefreq).RowNames)) N = length(GlobalData.DataSet(iDS).Timefreq(iTimefreq).RowNames); indKeep = []; for ij = 1:N for ii = ij:N indKeep(end+1) = ii + (ij-1) * N; end end RowNames = RowNames(indKeep); TF = TF(indKeep,:,:); end % Line labels if iscell(RowNames) LinesLabels = RowNames(:); elseif isnumeric(RowNames) LinesLabels = cell(length(RowNames),1); for i = 1:length(RowNames) LinesLabels{i} = num2str(RowNames(i)); end end % Replicate inputs when ScoutFunction='All' nLines = size(TF,1); if ~isempty(LinesLabels) && (size(LinesLabels,1) == 1) && (size(LinesLabels,2) == nLines) && (nLines > 1) LinesLabels = LinesLabels'; elseif ~isempty(LinesLabels) && (length(LinesLabels) == 1) && (nLines > 1) LinesLabels = repmat(LinesLabels, nLines, 1); end % Remove the first frequency bin (0) : SPECTRUM ONLY, EXCLUDE CONNECTIVITY isConnectivity = ~isempty(GlobalData.DataSet(iDS).Timefreq(iTimefreq).RefRowNames); % To check, but RowNames not only connectivity if isSpectrum && ~iscell(Freqs) && (size(TF,3)>1) && ~isConnectivity iZero = find(Freqs == 0); if ~isempty(iZero) Freqs(iZero) = []; TF(:,:,iZero) = []; end end % Get figure time series TsInfo = getappdata(hFig, 'TsInfo'); % ===== X AXIS ===== switch (TfInfo.DisplayMode) case 'TimeSeries' X = Time; XLegend = 'Time (s)'; case 'Spectrum' X = Freqs; if isfield(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options, 'PowerUnits') && ~isempty(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.PowerUnits) switch GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.PowerUnits case 'physical' XLegend = 'Frequency (Hz)'; TfInfo.FreqUnits = 'Hz'; case 'normalized' XLegend = 'Normalized frequency (Hz⋅s)'; TfInfo.FreqUnits = 'Hz⋅s'; case 'old' XLegend = 'Frequency (Hz)'; TfInfo.FreqUnits = '"bin"'; otherwise error('Unknown power spectrum units.'); end else XLegend = 'Frequency (Hz)'; end otherwise error('Invalid display mode'); end % Case of one frequency point for spectrum: replicate frequency if isSpectrum && (size(TF,3) == 1) TF = cat(3,TF,TF); replicateFreq = 1; else replicateFreq = 0; end % Bands (time/freq), or linear axes 
if iscell(X) Xbands = process_tf_bands('GetBounds', X); if replicateFreq Xbands(:, end) = Xbands(:, end) + 0.1; end if (size(Xbands,1) == 1) X = Xbands; XLim = Xbands; else X = mean(Xbands,2); XLim = [min(Xbands(:)), max(Xbands(:))]; end else if replicateFreq X = [X, X + 0.1]; end XLim = [X(1), X(end)]; end if (length(XLim) ~= 2) || any(isnan(XLim)) || (XLim(2) <= XLim(1)) disp('BST> Error: No data to display...'); XLim = [0 1]; end % Auto-detect if legend should be displayed, reset if the FOOOF display changed. if isempty(TsInfo.ShowLegend) || (isfield(TfInfo, 'isFooofDispChanged') && TfInfo.isFooofDispChanged) % If more than 15 lines, or all lines have the same label: do not show legend TsInfo.ShowLegend = (length(LinesLabels) <= 15) && ~((length(LinesLabels) > 1) && all(cellfun(@(c)isequal(c,LinesLabels{1}), LinesLabels))); setappdata(hFig, 'TsInfo', TsInfo); end % ===== Y AXIS ===== % Get global maximum if not defined yet if isempty(sFig.Handles.DataMinMax) || isForced sFig.Handles.DataMinMax = [min(TF(:)), max(TF(:))]; % In case there are infinite values, due to the log10(0) operation, look only for non-inf values if any(isinf(sFig.Handles.DataMinMax)) iNotInf = ~isinf(TF(:)); sFig.Handles.DataMinMax = [min(TF(iNotInf)), max(TF(iNotInf))]; end end % Display units DisplayUnits = GlobalData.DataSet(iDS).Timefreq(iTimefreq).DisplayUnits; DisplayFactor = 1; % Check measure for baseline normalized data. if ~isfield(TfInfo, 'Measure') || isempty(TfInfo.Measure) TfInfo.Measure = GlobalData.DataSet(iDS).Timefreq(iTimefreq).Measure; end if ~isfield(TfInfo, 'OptMeasure') || isempty(TfInfo.OptMeasure) if isfield(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options, 'Measure') && ~isempty(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.Measure) TfInfo.OptMeasure = GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.Measure; % previous measure, e.g. 'power', for stats, but display units are set for stats.
elseif isfield(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options, 'Method') switch GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.Method case {'cohere', 'henv'} TfInfo.OptMeasure = GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.CohMeasure; case {'plv', 'plvt', 'ciplv', 'ciplvt', 'wpli', 'wplit'} TfInfo.OptMeasure = GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.Method; if strcmpi(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.PlvMeasure, 'magnitude') TfInfo.OptMeasure = [TfInfo.OptMeasure 'm']; end otherwise TfInfo.OptMeasure = GlobalData.DataSet(iDS).Timefreq(iTimefreq).Options.Method; end else TfInfo.OptMeasure = ''; end end if isempty(DisplayUnits) % Get signal units and display factor if ~isempty(GlobalData.DataSet(iDS).Timefreq(iTimefreq).Modality) && numel(GlobalData.DataSet(iDS).Timefreq(iTimefreq).AllModalities) == 1 [valScaled, DisplayFactor, DisplayUnits] = bst_getunits(mean(sFig.Handles.DataMinMax), GlobalData.DataSet(iDS).Timefreq(iTimefreq).Modality); else DisplayUnits = 'signal units'; end end if isequal(TfInfo.Function, 'power') DisplayFactor = DisplayFactor.^2; end % ===== DISPLAY ===== % Clear figure % clf(hFig); => Commented out by FT on 17-nov-2020: not sure why this is needed % Plot data in the axes PlotHandles = PlotAxes(hFig, X, XLim, TF, TfInfo, TsInfo, sFig.Handles.DataMinMax, LinesLabels, DisplayUnits, DisplayFactor); hAxes = PlotHandles.hAxes; % Store initial XLim and YLim setappdata(hAxes, 'XLimInit', XLim); setappdata(hAxes, 'YLimInit', get(hAxes, 'YLim')); % Update figure list of handles GlobalData.DataSet(iDS).Figure(iFig).Handles = PlotHandles; % X Axis legend xlabel(hAxes, XLegend, ... 'FontSize', bst_get('FigFont'), ... 'FontUnits', 'points', ... 'Interpreter', 'none'); % ===== SCALE BAR ===== % For column displays: add a scale display if strcmpi(TsInfo.DisplayMode, 'column') % Get figure background color bgColor = get(hFig, 'Color'); % Create axes PlotHandles.hColumnScale = axes('Position', [0, 0, .01, .01]); set(PlotHandles.hColumnScale, ... 'Interruptible', 'off', ... 'BusyAction', 'queue', ... 'Tag', 'AxesColumnScale', ... 'YGrid', 'off', ... 'YMinorGrid', 'off', ... 'XTick', [], ... 'YTick', [], ... 'TickLength', [0,0], ... 'Color', bgColor, ... 'XLim', [0 1], ... 'YLim', get(hAxes, 'YLim'), ... 'Box', 'off'); % Update figure list of handles GlobalData.DataSet(iDS).Figure(iFig).Handles = PlotHandles; end % Update scale depending on settings if TsInfo.ShowXGrid set(hAxes, 'XGrid', 'on'); set(hAxes, 'XMinorGrid', 'on'); end if TsInfo.ShowYGrid && ~strcmpi(TsInfo.DisplayMode, 'column') set(hAxes, 'YGrid', 'on'); set(hAxes, 'YMinorGrid', 'on'); end set(hAxes, 'XScale', TsInfo.XScale); % Hide high amplitudes for very low frequencies when linear y scale. 
if strcmpi(TsInfo.DisplayMode, 'column') TsInfo.YScale = 'linear'; figure_timeseries('SetScaleModeY', hFig, TsInfo.YScale); % also calls ScaleToFitY elseif isSpectrum && isequal(TsInfo.YScale, 'linear') && any(strcmpi(TfInfo.Function, {'power', 'magnitude'})) && all(TF(:)>=0) figure_timeseries('SetScaleModeY', hFig, TsInfo.YScale); % also calls ScaleToFitY else set(hAxes, 'YScale', TsInfo.YScale); end % Create scale buttons if isempty(findobj(hFig, 'Tag', 'ButtonGainPlus')) figure_timeseries('CreateScaleButtons', iDS, iFig); else RefreshGridBtnDisplay(hFig, TsInfo); RefreshLogScaleBtnDisplay(hFig, TsInfo); end % Update stat clusters if ~isempty(TfInfo) && ~isempty(TfInfo.FileName) && strcmpi(file_gettype(TfInfo.FileName), 'ptimefreq') ViewStatClusters(hFig); end % Resize callback if only one axes figure_timeseries('ResizeCallback', hFig, []); % Set current object/axes set(hFig, 'CurrentAxes', hAxes, 'CurrentObject', hAxes); % Update selected channels SelectedRowChangedCallback(iDS, iFig); end %% ===== PLOT AXES ===== function PlotHandles = PlotAxes(hFig, X, XLim, TF, TfInfo, TsInfo, DataMinMax, LinesLabels, DisplayUnits, DisplayFactor) % ===== CREATE AXES ===== % Look for existing axes hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); % If nothing found: Create axes if isempty(hAxes) set(0, 'CurrentFigure', hFig); hAxes = axes; set(hAxes, 'Interruptible', 'off', ... 'BusyAction', 'queue', ... 'Tag', 'AxesGraph', ... 'XLim', XLim, ... 'Box', 'on', ... 'FontName', 'Default', ... 'FontUnits', 'Points', ... 'FontWeight', 'Normal',... 'FontSize', bst_get('FigFont'), ... 'Units', 'pixels', ... 'Visible', 'on'); % Remove the Tex interpreter if isprop(hAxes, 'TickLabelInterpreter') set(hAxes, 'TickLabelInterpreter', 'none'); end else cla(hAxes); end % Redimension TF according to what we want to display switch (TfInfo.DisplayMode) % Convert to [rows x time] case 'TimeSeries' TF = TF(:,:,1); % Convert to [rows x freq] case 'Spectrum' TF = reshape(TF(:,1,:), [size(TF,1), size(TF,3)]); end % Set color table for lines DefaultColor = [.2 .2 .2]; if (TsInfo.ShowLegend) ColorOrder = panel_scout('GetScoutsColorTable'); else ColorOrder = DefaultColor; end set(hAxes, 'ColorOrder', ColorOrder); % Create handles structure PlotHandles = db_template('DisplayHandlesTimeSeries'); PlotHandles.hAxes = hAxes; PlotHandles.DataMinMax = DataMinMax; PlotHandles.DisplayUnits = DisplayUnits; PlotHandles.DisplayFactor = DisplayFactor; % ===== SWITCH DISPLAY MODE ===== switch (lower(TsInfo.DisplayMode)) case 'butterfly' PlotHandles = PlotAxesButterfly(hAxes, PlotHandles, TfInfo, TsInfo, X, TF, LinesLabels); case 'column' PlotHandles = PlotAxesColumn(hAxes, PlotHandles, X, TF, LinesLabels); otherwise error('Invalid display mode.'); end % Lines labels PlotHandles.LinesLabels = LinesLabels; % Get lines initial colors for i = 1:length(PlotHandles.hLines) if (PlotHandles.hLines(i) ~= -1) PlotHandles.LinesColor(i,:) = get(PlotHandles.hLines(i), 'Color'); end end % ===== TIME OR FREQUENCY CURSOR ===== % Plot freq cursor [PlotHandles.hCursor, PlotHandles.hTextCursor] = PlotCursor(hFig, hAxes); end %% ===== PLOT AXES BUTTERFLY ===== function PlotHandles = PlotAxesButterfly(hAxes, PlotHandles, TfInfo, TsInfo, X, TF, LinesLabels) % ===== NORMALIZE ===== % Get data maximum TF = TF * PlotHandles.DisplayFactor; Fmax = PlotHandles.DataMinMax * PlotHandles.DisplayFactor; % ===== PLOT TIME SERIES ===== % Plot lines ZData = 1.5; PlotHandles.hLines = line(X, TF', ZData * ones(size(TF)), ... 
'Parent', hAxes); set(PlotHandles.hLines, 'Tag', 'DataLine'); % ===== YLIM ===== % Get automatic YLim % For log display: avoid zero values if strcmpi(TsInfo.YScale, 'log') && Fmax(1) <= 0 if Fmax(2) > 0 Fmax(1) = min(TF(TF(:)>0)); else % All negative, just set default scale, user should turn off log scale. % Should not happen normally, as we force lin scale if any negative. Fmax = [0.1, 1]; end end if (Fmax(1) ~= Fmax(2)) % Default YLim: range to cover all values YLim = [Fmax(1), Fmax(2) + (Fmax(2) - Fmax(1)) * 0.02]; elseif Fmax(2) == 0 YLim = [-1, 1]; else YLim = [Fmax(1), Fmax(2) + abs(Fmax(2)) * 0.01]; end % Set axes legend for Y axis if ~isfield(TfInfo, 'FreqUnits') || isempty(TfInfo.FreqUnits) TfInfo.FreqUnits = 'Hz'; end if isempty(PlotHandles.DisplayUnits) PlotHandles.DisplayUnits = 'signal units'; end % Detect non-standard measures with provided units. if ~isfield(TfInfo, 'Measure') TfInfo.Measure = ''; end if ~isfield(TfInfo, 'OptMeasure') TfInfo.OptMeasure = ''; end switch lower(TfInfo.Measure) case {'zscore', 'ersd', 'db'} strAmp = ['Baseline normalized ' TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits ')']; case 'divmean' strAmp = ['Baseline relative ' TfInfo.OptMeasure ' (no units)']; case 'contrast' strAmp = ['Baseline contrasted ' TfInfo.OptMeasure ' (no units)']; case 'bl' switch lower(TfInfo.OptMeasure) case 'power', strAmp = ['Baseline subtracted ' TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits '^2/' TfInfo.FreqUnits ')']; case 'magnitude', strAmp = ['Baseline subtracted ' TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits '/sqrt(' TfInfo.FreqUnits '))']; otherwise, strAmp = ['Baseline subtracted ' TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits ')']; end case {'power', 'magnitude'} switch TfInfo.Normalized case {'relative', 'relative2020'} switch lower(TfInfo.Function) % Relative is always compared to total power, then sqrt when magnitude. case 'power', strAmp = 'Relative power per bin (no units)'; case 'magnitude', strAmp = 'Sqrt relative power per bin (no units)'; case 'log', strAmp = 'Log relative power per bin (dB)'; otherwise, strAmp = 'No units'; end case 'multiply2020' % Normalized by frequency. switch lower(TfInfo.Function) case 'power', strAmp = ['Normalized power (' PlotHandles.DisplayUnits '^2)']; case 'magnitude', strAmp = ['Sqrt normalized power (' PlotHandles.DisplayUnits ')']; case 'log', strAmp = 'Log normalized power (dB)'; otherwise, strAmp = 'No units'; end case 'multiply' % Normalized by frequency squared.
switch lower(TfInfo.Function) case 'power', strAmp = ['Normalized power (' PlotHandles.DisplayUnits '^2*' TfInfo.FreqUnits ')']; case 'magnitude', strAmp = ['Normalized magnitude (' PlotHandles.DisplayUnits '*sqrt(' TfInfo.FreqUnits '))']; case 'log', strAmp = 'Log normalized power (dB)'; otherwise, strAmp = 'No units'; end otherwise switch lower(TfInfo.Function) case 'power', strAmp = ['Power (' PlotHandles.DisplayUnits '^2/' TfInfo.FreqUnits ')']; case 'magnitude', strAmp = ['Magnitude (' PlotHandles.DisplayUnits '/sqrt(' TfInfo.FreqUnits '))']; case 'log', strAmp = 'Log power (dB)'; case 'phase', strAmp = 'Angle'; otherwise, strAmp = 'No units'; end end case 'other' % Stats if ~ismember(PlotHandles.DisplayUnits, {'No units', ''}) switch PlotHandles.DisplayUnits case 't' strAmp = 'Student''s t statistic'; case 'T' strAmp = 'Absolute mean T statistic'; case 'F' strAmp = 'Power F statistic'; % Not sure if these are used for spectra case 'z' strAmp = 'z statistic'; case 'chi2' strAmp = 'chi^2 statistic'; otherwise strAmp = PlotHandles.DisplayUnits; end else % Connectivity switch lower(TfInfo.OptMeasure) case 'corr' strAmp = 'Correlation'; % cohere case 'mscohere' strAmp = 'Magnitude-squared coherence'; case {'icohere2019', 'icohere'} strAmp = 'Imaginary coherence'; case 'lcohere2019' strAmp = 'Lagged coherence'; case {'granger', 'spgranger'} strAmp = 'Granger causality'; case {'plv', 'plvt'} strAmp = 'Phase locking value'; case {'ciplv', 'ciplvt'} strAmp = 'Corrected imaginary phase locking value'; case {'wpli', 'wplit'} strAmp = 'Weighted phase lag index'; case {'plvm', 'plvtm'} strAmp = 'Phase locking value magnitude'; case 'aec' % DEPRECATED strAmp = 'Average envelope correlation'; % Hilbert (time-varying) case 'coh' strAmp = 'Time-resolved coherence'; case 'lcoh' strAmp = 'Time-resolved lagged coherence'; case 'penv' strAmp = 'Envelope correlation'; case 'oenv' strAmp = 'Orthogonalized envelope correlation'; otherwise strAmp = [TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits ')']; end end % Unknown measure (or not yet implemented) case '' strAmp = [TfInfo.OptMeasure ' (' PlotHandles.DisplayUnits ')']; otherwise strAmp = [TfInfo.Measure ' (' PlotHandles.DisplayUnits ')']; end ylabel(hAxes, strAmp, ... 'FontSize', bst_get('FigFont'), ... 'FontUnits', 'points', ... 'Interpreter', 'tex'); % Set Y ticks in auto mode set(hAxes, 'YLim', YLim, ... 'YTickMode', 'auto', ... 'YTickLabelMode', 'auto'); % ===== EXTRA LINES ===== % Y=0 Line if (YLim(1) == 0) hLineY0 = line(get(hAxes,'XLim'), [0 0], [ZData ZData], 'Color', [0 0 0], 'Parent', hAxes); else hLineY0 = line(get(hAxes,'XLim'), [0 0], [ZData ZData], 'Color', .8*[1 1 1], 'Parent', hAxes); end % ===== LINES LEGENDS ===== % Plotting the names of the channels if ~isempty(LinesLabels) && TsInfo.ShowLegend && ((length(LinesLabels) > 1) || ~isempty(LinesLabels{1})) if (length(LinesLabels) == 1) && (length(PlotHandles.hLines) > 1) [hLegend, hLegendObjects] = legend(PlotHandles.hLines(1), strrep(LinesLabels{1}, '_', '-')); elseif (length(PlotHandles.hLines) == length(LinesLabels)) [hLegend, hLegendObjects] = legend(PlotHandles.hLines, strrep(LinesLabels(:), '_', '-')); else disp('BST> Error: Number of legend entries does not match the number of lines.
Ignoring...'); end end end %% ===== PLOT AXES: COLUMN ===== function PlotHandles = PlotAxesColumn(hAxes, PlotHandles, X, TF, LinesLabels) ZData = 1.5; nLines = size(TF,1); % ===== DISPLAY SETUP ===== % sMontage = panel_montage('GetCurrentMontage', Modality); % if ~isempty(sMontage) && ~isempty(sMontage.ChanNames) && ~isempty(Modality) && (Modality(1) ~= '$') % % Get channels that are selected for display % selChan = sMontage.ChanNames; % % Remove all the spaces % selChan = cellfun(@(c)c(c~=' '), selChan, 'UniformOutput', 0); % LinesLabels = cellfun(@(c)c(c~=' '), LinesLabels, 'UniformOutput', 0); % % Look for each of these selected channels in the list of loaded channels % iDispChan = []; % for i = 1:length(selChan) % iTmp = find(strcmpi(selChan{i}, LinesLabels)); % % If channel was found: add it to the display list % if ~isempty(iTmp) % iDispChan(end+1) = iTmp; % end % end % % Sort channels % %iDispChan = sort(iDispChan); % % If no channel displayed: display all % if isempty(iDispChan) % iDispChan = 1:nLines; % end % else % iDispChan = 1:nLines; % end % ===== SPLIT IN BLOCKS ===== % Normalized range of Y values YLim = [0, 1]; % Data minimum/maximum Fmax = PlotHandles.DataMinMax; Frange = Fmax(2) - Fmax(1); % Subdivide Y-range in (nLines+2) blocks blockY = (YLim(2) - YLim(1)) / (nLines + 2); rowOffsets = blockY * (nLines:-1:1)' + blockY / 2; % Build an offset list for ALL channels (unselected channels: offset = -10) PlotHandles.ChannelOffsets = rowOffsets; % Normalize all channels to fit in one block only PlotHandles.DisplayFactor = blockY ./ Frange; % Add previous display factor PlotHandles.DisplayFactor = PlotHandles.DisplayFactor * GetDefaultFactor('spectrum'); % Center each sensor line on its average over frequencies TF = bst_bsxfun(@minus, TF, mean(TF,2)); % Apply final factor to recordings + Keep only the displayed lines TF = TF .* PlotHandles.DisplayFactor; % ===== PLOT TIME SERIES ===== % Add offset to each channel TF = bst_bsxfun(@plus, TF, PlotHandles.ChannelOffsets); % Display time series PlotHandles.hLines = line(X, TF', ZData*ones(size(TF)), 'Parent', hAxes); set(PlotHandles.hLines, 'Tag', 'DataLine'); % ===== PLOT ZERO-LINES ===== Xzeros = repmat(get(hAxes,'XLim'), [nLines, 1]); Yzeros = [PlotHandles.ChannelOffsets, PlotHandles.ChannelOffsets]; Zzeros = repmat(.5 * [1 1], [nLines, 1]); hLineY0 = line(Xzeros', Yzeros', Zzeros', ... 'Color', .9*[1 1 1], ... 'Parent', hAxes); % ===== CHANNELS LABELS ====== if ~isempty(LinesLabels) % Special case: If scout function is "All" if (nLines > 1) && (length(LinesLabels) == 1) YtickLabel = []; else % Remove all the common parts of the labels YtickLabel = str_remove_common(LinesLabels); if isempty(YtickLabel) YtickLabel = LinesLabels; end % Scouts time series: remove everything after the @ for iLabel = 1:numel(YtickLabel) iAt = find(YtickLabel{iLabel} == '@', 1); if ~isempty(iAt) YtickLabel{iLabel} = strtrim(YtickLabel{iLabel}(1:iAt-1)); end end % Limit the size of the comments to 15 characters YtickLabel = cellfun(@(c)c(max(1,length(c)-14):end), YtickLabel, 'UniformOutput', 0); end % Set Y Legend set(hAxes, 'YTickMode', 'manual', ... 'YTickLabelMode', 'manual', ... 'YTick', bst_flip(rowOffsets,1), ...
'Yticklabel', bst_flip(YtickLabel,1)); end % Set Y axis scale set(hAxes, 'YLim', YLim); % Remove axes legend for Y axis ylabel(''); end %% ===== PLOT TIME CURSOR ===== function [hCursor,hTextCursor] = PlotCursor(hFig, hAxes) global GlobalData; ZData = 1.6; % Get display mode TfInfo = getappdata(get(hAxes,'Parent'), 'Timefreq'); % Get current time switch (TfInfo.DisplayMode) case 'Spectrum' if iscell(GlobalData.UserFrequencies.Freqs) BandBounds = process_tf_bands('GetBounds', GlobalData.UserFrequencies.Freqs(GlobalData.UserFrequencies.iCurrentFreq, :)); curX = mean(BandBounds); else curX = GlobalData.UserFrequencies.Freqs(GlobalData.UserFrequencies.iCurrentFreq); end textCursor = sprintf('%1.2f Hz', curX); case 'TimeSeries' curX = GlobalData.UserTimeWindow.CurrentTime; textCursor = sprintf('%1.4f s', curX); end YLim = get(hAxes, 'YLim'); % ===== VERTICAL LINE ===== hCursor = findobj(hAxes, '-depth', 1, 'Tag', 'Cursor'); if ~isempty(curX) if isempty(hCursor) % EraseMode: Only for Matlab <= 2014a if (bst_get('MatlabVersion') <= 803) optErase = {'EraseMode', 'xor'}; % INCOMPATIBLE WITH OPENGL RENDERER (BUG), REMOVED IN MATLAB 2014b else optErase = {}; end % Create line hCursor = line([curX curX], YLim, [ZData ZData], ... 'LineWidth', 1, ... optErase{:}, ... 'Color', 'r', ... 'Tag', 'Cursor', ... 'Parent', hAxes); else set(hCursor, 'XData', [curX curX], 'YData', YLim, 'ZData', [ZData ZData]); end end % Get background color bgcolor = get(hFig, 'Color'); % ===== TEXT CURSOR ===== hTextCursor = findobj(hFig, '-depth', 1, 'Tag', 'TextCursor'); if isempty(hTextCursor) % Create text object hTextCursor = uicontrol(... 'Style', 'text', ... 'String', textCursor, ... 'Units', 'Pixels', ... 'HorizontalAlignment', 'left', ... 'FontUnits', 'points', ... 'FontSize', bst_get('FigFont'), ... 'FontWeight', 'bold', ... 'ForegroundColor', [0 0 0], ... 'BackgroundColor', bgcolor, ... 'Parent', hFig, ... 'Tag', 'TextCursor', ... 'Visible', get(hFig, 'Visible')); else set(hTextCursor, 'String', textCursor); end % ===== SELECTION TEXT ===== hTextTimeSel = findobj(hFig, '-depth', 1, 'Tag', 'TextTimeSel'); if isempty(hTextTimeSel) hTextTimeSel = uicontrol(... 'Style', 'text', ... 'String', 'Selection', ... 'Units', 'Pixels', ... 'HorizontalAlignment', 'center', ... 'FontUnits', 'points', ... 'FontSize', bst_get('FigFont') + 1, ... 'FontWeight', 'normal', ... 'ForegroundColor', [0 0 0], ... 'BackgroundColor', bgcolor, ... 'Parent', hFig, ... 'Tag', 'TextTimeSel', ... 
'Visible', 'off'); end end %% ===== VIEW STAT CLUSTERS ===== function ViewStatClusters(hFig) global GlobalData; % Get figure description [hFig, iFig, iDS] = bst_figures('GetFigure', hFig); if isempty(iDS) return end % Get axes hAxes = findobj(hFig, '-depth', 1, 'Tag', 'AxesGraph'); YLim = get(hAxes, 'YLim'); % Delete existing markers hClusterMarkers = findobj(hAxes, '-depth', 1, 'Tag', 'ClusterMarkers'); if ~isempty(hClusterMarkers) delete(hClusterMarkers); end % Get active clusters sClusters = panel_stat('GetDisplayedClusters', hFig); if isempty(sClusters) return; end % Get TimeVector [TimeVector, iTime] = bst_memory('GetTimeVector', iDS); % Get frequency vector if iscell(GlobalData.UserFrequencies.Freqs) BandBounds = process_tf_bands('GetBounds', GlobalData.UserFrequencies.Freqs); FreqVector = mean(BandBounds,2); else FreqVector = GlobalData.UserFrequencies.Freqs; end % Constants yOffset = 0.99; % Plot each cluster separately for iClust = 1:length(sClusters) % If there is only one time point: ignore current time if (size(sClusters(iClust).mask,2) == 1) || (iTime > size(sClusters(iClust).mask,2)) iTime = 1; end % Get the frequencies for which the cluster is significant iSelFreq = find(any(sClusters(iClust).mask(:,iTime,:), 1)); if ~isempty(iSelFreq) % Get the coordinates of the cluster markers if (length(iSelFreq) > 1) X = [FreqVector(iSelFreq(1)), FreqVector(iSelFreq(end))]; else X = FreqVector(iSelFreq(1)) + [0, 0.01] * (FreqVector(end)-FreqVector(1)); end Y = yOffset * YLim(2) * [1 1]; Z = [4 4]; % Plot a line at the top of the figure line(X, Y, Z, ... 'Parent', hAxes, ... 'LineWidth', 3, ... 'LineStyle', '-', ... 'Color', sClusters(iClust).color, ... 'Tag', 'ClusterMarkers'); % Print each cluster lower in the figure yOffset = yOffset - 0.02; end end end
using Test include("welcome.jl") @testset "1. Create the welcome message" begin @testset "Welcome message for customer with first letter capitalized" begin @test welcome("Formora") == "Welcome to the Tech Palace, FORMORA" end @testset "Welcome message for customer with only lowercase letters" begin @test welcome("oromis") == "Welcome to the Tech Palace, OROMIS" end @testset "Welcome message for customer with dash in name" begin @test welcome("Svit-kona") == "Welcome to the Tech Palace, SVIT-KONA" end @testset "Welcome message for customer with only uppercase letters" begin @test welcome("ARVA") == "Welcome to the Tech Palace, ARVA" end @testset "Welcome message for customer with non-latin letters" begin @test welcome("Andumë") == "Welcome to the Tech Palace, ANDUMË" end end @testset "2. Add a fancy border" begin @testset "Add border with 10 stars per line" begin @test add_border("Welcome!", 10) == "**********\nWelcome!\n**********" end @testset "Add border with 2 stars per line" begin @test add_border("Hi", 2) == "**\nHi\n**" end end @testset "3. Clean up old marketing messages" begin @testset "Cleanup message with leading whitespace" begin @test clean(" DISCOUNT") == "DISCOUNT" end @testset "Cleanup message with trailing whitespace" begin @test clean("SALE ") == "SALE" end @testset "Cleanup message with leading and trailing whitespace" begin @test clean(" BUY NOW, SAVE 10% ") == "BUY NOW, SAVE 10%" end end
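# --------------------------------------------------------------------------
# Note: the `welcome.jl` pulled in by `include` above is not shown in this
# excerpt. The following is a hypothetical minimal sketch that would satisfy
# the test suite; the function names and expected behaviour are taken directly
# from the tests themselves, everything else is an assumption.

# Uppercase the customer's name inside the greeting (uppercase also handles
# non-Latin letters such as "ë").
welcome(name) = "Welcome to the Tech Palace, " * uppercase(name)

# Put a line of `n` stars above and below the message.
add_border(message, n) = "*"^n * "\n" * message * "\n" * "*"^n

# Strip leading and trailing whitespace from an old marketing message.
clean(message) = string(strip(message))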
/* # # Copyright (c) 2006-2012 University of Houston. All rights reserved. # $COPYRIGHT$ # # Additional copyrights may follow # # $HEADER$ # */ #ifndef __SL_INTERNAL__ #define __SL_INTERNAL__ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <sys/types.h> //#include <gsl/gsl_fit.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include <fcntl.h> #include <math.h> #include <errno.h> //#include <papi.h> #ifdef MINGW #include <windows.h> #include <winsock2.h> #include <ws2tcpip.h> #else #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <netdb.h> #include <sys/wait.h> #include <sys/resource.h> #include <pwd.h> #include <sys/utsname.h> #endif #include "SL_array.h" extern fd_set SL_send_fdset; extern fd_set SL_recv_fdset; extern int SL_this_procid; extern int SL_this_procport; extern int SL_this_listensock; extern int SL_numprocs; extern int SL_init_numprocs; extern int SL_proxy_numprocs; /* Message header sent before any message */ struct SL_msg_header { int cmd; /* what type of message is this */ int from; /* id of the src process */ int to; /* id of the dest. process */ int tag; /* tag of the message */ int context; /* context id */ int len; /* Message length in bytes */ int id; /* Id of the last fragment */ int loglength; int temp; }; typedef struct SL_msg_header SL_msg_header; /* Process structure containing all relevant contact information, communication status etc. */ struct SL_proc; typedef int SL_msg_comm_fnct ( struct SL_proc *dproc, int fd ); struct SL_proc { int id; char *hostname; int port; int sock; int state; int connect_attempts; /* number of connect attempts */ double connect_start_tstamp; /* time stamp when we started to accept or connect for this proc */ double timeout; /* max time a process should wait before disconnecting */ struct SL_msgq_head *squeue; /* Send queue */ struct SL_msgq_head *rqueue; /* Recv queue */ struct SL_msgq_head *urqueue; /* Unexpected msgs queue */ struct SL_msgq_head *scqueue; /* Send complete queue */ struct SL_msgq_head *rcqueue; /* Recv complete queue */ struct SL_qitem *currecvelem; struct SL_qitem *cursendelem; SL_msg_comm_fnct *recvfunc; SL_msg_comm_fnct *sendfunc; struct SL_msg_perf *msgperf; /* to keep track of time and message length for each communication */ struct SL_msg_perf *insertpt; struct SL_network_perf *netperf; }; struct SL_msg_perf { struct SL_msg_perf *fwd; struct SL_msg_perf *back; int msglen; double time; int pos; int msgtype; /* send type (0) or receive type (1) */ int elemid; struct SL_proc *proc; }; typedef struct SL_msg_perf SL_msg_perf; struct SL_network_perf { struct SL_network_perf *fwd; struct SL_network_perf *back; double latency; double bandwidth; int pos; }; typedef struct SL_network_perf SL_network_perf; typedef struct SL_proc SL_proc; #ifdef MINGW struct iovec { char *iov_base; /* Base address. */ size_t iov_len; /* Length. 
*/ }; #define TCP_MAXSEG 0x02 /* set maximum segment size */ #define F_GETFL 3 /* get file->f_flags */ #define F_SETFL 4 /* set file->f_flags */ #define O_NONBLOCK 00004 #endif /* A message queue item containing the operation it describes */ struct SL_msgq_head; struct SL_qitem { int id; int iovpos; int lenpos; int error; struct iovec iov[2]; struct SL_msgq_head *move_to; struct SL_msgq_head *head; struct SL_qitem *next; struct SL_qitem *prev; double starttime; double endtime; }; typedef struct SL_qitem SL_qitem; /* A message queue */ struct SL_msgq_head { int count; char *name; struct SL_qitem *first; struct SL_qitem *last; }; typedef struct SL_msgq_head SL_msgq_head; /* Request object identifying an ongoing communication */ struct SL_msg_request { struct SL_proc *proc; int type; /* Send or Recv */ int id; struct SL_qitem *elem; struct SL_msgq_head *cqueue; /* completion queue to look for */ }; typedef struct SL_msg_request SL_msg_request; struct SL_msgq_head *SL_event_sendq; struct SL_msgq_head *SL_event_recvq; struct SL_msgq_head *SL_event_sendcq; /* MACROS */ /*#ifdef PRINTF #undef PRINTF #define PRINTF(A) printf A #else #define PRINTF(A) #endif*/ #define FALSE 0 #define TRUE 1 #define SEND 0 #define RECV 1 #define SL_RECONN_MAX 20 #define SL_ACCEPT_MAX_TIME 10 #define SL_READ_MAX_TIME 5 #define SL_ACCEPT_INFINITE_TIME -1 #define SL_BIND_PORTSHIFT 200 #define SL_SLEEP_TIME 1 #define SL_TCP_BUFFER_SIZE 262142 #define SL_MAX_EVENT_HANDLE 1 #define SL_CONSTANT_ID -32 #define SL_EVENT_MANAGER -1 #define SL_PROXY_SERVER -2 #define PERFBUFSIZE 20 #define MTU (1024L*4L) #define SL_PROC_ID -64 int SL_socket ( void ); int SL_bind_static ( int handle, int port ); int SL_bind_dynamic ( int handle, int port ); int SL_socket_close ( int handle ); int SL_open_socket_conn ( int *handle, const char *as_host, int port ); int SL_open_socket_bind ( int *handle, int port ); int SL_open_socket_listen ( int sock ); int SL_open_socket_listen_nb ( int *handle, int port ); int SL_open_socket_conn_nb ( int *handle, const char *as_host, int port ); int SL_socket_read ( int hdl, char *buf, int num, double timeout ); int SL_socket_write ( int hdl, char *buf, int num, double timeout ); int SL_socket_write_nb ( int hdl, char *buf, int num, int *numwritten ); int SL_socket_read_nb ( int hdl, char *buf, int num, int* numread ); void SL_print_socket_options ( int fd ); void SL_configure_socket ( int sd ); void SL_configure_socket_nb ( int sd ); int SL_init_internal(); double SL_papi_time(); /* status object t.b.d */ #endif /* __SL_INTERNAL__ */
program xw_test use, intrinsic :: iso_fortran_env, only : dp => REAL64 use :: gauss_mod implicit none type(gen_gauss_type) :: gen_gauss_quad integer, parameter :: default_n = 10 integer :: n character(len=20) :: buffer if (command_argument_count() >= 1) then call get_command_argument(1, buffer) read (buffer, '(I10)') n else n = default_n end if call gen_gauss_quad%init_gen_gauss(n, precision=1.0e-15_dp) print '(2A20)', 'abscis', 'weight' call gen_gauss_quad%print_params() end program xw_test
theory CK_Machine imports "../Nominal" begin text {* This theory establishes soundness and completeness for a CK-machine with respect to a cbv-big-step semantics. The language includes functions, recursion, booleans and numbers. In the soundness proof the small-step cbv-reduction relation is used in order to get the induction through. The type-preservation property is proved for the machine and also for the small- and big-step semantics. Finally, the progress property is proved for the small-step semantics. The development is inspired by notes about context machines written by Roshan James (Indiana University) and also by the lecture notes written by Andy Pitts for his semantics course. See @{url "http://www.cs.indiana.edu/~rpjames/lm.pdf"} @{url "http://www.cl.cam.ac.uk/teaching/2001/Semantics/"} *} atom_decl name nominal_datatype lam = VAR "name" | APP "lam" "lam" | LAM "\<guillemotleft>name\<guillemotright>lam" ("LAM [_]._") | NUM "nat" | DIFF "lam" "lam" ("_ -- _") (* subtraction *) | PLUS "lam" "lam" ("_ ++ _") (* addition *) | TRUE | FALSE | IF "lam" "lam" "lam" | FIX "\<guillemotleft>name\<guillemotright>lam" ("FIX [_]._") (* recursion *) | ZET "lam" (* zero test *) | EQI "lam" "lam" (* equality test on numbers *) section {* Capture-Avoiding Substitution *} nominal_primrec subst :: "lam \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> lam" ("_[_::=_]" [100,100,100] 100) where "(VAR x)[y::=s] = (if x=y then s else (VAR x))" | "(APP t\<^sub>1 t\<^sub>2)[y::=s] = APP (t\<^sub>1[y::=s]) (t\<^sub>2[y::=s])" | "x\<sharp>(y,s) \<Longrightarrow> (LAM [x].t)[y::=s] = LAM [x].(t[y::=s])" | "(NUM n)[y::=s] = NUM n" | "(t\<^sub>1 -- t\<^sub>2)[y::=s] = (t\<^sub>1[y::=s]) -- (t\<^sub>2[y::=s])" | "(t\<^sub>1 ++ t\<^sub>2)[y::=s] = (t\<^sub>1[y::=s]) ++ (t\<^sub>2[y::=s])" | "x\<sharp>(y,s) \<Longrightarrow> (FIX [x].t)[y::=s] = FIX [x].(t[y::=s])" | "TRUE[y::=s] = TRUE" | "FALSE[y::=s] = FALSE" | "(IF t1 t2 t3)[y::=s] = IF (t1[y::=s]) (t2[y::=s]) (t3[y::=s])" | "(ZET t)[y::=s] = ZET (t[y::=s])" | "(EQI t1 t2)[y::=s] = EQI (t1[y::=s]) (t2[y::=s])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ apply(fresh_guess)+ done lemma subst_eqvt[eqvt]: fixes pi::"name prm" shows "pi\<bullet>(t1[x::=t2]) = (pi\<bullet>t1)[(pi\<bullet>x)::=(pi\<bullet>t2)]" by (nominal_induct t1 avoiding: x t2 rule: lam.strong_induct) (auto simp add: perm_bij fresh_atm fresh_bij) lemma fresh_fact: fixes z::"name" shows "\<lbrakk>z\<sharp>s; (z=y \<or> z\<sharp>t)\<rbrakk> \<Longrightarrow> z\<sharp>t[y::=s]" by (nominal_induct t avoiding: z y s rule: lam.strong_induct) (auto simp add: abs_fresh fresh_prod fresh_atm fresh_nat) lemma subst_rename: assumes a: "y\<sharp>t" shows "t[x::=s] = ([(y,x)]\<bullet>t)[y::=s]" using a by (nominal_induct t avoiding: x y s rule: lam.strong_induct) (auto simp add: calc_atm fresh_atm abs_fresh perm_nat_def) section {* Evaluation Contexts *} datatype ctx = Hole ("\<box>") | CAPPL "ctx" "lam" | CAPPR "lam" "ctx" | CDIFFL "ctx" "lam" | CDIFFR "lam" "ctx" | CPLUSL "ctx" "lam" | CPLUSR "lam" "ctx" | CIF "ctx" "lam" "lam" | CZET "ctx" | CEQIL "ctx" "lam" | CEQIR "lam" "ctx" text {* The operation of filling a term into a context: *} fun filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>") where "\<box>\<lbrakk>t\<rbrakk> = t" | "(CAPPL E t')\<lbrakk>t\<rbrakk> = APP (E\<lbrakk>t\<rbrakk>) t'" | "(CAPPR t' E)\<lbrakk>t\<rbrakk> = APP t' (E\<lbrakk>t\<rbrakk>)" | "(CDIFFL E t')\<lbrakk>t\<rbrakk> = (E\<lbrakk>t\<rbrakk>) -- t'" | "(CDIFFR t' 
E)\<lbrakk>t\<rbrakk> = t' -- (E\<lbrakk>t\<rbrakk>)" | "(CPLUSL E t')\<lbrakk>t\<rbrakk> = (E\<lbrakk>t\<rbrakk>) ++ t'" | "(CPLUSR t' E)\<lbrakk>t\<rbrakk> = t' ++ (E\<lbrakk>t\<rbrakk>)" | "(CIF E t1 t2)\<lbrakk>t\<rbrakk> = IF (E\<lbrakk>t\<rbrakk>) t1 t2" | "(CZET E)\<lbrakk>t\<rbrakk> = ZET (E\<lbrakk>t\<rbrakk>)" | "(CEQIL E t')\<lbrakk>t\<rbrakk> = EQI (E\<lbrakk>t\<rbrakk>) t'" | "(CEQIR t' E)\<lbrakk>t\<rbrakk> = EQI t' (E\<lbrakk>t\<rbrakk>)" text {* The operation of composing two contexts: *} fun ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<circ> _") where "\<box> \<circ> E' = E'" | "(CAPPL E t') \<circ> E' = CAPPL (E \<circ> E') t'" | "(CAPPR t' E) \<circ> E' = CAPPR t' (E \<circ> E')" | "(CDIFFL E t') \<circ> E' = CDIFFL (E \<circ> E') t'" | "(CDIFFR t' E) \<circ> E' = CDIFFR t' (E \<circ> E')" | "(CPLUSL E t') \<circ> E' = CPLUSL (E \<circ> E') t'" | "(CPLUSR t' E) \<circ> E' = CPLUSR t' (E \<circ> E')" | "(CIF E t1 t2) \<circ> E' = CIF (E \<circ> E') t1 t2" | "(CZET E) \<circ> E' = CZET (E \<circ> E')" | "(CEQIL E t') \<circ> E' = CEQIL (E \<circ> E') t'" | "(CEQIR t' E) \<circ> E' = CEQIR t' (E \<circ> E')" lemma ctx_compose: shows "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by (induct E1 rule: ctx.induct) (auto) text {* Composing a list (stack) of contexts. *} fun ctx_composes :: "ctx list \<Rightarrow> ctx" ("_\<down>") where "[]\<down> = \<box>" | "(E#Es)\<down> = (Es\<down>) \<circ> E" section {* The CK-Machine *} inductive val :: "lam\<Rightarrow>bool" where v_LAM[intro]: "val (LAM [x].e)" | v_NUM[intro]: "val (NUM n)" | v_FALSE[intro]: "val FALSE" | v_TRUE[intro]: "val TRUE" equivariance val inductive machine :: "lam\<Rightarrow>ctx list\<Rightarrow>lam\<Rightarrow>ctx list\<Rightarrow>bool" ("<_,_> \<mapsto> <_,_>") where m1[intro]: "<APP e1 e2,Es> \<mapsto> <e1,(CAPPL \<box> e2)#Es>" | m2[intro]: "val v \<Longrightarrow> <v,(CAPPL \<box> e2)#Es> \<mapsto> <e2,(CAPPR v \<box>)#Es>" | m3[intro]: "val v \<Longrightarrow> <v,(CAPPR (LAM [y].e) \<box>)#Es> \<mapsto> <e[y::=v],Es>" | m4[intro]: "<e1 -- e2, Es> \<mapsto> <e1,(CDIFFL \<box> e2)#Es>" | m5[intro]: "<NUM n1,(CDIFFL \<box> e2)#Es> \<mapsto> <e2,(CDIFFR (NUM n1) \<box>)#Es>" | m6[intro]: "<NUM n2,(CDIFFR (NUM n1) \<box>)#Es> \<mapsto> <NUM (n1 - n2),Es>" | m4'[intro]:"<e1 ++ e2, Es> \<mapsto> <e1,(CPLUSL \<box> e2)#Es>" | m5'[intro]:"<NUM n1,(CPLUSL \<box> e2)#Es> \<mapsto> <e2,(CPLUSR (NUM n1) \<box>)#Es>" | m6'[intro]:"<NUM n2,(CPLUSR (NUM n1) \<box>)#Es> \<mapsto> <NUM (n1+n2),Es>" | m7[intro]: "<IF e1 e2 e3,Es> \<mapsto> <e1,(CIF \<box> e2 e3)#Es>" | m8[intro]: "<TRUE,(CIF \<box> e1 e2)#Es> \<mapsto> <e1,Es>" | m9[intro]: "<FALSE,(CIF \<box> e1 e2)#Es> \<mapsto> <e2,Es>" | mA[intro]: "<FIX [x].t,Es> \<mapsto> <t[x::=FIX [x].t],Es>" | mB[intro]: "<ZET e,Es> \<mapsto> <e,(CZET \<box>)#Es>" | mC[intro]: "<NUM 0,(CZET \<box>)#Es> \<mapsto> <TRUE,Es>" | mD[intro]: "0 < n \<Longrightarrow> <NUM n,(CZET \<box>)#Es> \<mapsto> <FALSE,Es>" | mE[intro]: "<EQI e1 e2,Es> \<mapsto> <e1,(CEQIL \<box> e2)#Es>" | mF[intro]: "<NUM n1,(CEQIL \<box> e2)#Es> \<mapsto> <e2,(CEQIR (NUM n1) \<box>)#Es>" | mG[intro]: "<NUM n,(CEQIR (NUM n) \<box>)#Es> \<mapsto> <TRUE,Es>" | mH[intro]: "n1\<noteq>n2 \<Longrightarrow> <NUM n1,(CEQIR (NUM n2) \<box>)#Es> \<mapsto> <FALSE,Es>" inductive "machine_star" :: "lam\<Rightarrow>ctx list\<Rightarrow>lam\<Rightarrow>ctx list\<Rightarrow>bool" ("<_,_> \<mapsto>* <_,_>") where ms1[intro]: "<e,Es> \<mapsto>* <e,Es>" | ms2[intro]: 
"\<lbrakk><e1,Es1> \<mapsto> <e2,Es2>; <e2,Es2> \<mapsto>* <e3,Es3>\<rbrakk> \<Longrightarrow> <e1,Es1> \<mapsto>* <e3,Es3>" lemma ms3[intro,trans]: assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" "<e2,Es2> \<mapsto>* <e3,Es3>" shows "<e1,Es1> \<mapsto>* <e3,Es3>" using a by (induct) (auto) lemma ms4[intro]: assumes a: "<e1,Es1> \<mapsto> <e2,Es2>" shows "<e1,Es1> \<mapsto>* <e2,Es2>" using a by (rule ms2) (rule ms1) section {* The Evaluation Relation (Big-Step Semantics) *} inductive eval :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<Down> _") where eval_NUM[intro]: "NUM n \<Down> NUM n" | eval_DIFF[intro]: "\<lbrakk>t1 \<Down> (NUM n1); t2 \<Down> (NUM n2)\<rbrakk> \<Longrightarrow> t1 -- t2 \<Down> NUM (n1 - n2)" | eval_PLUS[intro]: "\<lbrakk>t1 \<Down> (NUM n1); t2 \<Down> (NUM n2)\<rbrakk> \<Longrightarrow> t1 ++ t2 \<Down> NUM (n1 + n2)" | eval_LAM[intro]: "LAM [x].t \<Down> LAM [x].t" | eval_APP[intro]: "\<lbrakk>t1\<Down> LAM [x].t; t2\<Down> t2'; t[x::=t2']\<Down> t'\<rbrakk> \<Longrightarrow> APP t1 t2 \<Down> t'" | eval_FIX[intro]: "t[x::= FIX [x].t] \<Down> t' \<Longrightarrow> FIX [x].t \<Down> t'" | eval_IF1[intro]: "\<lbrakk>t1 \<Down> TRUE; t2 \<Down> t'\<rbrakk> \<Longrightarrow> IF t1 t2 t3 \<Down> t'" | eval_IF2[intro]: "\<lbrakk>t1 \<Down> FALSE; t3 \<Down> t'\<rbrakk> \<Longrightarrow> IF t1 t2 t3 \<Down> t'" | eval_TRUE[intro]: "TRUE \<Down> TRUE" | eval_FALSE[intro]:"FALSE \<Down> FALSE" | eval_ZET1[intro]: "t \<Down> NUM 0 \<Longrightarrow> ZET t \<Down> TRUE" | eval_ZET2[intro]: "\<lbrakk>t \<Down> NUM n; 0 < n\<rbrakk> \<Longrightarrow> ZET t \<Down> FALSE" | eval_EQ1[intro]: "\<lbrakk>t1 \<Down> NUM n; t2 \<Down> NUM n\<rbrakk> \<Longrightarrow> EQI t1 t2 \<Down> TRUE" | eval_EQ2[intro]: "\<lbrakk>t1 \<Down> NUM n1; t2 \<Down> NUM n2; n1\<noteq>n2\<rbrakk> \<Longrightarrow> EQI t1 t2 \<Down> FALSE" declare lam.inject[simp] inductive_cases eval_elim: "APP t1 t2 \<Down> t'" "IF t1 t2 t3 \<Down> t'" "ZET t \<Down> t'" "EQI t1 t2 \<Down> t'" "t1 ++ t2 \<Down> t'" "t1 -- t2 \<Down> t'" "(NUM n) \<Down> t" "TRUE \<Down> t" "FALSE \<Down> t" declare lam.inject[simp del] lemma eval_to: assumes a: "t \<Down> t'" shows "val t'" using a by (induct) (auto) lemma eval_val: assumes a: "val t" shows "t \<Down> t" using a by (induct) (auto) text {* The Completeness Property: *} theorem eval_implies_machine_star_ctx: assumes a: "t \<Down> t'" shows "<t,Es> \<mapsto>* <t',Es>" using a by (induct arbitrary: Es) (metis eval_to machine.intros ms1 ms2 ms3 ms4 v_LAM)+ corollary eval_implies_machine_star: assumes a: "t \<Down> t'" shows "<t,[]> \<mapsto>* <t',[]>" using a by (auto dest: eval_implies_machine_star_ctx) section {* The CBV Reduction Relation (Small-Step Semantics) *} lemma less_eqvt[eqvt]: fixes pi::"name prm" and n1 n2::"nat" shows "(pi\<bullet>(n1 < n2)) = ((pi\<bullet>n1) < (pi\<bullet>n2))" by (simp add: perm_nat_def perm_bool) inductive cbv :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<longrightarrow>cbv _") where cbv1: "\<lbrakk>val v; x\<sharp>v\<rbrakk> \<Longrightarrow> APP (LAM [x].t) v \<longrightarrow>cbv t[x::=v]" | cbv2[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> APP t t2 \<longrightarrow>cbv APP t' t2" | cbv3[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> APP t2 t \<longrightarrow>cbv APP t2 t'" | cbv4[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> t -- t2 \<longrightarrow>cbv t' -- t2" | cbv5[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> t2 -- t \<longrightarrow>cbv t2 -- t'" | cbv6[intro]: "(NUM n1) -- (NUM n2) \<longrightarrow>cbv 
NUM (n1 - n2)" | cbv4'[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> t ++ t2 \<longrightarrow>cbv t' ++ t2" | cbv5'[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> t2 ++ t \<longrightarrow>cbv t2 ++ t'" | cbv6'[intro]:"(NUM n1) ++ (NUM n2) \<longrightarrow>cbv NUM (n1 + n2)" | cbv7[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> IF t t1 t2 \<longrightarrow>cbv IF t' t1 t2" | cbv8[intro]: "IF TRUE t1 t2 \<longrightarrow>cbv t1" | cbv9[intro]: "IF FALSE t1 t2 \<longrightarrow>cbv t2" | cbvA[intro]: "FIX [x].t \<longrightarrow>cbv t[x::=FIX [x].t]" | cbvB[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> ZET t \<longrightarrow>cbv ZET t'" | cbvC[intro]: "ZET (NUM 0) \<longrightarrow>cbv TRUE" | cbvD[intro]: "0 < n \<Longrightarrow> ZET (NUM n) \<longrightarrow>cbv FALSE" | cbvE[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> EQI t t2 \<longrightarrow>cbv EQI t' t2" | cbvF[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> EQI t2 t \<longrightarrow>cbv EQI t2 t'" | cbvG[intro]: "EQI (NUM n) (NUM n) \<longrightarrow>cbv TRUE" | cbvH[intro]: "n1\<noteq>n2 \<Longrightarrow> EQI (NUM n1) (NUM n2) \<longrightarrow>cbv FALSE" equivariance cbv nominal_inductive cbv by (simp_all add: abs_fresh fresh_fact) lemma better_cbv1[intro]: assumes a: "val v" shows "APP (LAM [x].t) v \<longrightarrow>cbv t[x::=v]" proof - obtain y::"name" where fs: "y\<sharp>(x,t,v)" by (rule exists_fresh, rule fin_supp, blast) have "APP (LAM [x].t) v = APP (LAM [y].([(y,x)]\<bullet>t)) v" using fs by (auto simp add: lam.inject alpha' fresh_prod fresh_atm) also have "\<dots> \<longrightarrow>cbv ([(y,x)]\<bullet>t)[y::=v]" using fs a by (auto simp add: cbv.eqvt cbv1) also have "\<dots> = t[x::=v]" using fs by (simp add: subst_rename[symmetric]) finally show "APP (LAM [x].t) v \<longrightarrow>cbv t[x::=v]" by simp qed inductive "cbv_star" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>cbv* _") where cbvs1[intro]: "e \<longrightarrow>cbv* e" | cbvs2[intro]: "\<lbrakk>e1\<longrightarrow>cbv e2; e2 \<longrightarrow>cbv* e3\<rbrakk> \<Longrightarrow> e1 \<longrightarrow>cbv* e3" lemma cbvs3[intro,trans]: assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3" shows "e1 \<longrightarrow>cbv* e3" using a by (induct) (auto) lemma cbv_in_ctx: assumes a: "t \<longrightarrow>cbv t'" shows "E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" using a by (induct E) (auto) lemma machine_implies_cbv_star_ctx: assumes a: "<e,Es> \<mapsto> <e',Es'>" shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>" using a by (induct) (auto simp add: ctx_compose intro: cbv_in_ctx) lemma machine_star_implies_cbv_star_ctx: assumes a: "<e,Es> \<mapsto>* <e',Es'>" shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>" using a by (induct) (auto dest: machine_implies_cbv_star_ctx) lemma machine_star_implies_cbv_star: assumes a: "<e,[]> \<mapsto>* <e',[]>" shows "e \<longrightarrow>cbv* e'" using a by (auto dest: machine_star_implies_cbv_star_ctx) lemma cbv_eval: assumes a: "t1 \<longrightarrow>cbv t2" "t2 \<Down> t3" shows "t1 \<Down> t3" using a by (induct arbitrary: t3) (auto elim!: eval_elim intro: eval_val) lemma cbv_star_eval: assumes a: "t1 \<longrightarrow>cbv* t2" "t2 \<Down> t3" shows "t1 \<Down> t3" using a by (induct) (auto simp add: cbv_eval) lemma cbv_star_implies_eval: assumes a: "t \<longrightarrow>cbv* v" "val v" shows "t \<Down> v" using a by (induct) (auto simp add: eval_val cbv_star_eval dest: 
cbvs2) text {* The Soundness Property *} theorem machine_star_implies_eval: assumes a: "<t1,[]> \<mapsto>* <t2,[]>" and b: "val t2" shows "t1 \<Down> t2" proof - from a have "t1 \<longrightarrow>cbv* t2" by (simp add: machine_star_implies_cbv_star) then show "t1 \<Down> t2" using b by (simp add: cbv_star_implies_eval) qed section {* Typing *} text {* Types *} nominal_datatype ty = tVAR "string" | tBOOL | tINT | tARR "ty" "ty" ("_ \<rightarrow> _") declare ty.inject[simp] lemma ty_fresh: fixes x::"name" and T::"ty" shows "x\<sharp>T" by (induct T rule: ty.induct) (auto simp add: fresh_string) text {* Typing Contexts *} type_synonym tctx = "(name\<times>ty) list" text {* Sub-Typing Contexts *} abbreviation "sub_tctx" :: "tctx \<Rightarrow> tctx \<Rightarrow> bool" ("_ \<subseteq> _") where "\<Gamma>\<^sub>1 \<subseteq> \<Gamma>\<^sub>2 \<equiv> \<forall>x. x \<in> set \<Gamma>\<^sub>1 \<longrightarrow> x \<in> set \<Gamma>\<^sub>2" text {* Valid Typing Contexts *} inductive valid :: "tctx \<Rightarrow> bool" where v1[intro]: "valid []" | v2[intro]: "\<lbrakk>valid \<Gamma>; x\<sharp>\<Gamma>\<rbrakk>\<Longrightarrow> valid ((x,T)#\<Gamma>)" equivariance valid lemma valid_elim[dest]: assumes a: "valid ((x,T)#\<Gamma>)" shows "x\<sharp>\<Gamma> \<and> valid \<Gamma>" using a by (cases) (auto) lemma valid_insert: assumes a: "valid (\<Delta>@[(x,T)]@\<Gamma>)" shows "valid (\<Delta> @ \<Gamma>)" using a by (induct \<Delta>) (auto simp add: fresh_list_append fresh_list_cons dest!: valid_elim) lemma fresh_set: shows "y\<sharp>xs = (\<forall>x\<in>set xs. y\<sharp>x)" by (induct xs) (simp_all add: fresh_list_nil fresh_list_cons) lemma context_unique: assumes a1: "valid \<Gamma>" and a2: "(x,T) \<in> set \<Gamma>" and a3: "(x,U) \<in> set \<Gamma>" shows "T = U" using a1 a2 a3 by (induct) (auto simp add: fresh_set fresh_prod fresh_atm) section {* The Typing Relation *} inductive typing :: "tctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _") where t_VAR[intro]: "\<lbrakk>valid \<Gamma>; (x,T)\<in>set \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> VAR x : T" | t_APP[intro]: "\<lbrakk>\<Gamma> \<turnstile> t\<^sub>1 : T\<^sub>1\<rightarrow>T\<^sub>2; \<Gamma> \<turnstile> t\<^sub>2 : T\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> APP t\<^sub>1 t\<^sub>2 : T\<^sub>2" | t_LAM[intro]: "\<lbrakk>x\<sharp>\<Gamma>; (x,T\<^sub>1)#\<Gamma> \<turnstile> t : T\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> LAM [x].t : T\<^sub>1 \<rightarrow> T\<^sub>2" | t_NUM[intro]: "\<Gamma> \<turnstile> (NUM n) : tINT" | t_DIFF[intro]: "\<lbrakk>\<Gamma> \<turnstile> t\<^sub>1 : tINT; \<Gamma> \<turnstile> t\<^sub>2 : tINT\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^sub>1 -- t\<^sub>2 : tINT" | t_PLUS[intro]: "\<lbrakk>\<Gamma> \<turnstile> t\<^sub>1 : tINT; \<Gamma> \<turnstile> t\<^sub>2 : tINT\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^sub>1 ++ t\<^sub>2 : tINT" | t_TRUE[intro]: "\<Gamma> \<turnstile> TRUE : tBOOL" | t_FALSE[intro]: "\<Gamma> \<turnstile> FALSE : tBOOL" | t_IF[intro]: "\<lbrakk>\<Gamma> \<turnstile> t1 : tBOOL; \<Gamma> \<turnstile> t2 : T; \<Gamma> \<turnstile> t3 : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> IF t1 t2 t3 : T" | t_ZET[intro]: "\<Gamma> \<turnstile> t : tINT \<Longrightarrow> \<Gamma> \<turnstile> ZET t : tBOOL" | t_EQI[intro]: "\<lbrakk>\<Gamma> \<turnstile> t1 : tINT; \<Gamma> \<turnstile> t2 : tINT\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> EQI t1 t2 : tBOOL" | t_FIX[intro]: 
"\<lbrakk>x\<sharp>\<Gamma>; (x,T)#\<Gamma> \<turnstile> t : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> FIX [x].t : T" declare lam.inject[simp] inductive_cases typing_inversion[elim]: "\<Gamma> \<turnstile> t\<^sub>1 -- t\<^sub>2 : T" "\<Gamma> \<turnstile> t\<^sub>1 ++ t\<^sub>2 : T" "\<Gamma> \<turnstile> IF t1 t2 t3 : T" "\<Gamma> \<turnstile> ZET t : T" "\<Gamma> \<turnstile> EQI t1 t2 : T" "\<Gamma> \<turnstile> APP t1 t2 : T" "\<Gamma> \<turnstile> TRUE : T" "\<Gamma> \<turnstile> FALSE : T" "\<Gamma> \<turnstile> NUM n : T" declare lam.inject[simp del] equivariance typing nominal_inductive typing by (simp_all add: abs_fresh ty_fresh) lemma t_LAM_inversion[dest]: assumes ty: "\<Gamma> \<turnstile> LAM [x].t : T" and fc: "x\<sharp>\<Gamma>" shows "\<exists>T\<^sub>1 T\<^sub>2. T = T\<^sub>1 \<rightarrow> T\<^sub>2 \<and> (x,T\<^sub>1)#\<Gamma> \<turnstile> t : T\<^sub>2" using ty fc by (cases rule: typing.strong_cases) (auto simp add: alpha lam.inject abs_fresh ty_fresh) lemma t_FIX_inversion[dest]: assumes ty: "\<Gamma> \<turnstile> FIX [x].t : T" and fc: "x\<sharp>\<Gamma>" shows "(x,T)#\<Gamma> \<turnstile> t : T" using ty fc by (cases rule: typing.strong_cases) (auto simp add: alpha lam.inject abs_fresh ty_fresh) section {* The Type-Preservation Property for the CBV Reduction Relation *} lemma weakening: fixes \<Gamma>1 \<Gamma>2::"tctx" assumes a: "\<Gamma>1 \<turnstile> t : T" and b: "valid \<Gamma>2" and c: "\<Gamma>1 \<subseteq> \<Gamma>2" shows "\<Gamma>2 \<turnstile> t : T" using a b c by (nominal_induct \<Gamma>1 t T avoiding: \<Gamma>2 rule: typing.strong_induct) (auto | atomize)+ lemma type_substitution_aux: assumes a: "(\<Delta>@[(x,T')]@\<Gamma>) \<turnstile> e : T" and b: "\<Gamma> \<turnstile> e' : T'" shows "(\<Delta>@\<Gamma>) \<turnstile> e[x::=e'] : T" using a b proof (nominal_induct "\<Delta>@[(x,T')]@\<Gamma>" e T avoiding: x e' \<Delta> rule: typing.strong_induct) case (t_VAR y T x e' \<Delta>) then have a1: "valid (\<Delta>@[(x,T')]@\<Gamma>)" and a2: "(y,T) \<in> set (\<Delta>@[(x,T')]@\<Gamma>)" and a3: "\<Gamma> \<turnstile> e' : T'" . 
from a1 have a4: "valid (\<Delta>@\<Gamma>)" by (rule valid_insert) { assume eq: "x=y" from a1 a2 have "T=T'" using eq by (auto intro: context_unique) with a3 have "\<Delta>@\<Gamma> \<turnstile> VAR y[x::=e'] : T" using eq a4 by (auto intro: weakening) } moreover { assume ineq: "x\<noteq>y" from a2 have "(y,T) \<in> set (\<Delta>@\<Gamma>)" using ineq by simp then have "\<Delta>@\<Gamma> \<turnstile> VAR y[x::=e'] : T" using ineq a4 by auto } ultimately show "\<Delta>@\<Gamma> \<turnstile> VAR y[x::=e'] : T" by blast qed (auto | force simp add: fresh_list_append fresh_list_cons)+ corollary type_substitution: assumes a: "(x,T')#\<Gamma> \<turnstile> e : T" and b: "\<Gamma> \<turnstile> e' : T'" shows "\<Gamma> \<turnstile> e[x::=e'] : T" using a b by (auto intro: type_substitution_aux[where \<Delta>="[]",simplified]) theorem cbv_type_preservation: assumes a: "t \<longrightarrow>cbv t'" and b: "\<Gamma> \<turnstile> t : T" shows "\<Gamma> \<turnstile> t' : T" using a b apply(nominal_induct avoiding: \<Gamma> T rule: cbv.strong_induct) apply(auto elim!: typing_inversion dest: t_LAM_inversion simp add: type_substitution) apply(frule t_FIX_inversion) apply(auto simp add: type_substitution) done corollary cbv_star_type_preservation: assumes a: "t \<longrightarrow>cbv* t'" and b: "\<Gamma> \<turnstile> t : T" shows "\<Gamma> \<turnstile> t' : T" using a b by (induct) (auto intro: cbv_type_preservation) section {* The Type-Preservation Property for the Machine and Evaluation Relation *} theorem machine_type_preservation: assumes a: "<t,[]> \<mapsto>* <t',[]>" and b: "\<Gamma> \<turnstile> t : T" shows "\<Gamma> \<turnstile> t' : T" proof - from a have "t \<longrightarrow>cbv* t'" by (simp add: machine_star_implies_cbv_star) then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: cbv_star_type_preservation) qed theorem eval_type_preservation: assumes a: "t \<Down> t'" and b: "\<Gamma> \<turnstile> t : T" shows "\<Gamma> \<turnstile> t' : T" proof - from a have "<t,[]> \<mapsto>* <t',[]>" by (simp add: eval_implies_machine_star) then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: machine_type_preservation) qed text {* The Progress Property *} lemma canonical_tARR[dest]: assumes a: "[] \<turnstile> t : T1 \<rightarrow> T2" and b: "val t" shows "\<exists>x t'. t = LAM [x].t'" using b a by (induct) (auto) lemma canonical_tINT[dest]: assumes a: "[] \<turnstile> t : tINT" and b: "val t" shows "\<exists>n. t = NUM n" using b a by (induct) (auto simp add: fresh_list_nil) lemma canonical_tBOOL[dest]: assumes a: "[] \<turnstile> t : tBOOL" and b: "val t" shows "t = TRUE \<or> t = FALSE" using b a by (induct) (auto simp add: fresh_list_nil) theorem progress: assumes a: "[] \<turnstile> t : T" shows "(\<exists>t'. t \<longrightarrow>cbv t') \<or> (val t)" using a by (induct \<Gamma>\<equiv>"[]::tctx" t T) (auto dest!: canonical_tINT intro!: cbv.intros gr0I) end
> May I apply a custom style sheet to some content types, e.g. forums or blogs?
> What do I need to modify for this?
#' Utah Lake trophic data #' #' @docType data #' #' @usage data(ul_trophic) #' #' @format A data.frame with 729 rows and 15 columns "ul_trophic"
/* Sound display/edit/etc * * originally intended as a re-implementation of my much-missed dpysnd -- the Foonly/SAIL/E/Mus10/Grnlib sound editor from ca 1983. */ #include "snd.h" snd_state *ss = NULL; static bool ignore_mus_error(int type, char *msg) { XEN result = XEN_FALSE; if (XEN_HOOKED(ss->mus_error_hook)) result = run_or_hook(ss->mus_error_hook, XEN_LIST_2(C_TO_XEN_INT(type), C_TO_XEN_STRING(msg)), S_mus_error_hook); return(XEN_NOT_FALSE_P(result)); } #if HAVE_SETJMP_H void top_level_catch(int ignore); #endif void mus_error_to_snd(int type, char *msg) { if (!ss) { fprintf(stderr, "%s", msg); return; } if (!(ignore_mus_error(type, msg))) { #if HAVE_EXTENSION_LANGUAGE if (msg == NULL) XEN_ERROR(XEN_ERROR_TYPE("mus-error"), XEN_LIST_1(C_TO_XEN_STRING((char *)mus_error_type_to_string(type)))); else XEN_ERROR(XEN_ERROR_TYPE("mus-error"), XEN_LIST_1(C_TO_XEN_STRING(msg))); #endif snd_error("%s: %s", mus_error_type_to_string(type), msg); #if HAVE_SETJMP_H ss->jump_ok = true; top_level_catch(1); /* sigh -- try to keep going */ #endif } } static void mus_print_to_snd(char *msg) { if (!ss) { fprintf(stderr, "%s", msg); return; } if (!(ignore_mus_error(MUS_NO_ERROR, msg))) if (msg) { int i, len; listener_append(";"); len = strlen(msg); for (i = 1; i < len - 1; i++) if ((msg[i] == '\n') && (msg[i + 1] == ' ')) msg[i + 1] = ';'; if (msg[0] == '\n') listener_append((char *)(msg + 1)); else listener_append(msg); if (msg[strlen(msg) - 1] != '\n') listener_append("\n"); } } static void initialize_load_path(void) { /* look for SND_PATH env var, add dirs to %load-path or load_path */ char *path; path = getenv("SND_PATH"); if (path) { /* colon-separated list of directory names, pushed on load-path in reverse order (hopefully = search order) */ int i, len, dirs = 1, curdir = 0, start = 0; char **dirnames; len = strlen(path); for (i = 0; i < len; i++) if (path[i] == ':') dirs++; dirnames = (char **)calloc(dirs, sizeof(char *)); for (i = 0; i < len; i++) { if ((path[i] == ':') || (i == len - 1)) { if (i > start) { int j, lim; char *tmp; if (i == (len - 1)) lim = i + 1; else lim = i; tmp = (char *)calloc(lim - start + 1, sizeof(char)); for (j = start; j < lim; j++) tmp[j - start] = path[j]; dirnames[curdir++] = mus_expand_filename(tmp); start = i + 1; free(tmp); } } } for (i = curdir - 1; i >= 0; i--) { XEN_ADD_TO_LOAD_PATH(dirnames[i]); free(dirnames[i]); } free(dirnames); } } void snd_set_global_defaults(bool need_cleanup) { if (need_cleanup) { if (ss->HTML_Program) {free(ss->HTML_Program); ss->HTML_Program = NULL;} if (ss->HTML_Dir) {free(ss->HTML_Dir); ss->HTML_Dir = NULL;} if (ss->Temp_Dir) {free(ss->Temp_Dir); ss->Temp_Dir = NULL;} if (ss->Save_Dir) {free(ss->Save_Dir); ss->Save_Dir = NULL;} if (ss->Ladspa_Dir) {free(ss->Ladspa_Dir); ss->Ladspa_Dir = NULL;} if (ss->Save_State_File) {free(ss->Save_State_File); ss->Save_State_File = NULL;} if (ss->Eps_File) {free(ss->Eps_File); ss->Eps_File = NULL;} if (ss->Listener_Prompt) {free(ss->Listener_Prompt); ss->Listener_Prompt = NULL;} if (ss->Open_File_Dialog_Directory) {free(ss->Open_File_Dialog_Directory); ss->Open_File_Dialog_Directory = NULL;} /* not sure about the next two... 
*/ if ((cursor_style(ss) == CURSOR_PROC) && (XEN_PROCEDURE_P(ss->cursor_proc))) snd_unprotect_at(ss->cursor_proc_loc); if ((zoom_focus_style(ss) == ZOOM_FOCUS_PROC) && (XEN_PROCEDURE_P(ss->zoom_focus_proc))) snd_unprotect_at(ss->zoom_focus_proc_loc); } ss->Transform_Size = DEFAULT_TRANSFORM_SIZE; ss->Fft_Window = DEFAULT_FFT_WINDOW; ss->Fft_Window_Alpha = DEFAULT_FFT_WINDOW_ALPHA; ss->Fft_Window_Beta = DEFAULT_FFT_WINDOW_BETA; ss->Transform_Graph_Type = DEFAULT_TRANSFORM_GRAPH_TYPE; ss->Sinc_Width = DEFAULT_SINC_WIDTH; ss->Zero_Pad = DEFAULT_ZERO_PAD; ss->Wavelet_Type = DEFAULT_WAVELET_TYPE; ss->Transform_Type = DEFAULT_TRANSFORM_TYPE; ss->Transform_Normalization = DEFAULT_TRANSFORM_NORMALIZATION; ss->Show_Transform_Peaks = DEFAULT_SHOW_TRANSFORM_PEAKS; ss->Show_Sonogram_Cursor = DEFAULT_SHOW_SONOGRAM_CURSOR; ss->Fft_Log_Magnitude = DEFAULT_FFT_LOG_MAGNITUDE; ss->Fft_Log_Frequency = DEFAULT_FFT_LOG_FREQUENCY; ss->Fft_With_Phases = DEFAULT_FFT_WITH_PHASES; ss->Max_Transform_Peaks = DEFAULT_MAX_TRANSFORM_PEAKS; ss->Log_Freq_Start = DEFAULT_LOG_FREQ_START; ss->Min_dB = DEFAULT_MIN_DB; ss->lin_dB = pow(10.0, DEFAULT_MIN_DB * 0.05); ss->Show_Selection_Transform = DEFAULT_SHOW_SELECTION_TRANSFORM; ss->Default_Output_Chans = DEFAULT_OUTPUT_CHANS; ss->Default_Output_Srate = DEFAULT_OUTPUT_SRATE; ss->Default_Output_Header_Type = DEFAULT_OUTPUT_HEADER_TYPE; ss->Default_Output_Data_Format = DEFAULT_OUTPUT_DATA_FORMAT; ss->Audio_Input_Device = DEFAULT_AUDIO_INPUT_DEVICE; ss->Audio_Output_Device = DEFAULT_AUDIO_OUTPUT_DEVICE; ss->Dac_Size = DEFAULT_DAC_SIZE; ss->Dac_Combines_Channels = DEFAULT_DAC_COMBINES_CHANNELS; ss->Auto_Resize = DEFAULT_AUTO_RESIZE; ss->Auto_Update = DEFAULT_AUTO_UPDATE; ss->Auto_Update_Interval = DEFAULT_AUTO_UPDATE_INTERVAL; ss->Ask_Before_Overwrite = DEFAULT_ASK_BEFORE_OVERWRITE; ss->With_Toolbar = DEFAULT_WITH_TOOLBAR; ss->With_Tooltips = DEFAULT_WITH_TOOLTIPS; ss->Remember_Sound_State = DEFAULT_REMEMBER_SOUND_STATE; ss->Ask_About_Unsaved_Edits = DEFAULT_ASK_ABOUT_UNSAVED_EDITS; ss->Save_As_Dialog_Src = DEFAULT_SAVE_AS_DIALOG_SRC; ss->Save_As_Dialog_Auto_Comment = DEFAULT_SAVE_AS_DIALOG_AUTO_COMMENT; ss->Show_Full_Duration = DEFAULT_SHOW_FULL_DURATION; ss->Show_Full_Range = DEFAULT_SHOW_FULL_RANGE; ss->Initial_Beg = DEFAULT_INITIAL_BEG; ss->Initial_Dur = DEFAULT_INITIAL_DUR; ss->With_Background_Processes = DEFAULT_WITH_BACKGROUND_PROCESSES; ss->With_File_Monitor = DEFAULT_WITH_FILE_MONITOR; ss->Selection_Creates_Region = DEFAULT_SELECTION_CREATES_REGION; ss->Channel_Style = DEFAULT_CHANNEL_STYLE; ss->Sound_Style = DEFAULT_SOUND_STYLE; ss->Graphs_Horizontal = DEFAULT_GRAPHS_HORIZONTAL; ss->Graph_Style = DEFAULT_GRAPH_STYLE; ss->Region_Graph_Style = DEFAULT_GRAPH_STYLE; ss->Time_Graph_Type = DEFAULT_TIME_GRAPH_TYPE; ss->X_Axis_Style = DEFAULT_X_AXIS_STYLE; ss->Beats_Per_Minute = DEFAULT_BEATS_PER_MINUTE; ss->Beats_Per_Measure = DEFAULT_BEATS_PER_MEASURE; ss->With_Relative_Panes = DEFAULT_WITH_RELATIVE_PANES; ss->With_GL = DEFAULT_WITH_GL; ss->Dot_Size = DEFAULT_DOT_SIZE; ss->Grid_Density = DEFAULT_GRID_DENSITY; ss->Zoom_Focus_Style = DEFAULT_ZOOM_FOCUS_STYLE; ss->zoom_focus_proc = XEN_UNDEFINED; ss->zoom_focus_proc_loc = NOT_A_GC_LOC; ss->Max_Regions = DEFAULT_MAX_REGIONS; ss->Show_Y_Zero = DEFAULT_SHOW_Y_ZERO; ss->Show_Grid = DEFAULT_SHOW_GRID; ss->Show_Axes = DEFAULT_SHOW_AXES; ss->Show_Indices = DEFAULT_SHOW_INDICES; ss->Show_Backtrace = DEFAULT_SHOW_BACKTRACE; ss->With_Inset_Graph = DEFAULT_WITH_INSET_GRAPH; ss->With_Interrupts = DEFAULT_WITH_INTERRUPTS; ss->With_Menu_Icons 
= DEFAULT_WITH_MENU_ICONS; ss->With_Smpte_Label = DEFAULT_WITH_SMPTE_LABEL; ss->With_Pointer_Focus = DEFAULT_WITH_POINTER_FOCUS; ss->Play_Arrow_Size = DEFAULT_PLAY_ARROW_SIZE; ss->Sync_Style = DEFAULT_SYNC_STYLE; ss->Listener_Prompt = mus_strdup(DEFAULT_LISTENER_PROMPT); ss->listener_prompt_length = mus_strlen(ss->Listener_Prompt); ss->Minibuffer_History_Length = DEFAULT_MINIBUFFER_HISTORY_LENGTH; ss->Clipping = DEFAULT_CLIPPING; ss->Optimization = DEFAULT_OPTIMIZATION; ss->Print_Length = DEFAULT_PRINT_LENGTH; ss->View_Files_Sort = DEFAULT_VIEW_FILES_SORT; ss->Just_Sounds = DEFAULT_JUST_SOUNDS; ss->Open_File_Dialog_Directory = NULL; ss->HTML_Dir = mus_strdup(DEFAULT_HTML_DIR); ss->HTML_Program = mus_strdup(DEFAULT_HTML_PROGRAM); ss->Cursor_Size = DEFAULT_CURSOR_SIZE; ss->Cursor_Style = DEFAULT_CURSOR_STYLE; ss->Tracking_Cursor_Style = DEFAULT_TRACKING_CURSOR_STYLE; ss->With_Tracking_Cursor = DEFAULT_WITH_TRACKING_CURSOR; ss->cursor_proc = XEN_UNDEFINED; ss->cursor_proc_loc = NOT_A_GC_LOC; ss->Verbose_Cursor = DEFAULT_VERBOSE_CURSOR; ss->Cursor_Update_Interval = DEFAULT_CURSOR_UPDATE_INTERVAL; ss->Cursor_Location_Offset = DEFAULT_CURSOR_LOCATION_OFFSET; ss->Show_Mix_Waveforms = DEFAULT_SHOW_MIX_WAVEFORMS; ss->Mix_Waveform_Height = DEFAULT_MIX_WAVEFORM_HEIGHT; ss->Mix_Tag_Width = DEFAULT_MIX_TAG_WIDTH; ss->Mix_Tag_Height = DEFAULT_MIX_TAG_HEIGHT; ss->With_Mix_Tags = DEFAULT_WITH_MIX_TAGS; ss->Mark_Tag_Width = DEFAULT_MARK_TAG_WIDTH; ss->Mark_Tag_Height = DEFAULT_MARK_TAG_HEIGHT; ss->Show_Marks = DEFAULT_SHOW_MARKS; ss->Color_Map = DEFAULT_COLOR_MAP; ss->Color_Map_Size = DEFAULT_COLOR_MAP_SIZE; ss->Color_Cutoff = DEFAULT_COLOR_CUTOFF; ss->Color_Scale = DEFAULT_COLOR_SCALE; ss->Color_Inverted = DEFAULT_COLOR_INVERTED; ss->Color_Map = DEFAULT_COLOR_MAP; ss->Wavo_Hop = DEFAULT_WAVO_HOP; ss->Wavo_Trace = DEFAULT_WAVO_TRACE; ss->Spectro_Hop = DEFAULT_SPECTRO_HOP; ss->Spectro_X_Scale = DEFAULT_SPECTRO_X_SCALE; ss->Spectro_Y_Scale = DEFAULT_SPECTRO_Y_SCALE; ss->Spectro_Z_Scale = DEFAULT_SPECTRO_Z_SCALE; ss->Spectro_Z_Angle = DEFAULT_SPECTRO_Z_ANGLE; ss->Spectro_X_Angle = DEFAULT_SPECTRO_X_ANGLE; ss->Spectro_Y_Angle = DEFAULT_SPECTRO_Y_ANGLE; ss->Spectrum_End = DEFAULT_SPECTRUM_END; ss->Spectrum_Start = DEFAULT_SPECTRUM_START; ss->Enved_Base = DEFAULT_ENVED_BASE; ss->Enved_Power = DEFAULT_ENVED_POWER; ss->Enved_Wave_p = DEFAULT_ENVED_WAVE_P; ss->Enved_Style = DEFAULT_ENVED_STYLE; ss->Enved_Target = DEFAULT_ENVED_TARGET; ss->Enved_Filter_Order = DEFAULT_ENVED_FILTER_ORDER; ss->Eps_Bottom_Margin = DEFAULT_EPS_BOTTOM_MARGIN; ss->Eps_Left_Margin = DEFAULT_EPS_LEFT_MARGIN; ss->Eps_Size = DEFAULT_EPS_SIZE; ss->Expand_Control_Min = DEFAULT_EXPAND_CONTROL_MIN; ss->Expand_Control_Max = DEFAULT_EXPAND_CONTROL_MAX; ss->Amp_Control_Min = DEFAULT_AMP_CONTROL_MIN; ss->Amp_Control_Max = DEFAULT_AMP_CONTROL_MAX; ss->Speed_Control_Min = DEFAULT_SPEED_CONTROL_MIN; ss->Speed_Control_Max = DEFAULT_SPEED_CONTROL_MAX; ss->Contrast_Control_Min = DEFAULT_CONTRAST_CONTROL_MIN; ss->Contrast_Control_Max = DEFAULT_CONTRAST_CONTROL_MAX; ss->Contrast_Control_Amp = DEFAULT_CONTRAST_CONTROL_AMP; ss->Expand_Control_Length = DEFAULT_EXPAND_CONTROL_LENGTH; ss->Expand_Control_Ramp = DEFAULT_EXPAND_CONTROL_RAMP; ss->Expand_Control_Hop = DEFAULT_EXPAND_CONTROL_HOP; ss->Expand_Control_Jitter = DEFAULT_EXPAND_CONTROL_JITTER; ss->Reverb_Control_Feedback = DEFAULT_REVERB_CONTROL_FEEDBACK; ss->Reverb_Control_Lowpass = DEFAULT_REVERB_CONTROL_LOWPASS; ss->Reverb_Control_Scale_Min = DEFAULT_REVERB_CONTROL_SCALE_MIN; ss->Reverb_Control_Scale_Max 
= DEFAULT_REVERB_CONTROL_SCALE_MAX; ss->Reverb_Control_Decay = DEFAULT_REVERB_CONTROL_DECAY; ss->Speed_Control_Tones = DEFAULT_SPEED_CONTROL_TONES; ss->Speed_Control_Style = DEFAULT_SPEED_CONTROL_STYLE; ss->Reverb_Control_Length_Min = DEFAULT_REVERB_CONTROL_LENGTH_MIN; ss->Reverb_Control_Length_Max = DEFAULT_REVERB_CONTROL_LENGTH_MAX; ss->Filter_Control_Order = DEFAULT_FILTER_CONTROL_ORDER; ss->Filter_Control_In_Db = DEFAULT_FILTER_CONTROL_IN_DB; ss->Filter_Control_In_Hz = DEFAULT_FILTER_CONTROL_IN_HZ; ss->Show_Controls = DEFAULT_SHOW_CONTROLS; if (MUS_DEFAULT_TEMP_DIR != (char *)NULL) ss->Temp_Dir = mus_strdup(MUS_DEFAULT_TEMP_DIR); else ss->Temp_Dir = NULL; if (MUS_DEFAULT_SAVE_DIR != (char *)NULL) ss->Save_Dir = mus_strdup(MUS_DEFAULT_SAVE_DIR); else ss->Save_Dir = NULL; if (DEFAULT_LADSPA_DIR != (char *)NULL) ss->Ladspa_Dir = mus_strdup(DEFAULT_LADSPA_DIR); else ss->Ladspa_Dir = NULL; if (DEFAULT_SAVE_STATE_FILE != (char *)NULL) ss->Save_State_File = mus_strdup(DEFAULT_SAVE_STATE_FILE); else ss->Save_State_File = NULL; if (DEFAULT_PEAK_ENV_DIR != (char *)NULL) ss->Peak_Env_Dir = mus_strdup(DEFAULT_PEAK_ENV_DIR); else ss->Peak_Env_Dir = NULL; if (DEFAULT_EPS_FILE != (char *)NULL) ss->Eps_File = mus_strdup(DEFAULT_EPS_FILE); else ss->Eps_File = NULL; } #if HAVE_SETJMP_H && HAVE_SCHEME static void jump_to_top_level(void) { top_level_catch(1); } #endif #if HAVE_GSL #include <gsl/gsl_ieee_utils.h> #include <gsl/gsl_errno.h> /* default gsl error handler apparently aborts main program! */ static void snd_gsl_error(const char *reason, const char *file, int line, int gsl_errno) { XEN_ERROR(XEN_ERROR_TYPE("gsl-error"), XEN_LIST_6(C_TO_XEN_STRING("GSL: ~A, ~A in ~A line ~A, gsl err: ~A"), C_TO_XEN_STRING(gsl_strerror(gsl_errno)), C_TO_XEN_STRING(reason), C_TO_XEN_STRING(file), C_TO_XEN_INT(line), C_TO_XEN_INT(gsl_errno))); } #endif #if SND_AS_WIDGET snd_state *snd_main(int argc, char **argv) #else int main(int argc, char **argv) #endif { int i; #if HAVE_GSL /* if HAVE_GSL and the environment variable GSL_IEEE_MODE exists, use it */ /* GSL_IEEE_MODE=double-precision,mask-underflow,mask-denormalized */ if (getenv("GSL_IEEE_MODE") != NULL) gsl_ieee_env_setup(); gsl_set_error_handler(snd_gsl_error); #endif ss = (snd_state *)calloc(1, sizeof(snd_state)); /* not calloc! 
*/ ss->fam_ok = false; ss->startup_errors = NULL; #if HAVE_GTK_3 g_type_init(); #endif mus_sound_initialize(); /* has to precede version check (mus_audio_moniker needs to be setup in Alsa/Oss) */ xen_initialize(); #if HAVE_SCHEME && HAVE_SETJMP_H s7_set_error_exiter(s7, jump_to_top_level); #endif for (i = 1; i < argc; i++) { if (strcmp(argv[i], "--version") == 0) { fprintf(stdout, "%s", version_info()); snd_exit(0); } else { if (strcmp(argv[i], "--help") == 0) { fprintf(stdout, "%s", "Snd is a sound editor; see http://ccrma.stanford.edu/software/snd/.\n"); fprintf(stdout, "%s", version_info()); snd_exit(0); } } } initialize_format_lists(); snd_set_global_defaults(false); #if MUS_DEBUGGING ss->Trap_Segfault = false; #else ss->Trap_Segfault = DEFAULT_TRAP_SEGFAULT; #endif ss->jump_ok = false; allocate_regions(max_regions(ss)); ss->init_window_x = DEFAULT_INIT_WINDOW_X; ss->init_window_y = DEFAULT_INIT_WINDOW_Y; ss->init_window_width = DEFAULT_INIT_WINDOW_WIDTH; ss->init_window_height = DEFAULT_INIT_WINDOW_HEIGHT; ss->click_time = 100; init_sound_file_extensions(); ss->max_sounds = 4; /* expands to accommodate any number of files */ ss->sound_sync_max = 0; ss->stopped_explicitly = false; /* C-g sets this flag so that we can interrupt various loops */ ss->checking_explicitly = false; ss->selection_play_stop = false; ss->reloading_updated_file = 0; ss->selected_sound = NO_SELECTION; ss->sounds = (snd_info **)calloc(ss->max_sounds, sizeof(snd_info *)); ss->print_choice = PRINT_SND; ss->graph_hook_active = false; ss->lisp_graph_hook_active = false; ss->exiting = false; ss->deferred_regions = 0; ss->fam_connection = NULL; ss->snd_error_data = NULL; ss->snd_error_handler = NULL; ss->snd_warning_data = NULL; ss->snd_warning_handler = NULL; ss->xen_error_data = NULL; ss->xen_error_handler = NULL; ss->update_sound_channel_style = NOT_A_CHANNEL_STYLE; #if HAVE_GL && WITH_GL2PS ss->gl_printing = false; #endif g_xen_initialize(); ss->search_proc = XEN_UNDEFINED; ss->search_expr = NULL; ss->search_tree = NULL; mus_error_set_handler(mus_error_to_snd); mus_print_set_handler(mus_print_to_snd); initialize_load_path(); /* merge SND_PATH entries into the load-path */ #ifdef SND_AS_WIDGET return(ss); #else snd_doit(argc, argv); return(0); #endif } void g_init_base(void) { #define H_mus_error_hook S_mus_error_hook " (error-type error-message): called upon mus_error. \ If it returns " PROC_TRUE ", Snd ignores the error (it assumes you've handled it via the hook)." ss->mus_error_hook = XEN_DEFINE_HOOK(S_mus_error_hook, 2, H_mus_error_hook); /* arg = error-type error-message */ }
//////////////////////////////////////////////////////////////////////////// // Header file for this // //////////////////////////////////////////////////////////////////////////////// #include "HLTriggerOffline/Egamma/interface/EmDQMReco.h" //////////////////////////////////////////////////////////////////////////////// // Collaborating Class Header // //////////////////////////////////////////////////////////////////////////////// #include "DataFormats/EgammaCandidates/interface/Electron.h" #include "DataFormats/HLTReco/interface/TriggerEventWithRefs.h" #include "DataFormats/RecoCandidate/interface/RecoEcalCandidate.h" #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" //#include "DataFormats/EgammaCandidates/interface/PhotonFwd.h" //#include "DataFormats/EgammaCandidates/interface/Photon.h" #include "DataFormats/L1Trigger/interface/L1EmParticle.h" #include "DataFormats/L1Trigger/interface/L1EmParticleFwd.h" //#include "SimDataFormats/HepMCProduct/interface/HepMCProduct.h" #include "DataFormats/Common/interface/AssociationMap.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h" #include "DataFormats/Common/interface/Handle.h" #include "DataFormats/Common/interface/RefToBase.h" #include "FWCore/ServiceRegistry/interface/Service.h" //#include "PhysicsTools/UtilAlgos/interface/TFileService.h" #include "DataFormats/Common/interface/TriggerResults.h" #include "DataFormats/HLTReco/interface/TriggerEvent.h" #include "DataFormats/HLTReco/interface/TriggerObject.h" #include "DataFormats/HLTReco/interface/TriggerTypeDefs.h" #include "FWCore/Utilities/interface/Exception.h" #include <boost/format.hpp> //////////////////////////////////////////////////////////////////////////////// // Root include files // //////////////////////////////////////////////////////////////////////////////// #include "TDirectory.h" #include "TFile.h" #include "TH1F.h" #include <Math/VectorUtil.h> #include <iostream> #include <string> using namespace ROOT::Math::VectorUtil; //---------------------------------------------------------------------- // class EmDQMReco::FourVectorMonitorElements //---------------------------------------------------------------------- EmDQMReco::FourVectorMonitorElements::FourVectorMonitorElements(EmDQMReco *_parent, DQMStore::IBooker &iBooker, const std::string &histogramNameTemplate, const std::string &histogramTitleTemplate) : parent(_parent) { // introducing variables for better code readability later on std::string histName; std::string histTitle; // et histName = boost::str(boost::format(histogramNameTemplate) % "et"); histTitle = boost::str(boost::format(histogramTitleTemplate) % "E_{T}"); etMonitorElement = iBooker.book1D(histName.c_str(), histTitle.c_str(), parent->plotBins, parent->plotPtMin, parent->plotPtMax); // eta histName = boost::str(boost::format(histogramNameTemplate) % "eta"); histTitle = boost::str(boost::format(histogramTitleTemplate) % "#eta"); etaMonitorElement = iBooker.book1D(histName.c_str(), histTitle.c_str(), parent->plotBins, -parent->plotEtaMax, parent->plotEtaMax); // phi histName = boost::str(boost::format(histogramNameTemplate) % "phi"); histTitle = boost::str(boost::format(histogramTitleTemplate) % "#phi"); phiMonitorElement = iBooker.book1D(histName.c_str(), histTitle.c_str(), parent->plotBins, -parent->plotPhiMax, parent->plotPhiMax); } 
//---------------------------------------------------------------------- void EmDQMReco::FourVectorMonitorElements::fill(const math::XYZTLorentzVector &momentum) { etMonitorElement->Fill(momentum.Et()); etaMonitorElement->Fill(momentum.eta()); phiMonitorElement->Fill(momentum.phi()); } //---------------------------------------------------------------------- //////////////////////////////////////////////////////////////////////////////// // Constructor // //////////////////////////////////////////////////////////////////////////////// EmDQMReco::EmDQMReco(const edm::ParameterSet &pset) { //////////////////////////////////////////////////////////// // Read from configuration file // //////////////////////////////////////////////////////////// dirname_ = "HLT/HLTEgammaValidation/" + pset.getParameter<std::string>("@module_label"); // parameters for generator study reqNum = pset.getParameter<unsigned int>("reqNum"); pdgGen = pset.getParameter<int>("pdgGen"); recoEtaAcc = pset.getParameter<double>("genEtaAcc"); recoEtAcc = pset.getParameter<double>("genEtAcc"); // plotting parameters (untracked because they don't affect the physics) plotPtMin = pset.getUntrackedParameter<double>("PtMin", 0.); plotPtMax = pset.getUntrackedParameter<double>("PtMax", 1000.); plotEtaMax = pset.getUntrackedParameter<double>("EtaMax", 2.7); plotPhiMax = pset.getUntrackedParameter<double>("PhiMax", 3.15); plotBins = pset.getUntrackedParameter<unsigned int>("Nbins", 50); useHumanReadableHistTitles = pset.getUntrackedParameter<bool>("useHumanReadableHistTitles", false); triggerNameRecoMonPath = pset.getUntrackedParameter<std::string>("triggerNameRecoMonPath", "HLT_MinBias"); processNameRecoMonPath = pset.getUntrackedParameter<std::string>("processNameRecoMonPath", "HLT"); recoElectronsInput = consumes<reco::GsfElectronCollection>( pset.getUntrackedParameter<edm::InputTag>("recoElectrons", edm::InputTag("gsfElectrons"))); recoObjectsEBT = consumes<std::vector<reco::SuperCluster>>(edm::InputTag("correctedHybridSuperClusters")); recoObjectsEET = consumes<std::vector<reco::SuperCluster>>(edm::InputTag("correctedMulti5x5SuperClustersWithPreshower")); hltResultsT = consumes<edm::TriggerResults>(edm::InputTag("TriggerResults", "", processNameRecoMonPath)); triggerObjT = consumes<trigger::TriggerEventWithRefs>(edm::InputTag("hltTriggerSummaryRAW")); // preselection cuts // recocutCollection_= pset.getParameter<edm::InputTag>("cutcollection"); recocut_ = pset.getParameter<int>("cutnum"); // prescale = 10; eventnum = 0; // just init isHltConfigInitialized_ = false; //////////////////////////////////////////////////////////// // Read in the Vector of Parameter Sets. 
// // Information for each filter-step // //////////////////////////////////////////////////////////// std::vector<edm::ParameterSet> filters = pset.getParameter<std::vector<edm::ParameterSet>>("filters"); int i = 0; for (std::vector<edm::ParameterSet>::iterator filterconf = filters.begin(); filterconf != filters.end(); filterconf++) { theHLTCollectionLabels.push_back(filterconf->getParameter<edm::InputTag>("HLTCollectionLabels")); theHLTOutputTypes.push_back(filterconf->getParameter<int>("theHLTOutputTypes")); // Grab the human-readable name; if it is not specified, use the Collection // Label theHLTCollectionHumanNames.push_back( filterconf->getUntrackedParameter<std::string>("HLTCollectionHumanName", theHLTCollectionLabels[i].label())); std::vector<double> bounds = filterconf->getParameter<std::vector<double>>("PlotBounds"); // If the size of plot "bounds" vector != 2, abort assert(bounds.size() == 2); plotBounds.push_back(std::pair<double, double>(bounds[0], bounds[1])); isoNames.push_back(filterconf->getParameter<std::vector<edm::InputTag>>("IsoCollections")); for (unsigned int i = 0; i < isoNames.back().size(); i++) { switch (theHLTOutputTypes.back()) { case trigger::TriggerL1NoIsoEG: histoFillerL1NonIso->isoNameTokens_.push_back( consumes<edm::AssociationMap<edm::OneToValue<l1extra::L1EmParticleCollection, float>>>( isoNames.back()[i])); break; case trigger::TriggerL1IsoEG: // Isolated Level 1 histoFillerL1Iso->isoNameTokens_.push_back( consumes<edm::AssociationMap<edm::OneToValue<l1extra::L1EmParticleCollection, float>>>( isoNames.back()[i])); break; case trigger::TriggerPhoton: // Photon histoFillerPho->isoNameTokens_.push_back( consumes<edm::AssociationMap<edm::OneToValue<reco::RecoEcalCandidateCollection, float>>>( isoNames.back()[i])); break; case trigger::TriggerElectron: // Electron histoFillerEle->isoNameTokens_.push_back( consumes<edm::AssociationMap<edm::OneToValue<reco::ElectronCollection, float>>>(isoNames.back()[i])); break; case trigger::TriggerCluster: // TriggerCluster histoFillerClu->isoNameTokens_.push_back( consumes<edm::AssociationMap<edm::OneToValue<reco::RecoEcalCandidateCollection, float>>>( isoNames.back()[i])); break; default: throw(cms::Exception("Release Validation Error") << "HLT output type not implemented: theHLTOutputTypes[n]"); } } // If the isoNames vector is empty, abort assert(!isoNames.back().empty()); if (isoNames.back().at(0).label() == "none") { plotiso.push_back(false); } else { plotiso.push_back(true); } i++; } // END of loop over parameter sets // Record number of HLTCollectionLabels numOfHLTCollectionLabels = theHLTCollectionLabels.size(); } /// /// /// void EmDQMReco::dqmBeginRun(const edm::Run &iRun, const edm::EventSetup &iSetup) { bool isHltConfigChanged = false; // change of cfg at run boundaries? isHltConfigInitialized_ = hltConfig_.init(iRun, iSetup, "HLT", isHltConfigChanged); } //////////////////////////////////////////////////////////////////////////////// // book DQM histograms // //////////////////////////////////////////////////////////////////////////////// void EmDQMReco::bookHistograms(DQMStore::IBooker &iBooker, edm::Run const &iRun, edm::EventSetup const &iSetup) { // edm::Service<TFileService> fs; iBooker.setCurrentFolder(dirname_); //////////////////////////////////////////////////////////// // Set up Histogram of Efficiency vs Step. // // theHLTCollectionLabels is a vector of InputTags // // from the configuration file. 
// //////////////////////////////////////////////////////////// std::string histName = "total_eff"; std::string histTitle = "total events passing"; // This plot will have bins equal to 2+(number of // HLTCollectionLabels in the config file) totalreco = iBooker.book1D( histName.c_str(), histTitle.c_str(), numOfHLTCollectionLabels + 2, 0, numOfHLTCollectionLabels + 2); totalreco->setBinLabel(numOfHLTCollectionLabels + 1, "Total"); totalreco->setBinLabel(numOfHLTCollectionLabels + 2, "Reco"); for (unsigned int u = 0; u < numOfHLTCollectionLabels; u++) { totalreco->setBinLabel(u + 1, theHLTCollectionLabels[u].label()); } histName = "total_eff_RECO_matched"; histTitle = "total events passing (Reco matched)"; totalmatchreco = iBooker.book1D( histName.c_str(), histTitle.c_str(), numOfHLTCollectionLabels + 2, 0, numOfHLTCollectionLabels + 2); totalmatchreco->setBinLabel(numOfHLTCollectionLabels + 1, "Total"); totalmatchreco->setBinLabel(numOfHLTCollectionLabels + 2, "Reco"); for (unsigned int u = 0; u < numOfHLTCollectionLabels; u++) { totalmatchreco->setBinLabel(u + 1, theHLTCollectionLabels[u].label()); } // MonitorElement* tmphisto; MonitorElement *tmpiso; //////////////////////////////////////////////////////////// // Set up generator-level histograms // //////////////////////////////////////////////////////////// std::string pdgIdString; switch (pdgGen) { case 11: pdgIdString = "Electron"; break; case 22: pdgIdString = "Photon"; break; default: pdgIdString = "Particle"; } //-------------------- // reco // (note that reset(..) must be used to set the value of the scoped_ptr...) histReco.reset(new FourVectorMonitorElements(this, iBooker, "reco_%s", // pattern for histogram name "%s of " + pdgIdString + "s")); //-------------------- // monpath histRecoMonpath.reset(new FourVectorMonitorElements(this, iBooker, "reco_%s_monpath", // pattern for histogram name "%s of " + pdgIdString + "s monpath")); //-------------------- // TODO: WHAT ARE THESE HISTOGRAMS FOR ? THEY SEEM NEVER REFERENCED ANYWHERE // IN THIS FILE... 
final X monpath histMonpath.reset(new FourVectorMonitorElements(this, iBooker, "final_%s_monpath", // pattern for histogram name "Final %s Monpath")); //-------------------- //////////////////////////////////////////////////////////// // Set up histograms of HLT objects // //////////////////////////////////////////////////////////// // Determine what strings to use for histogram titles std::vector<std::string> HltHistTitle; if (theHLTCollectionHumanNames.size() == numOfHLTCollectionLabels && useHumanReadableHistTitles) { HltHistTitle = theHLTCollectionHumanNames; } else { for (unsigned int i = 0; i < numOfHLTCollectionLabels; i++) { HltHistTitle.push_back(theHLTCollectionLabels[i].label()); } } for (unsigned int i = 0; i < numOfHLTCollectionLabels; i++) { //-------------------- // distributions of HLT objects passing filter i //-------------------- // // Et // histName = theHLTCollectionLabels[i].label()+"et_all"; // histTitle = HltHistTitle[i]+" Et (ALL)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,plotPtMin,plotPtMax); // ethist.push_back(tmphisto); // // // Eta // histName = theHLTCollectionLabels[i].label()+"eta_all"; // histTitle = HltHistTitle[i]+" #eta (ALL)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotEtaMax,plotEtaMax); // etahist.push_back(tmphisto); // // // phi // histName = theHLTCollectionLabels[i].label()+"phi_all"; // histTitle = HltHistTitle[i]+" #phi (ALL)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotPhiMax,plotPhiMax); // phiHist.push_back(tmphisto); standardHist.push_back(std::make_unique<FourVectorMonitorElements>( this, iBooker, theHLTCollectionLabels[i].label() + "%s_all", // histogram name HltHistTitle[i] + " %s (ALL)" // histogram title )); //-------------------- // distributions of reco object matching HLT object passing filter i //-------------------- // Et // histName = theHLTCollectionLabels[i].label()+"et_RECO_matched"; // histTitle = HltHistTitle[i]+" Et (RECO matched)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,plotPtMin,plotPtMax); // ethistmatchreco.push_back(tmphisto); // // Eta // histName = theHLTCollectionLabels[i].label()+"eta_RECO_matched"; // histTitle = HltHistTitle[i]+" #eta (RECO matched)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotEtaMax,plotEtaMax); // etahistmatchreco.push_back(tmphisto); // // // phi // histName = theHLTCollectionLabels[i].label()+"phi_RECO_matched"; // histTitle = HltHistTitle[i]+" #phi (RECO matched)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotPhiMax,plotPhiMax); // phiHistMatchReco.push_back(tmphisto); histMatchReco.push_back(std::make_unique<FourVectorMonitorElements>( this, iBooker, theHLTCollectionLabels[i].label() + "%s_RECO_matched", // histogram name HltHistTitle[i] + " %s (RECO matched)" // histogram title )); //-------------------- // distributions of reco object matching HLT object passing filter i //-------------------- // // Et // histName = // theHLTCollectionLabels[i].label()+"et_RECO_matched_monpath"; histTitle // = HltHistTitle[i]+" Et (RECO matched, monpath)"; tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,plotPtMin,plotPtMax); // ethistmatchrecomonpath.push_back(tmphisto); // // // Eta // histName = // theHLTCollectionLabels[i].label()+"eta_RECO_matched_monpath"; // histTitle = HltHistTitle[i]+" #eta (RECO matched, monpath)"; // tmphisto = // 
iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotEtaMax,plotEtaMax); // etahistmatchrecomonpath.push_back(tmphisto); // // // phi // histName = // theHLTCollectionLabels[i].label()+"phi_RECO_matched_monpath"; // histTitle = HltHistTitle[i]+" #phi (RECO matched, monpath)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotPhiMax,plotPhiMax); // phiHistMatchRecoMonPath.push_back(tmphisto); histMatchRecoMonPath.push_back(std::make_unique<FourVectorMonitorElements>( this, iBooker, theHLTCollectionLabels[i].label() + "%s_RECO_matched_monpath", // histogram name HltHistTitle[i] + " %s (RECO matched, monpath)" // histogram title )); //-------------------- // distributions of HLT object that is closest delta-R match to sorted reco // particle(s) //-------------------- // Et // histName = theHLTCollectionLabels[i].label()+"et_reco"; // histTitle = HltHistTitle[i]+" Et (reco)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,plotPtMin,plotPtMax); // histEtOfHltObjMatchToReco.push_back(tmphisto); // // // eta // histName = theHLTCollectionLabels[i].label()+"eta_reco"; // histTitle = HltHistTitle[i]+" eta (reco)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotEtaMax,plotEtaMax); // histEtaOfHltObjMatchToReco.push_back(tmphisto); // // // phi // histName = theHLTCollectionLabels[i].label()+"phi_reco"; // histTitle = HltHistTitle[i]+" phi (reco)"; // tmphisto = // iBooker.book1D(histName.c_str(),histTitle.c_str(),plotBins,-plotPhiMax,plotPhiMax); // histPhiOfHltObjMatchToReco.push_back(tmphisto); histHltObjMatchToReco.push_back(std::make_unique<FourVectorMonitorElements>( this, iBooker, theHLTCollectionLabels[i].label() + "%s_reco", // histogram name HltHistTitle[i] + " %s (reco)" // histogram title )); //-------------------- if (!plotiso[i]) { tmpiso = nullptr; etahistiso.push_back(tmpiso); ethistiso.push_back(tmpiso); phiHistIso.push_back(tmpiso); etahistisomatchreco.push_back(tmpiso); ethistisomatchreco.push_back(tmpiso); phiHistIsoMatchReco.push_back(tmpiso); histEtaIsoOfHltObjMatchToReco.push_back(tmpiso); histEtIsoOfHltObjMatchToReco.push_back(tmpiso); histPhiIsoOfHltObjMatchToReco.push_back(tmpiso); } else { //-------------------- // 2D plot: Isolation values vs X for all objects //-------------------- // X = eta histName = theHLTCollectionLabels[i].label() + "eta_isolation_all"; histTitle = HltHistTitle[i] + " isolation vs #eta (all)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotEtaMax, plotEtaMax, plotBins, plotBounds[i].first, plotBounds[i].second); etahistiso.push_back(tmpiso); // X = et histName = theHLTCollectionLabels[i].label() + "et_isolation_all"; histTitle = HltHistTitle[i] + " isolation vs Et (all)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, plotPtMin, plotPtMax, plotBins, plotBounds[i].first, plotBounds[i].second); ethistiso.push_back(tmpiso); // X = phi histName = theHLTCollectionLabels[i].label() + "phi_isolation_all"; histTitle = HltHistTitle[i] + " isolation vs #phi (all)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotPhiMax, plotPhiMax, plotBins, plotBounds[i].first, plotBounds[i].second); phiHistIso.push_back(tmpiso); //-------------------- // 2D plot: Isolation values vs X for reco matched objects //-------------------- // X = eta histName = theHLTCollectionLabels[i].label() + "eta_isolation_RECO_matched"; histTitle = HltHistTitle[i] + " isolation vs #eta (reco matched)"; tmpiso 
= iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotEtaMax, plotEtaMax, plotBins, plotBounds[i].first, plotBounds[i].second); etahistisomatchreco.push_back(tmpiso); // X = et histName = theHLTCollectionLabels[i].label() + "et_isolation_RECO_matched"; histTitle = HltHistTitle[i] + " isolation vs Et (reco matched)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, plotPtMin, plotPtMax, plotBins, plotBounds[i].first, plotBounds[i].second); ethistisomatchreco.push_back(tmpiso); // X = eta histName = theHLTCollectionLabels[i].label() + "phi_isolation_RECO_matched"; histTitle = HltHistTitle[i] + " isolation vs #phi (reco matched)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotPhiMax, plotPhiMax, plotBins, plotBounds[i].first, plotBounds[i].second); phiHistIsoMatchReco.push_back(tmpiso); //-------------------- // 2D plot: Isolation values vs X for HLT object that // is closest delta-R match to sorted reco particle(s) //-------------------- // X = eta histName = theHLTCollectionLabels[i].label() + "eta_isolation_reco"; histTitle = HltHistTitle[i] + " isolation vs #eta (reco)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotEtaMax, plotEtaMax, plotBins, plotBounds[i].first, plotBounds[i].second); histEtaIsoOfHltObjMatchToReco.push_back(tmpiso); // X = et histName = theHLTCollectionLabels[i].label() + "et_isolation_reco"; histTitle = HltHistTitle[i] + " isolation vs Et (reco)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, plotPtMin, plotPtMax, plotBins, plotBounds[i].first, plotBounds[i].second); histEtIsoOfHltObjMatchToReco.push_back(tmpiso); // X = phi histName = theHLTCollectionLabels[i].label() + "phi_isolation_reco"; histTitle = HltHistTitle[i] + " isolation vs #phi (reco)"; tmpiso = iBooker.book2D(histName.c_str(), histTitle.c_str(), plotBins, -plotPhiMax, plotPhiMax, plotBins, plotBounds[i].first, plotBounds[i].second); histPhiIsoOfHltObjMatchToReco.push_back(tmpiso); //-------------------- } // END of HLT histograms } } //////////////////////////////////////////////////////////////////////////////// // Destructor // //////////////////////////////////////////////////////////////////////////////// EmDQMReco::~EmDQMReco() {} //////////////////////////////////////////////////////////////////////////////// // method called to for each event // //////////////////////////////////////////////////////////////////////////////// void EmDQMReco::analyze(const edm::Event &event, const edm::EventSetup &setup) { // protect from hlt config failure if (!isHltConfigInitialized_) return; eventnum++; bool plotMonpath = false; bool plotReco = true; edm::Handle<edm::View<reco::Candidate>> recoObjects; edm::Handle<std::vector<reco::SuperCluster>> recoObjectsEB; edm::Handle<std::vector<reco::SuperCluster>> recoObjectsEE; if (pdgGen == 11) { event.getByToken(recoElectronsInput, recoObjects); if (recoObjects->size() < (unsigned int)recocut_) { // edm::LogWarning("EmDQMReco") << "Less than "<< recocut_ <<" Reco // particles with pdgId=" << pdgGen << ". Only " << // cutRecoCounter->size() << " particles."; return; } } else if (pdgGen == 22) { event.getByToken(recoObjectsEBT, recoObjectsEB); event.getByToken(recoObjectsEET, recoObjectsEE); if (recoObjectsEB->size() + recoObjectsEE->size() < (unsigned int)recocut_) { // edm::LogWarning("EmDQMReco") << "Less than "<< recocut_ <<" Reco // particles with pdgId=" << pdgGen << ". 
Only " << cutRecoCounter.size() // << " particles."; return; } } edm::Handle<edm::TriggerResults> HLTR; event.getByToken(hltResultsT, HLTR); /// /// NOTE: /// hltConfigProvider initialization has been moved to beginRun() /// /* if (theHLTCollectionHumanNames[0] == "hltL1sRelaxedSingleEgammaEt8"){ triggerIndex = hltConfig.triggerIndex("HLT_L1SingleEG8"); } else if (theHLTCollectionHumanNames[0] == "hltL1sRelaxedSingleEgammaEt5") { triggerIndex = hltConfig.triggerIndex("HLT_L1SingleEG5"); } else if (theHLTCollectionHumanNames[0] == "hltL1sRelaxedDoubleEgammaEt5") { triggerIndex = hltConfig.triggerIndex("HLT_L1DoubleEG5"); } else { triggerIndex = hltConfig.triggerIndex(""); } */ unsigned int triggerIndex; triggerIndex = hltConfig_.triggerIndex(triggerNameRecoMonPath); // triggerIndex must be less than the size of HLTR or you get a CMSException bool isFired = false; if (triggerIndex < HLTR->size()) { isFired = HLTR->accept(triggerIndex); } // fill L1 and HLT info // get objects possed by each filter edm::Handle<trigger::TriggerEventWithRefs> triggerObj; event.getByToken(triggerObjT, triggerObj); if (!triggerObj.isValid()) { edm::LogWarning("EmDQMReco") << "RAW-type HLT results not found, skipping event"; return; } //////////////////////////////////////////////////////////// // Fill the bin labeled "Total" // // This will be the number of events looked at. // //////////////////////////////////////////////////////////// totalreco->Fill(numOfHLTCollectionLabels + 0.5); totalmatchreco->Fill(numOfHLTCollectionLabels + .5); //////////////////////////////////////////////////////////// // Fill the bin labeled "Total" // // This will be the number of events looked at. // //////////////////////////////////////////////////////////// // total->Fill(numOfHLTCollectionLabels+0.5); // totalmatch->Fill(numOfHLTCollectionLabels+0.5); //////////////////////////////////////////////////////////// // Fill reconstruction info // //////////////////////////////////////////////////////////// // the recocut_ highest Et generator objects of the preselected type are our // matches std::vector<reco::Particle> sortedReco; if (plotReco == true) { if (pdgGen == 11) { for (edm::View<reco::Candidate>::const_iterator recopart = recoObjects->begin(); recopart != recoObjects->end(); recopart++) { reco::Particle tmpcand( recopart->charge(), recopart->p4(), recopart->vertex(), recopart->pdgId(), recopart->status()); sortedReco.push_back(tmpcand); } } else if (pdgGen == 22) { for (std::vector<reco::SuperCluster>::const_iterator recopart2 = recoObjectsEB->begin(); recopart2 != recoObjectsEB->end(); recopart2++) { float en = recopart2->energy(); float er = sqrt(pow(recopart2->x(), 2) + pow(recopart2->y(), 2) + pow(recopart2->z(), 2)); float px = recopart2->energy() * recopart2->x() / er; float py = recopart2->energy() * recopart2->y() / er; float pz = recopart2->energy() * recopart2->z() / er; reco::Candidate::LorentzVector thisLV(px, py, pz, en); reco::Particle tmpcand(0, thisLV, math::XYZPoint(0., 0., 0.), 22, 1); sortedReco.push_back(tmpcand); } for (std::vector<reco::SuperCluster>::const_iterator recopart2 = recoObjectsEE->begin(); recopart2 != recoObjectsEE->end(); recopart2++) { float en = recopart2->energy(); float er = sqrt(pow(recopart2->x(), 2) + pow(recopart2->y(), 2) + pow(recopart2->z(), 2)); float px = recopart2->energy() * recopart2->x() / er; float py = recopart2->energy() * recopart2->y() / er; float pz = recopart2->energy() * recopart2->z() / er; reco::Candidate::LorentzVector thisLV(px, py, pz, en); reco::Particle 
tmpcand(0, thisLV, math::XYZPoint(0., 0., 0.), 22, 1); sortedReco.push_back(tmpcand); } } std::sort(sortedReco.begin(), sortedReco.end(), pTComparator_); // Now the collection of gen particles is sorted by pt. // So, remove all particles from the collection so that we // only have the top "1 thru recocut_" particles in it sortedReco.erase(sortedReco.begin() + recocut_, sortedReco.end()); for (unsigned int i = 0; i < recocut_; i++) { // validity has been implicitily checked by the cut on recocut_ above histReco->fill(sortedReco[i].p4()); // etreco ->Fill( sortedReco[i].et() ); // etareco->Fill( sortedReco[i].eta() ); // phiReco->Fill( sortedReco[i].phi() ); if (isFired) { histRecoMonpath->fill(sortedReco[i].p4()); plotMonpath = true; } } // END of loop over Reconstructed particles if (recocut_ >= reqNum) totalreco->Fill(numOfHLTCollectionLabels + 1.5); // this isn't really needed anymore keep for backward comp. if (recocut_ >= reqNum) totalmatchreco->Fill(numOfHLTCollectionLabels + 1.5); // this isn't really needed anymore keep for backward comp. } //////////////////////////////////////////////////////////// // Loop over filter modules // //////////////////////////////////////////////////////////// for (unsigned int n = 0; n < numOfHLTCollectionLabels; n++) { // These numbers are from the Parameter Set, such as: // theHLTOutputTypes = cms.uint32(100) switch (theHLTOutputTypes[n]) { case trigger::TriggerL1NoIsoEG: // Non-isolated Level 1 histoFillerL1NonIso->fillHistos(triggerObj, event, n, sortedReco, plotReco, plotMonpath); break; case trigger::TriggerL1IsoEG: // Isolated Level 1 histoFillerL1Iso->fillHistos(triggerObj, event, n, sortedReco, plotReco, plotMonpath); break; case trigger::TriggerPhoton: // Photon histoFillerPho->fillHistos(triggerObj, event, n, sortedReco, plotReco, plotMonpath); break; case trigger::TriggerElectron: // Electron histoFillerEle->fillHistos(triggerObj, event, n, sortedReco, plotReco, plotMonpath); break; case trigger::TriggerCluster: // TriggerCluster histoFillerClu->fillHistos(triggerObj, event, n, sortedReco, plotReco, plotMonpath); break; default: throw(cms::Exception("Release Validation Error") << "HLT output type not implemented: theHLTOutputTypes[n]"); } } // END of loop over filter modules } //////////////////////////////////////////////////////////////////////////////// // fillHistos // // Called by analyze method. 
// //////////////////////////////////////////////////////////////////////////////// template <class T> void HistoFillerReco<T>::fillHistos(edm::Handle<trigger::TriggerEventWithRefs> &triggerObj, const edm::Event &iEvent, unsigned int n, std::vector<reco::Particle> &sortedReco, bool plotReco, bool plotMonpath) { std::vector<edm::Ref<T>> recoecalcands; if ((triggerObj->filterIndex(dqm->theHLTCollectionLabels[n]) >= triggerObj->size())) { // only process if available return; } //////////////////////////////////////////////////////////// // Retrieve saved filter objects // //////////////////////////////////////////////////////////// triggerObj->getObjects( triggerObj->filterIndex(dqm->theHLTCollectionLabels[n]), dqm->theHLTOutputTypes[n], recoecalcands); // Danger: special case, L1 non-isolated // needs to be merged with L1 iso if (dqm->theHLTOutputTypes[n] == trigger::TriggerL1NoIsoEG) { std::vector<edm::Ref<T>> isocands; triggerObj->getObjects(triggerObj->filterIndex(dqm->theHLTCollectionLabels[n]), trigger::TriggerL1IsoEG, isocands); if (!isocands.empty()) { for (unsigned int i = 0; i < isocands.size(); i++) recoecalcands.push_back(isocands[i]); } } // END of if theHLTOutputTypes == 82 if (recoecalcands.empty()) { // stop if no object passed the previous filter return; } if (recoecalcands.size() >= dqm->reqNum) dqm->totalreco->Fill(n + 0.5); /////////////////////////////////////////////////// // check for validity // // prevents crash in CMSSW_3_1_0_pre6 // /////////////////////////////////////////////////// for (unsigned int j = 0; j < recoecalcands.size(); j++) { if (!(recoecalcands.at(j).isAvailable())) { edm::LogError("EmDQMReco") << "Event content inconsistent: TriggerEventWithRefs contains " "invalid Refs" << std::endl << "invalid refs for: " << dqm->theHLTCollectionLabels[n].label(); return; } } //////////////////////////////////////////////////////////// // Loop over all HLT objects in this filter step, and // // fill histograms. // //////////////////////////////////////////////////////////// // bool foundAllMatches = false; // unsigned int numOfHLTobjectsMatched = 0; for (unsigned int i = 0; i < recoecalcands.size(); i++) { dqm->standardHist[n]->fill(recoecalcands[i]->p4()); //////////////////////////////////////////////////////////// // Plot isolation variables (show the not-yet-cut // // isolation, i.e. associated to next filter) // //////////////////////////////////////////////////////////// if (n + 1 < dqm->numOfHLTCollectionLabels) { // can't plot beyond last if (dqm->plotiso[n + 1]) { for (unsigned int j = 0; j < isoNameTokens_.size(); j++) { edm::Handle<edm::AssociationMap<edm::OneToValue<T, float>>> depMap; iEvent.getByToken(isoNameTokens_.at(j), depMap); if (depMap.isValid()) { // Map may not exist if only one candidate // passes a double filter typename edm::AssociationMap<edm::OneToValue<T, float>>::const_iterator mapi = depMap->find(recoecalcands[i]); if (mapi != depMap->end()) { // found candidate in isolation map! dqm->etahistiso[n + 1]->Fill(recoecalcands[i]->eta(), mapi->val); dqm->ethistiso[n + 1]->Fill(recoecalcands[i]->et(), mapi->val); dqm->phiHistIso[n + 1]->Fill(recoecalcands[i]->phi(), mapi->val); } } } } } // END of if n+1 < then the number of hlt collections } //////////////////////////////////////////////////////////// // Loop over the Reconstructed Particles, and find the // // closest HLT object match. 
// //////////////////////////////////////////////////////////// if (plotReco == true) { for (unsigned int i = 0; i < dqm->recocut_; i++) { math::XYZVector currentRecoParticleMomentum = sortedReco[i].momentum(); // float closestRecoDeltaR = 0.5; float closestRecoDeltaR = 1000.; int closestRecoEcalCandIndex = -1; for (unsigned int j = 0; j < recoecalcands.size(); j++) { float deltaR = DeltaR(recoecalcands[j]->momentum(), currentRecoParticleMomentum); if (deltaR < closestRecoDeltaR) { closestRecoDeltaR = deltaR; closestRecoEcalCandIndex = j; } } // If an HLT object was found within some delta-R // of this reco particle, store it in a histogram if (closestRecoEcalCandIndex >= 0) { // histEtOfHltObjMatchToReco[n] ->Fill( // recoecalcands[closestRecoEcalCandIndex]->et() ); // histEtaOfHltObjMatchToReco[n]->Fill( // recoecalcands[closestRecoEcalCandIndex]->eta() ); // histPhiOfHltObjMatchToReco[n]->Fill( // recoecalcands[closestRecoEcalCandIndex]->phi() ); dqm->histHltObjMatchToReco[n]->fill(recoecalcands[closestRecoEcalCandIndex]->p4()); // Also store isolation info if (n + 1 < dqm->numOfHLTCollectionLabels) { // can't plot beyond last if (dqm->plotiso[n + 1]) { // only plot if requested in config for (unsigned int j = 0; j < isoNameTokens_.size(); j++) { edm::Handle<edm::AssociationMap<edm::OneToValue<T, float>>> depMap; iEvent.getByToken(isoNameTokens_.at(j), depMap); if (depMap.isValid()) { // Map may not exist if only one candidate // passes a double filter typename edm::AssociationMap<edm::OneToValue<T, float>>::const_iterator mapi = depMap->find(recoecalcands[closestRecoEcalCandIndex]); if (mapi != depMap->end()) { // found candidate in isolation map! dqm->histEtaIsoOfHltObjMatchToReco[n + 1]->Fill(recoecalcands[closestRecoEcalCandIndex]->eta(), mapi->val); dqm->histEtIsoOfHltObjMatchToReco[n + 1]->Fill(recoecalcands[closestRecoEcalCandIndex]->et(), mapi->val); dqm->histPhiIsoOfHltObjMatchToReco[n + 1]->Fill(recoecalcands[closestRecoEcalCandIndex]->phi(), mapi->val); } } } } } } // END of if closestEcalCandIndex >= 0 } //////////////////////////////////////////////////////////// // Fill reco matched objects into histograms // //////////////////////////////////////////////////////////// unsigned int mtachedRecoParts = 0; float minrecodist = 0.3; if (n == 0) minrecodist = 0.5; // low L1-resolution => allow wider matching for (unsigned int i = 0; i < dqm->recocut_; i++) { // match generator candidate bool matchThis = false; math::XYZVector candDir = sortedReco[i].momentum(); unsigned int closest = 0; double closestDr = 1000.; for (unsigned int trigOb = 0; trigOb < recoecalcands.size(); trigOb++) { double dr = DeltaR(recoecalcands[trigOb]->momentum(), candDir); if (dr < closestDr) { closestDr = dr; closest = trigOb; } if (closestDr > minrecodist) { // it's not really a "match" if it's that far away closest = -1; } else { mtachedRecoParts++; matchThis = true; } } if (!matchThis) continue; // only plot matched candidates // fill coordinates of mc particle matching trigger object // ethistmatchreco[n] ->Fill( sortedReco[i].et() ); // etahistmatchreco[n]->Fill( sortedReco[i].eta() ); // phiHistMatchReco[n]->Fill( sortedReco[i].phi() ); dqm->histMatchReco[n]->fill(sortedReco[i].p4()); if (plotMonpath) { // ethistmatchrecomonpath[n]->Fill( sortedReco[i].et() ); // etahistmatchrecomonpath[n]->Fill( sortedReco[i].eta() ); // phiHistMatchRecoMonPath[n]->Fill( sortedReco[i].phi() ); dqm->histMatchRecoMonPath[n]->fill(sortedReco[i].p4()); } //////////////////////////////////////////////////////////// // 
Plot isolation variables (show the not-yet-cut // // isolation, i.e. associated to next filter) // //////////////////////////////////////////////////////////// if (n + 1 < dqm->numOfHLTCollectionLabels) { // can't plot beyond last if (dqm->plotiso[n + 1]) { // only plot if requested in config for (unsigned int j = 0; j < isoNameTokens_.size(); j++) { edm::Handle<edm::AssociationMap<edm::OneToValue<T, float>>> depMapReco; iEvent.getByToken(isoNameTokens_.at(j), depMapReco); if (depMapReco.isValid()) { // Map may not exist if only one // candidate passes a double filter typename edm::AssociationMap<edm::OneToValue<T, float>>::const_iterator mapi = depMapReco->find(recoecalcands[closest]); if (mapi != depMapReco->end()) { // found candidate in isolation map! dqm->etahistisomatchreco[n + 1]->Fill(sortedReco[i].eta(), mapi->val); dqm->ethistisomatchreco[n + 1]->Fill(sortedReco[i].et(), mapi->val); dqm->phiHistIsoMatchReco[n + 1]->Fill(sortedReco[i].eta(), mapi->val); } } } } } // END of if n+1 < then the number of hlt collections } // fill total reco matched efficiency if (mtachedRecoParts >= dqm->reqNum) dqm->totalmatchreco->Fill(n + 0.5); } } DEFINE_FWK_MODULE(EmDQMReco);
-- Principal component analysis

import Numeric.LinearAlgebra
import System.Directory (doesFileExist)
import System.Process (system)
import Control.Monad (when)

type Vec = Vector Double
type Mat = Matrix Double

-- Vector with the mean value of the columns of a matrix
mean a = constant (recip . fromIntegral . rows $ a) (rows a) <> a

-- covariance matrix of a list of observations stored as rows
cov x = (trans xc <> xc) / fromIntegral (rows x - 1)
  where xc = x - asRow (mean x)

-- creates the compression and decompression functions from the desired number of components
pca :: Int -> Mat -> (Vec -> Vec, Vec -> Vec)
pca n dataSet = (encode, decode)
  where
    encode x = vp <> (x - m)
    decode x = x <> vp + m
    m = mean dataSet
    c = cov dataSet
    (_, v) = eigSH' c
    vp = takeRows n (trans v)

norm = pnorm PNorm2

main = do
    ok <- doesFileExist "mnist.txt"
    when (not ok) $ do
        putStrLn "\nTrying to download test datafile..."
        system "wget -nv http://dis.um.es/~alberto/material/sp/mnist.txt.gz"
        system "gunzip mnist.txt.gz"
        return ()
    m <- loadMatrix "mnist.txt"           -- fromFile "mnist.txt" (5000,785)
    let xs = takeColumns (cols m - 1) m   -- the last column is the digit type (class label)
    let x = toRows xs !! 4                -- an arbitrary test Vec
    let (pe, pd) = pca 10 xs
    let y = pe x
    print y                               -- compressed version
    print $ norm (x - pd y) / norm x      -- reconstruction quality
module Rogue

@doc read(joinpath(dirname(@__DIR__), "README.md"), String) Rogue

using Base.Filesystem: rename
using Base: PkgId
using Pkg: Pkg, TOML
using PyCall: pyimport
using Setfield
using Transducers
using UUIDs

include("utils.jl")
include("manifests.jl")
include("downstreams.jl")
include("api.jl")

end # module
A Flower Fairies Treasury; Frederick Warne, 1997
# Cat vs coherent states in a Kerr resonator, and the role of measurement

$\newcommand{\ket}[1]{| #1 \rangle}$
$\newcommand{\bra}[1]{\langle #1 |}$
$\newcommand{\braket}[1]{\langle #1 \rangle}$
$\newcommand{\CC}{\mathcal{C}}$

Author: F. Minganti ([email protected])

In this notebook we show how the same system can produce extremely different results according to the way an observer collects the emitted field of a resonator. This notebook closely follows the results obtained in Refs. [1-3].

```python
import matplotlib.pyplot as plt
import numpy as np
from qutip import *
from IPython.display import display, Math, Latex
```

## The two-photon Kerr Resonator

Let us consider a single nonlinear Kerr resonator subject to a parametric two-photon driving. In a frame rotating at the pump frequency, the Hamiltonian reads

\begin{equation}\label{Eq:Hamiltonian}
\hat{H} = \frac{U}{2}\,\hat{a}^\dagger\hat{a}^\dagger\hat{a}\hat{a} + \frac{G}{2}\left(\hat{a}^\dagger\hat{a}^\dagger + \hat{a}\hat{a}\right),
\end{equation}

where $U$ is the Kerr photon-photon interaction strength, $G$ is the two-photon driving amplitude, and $\hat{a}^\dagger$ ($\hat{a}$) is the bosonic creation (annihilation) operator.

The time dynamics of the density matrix $\hat{\rho}$ of this system is given by a Lindblad master equation $i \partial_t \hat{\rho} = \mathcal{L} \hat{\rho}$, where $\mathcal{L}$ is the Liouvillian superoperator. The superoperator $\mathcal{L}$ is made of a Hamiltonian part and a non-Hermitian contribution, which describes the dissipation of energy, particles, and information into the environment, as detailed in e.g. [5]. Given the parametric drive, the dissipation processes include one- and two-photon dissipation, and the Lindblad superoperator becomes

\begin{equation}\label{Eq:Lindblad}
\mathcal{L} \hat{\rho} = - i \left[\hat{H},\hat{\rho}\right]
+ \frac{\gamma}{2} \left(2\hat{a}\hat{\rho}\hat{a}^\dagger - \hat{a}^\dagger\hat{a}\hat{\rho} - \hat{\rho}\hat{a}^\dagger\hat{a}\right)
+ \frac{\eta}{2} \left(2\hat{a}\hat{a}\hat{\rho}\hat{a}^\dagger\hat{a}^\dagger - \hat{a}^\dagger\hat{a}^\dagger\hat{a}\hat{a}\hat{\rho} - \hat{\rho}\hat{a}^\dagger\hat{a}^\dagger\hat{a}\hat{a}\right),
\end{equation}

where $\gamma$ and $\eta$ are, respectively, the one- and two-photon dissipation rates. We define the system parameters in the following cells.

```python
font_size = 20
label_size = 30
title_font = 35
```

```python
a = destroy(20)
U = 1
G = 4
gamma = 1
eta = 1

H = U * a.dag() * a.dag() * a * a + G * (a * a + a.dag() * a.dag())
c_ops = [np.sqrt(gamma) * a, np.sqrt(eta) * a * a]

parity = 1.j * np.pi * a.dag() * a
parity = parity.expm()

rho_ss = steadystate(H, c_ops)
```

This model can be solved exactly for its steady state [2,3]. The corresponding density matrix $\hat{\rho}_{\rm ss}$ is well approximated by the statistical mixture of two orthogonal states:

\begin{equation}\label{Eq:MixtureCats}
\hat{\rho}_{\rm ss} \simeq p^+\,\ket{\CC^+_\alpha}\!\bra{\CC^+_\alpha} + p^-\,\ket{\CC^-_\alpha}\!\bra{\CC^-_\alpha},
\end{equation}

where $\ket{\CC^\pm_\alpha} \propto \ket{\alpha} \pm \ket{-\alpha}$ are photonic Schrödinger cat states whose complex amplitude $\alpha$ is determined by the system parameters [2-4]. We recall that the coherent state $\ket{\alpha}$ is the eigenstate of the destruction operator: $\hat{a} \ket{\alpha} = \alpha \ket{\alpha}$. The state $\ket{\CC^+_\alpha}$ is called the even cat, since it can be written as a superposition of solely even Fock states, while $\ket{\CC^-_\alpha}$ is the odd cat.
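As a quick numerical check (a minimal sketch added here, not part of the original analysis), we can reconstruct the two cat states explicitly and estimate the weights $p^\pm$ of the mixture above. The amplitude is extracted from the steady state itself, using the fact that $\braket{\hat{a}^2} = \alpha^2$ for such a mixture; the states `cat_even` and `cat_odd` are reused in a further check below.

```python
# Minimal added sketch: rebuild the cat states and estimate p^+ and p^-.
alpha = np.sqrt(expect(a * a, rho_ss))  # for the cat mixture, <a^2> = alpha^2

cat_even = (coherent(20, alpha) + coherent(20, -alpha)).unit()  # |C+>
cat_odd = (coherent(20, alpha) - coherent(20, -alpha)).unit()   # |C->

p_plus = expect(rho_ss, cat_even)    # <C+| rho_ss |C+>  ~  p^+
p_minus = expect(rho_ss, cat_odd)    # <C-| rho_ss |C->  ~  p^-
print("p+ =", p_plus, "  p- =", p_minus, "  p+ + p- =", p_plus + p_minus)
```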
The coefficients $p^\pm$ can be interpreted as the probabilities for the system to be found in the corresponding cat state. Below, we demonstrate this feature by diagonalising the steady-state density matrix and by plotting the photon-number probability for the two most probable states.

```python
vals, vecs = rho_ss.eigenstates(sort='high')
print("The mean number of photons is " + str(expect(a.dag()*a, rho_ss)))

plt.figure(figsize=(8, 6))
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=font_size)
plt.semilogy(range(1,7), vals[0:6], 'rx')
plt.xlabel('Eigenvalue', fontsize=label_size)
plt.ylabel('Probability', fontsize=label_size)
plt.title('Distribution of the eigenvalues', fontsize=title_font)
plt.show()
```

```python
state_zero = vecs[0].full()
state_one = vecs[1].full()

plt.figure(figsize=(8, 6))
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=font_size)
plt.plot(range(0,20), [abs(i)**2 for i in state_zero[0:20]], 'rx', label='First state')
plt.plot(range(0,20), [abs(i)**2 for i in state_one[0:20]], 'bo', label='Second state')
plt.legend()
plt.xlabel('Photon number', fontsize=label_size)
plt.ylabel('Probability', fontsize=label_size)
plt.show()
```

As expected, the two states have opposite parity. Indeed, for sufficiently intense pumping ($G > U,\gamma,\eta$ and $|\alpha|\gg1$), it was shown in [2] that $p^+ \simeq p^- \simeq 1/2$. However, in this strong-pumping regime, the steady state can be recast as

\begin{equation}\label{Eq:MixtureCoherent}
\hat{\rho}_{\rm ss} \simeq \frac{1}{2}\ket{\alpha}\!\bra{\alpha} + \frac{1}{2}\ket{-\alpha}\!\bra{-\alpha}.
\end{equation}

Hence, the steady state can equally be seen as a statistical mixture of two coherent states of opposite phase. Since $\hat{\rho}_{\rm ss}$ is in any case a mixture of two (quasi-)orthogonal states, the steady state is bimodal. Such a bimodality can be visualised, for instance, through the Wigner function [2,3].

Now, the pivotal question is: if one monitors the evolution of the system, in which states can it be observed? The orthogonal cat states, the two coherent states with opposite phases, or none of them in particular? As we will show in the following, the answer dramatically depends on the type of measurement scheme employed to monitor the trajectory of the system.
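Before turning to the Wigner representation, here is a one-line confirmation of this opposite parity (a small added check, reusing `parity` and `vecs` from the cells above):

```python
# Added check: the two dominant eigenstates carry parity +1 and -1.
print("parity of first state: ", np.real(expect(parity, vecs[0])))   # ~ +1
print("parity of second state:", np.real(expect(parity, vecs[1])))   # ~ -1
```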
```python
xvec = np.linspace(-4, 4, 500)
W_even = wigner(vecs[0], xvec, xvec, g=2)
W_odd = wigner(vecs[1], xvec, xvec, g=2)
```

```python
W_ss = wigner(rho_ss, xvec, xvec, g=2)
W_ss = np.around(W_ss, decimals=2)

plt.figure(figsize=(10, 8))
plt.contourf(xvec, xvec, W_ss, cmap='RdBu', levels=np.linspace(-1, 1, 20))
plt.colorbar()
plt.xlabel(r'Re$(\alpha)$', fontsize=label_size)
plt.ylabel(r'Im$(\alpha)$', fontsize=label_size)
plt.title("Steady state", fontsize=title_font)
plt.show()
```

```python
W_even = np.around(W_even, decimals=2)

plt.figure(figsize=(10, 8))
plt.contourf(xvec, xvec, W_even, cmap='RdBu', levels=np.linspace(-1, 1, 20))
plt.colorbar()
plt.xlabel(r'Re$(\alpha)$', fontsize=label_size)
plt.ylabel(r'Im$(\alpha)$', fontsize=label_size)
plt.title("First state: even cat-like", fontsize=title_font)
plt.show()
```

```python
W_odd = np.around(W_odd, decimals=2)

plt.figure(figsize=(10, 8))
plt.contourf(xvec, xvec, W_odd, cmap='RdBu', levels=np.linspace(-1, 1, 20))
plt.colorbar()
plt.xlabel(r'Re$(\alpha)$', fontsize=label_size)
plt.ylabel(r'Im$(\alpha)$', fontsize=label_size)
plt.title("Second state: odd cat-like", fontsize=title_font)
plt.show()
```

## Quantum Trajectories

From a theoretical point of view, the Lindblad master equation describes the out-of-equilibrium dynamics of a system coupled to a Markovian (i.e., memoryless) environment. Indeed, the density matrix $\hat{\rho}(t)$ solving the Lindblad equation encodes the average evolution of the system when no information is collected about the state of the environment. However, one can imagine keeping track of the system state by continuously probing the environment. Doing so, the time evolution of the system would change at each realisation. However, $\hat{\rho}(t)$ can be retrieved by averaging over an infinite number of such "monitored" realisations.

The Monte Carlo wavefunction method has been developed relying exactly on this idea. It is based on the stochastic simulation of the system evolution when one continuously gathers information from the environment. Each simulation of the stochastic evolution of the system gives a single quantum trajectory. The results obtained by solving the master equation are recovered by averaging over many trajectories.

In order to simulate the quantum trajectories, it is necessary to explicitly model how an observer measures the environment, thus affecting the system evolution itself (a detailed discussion on this subject is given in [5]). Interestingly, several different measurement schemes can be associated with the same master equation. Depending on the chosen measurement, contrasting results and interpretations can emerge. Those incompatibilities are, however, harmonized once the mean value over many trajectories is taken.

$\newcommand{\ket}[1]{| #1 \rangle}$
$\newcommand{\bra}[1]{\langle #1 |}$
$\newcommand{\CC}{\mathcal{C}}$

### Photon counting

The most natural way to observe the exchanges between the Kerr resonator and the environment is to just detect every leaked photon (both individually and by couples). This mechanism is described via the action of the one-photon jump operator $\hat{J}_1 = \sqrt{\gamma}\, \hat{a}$ and the two-photon one $\hat{J}_2 = \sqrt{\eta}\, \hat{a}^2$, which describe the absorption of one or two photons by an ideal photodetector (details in e.g. [6]). Indeed, in typical realisations (e.g. [4]) the one- and two-photon dissipation channels are discernible. Hence, we can assume that the photodetector is capable of distinguishing between one- and two-photon losses.
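The photon-counting analysis below relies on the fact that a one-photon jump maps one cat state onto the other. As a small added sanity check (reusing `cat_even` and `cat_odd` constructed earlier), we can verify numerically that $\hat{a}\ket{\CC^+_\alpha}$ has essentially unit overlap with $\ket{\CC^-_\alpha}$:

```python
# Added check: a|C+> is proportional to |C->, so each one-photon jump flips parity.
jumped = (a * cat_even).unit()
print(abs(jumped.overlap(cat_odd)))  # ~ 1.0
```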
The photon-counting trajectory is then obtained by using the "mcsolve" function of qutip. In conclusion, a photon-counting trajectory is characterised by abrupt jumps corresponding to the projective measurement associated with the detection of one or two photons.

```python
tlist = np.linspace(0, 20, 2000)

sol_mc = mcsolve(H, fock(20,0), tlist, c_ops,
                 [a.dag()*a, (a+a.dag())/2, -1.j*(a-a.dag())/2, parity],
                 ntraj=1)
```

    100.0%. Run time: 0.38s. Est. time left: 00:00:00:00
    Total run time: 0.44s

```python
plt.figure(figsize=(18, 8))

plt.subplot(311)
plt.plot(tlist, sol_mc.expect[0])
plt.ylabel(r'$\langle \hat{a}^\dagger \hat{a} \rangle$', fontsize=label_size)
plt.xlim([0,20])

plt.subplot(312)
plt.plot(tlist, sol_mc.expect[3])
plt.ylabel(r'$\langle \hat{P} \rangle$', fontsize=label_size)
plt.xlim([0,20])

plt.subplot(313)
plt.plot(tlist, sol_mc.expect[1], label=r'$\langle \hat{x} \rangle$')
plt.plot(tlist, sol_mc.expect[2], label=r'$\langle \hat{p} \rangle$')
plt.xlabel(r'$\gamma t$', fontsize=label_size)
plt.xlim([0,20])
plt.ylim([-3,3])
plt.legend()
plt.show()
```

As shown in [2], the Hamiltonian $\hat{H}$ and the two-photon dissipation tend to stabilize photonic cat states. On the other hand, the annihilation operator switches from the even (odd) cat to the odd (even) one: $\hat{a}\ket{\CC^\pm_\alpha} \propto \alpha \ket{\CC^\mp_\alpha}$. The operator $\hat{J}_1$ thus induces jumps between the two cat states at a rate proportional to $\gamma \braket{\hat{a}^\dagger \hat{a}}$. This picture is very well captured in the framework of photon-counting trajectories, an example of which is given in the previous figure. The cat states are, indeed, orthogonal eigenstates of the parity operator $\hat{\mathcal{P}} = e^{i \pi \hat{a}^\dagger \hat{a}}$ with eigenvalues $\pm 1$. As we can see, along a single trajectory the state intermittently and randomly switches between the two cat states. We stress that, instead, the mean values of the field quadratures $\hat{x}=\left(\hat{a}^\dagger+\hat{a}\right)/2$ and $\hat{p}=i\left(\hat{a}^\dagger-\hat{a}\right)/2$ are practically zero along the trajectory, as expected for any cat state. The parity, hence, appears to be the appropriate observable to detect a bimodal behaviour in a photon-counting environment. Thus, we may interpret the coefficients $p^\pm$ in
$$\hat{\rho}_{\rm ss}\simeq p^+\,\ket{\CC^+_\alpha}\!\bra{\CC^+_\alpha} + p^-\,\ket{\CC^-_\alpha}\!\bra{\CC^-_\alpha}$$
as the steady-state probabilities to find the system in one of the two cat states.

The previous analysis seems to point in the direction of privileging the cat states over the coherent ones as the more truthful picture of the steady state.

### Homodyne

Another possible way to monitor a quantum-optical system is through homodyne detection, a widely-used experimental technique which gives access to the field quadratures [5-6]. To implement this kind of measurement, the cavity output field is mixed with the coherent field of a reference laser through a beam splitter (here assumed of perfect transmittance). Then, the mixed fields are probed via (perfect) photodetectors, whose measurements are described by new jump operators. We stress that both the coherent and the cavity fields are measured simultaneously.

In our case, we want to probe the two dissipation channels independently. To distinguish between one- and two-photon losses, one can exploit a nonlinear element acting on the cavity output field.
Indeed, in experimental realisations such as [4], a nonlinear element is already part of the system and is the key ingredient to realise two-photon processes. More specifically, one-photon losses are due to the finite quality factor of the resonator. They can be probed by directly mixing the output field of the cavity with a coherent beam of amplitude $\beta_1$ acting as a local oscillator. Therefore, the homodyne jump operator for one-photon losses can be cast as $\hat{K}_1 = \hat{J}_1 + \beta_1 \hat{1}$.

Two-photon losses are, instead, mediated by a nonlinear element (a Josephson junction in [4]), which converts two cavity photons of frequency $\omega_c$ into one photon of frequency $\omega_{nl}$. Hence, the field coming out of the nonlinear element can be probed by a second independent oscillator. This whole process can be seen as the action of a nonlinear beam splitter which mixes couples of dissipated photons with a reference oscillator of amplitude $\beta_2$. Therefore, the homodyne two-photon jump operator takes the form $\hat{K}_2 = \hat{J}_2 + \beta_2 \hat{1}$. Without loss of generality, in the following, we assume the amplitudes $\beta_{1,2}$ to be real [6].

In the ideal limit $\beta_{1,2}\to\infty$, the system evolves diffusively according to a homodyne stochastic Schrödinger equation. Using the ssesolve function with the option "method='homodyne'", one can simulate the trajectory.

```python
tlist = np.linspace(0, 8000, 800)

sol_hom = ssesolve(H, fock(20,0), tlist, c_ops,
                   [a.dag()*a, (a+a.dag())/2, -1.j*(a-a.dag())/2, parity],
                   ntraj=1, nsubsteps=9500, store_measurement=False,
                   method='homodyne')
```

    Total run time: 636.04s

```python
plt.figure(figsize=(18, 8))

plt.subplot(311)
plt.plot(tlist, sol_hom.expect[0])
plt.ylabel(r'$\langle \hat{a}^\dagger \hat{a} \rangle$', fontsize=label_size)
plt.xlim([0,8000])

plt.subplot(312)
plt.plot(tlist, sol_hom.expect[3])
plt.ylabel(r'$\langle \hat{P} \rangle$', fontsize=label_size)
plt.xlim([0,8000])

plt.subplot(313)
plt.plot(tlist, sol_hom.expect[1], label=r'$\langle \hat{x} \rangle$')
plt.plot(tlist, sol_hom.expect[2], label=r'$\langle \hat{p} \rangle$')
plt.xlabel(r'$\gamma t$', fontsize=label_size)
plt.xlim([0,8000])
plt.ylim([-3,3])
plt.legend()
plt.show()
```

We see that the mean parity $\braket{\hat{\mathcal{P}}}$ is confined around zero along a single homodyne trajectory, in spite of the "switching cat" picture. These fluctuations are due to the diffusive nature of the homodyne trajectory, which rules the stochastic time evolution of the system wave function under homodyne detection. The bimodal behaviour, instead, is clear in the time evolution of $\braket{\hat{x}}$ and $\braket{\hat{p}}$. This appears compatible with the picture given by $\hat{\rho}_{\rm ss}\simeq \frac{1}{2}\ket{\alpha}\!\bra{\alpha} + \frac{1}{2}\ket{-\alpha}\!\bra{-\alpha}$: at the steady state the system switches between the coherent states $\ket{\pm\alpha}$. We point out that the phase switches observed for homodyne trajectories have a much smaller rate than parity switches in photon-counting trajectories. This is a consequence of the metastable nature of the coherent states $\ket{\pm\alpha}$ [1-4].

# Reconciling the two points of view

Summing up, we have shown that the behaviour of the system along a single quantum trajectory dramatically depends on the measurement protocol adopted.
For photon-counting measurements on the environment, the system switches between the parity-defined cat states, while under homodyne detection, the states explored along a single quantum trajectory are the coherent ones. In other words, one may assign a physical meaning to the probabilities appearing in the mixed-state representation of $\hat{\rho}_{\rm ss}$ only upon specification of the single-trajectory protocol. However, any possible controversy at the single-trajectory level is washed out by averaging over many of them.

```python
tlist = np.linspace(0, 3, 100)

sol_mc_mean = mcsolve(H, fock(20,0), tlist, c_ops,
                      [a.dag()*a, (a+a.dag())/2, -1.j*(a-a.dag())/2, parity],
                      ntraj=50)
sol_hom_mean = ssesolve(H, fock(20,0), tlist, c_ops,
                        [a.dag()*a, (a+a.dag())/2, -1.j*(a-a.dag())/2, parity],
                        ntraj=50, nsubsteps=350, store_measurement=False,
                        method='homodyne')
```

    10.0%. Run time: 0.24s. Est. time left: 00:00:00:02
    20.0%. Run time: 0.32s. Est. time left: 00:00:00:01
    30.0%. Run time: 0.42s. Est. time left: 00:00:00:00
    40.0%. Run time: 0.50s. Est. time left: 00:00:00:00
    50.0%. Run time: 0.59s. Est. time left: 00:00:00:00
    60.0%. Run time: 0.64s. Est. time left: 00:00:00:00
    70.0%. Run time: 0.72s. Est. time left: 00:00:00:00
    80.0%. Run time: 0.81s. Est. time left: 00:00:00:00
    90.0%. Run time: 0.89s. Est. time left: 00:00:00:00
    100.0%. Run time: 0.96s. Est. time left: 00:00:00:00
    Total run time: 0.98s
    10.0%. Run time: 14.31s. Est. time left: 00:00:02:08
    20.0%. Run time: 28.11s. Est. time left: 00:00:01:52
    30.0%. Run time: 42.01s. Est. time left: 00:00:01:38
    40.0%. Run time: 57.06s. Est. time left: 00:00:01:25
    50.0%. Run time: 71.03s. Est. time left: 00:00:01:11
    60.0%. Run time: 85.84s. Est. time left: 00:00:00:57
    70.0%. Run time: 99.69s. Est. time left: 00:00:00:42
    80.0%. Run time: 113.37s. Est. time left: 00:00:00:28
    90.0%. Run time: 127.39s. Est. time left: 00:00:00:14
    Total run time: 141.30s

```python
plt.figure(figsize=(18, 8))

plt.subplot(311)
plt.plot(tlist, sol_mc_mean.expect[0], 'r', label='Counting')
plt.plot(tlist, sol_hom_mean.expect[0], 'b', label='Homodyne')
plt.ylabel(r'$\langle \hat{a}^\dagger \hat{a} \rangle$', fontsize=label_size)
plt.xlim([0,3])
plt.legend()

plt.subplot(312)
plt.plot(tlist, sol_mc_mean.expect[3], 'r')
plt.plot(tlist, sol_hom_mean.expect[3], 'b')
plt.ylabel(r'$\langle \hat{P} \rangle$', fontsize=label_size)
plt.xlim([0,3])

plt.subplot(313)
plt.plot(tlist, sol_mc_mean.expect[2], 'r')
plt.plot(tlist, sol_hom_mean.expect[2], 'b')
plt.ylabel(r'$\langle \hat{p} \rangle$', fontsize=label_size)
plt.xlim([0,3])
plt.ylim([-2,2])
```

## References

[1] N. Bartolo, F. Minganti, J. Lolli, and C. Ciuti, Homodyne versus photon-counting quantum trajectories for dissipative Kerr resonators with two-photon driving, The European Physical Journal Special Topics 226, 2705 (2017).

[2] F. Minganti, N. Bartolo, J. Lolli, W. Casteels, and C. Ciuti, Exact results for Schrödinger cats in driven-dissipative systems and their feedback control, Scientific Reports 6, 26987 (2016).

[3] N. Bartolo, F. Minganti, W. Casteels, and C. Ciuti, Exact steady state of a Kerr resonator with one- and two-photon driving and dissipation: Controllable Wigner-function multimodality and dissipative phase transitions, Physical Review A 94, 033841 (2016).

[4] Z. Leghtas et al., Confining the state of light to a quantum manifold by engineered two-photon loss, Science 347, 853 (2015).

[5] S. Haroche and J. M.
Raimond, Exploring the Quantum: Atoms, Cavities, and Photons (Oxford University Press, 2006).

[6] H. Wiseman and G. Milburn, Quantum Measurement and Control (Cambridge University Press, 2010).

```python
qutip.about()
```

    QuTiP: Quantum Toolbox in Python
    Copyright (c) 2011 and later.
    A. J. Pitchford, P. D. Nation, R. J. Johansson, A. Grimsmo, and C. Granade

    QuTiP Version:      4.3.1
    Numpy Version:      1.15.4
    Scipy Version:      1.1.0
    Cython Version:     0.29.2
    Matplotlib Version: 3.0.2
    Python Version:     3.7.1
    Number of CPUs:     6
    BLAS Info:          INTEL MKL
    OPENMP Installed:   False
    INTEL MKL Ext:      True
    Platform Info:      Linux (x86_64)
    Installation path:  /home/test/anaconda3/lib/python3.7/site-packages/qutip
    ==============================================================================
    Please cite QuTiP in your publication.
    ==============================================================================
    For your convenience a bibtex file can be easily generated using `qutip.cite()`
r=359.87 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7302s/media/images/d7302s-037/svc:tesseract/full/full/359.87/default.jpg Accept:application/hocr+xml
// Copyright (c) 2017-2019 The Multiverse developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "dbpservice.h" #include "dbputils.h" #include <boost/assign/list_of.hpp> #include <boost/lexical_cast.hpp> #include <algorithm> using namespace multiverse; CDbpService::CDbpService() : walleve::IIOModule("dbpservice") { pService = NULL; pCoreProtocol = NULL; pWallet = NULL; pDbpServer = NULL; pNetChannel = NULL; std::unordered_map<std::string, bool> temp_map = boost::assign::map_list_of("all-block", true)("all-tx", true)("changed", true)("removed", true); mapCurrentTopicExist = temp_map; } CDbpService::~CDbpService() noexcept { } bool CDbpService::WalleveHandleInitialize() { if (!WalleveGetObject("coreprotocol", pCoreProtocol)) { WalleveError("Failed to request coreprotocol\n"); return false; } if (!WalleveGetObject("service", pService)) { WalleveError("Failed to request service\n"); return false; } if (!WalleveGetObject("wallet", pWallet)) { WalleveError("Failed to request wallet\n"); return false; } if (!WalleveGetObject("dbpserver", pDbpServer)) { WalleveError("Failed to request dbpserver\n"); return false; } if(!WalleveGetObject("dbpclient",pDbpClient)) { WalleveError("Failed to request dbpclient\n"); return false; } if (!WalleveGetObject("netchannel",pNetChannel)) { WalleveError("Failed to request peer net datachannel\n"); return false; } return true; } void CDbpService::WalleveHandleDeinitialize() { pDbpServer = NULL; pService = NULL; pCoreProtocol = NULL; pWallet = NULL; pNetChannel = NULL; } bool CDbpService::HandleEvent(CMvEventDbpPong& event) { (void)event; return true; } bool CDbpService::HandleEvent(CMvEventDbpBroken& event) { mapSessionChildNodeForks.erase(event.strSessionId); return true; } bool CDbpService::HandleEvent(CMvEventDbpRemoveSession& event) { RemoveSession(event.data.session); return true; } static std::string GetHex(std::string data) { int n = 2 * data.length() + 1; std::string ret; const char c_map[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; ret.reserve(n); for (const unsigned char &c : data) { ret.push_back(c_map[c >> 4]); ret.push_back(c_map[c & 15]); } return ret; } static void print_block(CMvDbpBlock &block) { std::string hash(block.hash.begin(), block.hash.end()); std::reverse(hash.begin(), hash.end()); std::string prev_hash(block.hashPrev.begin(), block.hashPrev.end()); std::reverse(prev_hash.begin(), prev_hash.end()); std::cout << "[<]recived block" << std::endl; std::cout << " hash:" << GetHex(hash) << std::endl; std::cout << " height:" << block.nHeight << std::endl; std::cout << " prev hash:" << GetHex(prev_hash) << std::endl; } static void print_tx(CMvDbpTransaction &tx) { std::string hash(tx.hash.begin(),tx.hash.end()); std::reverse(hash.begin(), hash.end()); std::string sig(tx.vchSig.begin(),tx.vchSig.end()); std::reverse(sig.begin(), sig.end()); std::cout << "[<]recived transaction" << std::endl; std::cout << " hash:" << GetHex(hash) << std::endl; std::cout << " sig:" << GetHex(sig) << std::endl; } bool CDbpService::HandleEvent(CMvEventDbpAdded& event) { if(event.data.name == "all-block") { CMvDbpBlock block = boost::any_cast<CMvDbpBlock>(event.data.anyAddedObj); print_block(block); } else if(event.data.name == "all-tx") { CMvDbpTransaction tx = boost::any_cast<CMvDbpTransaction>(event.data.anyAddedObj); print_tx(tx); } else { return false; } return true; } bool 
CDbpService::HandleEvent(CMvEventDbpConnect& event) { bool isReconnect = event.data.isReconnect; if (isReconnect) { UpdateChildNodeForks(event.strSessionId,event.data.forks); // reply normal CMvEventDbpConnected eventConnected(event.strSessionId); eventConnected.data.session = event.data.session; pDbpServer->DispatchEvent(&eventConnected); } else { if (event.data.version != 1) { // reply failed std::vector<int> versions{1}; CMvEventDbpFailed eventFailed(event.strSessionId); eventFailed.data.reason = "001"; eventFailed.data.versions = versions; eventFailed.data.session = event.data.session; pDbpServer->DispatchEvent(&eventFailed); } else { UpdateChildNodeForks(event.strSessionId,event.data.forks); // reply normal CMvEventDbpConnected eventConnected(event.strSessionId); eventConnected.data.session = event.data.session; pDbpServer->DispatchEvent(&eventConnected); } } return true; } bool CDbpService::HandleEvent(CMvEventDbpSub& event) { std::string id = event.data.id; std::string topicName = event.data.name; if (!IsTopicExist(topicName)) { // reply nosub CMvEventDbpNoSub eventNoSub(event.strSessionId); eventNoSub.data.id = event.data.id; pDbpServer->DispatchEvent(&eventNoSub); } else { SubTopic(id, event.strSessionId, topicName); // reply ready CMvEventDbpReady eventReady(event.strSessionId); eventReady.data.id = id; pDbpServer->DispatchEvent(&eventReady); } return true; } bool CDbpService::HandleEvent(CMvEventDbpUnSub& event) { UnSubTopic(event.data.id); return true; } void CDbpService::HandleGetTransaction(CMvEventDbpMethod& event) { std::string id = event.data.id; std::string txid = boost::any_cast<std::string> (event.data.params["hash"]); uint256 txHash(txid); CTransaction tx; uint256 forkHash; int blockHeight; if (pService->GetTransaction(txHash, tx, forkHash, blockHeight)) { CMvDbpTransaction dbpTx; CreateDbpTransaction(tx, 0, dbpTx); CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = id; eventResult.data.anyResultObjs.push_back(dbpTx); pDbpServer->DispatchEvent(&eventResult); } else { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = id; eventResult.data.error = "404"; pDbpServer->DispatchEvent(&eventResult); } } void CDbpService::HandleSendTransaction(CMvEventDbpMethod& event) { std::string data = boost::any_cast <std::string>(event.data.params["data"]); std::vector<unsigned char> txData(data.begin(), data.end()); walleve::CWalleveBufStream ss; ss.Write((char *)&txData[0], txData.size()); CTransaction rawTx; try { ss >> rawTx; } catch (const std::exception& e) { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; eventResult.data.error = "400"; pDbpServer->DispatchEvent(&eventResult); return; } MvErr err = pService->SendTransaction(rawTx); if (err == MV_OK) { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; CMvDbpSendTransactionRet sendTxRet; sendTxRet.hash = data; sendTxRet.result = "succeed"; eventResult.data.anyResultObjs.push_back(sendTxRet); pDbpServer->DispatchEvent(&eventResult); } else { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; CMvDbpSendTransactionRet sendTxRet; sendTxRet.hash = data; sendTxRet.result = "failed"; sendTxRet.reason = std::string(MvErrString(err)); eventResult.data.anyResultObjs.push_back(sendTxRet); pDbpServer->DispatchEvent(&eventResult); } } bool CDbpService::IsTopicExist(const std::string& topic) { return mapCurrentTopicExist.find(topic) != 
mapCurrentTopicExist.end(); } bool CDbpService::IsHaveSubedTopicOf(const std::string& id) { return mapIdSubedTopic.find(id) != mapIdSubedTopic.end(); } void CDbpService::SubTopic(const std::string& id, const std::string& session, const std::string& topic) { mapIdSubedTopic.insert(std::make_pair(id, topic)); if (topic == "all-block") setSubedAllBlocksIds.insert(id); if (topic == "all-tx") setSubedAllTxIds.insert(id); mapIdSubedSession.insert(std::make_pair(id, session)); } void CDbpService::UnSubTopic(const std::string& id) { setSubedAllBlocksIds.erase(id); setSubedAllTxIds.erase(id); mapIdSubedTopic.erase(id); mapIdSubedSession.erase(id); } void CDbpService::RemoveSession(const std::string& session) { std::vector<std::string> vBeDeletedIds; for(const auto& kv : mapIdSubedSession) { std::string id = kv.first; std::string valueSession = kv.second; if(valueSession == session) { vBeDeletedIds.push_back(id); } } for(const auto& id : vBeDeletedIds) { UnSubTopic(id); } } bool CDbpService::IsEmpty(const uint256& hash) { static const std::string EMPTY_HASH("0000000000000000000000000000000000000000000000000000000000000000"); return hash.ToString() == EMPTY_HASH; } bool CDbpService::IsForkHash(const uint256& hash) { std::vector<std::pair<uint256,CProfile>> forks; pService->ListFork(forks); for(const auto& fork : forks) { if(fork.first == hash) { return true; } } return false; } void CDbpService::TrySwitchFork(const uint256& blockHash,uint256& forkHash) { auto it = mapForkPoint.find(blockHash.ToString()); if(it != mapForkPoint.end()) { auto value = it->second; forkHash = value.first; } } bool CDbpService::CalcForkPoints(const uint256& forkHash) { std::vector<std::pair<uint256,int>> vAncestors; std::vector<std::pair<int,uint256>> vSublines; std::vector<std::pair<uint256,uint256>> path; if(!pService->GetForkGenealogy(forkHash,vAncestors,vSublines)) { return false; } std::vector<std::pair<uint256,uint256>> forkAncestors; for(int i = vAncestors.size() - 1; i >= 0; i--) { CBlock block; uint256 tempFork; int nHeight = 0; pService->GetBlock(vAncestors[i].first,block,tempFork,nHeight); forkAncestors.push_back(std::make_pair(vAncestors[i].first,block.hashPrev)); } path = forkAncestors; CBlock block; uint256 tempFork; int nHeight = 0; pService->GetBlock(forkHash,block,tempFork,nHeight); path.push_back(std::make_pair(forkHash,block.hashPrev)); for(const auto& fork : path) { mapForkPoint.insert(std::make_pair(fork.second.ToString(), std::make_pair(fork.first,fork.second))); } return true; } bool CDbpService::GetBlocks(const uint256& forkHash, const uint256& startHash, int32 n, std::vector<CMvDbpBlock>& blocks) { uint256 connectForkHash = forkHash; uint256 blockHash = startHash; if (IsEmpty(connectForkHash)) { connectForkHash = pCoreProtocol->GetGenesisBlockHash(); } if(!IsForkHash(connectForkHash)) { std::cerr << "connect fork hash is not a fork hash.\n"; return false; } if (IsEmpty(blockHash)) { blockHash = pCoreProtocol->GetGenesisBlockHash(); } int blockHeight = 0; uint256 tempForkHash; if (!pService->GetBlockLocation(blockHash, tempForkHash, blockHeight)) { std::cerr << "GetBlockLocation failed\n"; return false; } if(!CalcForkPoints(connectForkHash)) { std::cerr << "CalcForkPoint failed.\n"; return false; } const std::size_t nonExtendBlockMaxNum = n; std::size_t nonExtendBlockCount = 0; pService->GetBlockLocation(blockHash, tempForkHash, blockHeight); std::vector<uint256> blocksHash; while (nonExtendBlockCount < nonExtendBlockMaxNum && pService->GetBlockHash(tempForkHash, blockHeight, blocksHash)) { for(int i 
= 0; i < blocksHash.size(); ++i) { CBlockEx block; int height; pService->GetBlockEx(blocksHash[i], block, tempForkHash, height); if (block.nType != CBlock::BLOCK_EXTENDED) { nonExtendBlockCount++; } CMvDbpBlock DbpBlock; CreateDbpBlock(block, tempForkHash, height, DbpBlock); blocks.push_back(DbpBlock); } TrySwitchFork(blocksHash[0],tempForkHash); blockHeight++; blocksHash.clear(); blocksHash.shrink_to_fit(); } return true; } void CDbpService::HandleGetBlocks(CMvEventDbpMethod& event) { std::string forkid = boost::any_cast<std::string>(event.data.params["forkid"]); std::string blockHash = boost::any_cast<std::string>(event.data.params["hash"]); std::string num = boost::any_cast<std::string>(event.data.params["number"]); int32 blockNum = boost::lexical_cast<int32>(num); uint256 startBlockHash(std::vector<unsigned char>(blockHash.begin(), blockHash.end())); uint256 forkHash; forkHash.SetHex(forkid); std::vector<CMvDbpBlock> blocks; if (GetBlocks(forkHash, startBlockHash, blockNum, blocks)) { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; for (auto& block : blocks) { eventResult.data.anyResultObjs.push_back(block); } pDbpServer->DispatchEvent(&eventResult); } else { CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; eventResult.data.error = "400"; pDbpServer->DispatchEvent(&eventResult); } } bool CDbpService::HandleEvent(CMvEventDbpRegisterForkID& event) { std::string& forkid = event.data.forkid; setThisNodeForks.insert(forkid); return true; } void CDbpService::HandleRegisterFork(CMvEventDbpMethod& event) { std::string forkid = boost::any_cast<std::string>(event.data.params["forkid"]); UpdateChildNodeForks(event.strSessionId,forkid); CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; CMvDbpRegisterForkIDRet ret; ret.forkid = forkid; eventResult.data.anyResultObjs.push_back(ret); pDbpServer->DispatchEvent(&eventResult); // notify dbp client to send message to parent node CMvEventDbpRegisterForkID eventRegister(""); eventRegister.data.forkid = forkid; pDbpClient->DispatchEvent(&eventRegister); } void CDbpService::HandleSendBlock(CMvEventDbpMethod& event) { CMvDbpBlock block = boost::any_cast<CMvDbpBlock>(event.data.params["data"]); // TO DO CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; CMvDbpSendBlockRet ret; ret.hash = std::string(block.hash.begin(), block.hash.end()); eventResult.data.anyResultObjs.push_back(ret); pDbpServer->DispatchEvent(&eventResult); CMvEventDbpSendBlock eventSendBlock(""); eventSendBlock.data.block = block; pDbpClient->DispatchEvent(&eventSendBlock); } void CDbpService::HandleSendTx(CMvEventDbpMethod& event) { CMvDbpTransaction tx = boost::any_cast<CMvDbpTransaction>(event.data.params["data"]); // TODO CMvEventDbpMethodResult eventResult(event.strSessionId); eventResult.data.id = event.data.id; CMvDbpSendTxRet ret; ret.hash = std::string(tx.hash.begin(), tx.hash.end()); eventResult.data.anyResultObjs.push_back(ret); pDbpServer->DispatchEvent(&eventResult); CMvEventDbpSendTx eventSendTx(""); eventSendTx.data.tx = tx; pDbpClient->DispatchEvent(&eventSendTx); } bool CDbpService::HandleEvent(CMvEventDbpMethod& event) { if (event.data.method == CMvDbpMethod::Method::GET_BLOCKS) { HandleGetBlocks(event); } else if (event.data.method == CMvDbpMethod::Method::GET_TRANSACTION) { HandleGetTransaction(event); } else if (event.data.method == CMvDbpMethod::Method::SEND_TRANSACTION) { 
HandleSendTransaction(event); } else if(event.data.method == CMvDbpMethod::Method::REGISTER_FORK) { HandleRegisterFork(event); } else if(event.data.method == CMvDbpMethod::Method::SEND_BLOCK) { HandleSendBlock(event); } else if(event.data.method == CMvDbpMethod::Method::SEND_TX) { HandleSendTx(event); } else { return false; } return true; } void CDbpService::CreateDbpBlock(const CBlockEx& blockDetail, const uint256& forkHash, int blockHeight, CMvDbpBlock& block) { block.nVersion = blockDetail.nVersion; block.nType = blockDetail.nType; block.nTimeStamp = blockDetail.nTimeStamp; walleve::CWalleveODataStream hashPrevStream(block.hashPrev); blockDetail.hashPrev.ToDataStream(hashPrevStream); walleve::CWalleveODataStream hashMerkleStream(block.hashMerkle); blockDetail.hashMerkle.ToDataStream(hashMerkleStream); block.vchProof = blockDetail.vchProof; block.vchSig = blockDetail.vchSig; // txMint CreateDbpTransaction(blockDetail.txMint, blockDetail.txMint.GetChange(0), block.txMint); // vtx int k = 0; for (const auto& tx : blockDetail.vtx) { CMvDbpTransaction dbpTx; int64 nValueIn = blockDetail.vTxContxt[k++].GetValueIn(); CreateDbpTransaction(tx, tx.GetChange(nValueIn), dbpTx); block.vtx.push_back(dbpTx); } block.nHeight = blockHeight; walleve::CWalleveODataStream hashStream(block.hash); blockDetail.GetHash().ToDataStream(hashStream); } void CDbpService::CreateDbpTransaction(const CTransaction& tx, int64 nChange, CMvDbpTransaction& dbptx) { dbptx.nVersion = tx.nVersion; dbptx.nType = tx.nType; dbptx.nLockUntil = tx.nLockUntil; walleve::CWalleveODataStream hashAnchorStream(dbptx.hashAnchor); tx.hashAnchor.ToDataStream(hashAnchorStream); for (const auto& input : tx.vInput) { CMvDbpTxIn txin; txin.n = input.prevout.n; walleve::CWalleveODataStream txInHashStream(txin.hash); input.prevout.hash.ToDataStream(txInHashStream); dbptx.vInput.push_back(txin); } dbptx.cDestination.prefix = tx.sendTo.prefix; dbptx.cDestination.size = tx.sendTo.DESTINATION_SIZE; walleve::CWalleveODataStream sendtoStream(dbptx.cDestination.data); tx.sendTo.data.ToDataStream(sendtoStream); dbptx.nAmount = tx.nAmount; dbptx.nTxFee = tx.nTxFee; dbptx.nChange = nChange; dbptx.vchData = tx.vchData; dbptx.vchSig = tx.vchSig; walleve::CWalleveODataStream hashStream(dbptx.hash); tx.GetHash().ToDataStream(hashStream); } void CDbpService::PushBlock(const std::string& forkid, const CMvDbpBlock& block) { for (const auto& kv : mapIdSubedSession) { std::string id = kv.first; std::string session = kv.second; if (setSubedAllBlocksIds.find(id) != setSubedAllBlocksIds.end()) { CMvEventDbpAdded eventAdded(session); eventAdded.data.id = id; eventAdded.data.forkid = forkid; eventAdded.data.name = "all-block"; eventAdded.data.anyAddedObj = block; pDbpServer->DispatchEvent(&eventAdded); } } } void CDbpService::PushTx(const std::string& forkid, const CMvDbpTransaction& dbptx) { for (const auto& kv : mapIdSubedSession) { std::string id = kv.first; std::string session = kv.second; if (setSubedAllTxIds.find(id) != setSubedAllTxIds.end()) { CMvEventDbpAdded eventAdded(session); eventAdded.data.id = id; eventAdded.data.forkid = forkid; eventAdded.data.name = "all-tx"; eventAdded.data.anyAddedObj = dbptx; pDbpServer->DispatchEvent(&eventAdded); } } } void CDbpService::UpdateChildNodeForks(const std::string& session, const std::string& forks) { std::vector<std::string> vForks = CDbpUtils::Split(forks,';'); ForksType setForks; for(const auto& fork : vForks) { setForks.insert(fork); } if(mapSessionChildNodeForks.count(session) == 0) { mapSessionChildNodeForks[session] = setForks; }
else { auto& forks = mapSessionChildNodeForks[session]; forks.insert(setForks.begin(),setForks.end()); } } bool CDbpService::HandleEvent(CMvEventDbpUpdateNewBlock& event) { // get details about new block CBlockEx& newBlock = event.data; uint256 forkHash; int blockHeight = 0; if (pService->GetBlockLocation(newBlock.GetHash(),forkHash,blockHeight)) { CMvDbpBlock block; CreateDbpBlock(newBlock, forkHash, blockHeight, block); PushBlock(forkHash.ToString(),block); } return true; } bool CDbpService::HandleEvent(CMvEventDbpUpdateNewTx& event) { decltype(event.data)& newtx = event.data; std::string forkid = event.hashFork.ToString(); int64& change = event.nChange; CMvDbpTransaction dbpTx; CreateDbpTransaction(newtx, change, dbpTx); PushTx(forkid,dbpTx); return true; }
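// The UpdateChildNodeForks handler above parses a ';'-separated fork list via
// CDbpUtils::Split, whose definition lies outside this excerpt. Below is a
// minimal sketch of such a helper, assuming the conventional "split on one
// delimiter, drop empty tokens" behavior; the actual CDbpUtils may differ.
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> SplitSketch(const std::string& str, char delim)
{
    std::vector<std::string> vTokens;
    std::istringstream ss(str);
    std::string token;
    while (std::getline(ss, token, delim))
    {
        if (!token.empty())
        {
            vTokens.push_back(token); // keep only non-empty fork ids
        }
    }
    return vTokens;
}
// e.g. SplitSketch("fork1;fork2", ';') yields {"fork1", "fork2"}.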
#' Score Initiation Indicators #' #' @param df site MSD file #' #' @export #' @importFrom magrittr %>% score_init <- function(df){ #1.&2. TX_NEW Position relative to OU-/PSNU-level median (median last three quarters) #pull headers to figure out last three quarters to keep headers <- names(df) pds <- headers[stringr::str_detect(headers, "q(?=[:digit:])")] %>% tail(., n =3) #narrow down to data needed for indicator creation init_tx_new <- df %>% dplyr::filter(indicator == "TX_NEW", standardizeddisaggregate == "Total Numerator", typemilitary == "N") %>% dplyr::select(operatingunit, psnu,sitename, orgunituid, pds) %>% dplyr::filter_if(is.numeric, dplyr::any_vars(!is.na(.) & .!=0)) #remove if all quarters are missing #site sum over last 3 pds init_tx_new <- init_tx_new %>% tidyr::gather(pd, val, dplyr::starts_with("fy")) %>% dplyr::select(-pd) %>% dplyr::group_by_if(is.character) %>% dplyr::summarise_if(is.numeric, ~ sum(., na.rm = TRUE)) %>% dplyr::ungroup() #Calculate percentile grouping init_tx_new <- init_tx_new %>% dplyr::group_by(operatingunit) %>% dplyr::mutate(init.tx_new_ou.score = dplyr::case_when(val > quantile(val, .75) ~ 2, val > quantile(val, .50) ~ 1, TRUE ~ 0)) %>% dplyr::ungroup() %>% dplyr::group_by(operatingunit, psnu) %>% dplyr::mutate(init.tx_new_psnu.score = dplyr::case_when(val > quantile(val, .75) ~ 2, val > quantile(val, .50) ~ 1, TRUE ~ 0)) %>% dplyr::ungroup() %>% dplyr::mutate(init.tx_new_ou.value = val) %>% dplyr::rename(init.tx_new_psnu.value = val) #3. Year on Year change in volume qtr <- ICPIutilities::identifypd(df, "quarter") qtr_fltr <- dplyr::case_when(qtr == 1 ~ "1", qtr == 2 ~ "(1|2)", qtr == 3 ~ "(1|2|3)", qtr == 4 ~ "(?=[:digit:])") pds <- headers[stringr::str_detect(headers, paste0("q", qtr_fltr))] init_tx_new_yoy <- df %>% dplyr::filter(indicator == "TX_NEW", standardizeddisaggregate == "Total Numerator", typemilitary == "N") %>% dplyr::select(operatingunit, psnu,sitename, orgunituid, pds) init_tx_new_yoy <- init_tx_new_yoy %>% tidyr::gather(pd, val, dplyr::starts_with("fy"), na.rm = TRUE) %>% dplyr::mutate(pd = stringr::str_remove_all(pd, "q[:digit:]")) %>% dplyr::group_by_if(is.character) %>% dplyr::summarise_if(is.numeric, ~ sum(., na.rm = TRUE)) %>% dplyr::mutate(init.tx_new_yoyd = val - dplyr::lag(val), init.tx_new_yoyc = (val - dplyr::lag(val))/dplyr::lag(val)) %>% dplyr::ungroup() %>% dplyr::filter(pd == "fy2018", is.finite(init.tx_new_yoyc)) #Calculate percentile grouping init_tx_new_yoy <- init_tx_new_yoy %>% dplyr::group_by(operatingunit) %>% dplyr::mutate(init.tx_new_yoyd.score = dplyr::case_when(init.tx_new_yoyd > quantile(init.tx_new_yoyd, .75) ~ 2, init.tx_new_yoyd > quantile(init.tx_new_yoyd, .50) ~ 1, TRUE ~ 0), init.tx_new_yoyc.score = dplyr::case_when(init.tx_new_yoyc > quantile(init.tx_new_yoyc, .75) ~ 2, init.tx_new_yoyc > quantile(init.tx_new_yoyc, .50) ~ 1, TRUE ~ 0)) %>% dplyr::ungroup() %>% dplyr::rename(init.tx_new_yoyd.value = init.tx_new_yoyd, init.tx_new_yoyc.value = init.tx_new_yoyc, init.tx_new_yoy.value = val) %>% dplyr::select(-pd, -init.tx_new_yoy.value) #4.
NET NEW change init_tx_netnew_yoy <- df %>% dplyr::filter(indicator == "TX_NET_NEW", standardizeddisaggregate == "Total Numerator", typemilitary == "N") %>% dplyr::select(operatingunit, psnu,sitename, orgunituid, pds) init_tx_netnew_yoy <- init_tx_netnew_yoy %>% tidyr::gather(pd, val, dplyr::starts_with("fy"), na.rm = TRUE) %>% dplyr::mutate(pd = stringr::str_remove_all(pd, "q[:digit:]")) %>% dplyr::group_by_if(is.character) %>% dplyr::summarise_if(is.numeric, ~ sum(., na.rm = TRUE)) %>% dplyr::mutate(init.tx_netnew_yoyd = val - dplyr::lag(val), init.tx_netnew_yoyc = (val - dplyr::lag(val))/dplyr::lag(val)) %>% dplyr::ungroup() %>% dplyr::filter(pd == "fy2018", is.finite(init.tx_netnew_yoyc)) #Calculate percentile grouping init_tx_netnew_yoy <- init_tx_netnew_yoy %>% dplyr::group_by(operatingunit) %>% dplyr::mutate(init.tx_netnew_yoyd.score = dplyr::case_when(init.tx_netnew_yoyd > quantile(init.tx_netnew_yoyd, .75) ~ 2, init.tx_netnew_yoyd > quantile(init.tx_netnew_yoyd, .50) ~ 1, TRUE ~ 0), init.tx_netnew_yoyc.score = dplyr::case_when(init.tx_netnew_yoyc > quantile(init.tx_netnew_yoyc, .75) ~ 2, init.tx_netnew_yoyc > quantile(init.tx_netnew_yoyc, .50) ~ 1, TRUE ~ 0)) %>% dplyr::ungroup() %>% dplyr::rename(init.tx_netnew_yoyd.value = init.tx_netnew_yoyd, init.tx_netnew_yoyc.value = init.tx_netnew_yoyc, init.tx_netnew_yoy.value = val) %>% dplyr::select(-pd, -init.tx_netnew_yoy.value) #combine all together init <- list(init_tx_new, init_tx_new_yoy, init_tx_netnew_yoy) %>% purrr::reduce(dplyr::full_join, by = c("operatingunit", "psnu", "sitename", "orgunituid")) return(init) }
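# A hedged usage sketch for score_init() above: it expects a site-level MSD
# data frame carrying the columns filtered on in the function (indicator,
# standardizeddisaggregate, typemilitary, operatingunit, psnu, sitename,
# orgunituid, and the fy*q* period columns). The file name and read call
# below are illustrative assumptions, not part of this package.
# df_site <- readr::read_tsv("MER_Structured_Dataset_Site_IM.txt")
# init_scores <- score_init(df_site)
# dplyr::glimpse(init_scores)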
import numpy as np class Basis: """ Class to store basis information Attributes ---------- mol : object molecule object this basis is associated with (stored as given) """ def __init__(self, mol): self.mol = mol
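# A hedged usage sketch for the Basis stub above: the constructor only stores
# the molecule object, so any object works here; FakeMol is an illustrative
# stand-in, not part of the original code.
if __name__ == "__main__":
    class FakeMol:
        name = "H2O"

    basis = Basis(FakeMol())
    print(basis.mol.name)  # prints "H2O"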
[STATEMENT] lemma leadsETo_weaken_L: "[| F \<in> A leadsTo[CC] A'; B<=A |] ==> F \<in> B leadsTo[CC] A'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>F \<in> A leadsTo[CC] A'; B \<subseteq> A\<rbrakk> \<Longrightarrow> F \<in> B leadsTo[CC] A' [PROOF STEP] by (blast intro: leadsETo_Trans subset_imp_leadsETo)
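(* A hedged corollary sketch: left-weakening chains across nested subsets,
   so blast can discharge it by applying the rule above twice.
   The lemma name is illustrative. *)
lemma leadsETo_weaken_L2:
  "[| F \<in> A leadsTo[CC] A'; C <= B; B <= A |] ==> F \<in> C leadsTo[CC] A'"
by (blast intro: leadsETo_weaken_L)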
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__44_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_g2kAbsAfter Protocol Case Study*} theory n_g2kAbsAfter_lemma_inv__44_on_rules imports n_g2kAbsAfter_lemma_on_inv__44 begin section{*All lemmas on causal relation between inv__44*} lemma lemma_inv__44_on_rules: assumes b1: "r \<in> rules N" and b2: "(f=inv__44 )" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or> (\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or> (r=n_n_SendReqS_j1 )\<or> (r=n_n_SendReqEI_i1 )\<or> (r=n_n_SendReqES_i1 )\<or> (r=n_n_RecvReq_i1 )\<or> (r=n_n_SendInvE_i1 )\<or> (r=n_n_SendInvS_i1 )\<or> (r=n_n_SendInvAck_i1 )\<or> (r=n_n_RecvInvAck_i1 )\<or> (r=n_n_SendGntS_i1 )\<or> (r=n_n_SendGntE_i1 )\<or> (r=n_n_RecvGntS_i1 )\<or> (r=n_n_RecvGntE_i1 )\<or> (r=n_n_ASendReqIS_j1 )\<or> (r=n_n_ASendReqSE_j1 )\<or> (r=n_n_ASendReqEI_i1 )\<or> (r=n_n_ASendReqES_i1 )\<or> (r=n_n_SendReqEE_i1 )\<or> (r=n_n_ARecvReq_i1 )\<or> (r=n_n_ASendInvE_i1 )\<or> (r=n_n_ASendInvS_i1 )\<or> (r=n_n_ASendInvAck_i1 )\<or> (r=n_n_ARecvInvAck_i1 )\<or> (r=n_n_ASendGntS_i1 )\<or> (r=n_n_ASendGntE_i1 )\<or> (r=n_n_ARecvGntS_i1 )\<or> (r=n_n_ARecvGntE_i1 )" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__44) done } moreover { assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendReqS_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendReqEI_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendReqES_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_RecvReq_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendInvE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendInvS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_RecvInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_RecvGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_RecvGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 
d1, metis n_n_RecvGntE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendReqIS_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendReqSE_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendReqEI_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendReqES_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_SendReqEE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ARecvReq_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendInvE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendInvS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ARecvInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ASendGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ARecvGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__44) done } moreover { assume d1: "(r=n_n_ARecvGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__44) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
State Before: a b : Int H : a % b = 0 ⊢ b * (a / b) = a State After: a b : Int H : a % b = 0 this : a % b + b * (a / b) = a ⊢ b * (a / b) = a Tactic: have := emod_add_ediv a b State Before: a b : Int H : a % b = 0 this : a % b + b * (a / b) = a ⊢ b * (a / b) = a State After: no goals Tactic: rwa [H, Int.zero_add] at this
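-- A hedged reconstruction of the declaration these tactic states come from;
-- the statement and both tactic steps are taken verbatim from the states above.
example (a b : Int) (H : a % b = 0) : b * (a / b) = a := by
  have := Int.emod_add_ediv a b
  rwa [H, Int.zero_add] at this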
Require Import ZArith PArith Lia. Require Import ASN1FP.Types.ASN ASN1FP.Aux.Roundtrip ASN1FP.Aux.StructTactics ASN1FP.Aux.Aux ASN1FP.Aux.Tactics ASN1FP.Aux.Option ASN1FP.Aux.StrongInduction. Require Import Flocq.Core.Zaux Flocq.IEEE754.Binary Flocq.Core.Defs. Open Scope Z. (* * equivalence on floats of the same type, returning `true` * for normal equality * or for any two NaN values (NaN payloads not taken into account) *) Definition float_eqb_nan_t {prec emax : Z} (x y : binary_float prec emax) : bool := match Bcompare prec emax x y with | Some Eq => true | None => true | _ => false end. Definition binary_bounded_sumbool (prec emax : Z) (m : positive) (e : Z) := Sumbool.sumbool_of_bool (Binary.bounded prec emax m e). Section Base2. Variable prec emax : Z. Hypothesis prec_gt_1 : prec > 1. Hypothesis Hmax : (prec < emax)%Z. (* only radix = 2 is considered for both formats in this section * scaling factor is, therefore, not required in BER * TODO: for arbitrary radix/scaling combinations, refer to another section *) Let r := radix2. Let scl := 0. (* can a given (m,e) pair be represented in IEEE/BER exactly *) Let valid_IEEE := bounded prec emax. Let valid_BER := valid_BER r scl. (* aux: apply variables *) Let IEEE_float := binary_float prec emax. Let valid_IEEE_sumbool := binary_bounded_sumbool prec emax. Let BER_finite_b2 := BER_finite r scl. Let valid_BER_sumbool := valid_BER_sumbool r scl. (* 1 can always be the payload of a NaN *) Lemma def_NaN : nan_pl prec 1 = true. Proof. unfold nan_pl. simpl. apply Z.ltb_lt, Z.gt_lt, prec_gt_1. Qed. Lemma prec_gt_0 : Flocq.Core.FLX.Prec_gt_0 prec. Proof. unfold Flocq.Core.FLX.Prec_gt_0. apply (Z.lt_trans 0 1 prec). - (* 0 < 1 *) reflexivity. - (* 1 < prec *) apply Z.gt_lt. apply prec_gt_1. Qed. Section Def. (* * given a pair (m,e), return (mx, ex) such that * m*2^e = mx*2^ex * and * mx is odd *) Definition normalize_BER_finite_nrec (m : positive) (e : Z) : (positive * Z) := let t := N.log2 (((Pos.lxor m (m-1)) + 1) / 2) in (Pos.shiftr m t, e + (Z.of_N t)). Fixpoint normalize_BER_finite (m : positive) (e : Z) : positive * Z := match m with | xO p => normalize_BER_finite p (e + 1) | _ => (m, e) end. (* * given all meaningful parts of a BER real, construct it, if possible * The content is normalized in accordance with [ 11.3.1 ] if possible *) Definition make_BER_finite (s : bool) (m : positive) (e : Z) : option BER_float := let '(mx, ex) := normalize_BER_finite m e in match valid_BER_sumbool mx ex with | left V => Some (BER_finite_b2 s mx ex V) | right _ => None end. (* TODO: radix, scaling (determine `b` and `ff`) *) (* * exact conversion from IEEE to BER * no rounding is performed: if conversion is impossible without rounding * `None` is returned *) Definition BER_of_IEEE_exact (f : IEEE_float) : option BER_float := match f with | B754_zero _ _ s => Some (BER_zero s) | B754_infinity _ _ s => Some (BER_infinity s) | B754_nan _ _ _ _ _ => Some (BER_nan) | B754_finite _ _ s m e _ => make_BER_finite s m e end. (* * turn any pair (m,e) into a pair (mx,ex), representable in * IEEE 754 if possible. No rounding is performed, * (m,e) remains unchanged if normalization is impossible without rounding *) Definition normalize_IEEE_finite : positive -> Z -> (positive * Z) := shl_align_fexp prec emax.
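(* A hedged sanity-check sketch for the normalization above:
   12 * 2^0 = 3 * 2^2 with an odd mantissa. The example name is illustrative. *)
Example normalize_BER_finite_12 :
  normalize_BER_finite 12%positive 0%Z = (3%positive, 2%Z).
Proof. reflexivity. Qed.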
(* given all meaningful parts of an IEEE float, construct it, if possible *) Definition make_IEEE_finite (s : bool) (m : positive) (e : Z) : option IEEE_float := let '(mx, ex) := normalize_IEEE_finite m e in match (valid_IEEE_sumbool mx ex) with | left V => Some (B754_finite _ _ s mx ex V) | right _ => None end. (* * exact conversion from BER to IEEE * radix2 with no scaling assumed: if input does not match, 'None' returned * no rounding is performed: if conversion is impossible without rounding * `None` is returned *) Definition IEEE_of_BER_exact (f : BER_float) : option IEEE_float := match f with | BER_zero s => Some (B754_zero _ _ s) | BER_infinity s => Some (B754_infinity _ _ s) | BER_nan => Some (B754_nan _ _ false 1 def_NaN) | BER_finite b f s m e _ => if andb (b =? 2) (f =? 0) then make_IEEE_finite s m e else None end. (* * given a triple (s,m,e) standing for s*m*2^e, * return a corresponding binary_float, * correctly rounded in accordance with the specified rounding mode *) Definition round_finite (Hmax : prec < emax) (rounding : mode) (s : bool) (m : positive) (e : Z) : IEEE_float := binary_normalize prec emax prec_gt_0 Hmax rounding (cond_Zopp s (Zpos m)) e s. (* * for any ASN.1 BER-encoded real number s*m*(b^e) * return the number's representation in the target IEEE format * rounded in accordance with the provided rounding mode if necessary * * NOTE: * 1) If initial BER encoding has radix /= 2 or scaling factor /= 0 * `None` is returned * 2) If the ASN encoding is a NaN, * float's NaN payload is set to 1 *) Definition IEEE_of_BER_rounded (Hmax : prec < emax) (rounding : mode) (r : BER_float) : option (IEEE_float) := match r with | BER_zero s => Some (B754_zero _ _ s) | BER_infinity s => Some (B754_infinity _ _ s) | BER_nan => Some (B754_nan _ _ false 1 def_NaN) | BER_finite b f s m e x => if andb (b =? 2) (f =? 0) then Some (round_finite Hmax rounding s m e) else None end. (* * given a binary_float and a rounding mode * convert it to target format, rounding if necessary * * NaN payload is set to 1 unconditionally *) Definition IEEE_of_IEEE_round_reset_nan (Hmax : prec < emax) (rounding : mode) (f : IEEE_float) : IEEE_float := match f with | B754_nan _ _ _ _ _ => B754_nan _ _ false 1 def_NaN | B754_infinity _ _ s => B754_infinity _ _ s | B754_zero _ _ s => B754_zero _ _ s | B754_finite _ _ s m e _ => round_finite Hmax rounding s m e end. End Def. Section Proof. Definition R_of_float (m : positive) (e : Z) := F2R (Float radix2 (Zpos m) e). Lemma R_of_valid_IEEE_inj {m1 m2 : positive} {e1 e2 : Z} : valid_IEEE m1 e1 = true -> valid_IEEE m2 e2 = true -> R_of_float m1 e1 = R_of_float m2 e2 -> (m1, e1) = (m2,e2). Proof. intros V1 V2 Req. remember (B754_finite prec emax false m1 e1 V1) as f1. remember (B754_finite prec emax false m2 e2 V2) as f2. assert (fin_f1 : is_finite_strict prec emax f1 = true) by (subst; auto). assert (fin_f2 : is_finite_strict prec emax f2 = true) by (subst; auto). generalize (B2R_inj prec emax f1 f2 fin_f1 fin_f2); intros. unfold B2R in H. rewrite Heqf1, Heqf2 in H. apply H in Req. inversion Req. auto. Qed. Lemma normalize_IEEE_eq (m : positive) (e : Z) : R_of_float m e = uncurry R_of_float (normalize_IEEE_finite m e). Proof. unfold R_of_float, normalize_IEEE_finite, uncurry. break_let. generalize (shl_align_fexp_correct prec emax m e). intros H; rewrite Heqp in H; apply proj1 in H. apply H. Qed. Definition Podd (p : positive) : Prop := match p with | xO _ => False | _ => True end. Lemma p_lt_2p (p : positive) : (p < p~0)%positive. Proof.
rewrite <- (Pos.add_diag p); lia. Qed. Lemma normalize_BER_odd (m : positive) (e : Z) : let '(mx, ex) := normalize_BER_finite m e in Podd mx. Proof. generalize e. induction m using positive_lt_ind. destruct m; try reflexivity. simpl. assert (H1 : (m < m~0)%positive) by apply p_lt_2p. intros. apply H with (y := m) (e := e0 + 1) in H1. apply H1. Qed. Lemma normalize_BER_eq (m : positive) : forall (e : Z), let '(mx, ex) := normalize_BER_finite m e in (mx, ex) = (m, e) \/ exists (d : positive), m = (mx * 2^d)%positive /\ e = ex - (Zpos d). Proof. clear Hmax valid_IEEE IEEE_float valid_IEEE_sumbool prec emax prec_gt_1 Hmax. clear r valid_BER_sumbool BER_finite_b2 valid_BER. induction m using positive_lt_ind. intros. destruct (normalize_BER_finite m e) as (mx,ex) eqn:NB. destruct m; try (simpl in NB; tuple_inversion; left; trivial). rewrite <- Pos.add_diag. simpl in NB. assert (H1 : (m < m~0)%positive) by apply p_lt_2p. apply H with (y := m) (e := e + 1) in H1. rewrite NB in H1. destruct H1. - tuple_inversion. right. exists (1%positive). replace (2^1)%positive with 2%positive by trivial. lia. - right. destruct H0 as [d H0]. exists (d + 1)%positive. replace (2^(d + 1))%positive with (2 * (2^d))%positive by (rewrite Positive_as_OT.add_1_r, Positive_as_DT.pow_succ_r; trivial). lia. Qed. Lemma normalize_BER_spec (m mx : positive) (e ex : Z) : (mx, ex) = normalize_BER_finite m e -> Podd mx /\ ((mx, ex) = (m, e) \/ exists (d : positive), m = (mx * 2^d)%positive /\ e = ex - (Zpos d)). Proof. intros NB. split. - generalize (normalize_BER_odd m e); intros. rewrite <- NB in H. apply H. - generalize (normalize_BER_eq m e); intros. rewrite <- NB in H. apply H. Qed. Lemma normalize_BER_Req (m : positive) (e : Z) : uncurry R_of_float (normalize_BER_finite m e) = R_of_float m e. Proof. unfold R_of_float, uncurry. break_let. rename p into mx, z into ex, Heqp into H. symmetry in H. apply normalize_BER_spec in H. destruct H as [H0 [H1|H2]]. - tuple_inversion. reflexivity. - destruct H2 as [d [H2 H3]]. subst. rewrite Pos2Z.inj_mul. rewrite Pos2Z.inj_pow. remember (ex - Z.pos d) as ex'. replace (Z.pos d) with (ex - ex') by lia. apply (Float_prop.F2R_change_exp radix2). lia. Qed. Let normalize_roundtrip (m : positive) (e : Z) := uncurry normalize_IEEE_finite (normalize_BER_finite m e). Lemma normalize_roundtrip_eq (m : positive) (e : Z) : uncurry R_of_float (normalize_roundtrip m e) = R_of_float m e. Proof. unfold normalize_roundtrip. rewrite <- normalize_BER_Req. destruct (normalize_BER_finite m e) as (mx,ex) eqn:NB. assert (uncurry normalize_IEEE_finite (mx, ex) = normalize_IEEE_finite mx ex) by (unfold uncurry; reflexivity). rewrite H. rewrite <- normalize_IEEE_eq. auto. Qed. Lemma digits2_size (p : positive) : Digits.digits2_pos p = Pos.size p. Proof. induction p; simpl; try rewrite IHp; reflexivity. Qed. Lemma normalize_roundtrip_valid (m : positive) (e : Z) : valid_IEEE m e = true -> uncurry valid_IEEE (normalize_roundtrip m e) = true. Proof. (* unfold everything, clean up *) unfold normalize_roundtrip. destruct (normalize_BER_finite m e) as (mx, ex) eqn:NB. destruct (uncurry normalize_IEEE_finite (mx, ex)) as (m', e') eqn:NI. unfold uncurry in *. unfold normalize_IEEE_finite, valid_IEEE, shl_align_fexp, shl_align, bounded, canonical_mantissa, uncurry, FLT.FLT_exp in *. clear r valid_IEEE valid_BER IEEE_float valid_IEEE_sumbool BER_finite_b2 valid_BER_sumbool normalize_roundtrip. (* remove bool *) intros H. apply andb_prop in H. destruct H as [H1 H2]. debool. symmetry in NB. apply normalize_BER_spec in NB. 
destruct NB as [H3 H4]. split_andb_goal; debool; rewrite digits2_size, Psize_log_inf, <- Zlog2_log_inf in *. - destruct H4. tuple_inversion. break_match_hyp; try (tuple_inversion; apply H1). + rewrite H1 in Heqz. rewrite Z.sub_diag in Heqz. inversion Heqz. + destruct H as [d [H0 H5]]. break_match_hyp. * tuple_inversion; lia. * tuple_inversion. exfalso. (* Heqz vs goal *) unfold Z.max in *. repeat break_match_hyp; simpl; debool; try lia; rewrite Pos2Z.inj_mul, Pos2Z.inj_pow, Z.log2_mul_pow2 in *; lia. * subst. remember (shift_pos p mx) as pm. remember (Z.max (Z.succ (Z.log2 (Z.pos mx)) + ex - prec) (3 - emax - prec)) as pe. inversion NI. clear NI. subst pe pm. apply Pos2Z.inj_iff in H0. rewrite shift_pos_correct in H0. rewrite Pos2Z.inj_mul, Pos2Z.inj_pow, Z.log2_mul_pow2 in *; try lia. rewrite Z.pow_pos_fold in H0. subst. rewrite <- H0. clear m' H0. rewrite Z.mul_comm. rewrite Z.log2_mul_pow2; lia. - destruct H4. + tuple_inversion. break_match_hyp. * tuple_inversion. apply H2. * tuple_inversion. apply H2. * lia. + destruct H as [d [H4 H5]]. break_match_hyp. * tuple_inversion. rewrite Pos2Z.inj_mul, Pos2Z.inj_pow, Z.log2_mul_pow2 in *; lia. * tuple_inversion. rewrite Pos2Z.inj_mul, Pos2Z.inj_pow, Z.log2_mul_pow2 in *; lia. * subst. remember (shift_pos p mx) as pm. remember (Z.max (Z.succ (Z.log2 (Z.pos mx)) + ex - prec) (3 - emax - prec)) as pe. inversion NI. clear NI. subst pe pm. apply Pos2Z.inj_iff in H0. rewrite shift_pos_correct in H0. rewrite Pos2Z.inj_mul, Pos2Z.inj_pow, Z.log2_mul_pow2 in *; lia. Qed. Theorem arithmetic_roundtrip (m : positive) (e : Z) : valid_IEEE m e = true -> normalize_roundtrip m e = (m, e). Proof. intros H. copy_apply normalize_roundtrip_valid H. unfold normalize_roundtrip, uncurry in *. destruct normalize_BER_finite as (mx,ex) eqn:NB. destruct normalize_IEEE_finite as (m',e') eqn:NI. apply R_of_valid_IEEE_inj. apply H0. apply H. assert (R_of_float m' e' = uncurry R_of_float (normalize_roundtrip m e)) by (unfold uncurry, normalize_roundtrip; rewrite NB, NI; reflexivity). rewrite H1. apply normalize_roundtrip_eq. Qed. Ltac inv_make_BER_finite := match goal with | [ H : make_BER_finite _ _ _ = Some _ |- _ ] => unfold make_BER_finite in H; destruct normalize_BER_finite; destruct valid_BER_sumbool; inversion H end. Ltac bcompare_nrefl := match goal with | [ H: Bcompare _ _ _ _ = _ |- _] => assert (H1 := H); rewrite -> Bcompare_swap in H1; rewrite -> H in H1; inversion H1 end. Theorem main_roundtrip (scaled : bool) (f : IEEE_float): roundtrip_option IEEE_float BER_float IEEE_float (BER_of_IEEE_exact) IEEE_of_BER_exact (float_eqb_nan_t) f. Proof. intros FPT. unfold bool_het_inverse'; simpl. break_match. - (* forward pass successful *) clear FPT. break_match. + (* backward pass successful *) destruct f; destruct b; simpl in *; repeat try some_inv; try auto. (* structural errors *) * unfold float_eqb_nan_t, Bcompare. repeat break_match; (repeat try some_inv); try compare_nrefl; try reflexivity; try inversion Heqc. * inv_make_BER_finite. * inv_make_BER_finite. * (* arithmetic_roundtrip comes in play *) destruct ((b =? 2) && (f =? 0))%bool; inversion Heqo0; clear H0. (* simplify forward conversions *) unfold make_BER_finite in *. destruct normalize_BER_finite eqn:NB. destruct valid_BER_sumbool; inversion Heqo. clear Heqo; subst. (* simplify backward conversions *) unfold make_IEEE_finite in *. destruct normalize_IEEE_finite eqn:NI. destruct valid_IEEE_sumbool; inversion Heqo0. clear Heqo0; subst. (* apply arithmetic roundtrip *) copy_apply (arithmetic_roundtrip m e) e0. 
unfold normalize_roundtrip in H. rewrite -> NB in H. simpl in H. rewrite -> NI in H. inversion H; subst. unfold float_eqb_nan_t, Bcompare. repeat break_match; (repeat try some_inv); try compare_nrefl; try reflexivity. + (* backward pass unsuccessful *) destruct f; simpl in Heqo; inversion Heqo; subst; inversion Heqo0. clear H0 H1; exfalso. unfold make_BER_finite in Heqo. destruct normalize_BER_finite eqn:NB, valid_BER_sumbool; inversion Heqo; clear Heqo. subst. unfold BER_finite_b2 in Heqo0. simpl in Heqo0. unfold make_IEEE_finite in Heqo0. destruct normalize_IEEE_finite eqn:NI. destruct valid_IEEE_sumbool; inversion Heqo0; clear Heqo0. copy_apply (arithmetic_roundtrip m e) e0. unfold normalize_roundtrip in H. rewrite -> NB in H. simpl in H. rewrite -> NI in H. inversion H; subst. rewrite e0 in e2; inversion e2. - (* forward pass unsuccessful *) inversion FPT. Qed. End Proof. End Base2.
%!TEX root = labo.tex \subsubsection*{Bridges and the Spanning Tree Protocol} Use the following resources to prepare yourself for this lab session: \begin{enumerate} \item Bridging: Read about LAN switching and bridging at \url{http://docwiki.cisco.com/wiki/Internetworking_Technology_Handbook#Bridging_and_Switching}. \item Transparent Bridges and Spanning Tree Protocol: Read about transparent bridges and the spanning tree protocol at \url{http://docwiki.cisco.com/wiki/Transparent_Bridging}. \item Bridge Protocol Data Unit (BPDU): Familiarize yourself with the format of bridge protocol data units (BPDUs) by reading the information at \url{http://ericleahy.com/index.php/implementing-spanning-tree-protocol-stp/}. \item Configuring a PC as a Bridge: Explore the website \url{http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge}, which describes the bridge-utils software package for configuring a Linux PC as a bridge. \end{enumerate} \newpage \subsection*{Prelab Questions} \begin{questions} \q{1}{What is the difference between a LAN switch/bridge and a router?} \q{2}{What is the difference between an Ethernet switch and an Ethernet hub? Which is more suitable for a network with a high traffic load, a switch or a hub? Explain.} \q{3}{What motivates the use of the term “transparent” in transparent bridges?} \q{4}{What role does the spanning tree protocol play when interconnecting LAN switches/bridges?} \q{5.a}{In the context of the IEEE 802.1d specification of the spanning tree protocol, define root bridge.} \q{5.b}{In the context of the IEEE 802.1d specification of the spanning tree protocol, define root port.} \q{5.c}{In the context of the IEEE 802.1d specification of the spanning tree protocol, define designated bridge.} \q{5.d}{In the context of the IEEE 802.1d specification of the spanning tree protocol, define designated port.} \q{5.e}{In the context of the IEEE 802.1d specification of the spanning tree protocol, define blocked port.} \q{6}{In the spanning tree protocol, how does a LAN switch/bridge decide which ports are in a blocking state?} \end{questions}
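As a minimal illustration of the bridge-utils package referenced above, a Linux PC can be configured as a bridge running the spanning tree protocol with commands of the following form (the interface names \texttt{eth0} and \texttt{eth1} are placeholders):

\begin{verbatim}
brctl addbr br0        # create bridge br0
brctl addif br0 eth0   # attach the first interface
brctl addif br0 eth1   # attach the second interface
brctl stp br0 on       # enable the spanning tree protocol
ip link set br0 up     # bring the bridge up
brctl showstp br0      # inspect port states and BPDU parameters
\end{verbatim}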
function a = complex_i ( ) %*****************************************************************************80 % %% COMPLEX_I returns the COMPLEX_I matrix. % % Formula: % % 0 1 % -1 0 % % Properties: % % A is integral: int ( A ) = A. % % A is anti-involutional: A * A = - I % % A * A * A * A = I % % Licensing: % % This code is distributed under the GNU LGPL license. % % Modified: % % 01 October 2007 % % Author: % % John Burkardt % % Parameters: % % Output, real A(2,2), the matrix. % a(1,1) = 0.0; a(1,2) = 1.0; a(2,1) = -1.0; a(2,2) = 0.0; return end
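% A hedged usage sketch checking the properties documented in COMPLEX_I
% above (anti-involution and fourth-power identity); run from a script:
%
%   A = complex_i ( );
%   disp ( A * A + eye ( 2 ) );          % A*A = -I, so expect zeros
%   disp ( A * A * A * A - eye ( 2 ) );  % A^4 = I, so expect zeros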
variables p q r : Prop variable ha : not p -> r variable hr : not p example : (not p \/ r) /\ (p \/ q) -> (q \/ r) := assume h1 : (not p \/ r) /\ (p \/ q), have h2 : not p \/ r, from and.left h1, have h3 : r, from ha hr, show q \/ r, from or.inr h3
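-- A hedged variant (sketch): the same implication proved without the ambient
-- hypotheses ha and hr, by case analysis on both disjuncts.
example (h1 : (not p \/ r) /\ (p \/ q)) : q \/ r :=
or.elim (and.left h1)
  (assume hnp : not p,
    or.elim (and.right h1)
      (assume hp : p, absurd hp hnp)
      (assume hq : q, or.inl hq))
  (assume hr2 : r, or.inr hr2)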
c c c ############################################################# c ## COPYRIGHT (C) 1999 by ## c ## Marina A. Vorobieva, Nina N. Sokolova & Jay W. Ponder ## c ## All Rights Reserved ## c ############################################################# c c ############################################################### c ## ## c ## program nucleic -- build a nucleic acid from sequence ## c ## ## c ############################################################### c c c "nucleic" builds the internal and Cartesian coordinates c of a polynucleotide from nucleic acid sequence and torsional c angle values for the nucleic acid backbone and side chains c c program nucleic implicit none include 'sizes.i' include 'atoms.i' include 'couple.i' include 'files.i' include 'iounit.i' include 'nucleo.i' include 'titles.i' integer i,natom,mode integer izmt,ixyz,iseq integer freeunit,trimtext logical exist character*120 seqfile character*120 intfile character*120 xyzfile c c c get the name to use for the output structure files c call initial call nextarg (filename,exist) if (.not. exist) then write (iout,10) 10 format (/,' Enter Name to be Used for Output Files : ',$) read (input,20) filename 20 format (a120) end if call basefile (filename) c c get the title line for the output files c write (iout,30) 30 format (/,' Enter Title : ',$) read (input,40) title 40 format (a120) ltitle = trimtext (title) c c read the keyfile and force field parameter file c call getkey call field c c get the sequence, build a Z-matrix, convert to Cartesians c call getseqn call nucchain call connect call molecule call makexyz c c perform the alignment of the strands of a double helix c if (dblhlx) then call watson call inertia (2) end if c c remove dummy atoms and set undefined atoms to type zero c natom = n do i = natom, 1, -1 if (type(i) .eq. 0) call delete (i) if (type(i) .lt. 
0) type(i) = 0 end do c c convert to internal and Cartesian coordinates c mode = 0 call makeint (mode) call makexyz c c write out a nucleic acid sequence file c iseq = freeunit () seqfile = filename(1:leng)//'.seq' call version (seqfile,'new') open (unit=iseq,file=seqfile,status='new') call prtseq (iseq) close (unit=iseq) c c write out an internal coordinates file c izmt = freeunit () intfile = filename(1:leng)//'.int' call version (intfile,'new') open (unit=izmt,file=intfile,status='new') call prtint (izmt) close (unit=izmt) c c write out a Cartesian coordinates file c ixyz = freeunit () xyzfile = filename(1:leng)//'.xyz' call version (xyzfile,'new') open (unit=ixyz,file=xyzfile,status='new') call prtxyz (ixyz) close (unit=ixyz) end c c c ################################################################ c ## ## c ## subroutine getseqn -- nucleic acid sequence and angles ## c ## ## c ################################################################ c c c "getseqn" asks the user for the nucleotide sequence and c torsional angle values needed to define a nucleic acid c c subroutine getseqn implicit none include 'sizes.i' include 'iounit.i' include 'nucleo.i' include 'resdue.i' include 'sequen.i' integer i,j,k,next integer start,stop integer length,trimtext logical done logical, allocatable :: purine(:) character*1 answer character*1 ucase(26) character*3 name,resname character*120 record character*120 string data ucase / 'A','B','C','D','E','F','G','H','I','J','K','L', & 'M','N','O','P','Q','R','S','T','U','V','W','X', & 'Y','Z' / c c c choose to generate either an A-, B- or Z-form helix c write (iout,10) 10 format (/,' Enter A-, B- or Z-Form Helix for the Structure', & ' [B] : ',$) read (input,20) record 20 format (a120) call upcase (record) next = 1 call getword (record,answer,next) hlxform = 'B' if (answer .eq. 'A') hlxform = 'A' if (answer .eq. 'Z') hlxform = 'Z' c c provide a header to explain the method of sequence input c write (iout,30) 30 format (/,' Enter One Nucleotide per Line, 5'' to 3'': ', & ' Give PDB Residue Code,', & /,' followed by Backbone Torsions (6F) and', & ' Glycosidic Torsion (1F)', & //,' Use Residue=MOL to Begin a New Strand,', & ' Residue=<CR> to End Input') c c initially, assume that only a single strand is present c nchain = 1 ichain(1,1) = 1 chnnam(1) = ' ' c c get the nucleotide sequence data and dihedral angle values c i = 0 done = .false. do while (.not. done) i = i + 1 do j = 1, 6 bkbone(j,i) = 0.0d0 end do glyco(i) = 0.0d0 pucker(i) = 0 write (iout,40) i 40 format (/,' Enter Residue',i4,' : ',$) read (input,50) record 50 format (a120) call upcase (record) next = 1 call gettext (record,name,next) length = trimtext (name) string = record(next:120) read (string,*,err=60,end=60) (bkbone(j,i),j=1,6),glyco(i) 60 continue c c process and store the current nucleotide type c if (name .eq. 'MOL') then i = i - 1 ichain(2,nchain) = i nchain = nchain + 1 ichain(1,nchain) = i + 1 else if (name .eq. ' ') then done = .true. nseq = i - 1 ichain(2,nchain) = nseq else seq(i) = nuclz(maxnuc) seqtyp(i) = 0 if (length .eq. 1) then do j = 1, maxnuc if (name(1:1) .eq. nuclz1(j)) then seq(i) = nuclz(j) seqtyp(i) = j end if end do else do j = 1, maxnuc if (name .eq. nuclz(j)) then seq(i) = nuclz(j) seqtyp(i) = j end if end do end if if (seqtyp(i) .eq. 0) then i = i - 1 write (iout,70) name 70 format (/,' GETSEQN -- Nucleotide Type ',a3, & ' is Not Supported') end if end if end if end do c c offer the option to construct an idealized double helix c dblhlx = .false. if (nchain .eq. 
1) then write (iout,80) 80 format (/,' Build a Double Helix using Complimentary Bases', & ' [N] : ',$) read (input,90) record 90 format (a120) next = 1 call gettext (record,answer,next) call upcase (answer) if (answer .eq. 'Y') dblhlx = .true. else if (nchain .eq. 2) then write (iout,100) 100 format (/,' Combine the Two Single Strands into Double Helix', & ' [Y] : ',$) read (input,110) record 110 format (a120) next = 1 call gettext (record,answer,next) call upcase (answer) if (answer .ne. 'N') dblhlx = .true. end if c c build a second strand as the reverse-complement sequence c if (nchain.eq.1 .and. dblhlx) then start = 1 stop = nseq resname = nuclz(seqtyp(1)) if (resname.eq.'MP ' .or. resname.eq.'DP ' & .or. resname.eq.'TP ') then k = nseq + 1 seq(k) = seq(1) seqtyp(k) = seqtyp(1) start = 2 end if resname = nuclz(seqtyp(nseq)) if (resname.eq.'MP ' .or. resname.eq.'DP ' & .or. resname.eq.'TP ') then k = 2 * nseq seq(k) = seq(nseq) seqtyp(k) = seqtyp(nseq) stop = nseq - 1 end if do i = start, stop resname = nuclz(seqtyp(i)) if (resname .eq. 'A ') then resname = 'U ' else if (resname .eq. 'G ') then resname = 'C ' else if (resname .eq. 'C ') then resname = 'G ' else if (resname .eq. 'U ') then resname = 'A ' else if (resname .eq. 'DA ') then resname = 'DT ' else if (resname .eq. 'DG ') then resname = 'DC ' else if (resname .eq. 'DC ') then resname = 'DG ' else if (resname .eq. 'DT ') then resname = 'DA ' end if k = nseq + stop + start - i do j = 1, maxnuc if (resname .eq. nuclz(j)) then seq(k) = nuclz(j) seqtyp(k) = j end if end do end do do i = 1, nseq k = nseq + i do j = 1, 6 bkbone(j,k) = bkbone(j,i) end do glyco(k) = glyco(i) pucker(k) = pucker(i) end do nchain = 2 nseq = 2 * nseq ichain(1,nchain) = nseq/2 + 1 ichain(2,nchain) = nseq end if c c set chain identifiers if multiple chains are present c if (nchain .gt. 1) then do i = 1, nchain chnnam(i) = ucase(i) end do end if c c perform dynamic allocation of some local arrays c allocate (purine(nseq)) c c set the nucleic acid base and sugar structural type c do i = 1, nseq resname = nuclz(seqtyp(i)) purine(i) = .false. if (resname .eq. 'A ') purine(i) = .true. if (resname .eq. 'G ') purine(i) = .true. if (resname .eq. 'DA ') purine(i) = .true. if (resname .eq. 'DG ') purine(i) = .true. deoxy(i) = .false. if (resname .eq. 'DA ') deoxy(i) = .true. if (resname .eq. 'DG ') deoxy(i) = .true. if (resname .eq. 'DC ') deoxy(i) = .true. if (resname .eq. 'DT ') deoxy(i) = .true. end do c c set the backbone and glycosidic torsions and sugar pucker c do i = 1, nseq done = .false. do j = 1, 6 if (bkbone(j,i) .ne. 0.0d0) done = .true. end do if (glyco(i) .ne. 0.0d0) done = .true. if (pucker(i) .ne. 0) done = .true. if (.not. done) then if (hlxform .eq. 'A') then bkbone(1,i) = -51.8d0 bkbone(2,i) = 174.8d0 bkbone(3,i) = 41.7d0 bkbone(4,i) = 79.1d0 bkbone(5,i) = -148.0d0 bkbone(6,i) = -75.0d0 glyco(i) = -157.2d0 pucker(i) = 3 else if (hlxform .eq. 'B') then bkbone(1,i) = -46.1d0 bkbone(2,i) = -146.5d0 bkbone(3,i) = 36.4d0 bkbone(4,i) = 156.5d0 bkbone(5,i) = 154.7d0 bkbone(6,i) = -95.6d0 glyco(i) = -97.8d0 pucker(i) = 2 else if (hlxform .eq. 
'Z') then if (purine(i)) then bkbone(1,i) = 48.0d0 bkbone(2,i) = 179.0d0 bkbone(3,i) = -170.0d0 bkbone(4,i) = 100.0d0 bkbone(5,i) = -104.0d0 bkbone(6,i) = -69.0d0 glyco(i) = 67.0d0 pucker(i) = 3 else bkbone(1,i) = -137.0d0 bkbone(2,i) = -139.0d0 bkbone(3,i) = 55.0d0 bkbone(4,i) = 138.0d0 bkbone(5,i) = -94.0d0 bkbone(6,i) = 80.0d0 glyco(i) = -159.0d0 pucker(i) = 1 end if end if end if end do c c perform deallocation of some local arrays c deallocate (purine) return end c c c ############################################################## c ## ## c ## subroutine nucchain -- build polynucleotide backbone ## c ## ## c ############################################################## c c c "nucchain" builds up the internal coordinates for a nucleic c acid sequence from the sugar type, backbone and glycosidic c torsional values c c subroutine nucchain implicit none include 'sizes.i' include 'atoms.i' include 'nucleo.i' include 'resdue.i' include 'sequen.i' integer i,k,m integer poi,o2i,c1i integer c2i,c3i,c4i integer c5i,o3i,o4i,o5i integer phtyp,ophtyp integer ostyp,ottyp logical single,cap3,cap5 character*3 resname c c c initialize the atom counter to the first atom c n = 1 c c check for single residue and 3'- or 5'-phosphate caps c do m = 1, nchain single = .false. cap5 = .false. cap3 = .false. if (ichain(1,m) .eq. ichain(2,m)) single = .true. i = ichain(1,m) k = seqtyp(i) resname = nuclz(k) if (resname.eq.'MP ' .or. resname.eq.'DP ' & .or. resname.eq.'TP ') cap5 = .true. i = ichain(2,m) k = seqtyp(i) resname = nuclz(k) if (resname.eq.'MP ' .or. resname.eq.'DP ' & .or. resname.eq.'TP ') cap3 = .true. c c build the first residue or a phosphate capping group c i = ichain(1,m) k = seqtyp(i) resname = nuclz(k) if (resname .eq. 'MP ') then if (deoxy(i+1)) then ostyp = 1246 phtyp = 1247 ophtyp = 1248 else ostyp = 1234 phtyp = 1235 ophtyp = 1236 end if if (m .eq. 1) then o3i = n call zatom (ophtyp,0.0d0,0.0d0,0.0d0,0,0,0,0) poi = n call zatom (phtyp,1.52d0,0.0d0,0.0d0,o3i,0,0,0) call zatom (ophtyp,1.52d0,113.0d0,0.0d0,poi,o3i,0,0) else o3i = n call zatom (ophtyp,30.0d0,150.0d0,180.0d0,n-1,n-2,n-3,0) call zatom (-2,0.0d0,0.0d0,0.0d0,n-2,n-1,0,0) poi = n call zatom (phtyp,1.52d0,150.0d0,180.0d0,o3i,n-2,n-3,0) call zatom (ophtyp,1.52d0,113.0d0,180.0d0,poi,o3i,n-3,0) end if call zatom (ophtyp,1.52d0,113.0d0,113.0d0,poi,o3i,n-1,1) o5i = n call zatom (ostyp,1.63d0,106.0d0,106.0d0,poi,o3i,n-2,-1) else if (resname .eq. 'DP ') then continue else if (resname .eq. 'TP ') then continue else if (deoxy(i)) then ottyp = 1244 else ottyp = 1232 end if if (m .eq. 1) then o5i = n call zatom (ottyp,0.0d0,0.0d0,0.0d0,0,0,0,0) c5i = n call zatom (c5typ(k),1.44d0,0.0d0,0.0d0,o5i,0,0,0) c4i = n call zatom (c4typ(k),1.52d0,110.1d0,0.0d0,c5i,o5i,0,0) else o5i = n call zatom (ottyp,0.96d0,150.0d0,180.0d0,n-1,n-2,n-3,0) call zatom (-2,0.0d0,0.0d0,0.0d0,n-2,n-1,0,0) c5i = n call zatom (c5typ(k),1.44d0,119.0d0,180.0d0, & o5i,n-2,n-3,0) c4i = n call zatom (c4typ(k),1.52d0,110.1d0,180.0d0, & c5i,o5i,n-3,0) end if o4i = n call zatom (o4typ(k),1.46d0,108.9d0,bkbone(3,i)-120.0d0, & c4i,c5i,o5i,0) c1i = n if (pucker(i) .eq. 3) then call zatom (c1typ(k),1.42d0,109.8d0,145.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 2) then call zatom (c1typ(k),1.42d0,109.8d0,107.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 
1) then call zatom (c1typ(k),1.42d0,109.8d0,140.0d0, & o4i,c4i,c5i,0) end if c3i = n call zatom (c3typ(k),1.53d0,115.9d0,bkbone(3,i), & c4i,c5i,o5i,0) c2i = n call zatom (c2typ(k),1.53d0,102.4d0,bkbone(4,i)+120.0d0, & c3i,c4i,c5i,0) call zatom (-1,0.0d0,0.0d0,0.0d0,c1i,c2i,0,0) o3i = n if (deoxy(i)) then if (single) then call zatom (1249,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) else call zatom (o3typ(k),1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) end if else if (single) then call zatom (1237,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) else call zatom (o3typ(k),1.42d0,112.1 d0,bkbone(4,i), & c3i,c4i,c5i,0) end if o2i = n call zatom (o2typ(k),1.43d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) end if call zatom (h5ttyp(k),0.96d0,107.0d0,180.0d0, & o5i,c5i,c4i,0) call zatom (h51typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,1) call zatom (h52typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,-1) call zatom (h4typ(k),1.09d0,109.5d0,109.5d0,c4i,c5i,c3i,-1) if (pucker(i) .eq. 3) then call zatom (h1typ(k),1.09d0,109.5d0,120.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 2) then call zatom (h1typ(k),1.09d0,109.5d0,115.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 1) then call zatom (h1typ(k),1.09d0,109.5d0,90.0d0, & c1i,o4i,c2i,-1) end if call zatom (h3typ(k),1.09d0,109.5d0,109.5d0,c3i,c4i,c2i,-1) call zatom (h21typ(k),1.09d0,109.5d0,109.5d0,c2i,c3i,c1i,-1) if (deoxy(i)) then call zatom (h22typ(k),1.09d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) else call zatom (h22typ(k),0.96d0,107.0d0,180.0d0, & o2i,c2i,c3i,0) end if if (single) then call zatom (h3ttyp(k),0.96d0,115.0d0,180.0d0, & o3i,c3i,c4i,0) end if call nucbase (resname,i,c1i,o4i,c2i) end if c c build atoms for residues in the middle of the chain c do i = ichain(1,m)+1, ichain(2,m)-1 k = seqtyp(i) resname = nuclz(k) if (cap5) then cap5 = .false. else poi = n call zatom (ptyp(k),1.60d0,119.0d0,bkbone(5,i-1), & o3i,c3i,c4i,0) call zatom (optyp(k),1.48d0,109.0d0, & bkbone(6,i-1)+120.0d0,poi,o3i,c3i,0) call zatom (optyp(k),1.48d0,109.0d0, & bkbone(6,i-1)-120.0d0,poi,o3i,c3i,0) o5i = n call zatom (o5typ(k),1.60d0,101.8d0,bkbone(6,i-1), & poi,o3i,c3i,0) end if c5i = n call zatom (c5typ(k),1.44d0,119.0d0,bkbone(1,i), & o5i,poi,o3i,0) c4i = n call zatom (c4typ(k),1.52d0,110.1d0,bkbone(2,i), & c5i,o5i,poi,0) o4i = n call zatom (o4typ(k),1.46d0,108.9d0,bkbone(3,i)-120.0d0, & c4i,c5i,o5i,0) c1i = n if (pucker(i) .eq. 3) then call zatom (c1typ(k),1.42d0,109.8d0,145.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 2) then call zatom (c1typ(k),1.42d0,109.8d0,107.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 1) then call zatom (c1typ(k),1.42d0,109.8d0,140.0d0, & o4i,c4i,c5i,0) end if c3i = n call zatom (c3typ(k),1.53d0,115.9d0,bkbone(3,i), & c4i,c5i,o5i,0) c2i = n call zatom (c2typ(k),1.53d0,102.4d0,bkbone(4,i)+120.0d0, & c3i,c4i,c5i,0) call zatom (-1,0.0d0,0.0d0,0.0d0,c1i,c2i,0,0) o3i = n if (deoxy(i)) then if (cap3) then call zatom (1251,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) else call zatom (o3typ(k),1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) end if else if (cap3) then call zatom (1239,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) else call zatom (o3typ(k),1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) end if o2i = n call zatom (o2typ(k),1.43d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) end if call zatom (h51typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,1) call zatom (h52typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,-1) call zatom (h4typ(k),1.09d0,109.5d0,109.5d0,c4i,c5i,c3i,-1) if (pucker(i) .eq. 3) then call zatom (h1typ(k),1.09d0,109.5d0,120.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 
2) then call zatom (h1typ(k),1.09d0,109.5d0,115.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 1) then call zatom (h1typ(k),1.09d0,109.5d0,90.0d0, & c1i,o4i,c2i,-1) end if call zatom (h3typ(k),1.09d0,109.5d0,109.5d0,c3i,c4i,c2i,-1) call zatom (h21typ(k),1.09d0,109.5d0,109.5d0,c2i,c3i,c1i,-1) if (deoxy(i)) then call zatom (h22typ(k),1.09d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) else call zatom (h22typ(k),0.96d0,107.0d0,180.0d0, & o2i,c2i,c3i,0) end if call nucbase (resname,i,c1i,o4i,c2i) end do c c build the last residue or a phosphate capping group c i = ichain(2,m) k = seqtyp(i) resname = nuclz(k) if (single) then continue else if (resname .eq. 'MP ') then poi = n if (deoxy(i-1)) then call zatom (1252,1.63d0,119.0d0,bkbone(5,i-1), & o3i,c3i,c4i,0) call zatom (1253,1.52d0,106.0d0,60.0d0,poi,o3i,c3i,0) call zatom (1253,1.52d0,106.0d0,-60.0d0,poi,o3i,c3i,0) call zatom (1253,1.52d0,106.0d0,180.0d0,poi,o3i,c3i,0) else call zatom (1240,1.63d0,119.0d0,bkbone(5,i-1), & o3i,c3i,c4i,0) call zatom (1241,1.52d0,106.0d0,60.0d0,poi,o3i,c3i,0) call zatom (1241,1.52d0,106.0d0,-60.0d0,poi,o3i,c3i,0) call zatom (1241,1.52d0,106.0d0,180.0d0,poi,o3i,c3i,0) end if else if (resname .eq. 'DP ') then continue else if (resname .eq. 'TP ') then continue else if (cap5) then cap5 = .false. else poi = n call zatom (ptyp(k),1.60d0,119.0d0,bkbone(5,i-1), & o3i,c3i,c4i,0) call zatom (optyp(k),1.48d0,109.0d0, & bkbone(6,i-1)+120.0d0,poi,o3i,c3i,0) call zatom (optyp(k),1.48d0,109.0d0, & bkbone(6,i-1)-120.0d0,poi,o3i,c3i,0) o5i = n call zatom (o5typ(k),1.60d0,101.8d0,bkbone(6,i-1), & poi,o3i,c3i,0) end if c5i = n call zatom (c5typ(k),1.44d0,119.0d0,bkbone(1,i), & o5i,poi,o3i,0) c4i = n call zatom (c4typ(k),1.52d0,110.1d0,bkbone(2,i), & c5i,o5i,poi,0) o4i = n call zatom (o4typ(k),1.46d0,108.9d0,bkbone(3,i)-120.0d0, & c4i,c5i,o5i,0) c1i = n if (pucker(i) .eq. 3) then call zatom (c1typ(k),1.42d0,109.8d0,145.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 2) then call zatom (c1typ(k),1.42d0,109.8d0,107.0d0, & o4i,c4i,c5i,0) else if (pucker(i) .eq. 1) then call zatom (c1typ(k),1.42d0,109.8d0,140.0d0, & o4i,c4i,c5i,0) end if c3i = n call zatom (c3typ(k),1.53d0,115.9d0,bkbone(3,i), & c4i,c5i,o5i,0) c2i = n call zatom (c2typ(k),1.53d0,102.4d0,bkbone(4,i)+120.0d0, & c3i,c4i,c5i,0) call zatom (-1,0.0d0,0.0d0,0.0d0,c1i,c2i,0,0) o3i = n if (deoxy(i)) then call zatom (1249,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) else call zatom (1237,1.42d0,112.1d0,bkbone(4,i), & c3i,c4i,c5i,0) o2i = n call zatom (o2typ(k),1.43d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) end if call zatom (h51typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,1) call zatom (h52typ(k),1.09d0,109.5d0,109.5d0,c5i,o5i,c4i,-1) call zatom (h4typ(k),1.09d0,109.5d0,109.5d0,c4i,c5i,c3i,-1) if (pucker(i) .eq. 3) then call zatom (h1typ(k),1.09d0,109.5d0,120.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 2) then call zatom (h1typ(k),1.09d0,109.5d0,115.0d0, & c1i,o4i,c2i,-1) else if (pucker(i) .eq. 
1) then call zatom (h1typ(k),1.09d0,109.5d0,90.0d0, & c1i,o4i,c2i,-1) end if call zatom (h3typ(k),1.09d0,109.5d0,109.5d0,c3i,c4i,c2i,-1) call zatom (h21typ(k),1.09d0,109.5d0,109.5d0,c2i,c3i,c1i,-1) if (deoxy(i)) then call zatom (h22typ(k),1.09d0,109.5d0,109.5d0, & c2i,c3i,c1i,1) else call zatom (h22typ(k),0.96d0,107.0d0,180.0d0, & o2i,c2i,c3i,0) end if call zatom (h3ttyp(k),0.96d0,115.0d0,180.0d0,o3i,c3i,c4i,0) call nucbase (resname,i,c1i,o4i,c2i) end if end do c c finally, set the total number of atoms c n = n - 1 return end c c c ################################################################ c ## ## c ## subroutine nucbase -- build nucleotide base side chain ## c ## ## c ################################################################ c c c "nucbase" builds the side chain for a single nucleotide base c in terms of internal coordinates c c resname 3-letter name of current nucleotide residue c i number of the current nucleotide residue c c1i atom number of carbon C1' in residue i c o4i atom number of oxygen O4' in residue i c c2i atom number of carbon C2' in residue i c c literature references: c c R. Lavery, K. Zakrzewska, "Base and Base Pair Morphologies, c Helical Parameters, and Definitions" in "Oxford Handbook of c Nucleic Acid Structure", S. Neidel, Editor, Oxford University c Press, 1999, pages 40-42 c c W. Saenger, "Principles of Nucleic Acid Structure", Springer- c Verlag, 1984, page 52 c c subroutine nucbase (resname,i,c1i,o4i,c2i) implicit none include 'sizes.i' include 'atoms.i' include 'nucleo.i' integer i,c1i,o4i,c2i character*3 resname c c c adenine in adenosine residue (A) c if (resname .eq. 'A ') then call zatom (1017,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1021,1.37d0,128.4d0,glyco(i)+180.0d0, & n-1,c1i,o4i,0) call zatom (1020,1.30d0,113.8d0,180.0d0,n-1,n-2,c1i,0) call zatom (1019,1.39d0,104.0d0,0.0d0,n-1,n-2,n-3,0) call zatom (1025,1.40d0,132.4d0,180.0d0,n-1,n-2,n-3,0) call zatom (1027,1.34d0,123.5d0,0.0d0,n-1,n-2,n-3,0) call zatom (1024,1.35d0,117.4d0,180.0d0,n-2,n-3,n-4,0) call zatom (1023,1.33d0,118.8d0,0.0d0,n-1,n-3,n-4,0) call zatom (1022,1.32d0,129.2d0,0.0d0,n-1,n-2,n-4,0) call zatom (1018,1.35d0,110.9d0,0.0d0,n-1,n-2,n-3,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-7,0,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-10,0,0) call zatom (1030,1.08d0,123.1d0,180.0d0,n-9,n-8,n-7,0) call zatom (1028,1.00d0,120.0d0,180.0d0,n-6,n-7,n-8,0) call zatom (1029,1.00d0,120.0d0,0.0d0,n-7,n-8,n-9,0) call zatom (1026,1.08d0,115.4d0,180.0d0,n-6,n-5,n-4,0) c c guanine in guanosine residue (G) c else if (resname .eq. 
'G ') then call zatom (1047,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1051,1.38d0,128.4d0,glyco(i)+180.0d0, & n-1,c1i,o4i,0) call zatom (1050,1.31d0,114.0d0,180.0d0,n-1,n-2,c1i,0) call zatom (1049,1.39d0,103.8d0,0.0d0,n-1,n-2,n-3,0) call zatom (1055,1.40d0,130.1d0,180.0d0,n-1,n-2,n-3,0) call zatom (1060,1.23d0,128.8d0,0.0d0,n-1,n-2,n-3,0) call zatom (1054,1.40d0,111.4d0,180.0d0,n-2,n-3,n-4,0) call zatom (1053,1.38d0,125.2d0,0.0d0,n-1,n-3,n-4,0) call zatom (1057,1.34d0,116.1d0,180.0d0,n-1,n-2,n-4,0) call zatom (1052,1.33d0,123.3d0,0.0d0,n-2,n-3,n-4,0) call zatom (1048,1.36d0,112.3d0,0.0d0,n-1,n-3,n-4,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-8,0,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-11,0,0) call zatom (1061,1.08d0,123.0d0,180.0d0,n-10,n-9,n-8,0) call zatom (1056,1.00d0,117.4d0,180.0d0,n-6,n-8,n-9,0) call zatom (1058,1.00d0,120.0d0,0.0d0,n-5,n-6,n-7,0) call zatom (1059,1.00d0,120.0d0,180.0d0,n-6,n-7,n-8,0) c c cytosine in cytidine residue (C) c else if (resname .eq. 'C ') then call zatom (1078,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1079,1.37d0,117.8d0,glyco(i),n-1,c1i,o4i,0) call zatom (1084,1.24d0,118.9d0,0.0d0,n-1,n-2,c1i,0) call zatom (1080,1.38d0,118.7d0,180.0d0,n-2,n-3,c1i,0) call zatom (1081,1.34d0,120.6d0,0.0d0,n-1,n-3,n-4,0) call zatom (1085,1.32d0,118.3d0,180.0d0,n-1,n-2,n-4,0) call zatom (1082,1.43d0,121.6d0,0.0d0,n-2,n-3,n-5,0) call zatom (1083,1.36d0,116.9d0,0.0d0,n-1,n-3,n-4,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-8,0,0) call zatom (1086,1.00d0,120.0d0,0.0d0,n-3,n-4,n-5,0) call zatom (1087,1.00d0,120.0d0,180.0d0,n-4,n-5,n-6,0) call zatom (1088,1.08d0,121.6d0,180.0d0,n-4,n-6,n-7,0) call zatom (1089,1.08d0,119.5d0,180.0d0,n-4,n-5,n-7,0) c c uracil in uridine residue (U) c else if (resname .eq. 'U ') then call zatom (1106,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1107,1.38d0,117.1d0,glyco(i),n-1,c1i,o4i,0) call zatom (1112,1.22d0,123.2d0,0.0d0,n-1,n-2,c1i,0) call zatom (1108,1.37d0,114.8d0,180.0d0,n-2,n-3,c1i,0) call zatom (1109,1.38d0,127.0d0,0.0d0,n-1,n-3,n-4,0) call zatom (1114,1.23d0,119.8d0,180.0d0,n-1,n-2,n-4,0) call zatom (1110,1.44d0,114.7d0,0.0d0,n-2,n-3,n-5,0) call zatom (1111,1.34d0,119.2d0,0.0d0,n-1,n-3,n-4,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-8,0,0) call zatom (1113,1.00d0,116.5d0,180.0d0,n-5,n-7,n-8,0) call zatom (1115,1.08d0,120.4d0,180.0d0,n-3,n-5,n-6,0) call zatom (1116,1.08d0,118.6d0,180.0d0,n-3,n-4,n-6,0) c c adenine in deoxyadenosine residue (DA) c else if (resname .eq. 'DA ') then call zatom (1132,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1136,1.37d0,128.4d0,glyco(i)+180.0d0, & n-1,c1i,o4i,0) call zatom (1135,1.30d0,113.8d0,180.0d0,n-1,n-2,c1i,0) call zatom (1134,1.39d0,104.0d0,0.0d0,n-1,n-2,n-3,0) call zatom (1140,1.40d0,132.4d0,180.0d0,n-1,n-2,n-3,0) call zatom (1142,1.34d0,123.5d0,0.0d0,n-1,n-2,n-3,0) call zatom (1139,1.35d0,117.4d0,180.0d0,n-2,n-3,n-4,0) call zatom (1138,1.33d0,118.8d0,0.0d0,n-1,n-3,n-4,0) call zatom (1137,1.32d0,129.2d0,0.0d0,n-1,n-2,n-4,0) call zatom (1133,1.35d0,110.9d0,0.0d0,n-1,n-2,n-3,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-7,0,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-10,0,0) call zatom (1145,1.08d0,123.1d0,180.0d0,n-9,n-8,n-7,0) call zatom (1143,1.00d0,120.0d0,180.0d0,n-6,n-7,n-8,0) call zatom (1144,1.00d0,120.0d0,0.0d0,n-7,n-8,n-9,0) call zatom (1141,1.08d0,115.4d0,180.0d0,n-6,n-5,n-4,0) c c guanine in deoxyguanosine residue (DG) c else if (resname .eq. 
'DG ') then call zatom (1161,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1165,1.38d0,128.4d0,glyco(i)+180.0d0, & n-1,c1i,o4i,0) call zatom (1164,1.31d0,114.0d0,180.0d0,n-1,n-2,c1i,0) call zatom (1163,1.39d0,103.8d0,0.0d0,n-1,n-2,n-3,0) call zatom (1169,1.40d0,130.1d0,180.0d0,n-1,n-2,n-3,0) call zatom (1174,1.23d0,128.8d0,0.0d0,n-1,n-2,n-3,0) call zatom (1168,1.40d0,111.4d0,180.0d0,n-2,n-3,n-4,0) call zatom (1167,1.38d0,125.2d0,0.0d0,n-1,n-3,n-4,0) call zatom (1171,1.34d0,116.1d0,180.0d0,n-1,n-2,n-4,0) call zatom (1166,1.33d0,123.3d0,0.0d0,n-2,n-3,n-4,0) call zatom (1162,1.36d0,112.3d0,0.0d0,n-1,n-3,n-4,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-8,0,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-11,0,0) call zatom (1175,1.08d0,123.0d0,180.0d0,n-10,n-9,n-8,0) call zatom (1170,1.00d0,117.4d0,180.0d0,n-6,n-8,n-9,0) call zatom (1172,1.00d0,120.0d0,0.0d0,n-5,n-6,n-7,0) call zatom (1173,1.00d0,120.0d0,180.0d0,n-6,n-7,n-8,0) c c cytosine in deoxycytidine residue (DC) c else if (resname .eq. 'DC ') then call zatom (1191,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1192,1.37d0,117.8d0,glyco(i),n-1,c1i,o4i,0) call zatom (1197,1.24d0,118.9d0,0.0d0,n-1,n-2,c1i,0) call zatom (1193,1.38d0,118.7d0,180.0d0,n-2,n-3,c1i,0) call zatom (1194,1.34d0,120.6d0,0.0d0,n-1,n-3,n-4,0) call zatom (1198,1.32d0,118.3d0,180.0d0,n-1,n-2,n-4,0) call zatom (1195,1.43d0,121.6d0,0.0d0,n-2,n-3,n-5,0) call zatom (1196,1.36d0,116.9d0,0.0d0,n-1,n-3,n-4,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-8,0,0) call zatom (1199,1.00d0,120.0d0,0.0d0,n-3,n-4,n-5,0) call zatom (1200,1.00d0,120.0d0,180.0d0,n-4,n-5,n-6,0) call zatom (1201,1.08d0,121.6d0,180.0d0,n-4,n-6,n-7,0) call zatom (1202,1.08d0,119.5d0,180.0d0,n-4,n-5,n-7,0) c c thymine in deoxythymidine residue (DT) c else if (resname .eq. 'DT ') then call zatom (1218,1.48d0,108.1d0,113.7d0,c1i,o4i,c2i,1) call zatom (1219,1.37d0,117.1d0,glyco(i),n-1,c1i,o4i,0) call zatom (1224,1.22d0,122.9d0,0.0d0,n-1,n-2,c1i,0) call zatom (1220,1.38d0,115.4d0,180.0d0,n-2,n-3,c1i,0) call zatom (1221,1.38d0,126.4d0,0.0d0,n-1,n-3,n-4,0) call zatom (1226,1.23d0,120.5d0,180.0d0,n-1,n-2,n-4,0) call zatom (1222,1.44d0,114.1d0,0.0d0,n-2,n-3,n-5,0) call zatom (1227,1.50d0,117.5d0,180.0d0,n-1,n-3,n-4,0) call zatom (1223,1.34d0,120.8d0,0.0d0,n-2,n-4,n-5,0) call zatom (-1,0.0d0,0.0d0,0.0d0,n-1,n-9,0,0) call zatom (1225,1.00d0,116.8d0,180.0d0,n-6,n-8,n-9,0) call zatom (1228,1.09d0,109.5d0,0.0d0,n-3,n-4,n-6,0) call zatom (1228,1.09d0,109.5d0,109.5d0,n-4,n-5,n-1,1) call zatom (1228,1.09d0,109.5d0,109.5d0,n-5,n-6,n-2,-1) call zatom (1229,1.08d0,119.4d0,180.0d0,n-5,n-7,n-9,0) end if return end c c c ############################################################## c ## ## c ## subroutine watson -- align strands of a double helix ## c ## ## c ############################################################## c c c "watson" uses a rigid body optimization to approximately c align the paired strands of a nucleic acid double helix c c subroutine watson implicit none include 'sizes.i' include 'atoms.i' include 'couple.i' include 'group.i' include 'inform.i' include 'katoms.i' include 'kgeoms.i' include 'molcul.i' include 'nucleo.i' include 'output.i' include 'potent.i' include 'resdue.i' include 'rigid.i' include 'sequen.i' include 'usage.i' integer i,j,nvar integer ia,ib,ic,id integer start,stop integer kseq,offset integer nbase,nphos integer, allocatable :: iphos(:) integer, allocatable :: root(:) integer, allocatable :: list(:,:) real*8 minimum,grdmin real*8 watson1,sum,dist real*8, allocatable :: xx(:) character*3 resname 
external watson1,optsave c c c perform dynamic allocation of some pointer arrays c if (associated(iuse)) deallocate (iuse) if (associated(use)) deallocate (use) allocate (iuse(n)) allocate (use(0:n)) c c set all atoms to be active during energy evaluations c nuse = n do i = 1, n use(i) = .true. end do c c only geometric restraints will be used in optimization c call potoff use_geom = .true. c c set the default values for the restraint variables c npfix = 0 ndfix = 0 ntfix = 0 ngfix = 0 nchir = 0 use_basin = .false. use_wall = .false. c c perform dynamic allocation of some local arrays c allocate (iphos(nseq+10)) allocate (root(nseq)) allocate (list(2,nseq)) c c find root atom and hydrogen bond partners for each base c kseq = 0 nbase = 0 do i = 1, n if (atmnum(type(i)).eq.6 .and. n12(i).eq.4) then ia = atmnum(type(i12(1,i))) ib = atmnum(type(i12(2,i))) ic = atmnum(type(i12(3,i))) id = atmnum(type(i12(4,i))) sum = ia + ib + ic + id if (sum .eq. 22) then nbase = nbase + 1 j = i12(4,i) root(nbase) = j kseq = kseq + 1 resname = nuclz(seqtyp(kseq)) do while (resname.eq.'MP ' .or. resname.eq.'DP ' & .or. resname.eq.'TP ') kseq = kseq + 1 resname = nuclz(seqtyp(kseq)) end do if (resname.eq.'A ' .or. resname.eq.'DA ') then list(1,nbase) = j + 6 list(2,nbase) = j + 11 else if (resname.eq.'G ' .or. resname.eq.'DG ') then list(1,nbase) = j + 12 list(2,nbase) = j + 5 else if (resname.eq.'C ' .or. resname.eq.'DC ') then list(1,nbase) = j + 3 list(2,nbase) = j + 8 else if (resname .eq. 'U ') then list(1,nbase) = j + 8 list(2,nbase) = j + 5 else if (resname .eq. 'DT ') then list(1,nbase) = j + 9 list(2,nbase) = j + 5 end if end if end if end do c c distance restraints for the base pair hydrogen bonds c do i = 1, nbase/2 j = nbase + 1 - i ndfix = ndfix + 1 idfix(1,ndfix) = list(1,i) idfix(2,ndfix) = list(1,j) dfix(1,ndfix) = 50.0d0 dfix(2,ndfix) = 1.85d0 dfix(3,ndfix) = 1.95d0 ndfix = ndfix + 1 idfix(1,ndfix) = list(2,i) idfix(2,ndfix) = list(2,j) dfix(1,ndfix) = 50.0d0 dfix(2,ndfix) = 1.85d0 dfix(3,ndfix) = 1.95d0 end do c c torsional restraints to enforce base pair planarity c do i = 1, nbase/2 j = nbase + 1 - i ntfix = ntfix + 1 itfix(1,ntfix) = root(i) itfix(2,ntfix) = list(1,i) itfix(3,ntfix) = list(2,i) itfix(4,ntfix) = list(1,j) tfix(1,ntfix) = 2.5d0 tfix(2,ntfix) = 180.0d0 tfix(3,ntfix) = 180.0d0 ntfix = ntfix + 1 itfix(1,ntfix) = root(i) itfix(2,ntfix) = list(2,i) itfix(3,ntfix) = list(1,i) itfix(4,ntfix) = list(2,j) tfix(1,ntfix) = 2.5d0 tfix(2,ntfix) = 180.0d0 tfix(3,ntfix) = 180.0d0 ntfix = ntfix + 1 itfix(1,ntfix) = root(j) itfix(2,ntfix) = list(1,j) itfix(3,ntfix) = list(2,j) itfix(4,ntfix) = list(1,i) tfix(1,ntfix) = 2.5d0 tfix(2,ntfix) = 180.0d0 tfix(3,ntfix) = 180.0d0 ntfix = ntfix + 1 itfix(1,ntfix) = root(j) itfix(2,ntfix) = list(2,j) itfix(3,ntfix) = list(1,j) itfix(4,ntfix) = list(2,i) tfix(1,ntfix) = 2.5d0 tfix(2,ntfix) = 180.0d0 tfix(3,ntfix) = 180.0d0 end do c c distance restraints between interstrand phosphates c nphos = 0 do i = 1, n if (atmnum(type(i)) .eq. 15) then nphos = nphos + 1 iphos(nphos) = i end if end do start = 1 stop = nphos / 2 resname = nuclz(seqtyp(1)) if (resname .eq. 'MP ') start = start + 1 if (resname .eq. 'DP ') start = start + 2 if (resname .eq. 'TP ') start = start + 3 resname = nuclz(seqtyp(nseq)) if (resname .eq. 'MP ') stop = stop - 1 if (resname .eq. 'DP ') stop = stop - 2 if (resname .eq. 'TP ') stop = stop - 3 offset = stop + nphos/2 + 1 if (hlxform .eq. 'A') dist = 17.78d0 if (hlxform .eq. 'B') dist = 17.46d0 if (hlxform .eq.
'Z') dist = 13.2d0 do i = start, stop ndfix = ndfix + 1 idfix(1,ndfix) = iphos(i) idfix(2,ndfix) = iphos(offset-i) dfix(1,ndfix) = 100.0d0 dfix(2,ndfix) = dist dfix(3,ndfix) = dist end do c c perform deallocation of some local arrays c deallocate (iphos) deallocate (root) deallocate (list) c c assign each strand to a separate molecule-based group c use_group = .true. ngrp = nmol do i = 1, ngrp igrp(1,i) = imol(1,i) igrp(2,i) = imol(2,i) do j = igrp(1,i), igrp(2,i) kgrp(j) = kmol(j) grplist(kgrp(j)) = i end do end do do i = 0, ngrp do j = 0, ngrp wgrp(j,i) = 1.0d0 end do wgrp(i,i) = 0.0d0 end do c c get rigid body reference coordinates for each strand c call orient c c perform dynamic allocation of some local arrays c allocate (xx(6*ngrp)) c c transfer rigid body coordinates to optimization parameters c nvar = 0 do i = 1, ngrp do j = 1, 6 nvar = nvar + 1 xx(nvar) = rbc(j,i) end do end do c c make the call to the optimization routine c iprint = 0 iwrite = 0 grdmin = 0.1d0 coordtype = 'NONE' call ocvm (nvar,xx,minimum,grdmin,watson1,optsave) c c transfer optimization parameters to rigid body coordinates c nvar = 0 do i = 1, ngrp do j = 1, 6 nvar = nvar + 1 rbc(j,i) = xx(nvar) end do end do c c perform deallocation of some local arrays c deallocate (xx) c c convert from rigid body to Cartesian coordinates c call rigidxyz return end c c c ############################################################ c ## ## c ## function watson1 -- energy and gradient for watson ## c ## ## c ############################################################ c c c "watson1" is a service routine that computes the energy c and gradient for optimally conditioned variable metric c optimization of rigid bodies c c function watson1 (xx,g) implicit none include 'sizes.i' include 'group.i' include 'math.i' include 'rigid.i' integer i,j,nvar real*8 watson1,e real*8 xx(*) real*8 g(*) real*8, allocatable :: derivs(:,:) c c c translate optimization parameters to rigid body coordinates c nvar = 0 do i = 1, ngrp do j = 1, 6 nvar = nvar + 1 rbc(j,i) = xx(nvar) end do end do c c perform dynamic allocation of some local arrays c allocate (derivs(6,ngrp)) c c compute and store the energy and gradient c call rigidxyz call gradrgd (e,derivs) watson1 = e c c translate rigid body gradient to optimization gradient c nvar = 0 do i = 1, ngrp do j = 1, 6 nvar = nvar + 1 g(nvar) = derivs(j,i) end do end do c c perform deallocation of some local arrays c deallocate (derivs) return end
\section{Dispelling Mirages} Protecting a visualization against mirages is an important problem, as unaddressed mirages can have lasting effects both on the immediate viewer (such as in the case of presentational visualizations) as well as on the analytical decision making of the chart makers themselves. For instance, if a user sees a particular visualization showing one trend, when in fact another is latent in the data, and uses this observed trend as the basis for subsequent exploration, they may be subject to an anchoring effect CITATION. This anchoring can lead to additional downstream mirages and confusions. A SENTENCE DESCRIBING OUR (PROPOSED) METHOD. In this section we will first review defenses against mirages from previous work, detail several new ideas for mirage defense, describe our system implementing those ideas, and finally provide a detailed case study of the system in action. (REORDER) \subsection{Systemic Defenses} While the notion of visualization mirages is original to this work, there has been some prior work on dispelling them. Typically these efforts have involved either intelligent systems that try to do away with problems automatically or, as described at length above, improving chart makers' knowledge through guidelines and best practices. % Smart systems can be helpful for creating visualizations, as they allow users to focus on their data rather than on engineering their visualization; however, this type of system is not without problems. % PHACKING Some systems, such as SeeDB and OTHERS CITATIONS, seek to surface interesting insights to users, which has the unfortunate side-effect of acting as a p-hacking machine \cite{pu2018garden}, creating p-hacked insights (or what might be coined v-hacked visualizations). %TOO MUCH TRUST Users place a lot of trust in systems that appear to be competent, which can cause them to incorrectly trust data that might contain flaws (DOES AN EXAMPLE OF THIS EXIST?). % OVER SPECIALIZED Some specialized analytics systems like QUDE attempt to directly surface data problems in the context of the analysis process \cite{binnig2017toward}, such as possible Simpson's paradoxes (a problem in which a dataset viewed at one level of granularity appears to have a trend that reverses when viewed at another) \cite{guo2017you}. % CANT RECOMMEND WHAT THEY DONT KNOW ABOUT Recommendation and guidance systems can only make commentary on classes of visualization they are explicitly made aware of. For instance, while Draco can effectively and efficiently recommend visualizations CITATION, it is unable to recommend visualizations outside of the vega-lite specification, which does not contain pie charts. % EDUCATION Finally, recommendation systems are not auditable: they are typically unable to provide an explanation for their choices, which prevents users from learning to apply the best practices encoded into that particular tool when working in other tools. \begin{figure}[bth] \centering \includegraphics[width=\columnwidth]{./figures/commutative-diagram.pdf} \caption{Algebraic visualization design commutative diagram.
A good visualization will commute: a change $\alpha$ to the data will be matched by a corresponding change $\omega$ to the visualization.} \label{fig:commutative-diagram} \end{figure} % \mc{Include commutative graph here along with some examples} \begin{figure}[bth] \centering \includegraphics[width=\columnwidth]{./figures/confuser-example.pdf} \caption{Algebraic Visualization Design's confusers describe charts that fail to acknowledge plausible and significant changes to the data.} \label{fig:confuser-example} \end{figure} \subsection{Metamorphic Testing For Disillusionment} In complex systems it is difficult or prohibitively expensive to understand whether or not the software is producing correct results. In the field of software testing this is known as the oracle problem. The Metamorphic Testing (MT) methodology attempts to address this challenge by verifying properties of system outputs across input changes \cite{segura2016survey}. Rather than checking that particular inputs give correct outputs, MT asserts that properties called \textit{metamorphic relations} should remain invariant across all appropriate metamorphoses of a particular data set. MT has been successfully applied to a wide variety of systems including computer graphics, self-driving cars, and deep learning. Let's consider a specific example from computer graphics for motivation. Donaldson \etal make use of an MT strategy to identify bugs in graphics shader compilers \cite{metamorphicoopsla17}. They do so by considering an ostensibly bug-free shader, making changes to the code that shouldn't affect the rendered image (such as introducing code paths that will never be reached), and comparing the result through image comparison techniques. They formalize this technique by asserting that the following equation should be an invariant: \begin{equation}\label{equation:shader} \forall x: p(f_I (x)) = f_O (p(x)) \end{equation} where $x$ is a given shader program, $p$ a shader compiler, $f_I$ perturbations to the input, and $f_O$ changes to the output (usually the identity under their framework). The selected meaning of equality in MT plays a significant role in its ability to offer effective analysis. To wit, Donaldson \etal use a threshold of chi-squared distance between image histograms as a proxy for image equality. Using this methodology they find over 60 bugs in commercial GPU systems. Interestingly, Eqn. \ref{equation:shader} is functionally isomorphic to the equational representation of AVD's principal commutative relation \cite{kindlmann2014algebraic}, which describes the commutative properties of an effective visualization across potential data transformations: \begin{align} v \circ r_2 \circ \alpha = \omega \circ v \circ r_1 \end{align} which is represented in \figref{fig:commutative-diagram} as a category-theoretic commutative diagram. We suggest that this correspondence implies that AVD and MT are intrinsically linked. Veras and Collins DO XYZ THING WITH THEIR METRIC \cite{veras2019discriminability}. We now introduce the idea of using metamorphic testing as a mechanism to verify individual visualizations. SENTENCE DESCRIBING WHY USING METAMORPHIC STUFF IS GOOD. This perspective has the advantage that we can test a wide variety of types of visualization without knowing much about the chart being rendered. % METAMORPHIC TESTING OFFERS SOME INTERESTING PROPERTIES, YET DOES NOT ALWAYS OFFER THE MOST DIRECT ROUTE TOWARDS DETECTING ERRORS.
TO WIT, OUR SHUFFLE RULE IS EFFECTIVE AT CATCHING OVERDRAW, YET THIS IS A VISUALIZATION PROPERTY THAT CAN BE DETECTED THROUGH OTHER OVERDRAW DETECTION TECHNIQUES. THE ADVANTAGE THAT METAMORPHIC TESTING HAS IS ITS NAIVETY: THROUGH SIMPLE METAMORPHOSES WE ARE ABLE TO CONSIDER A WIDE VARIETY OF AVENUES. \begin{figure}[bth] \centering \includegraphics[width=\columnwidth]{./figures/opacity-permute.pdf} \caption{ Visualization spec permutation over a chart describing the amount of rain in Seattle. After permuting the opacity we would expect to see the center image, yet permuting the opacity instead reveals the image on the right. This indicates that the chart maker forgot to set an aggregation type for the precipitation axis, causing an implicit (and unintended!) max aggregation for this chart. } \label{fig:opacity-permute} \end{figure} To our knowledge metamorphic testing has not previously been used explicitly in the context of data visualization, though there have been a variety of works related to visualization that either explicitly touch on or implicitly use related techniques. Ramanathan \etal make use of metamorphic techniques in conjunction with visualization in order to verify implementations of epidemiological models \cite{ramanathan2012verification}. Guo \etal use a metamorphic-like strategy to detect instances of Simpson's paradox \cite{guo2017you}. McNutt and Kindlmann construct a linting system that provides ad hoc guidance through a linting metaphor \cite{mcnuttlinting} and briefly touch on some ideas related to MT. Chiw \etal make use of MT techniques to analyze a scientific visualization DSL compiler, though their focus is on evaluating the correctness of the compiler rather than the correctness of the visualizations \cite{chiw2017datm}. \begin{figure}[bth] \centering \includegraphics[width=\columnwidth]{./figures/system-screenshot.png} \caption{ We provide a web-based interface to \SYSTEMNAME{ }, which is a feature-light redesign of the online vega editor \protect\cite{vegaonline}. % Users provide their vega-lite chart specification on the left and then receive a rendering of their chart on the right, along with a collection of passing and failing lint rules. % The lint rules include a text explanation and, in the case of the data-metamorphic rules, a visual explanation showing the modified chart and the difference from the original image. } \label{fig:system-diagram} \end{figure} \subsection{System Design} We implement a proof of concept, \SYSTEMNAME, which automatically surfaces potential mirages. \SYSTEMNAME{ } evaluates visualizations created in vega-lite \cite{satyanarayan2016vega} by checking them against a variety of assertions. These assertions come in two varieties: those which identify problems through visualization-spec introspection (which focus on catching reading-level mirages) and metamorphic perturbations (which primarily focus on catching AVD hallucinators and confusers). We focus on vega-lite because of its advantageous API, which allows us to apply a wide variety of analytic techniques in a programmatic and consistent way. Following the precedent set by Kim \etal, we focus on a subset of vega-lite \cite{kim2017graphscape}, in which we ignore concat and layer operations, as well as charts without a true data source (such as those fed by data generators).
Following McNutt and Kindlmann's \textit{vislint$\_$mpl}, a linter which principally considers stylistic issues in matplotlib charts \cite{Hunter:2007}, we structure our analysis system as a linter \cite{mcnuttlinting}. Linters are a type of software typically seen as a mechanism that employs static analysis techniques to catch semantic and stylistic programming bugs, like a spell-checker for code \cite{johnson1977lint}. Recently a variety of systems have moved beyond this definition and have started to consider non-programming domains such as deep learning training data \cite{hynes2017data}, English prose \cite{proselint, writegood}, and spreadsheets \cite{barowy2018excelint}. Linters often give incorrect warnings (false-positives) as they are typically designed with the perspective that it is better for the user to be alerted to a non-existent bug than to be left in the dark about a real one (false-negatives). To this end they allow their users to opt out of specific lint checks (called rules), either in general or for specific cases, a degree of user agency which is often not respected by guidance systems that ignore the user's choices (such as the infamously impolite Microsoft Clippy \cite{whitworth2005polite}). We believe that this type of granular and polite control over analysis is an ideal fit for the level and accuracy of analysis our system can provide. Our system is focused on detecting errors in the visualization stage of our taxonomy. We forgo implementing a suite of data error detection systems because there is already a substantial literature on them, both on purely statistical grounds \cite{raman2001potter, kandel2012profiler, naumann2014data} as well as in domain specific contexts \cite{mucslu2015preventing, barowy2014checkcell}. Closely related to our system is Jannah's MetaReader, which surfaces possible statistical and semantic data errors to analysts as a pre-analytics step \cite{jannahmetareader}. In the other direction there has been some initial work on automatically testing for bias \cite{wall2017warning}, although we are lightly skeptical of any system that believes it can automatically determine fairness\footnote{(MULCHING PROPOSAL?)}. We offer three types of rules in our analysis: rules which look for deceptive mirages, executed by statically analyzing the visualization specification; spec-metamorphic rules, in which we modify the spec and compare the rendered visualizations; and data-metamorphic rules, in which we modify the input data and compare the rendered images. We find the last of these to be the most effective class of rule at inferring mirages, as they are agnostic to the specific chart configuration and typically only care if our image doesn't change when it should (a confuser) or changes when it shouldn't (a hallucinator). We don't specifically differentiate between classes of AVD failures in our system, as that superstructure is not necessary to alert the user to problems identified using those tools. We verify that our system runs correctly by evaluating it against both the corpus of examples that makes up vega-lite's test suite, as well as a corpus of vega-lite specifications scraped from Github's Gists CITATION. Together these corpora comprise approximately 800 example charts. Within both of these example sets we are able to find a number of interesting test failures, such as XYZ.
While these tests are valuable, both for developing and validating our tool, they don't really capture many in-the-wild errors, likely because vega-lite is primarily used as a tool on top of which other charting tools are built (see for instance its origins as part of Voyager CITATION). In future work we would like to apply our system to a more ad hoc charting context, such as Altair CITATION (which is now the default charting library in JupyterLab CITATION) or LitVis CITATION, which both consume vega-lite as a charting engine. In order to demonstrate the validity of this testing strategy we implement a small number of metamorphic relations. \textbf{SHUFFLE}: Reorders the input data. The visualization should remain unchanged across this transformation: if not, it indicates that the visualization possesses a hallucinator. This primarily allows us to detect overplotting, as in \figref{fig:shuffle-lint}, but it surfaces a variety of other interesting chart properties as well. \textbf{RANDOMIZE}: Identifies columns being visualized and randomizes their relationship, in the vein of Wickham \etal's lineup technique \cite{wickham2010graphical}. In our naive pixel difference technique we assert that images should be different across this change. This allows us to detect XXXX. MORE \textbf{REMOVE ROWS}, \textbf{REMOVE OUTLIERS}, \textbf{NULLIFY SOME DATA}, \textbf{SAMPLE FROM AGGREGATE MARKS} This collection is paralleled by Murphy \etal's list of metamorphic relations for deep learning systems, consisting of \textbf{Additive}, \textbf{Multiplicative}, \textbf{Permutative}, \textbf{Invertive}, \textbf{Inclusive}, and \textbf{Exclusive} relations \cite{murphy2008properties}. \begin{figure}[bth] \centering \includegraphics[width=\columnwidth]{./figures/shuffle-failure.png} \caption{ An example of a true-positive lint rule execution: shuffling the data. On the left we see the original visualization, in the center we see the image after the shuffle, and on the right we see the pixel difference between the two. This reveals that this combination of spec and data is not resilient to order permutation (in the language of AVD, a hallucinator), or more plainly, overdraw. } \label{fig:shuffle-lint} \end{figure}
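To make the shape of a data-metamorphic rule concrete, the following sketch shows how the \textbf{SHUFFLE} relation could be checked (illustrative only: \texttt{render} and \texttt{image\_diff} are hypothetical stand-ins for a vega-lite rendering step and a normalized pixel-difference comparison, not part of any published API):

\begin{verbatim}
import random

def shuffle_rule(spec, data, render, image_diff, threshold=0.01):
    # SHUFFLE metamorphic rule: a chart should be invariant
    # under reordering of its input rows.
    original = render(spec, data)
    permuted_data = list(data)
    random.shuffle(permuted_data)
    permuted = render(spec, permuted_data)
    # A difference above the threshold means the chart depends on
    # row order: an AVD hallucinator, most commonly overdraw.
    return image_diff(original, permuted) <= threshold
\end{verbatim}

The other relations follow the same template, differing only in the perturbation applied to the data and in whether the rule asserts that the two rendered images should match or should differ.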
The connected component of $x$ is equal to the connected component of $y$ if and only if $x$ and $y$ are either both not in $S$ or both in $S$ and $x$ and $y$ are in the same connected component of $S$.
The theater film, The Man from Galveston (1963), was the original pilot episode of the proposed NBC western television series Temple Houston, with Jeffrey Hunter cast as Temple Lea Houston, a lawyer and the youngest son of the legendary Sam Houston. For a time the real Temple Houston was the county attorney of Brazoria County, Texas. The Temple Houston series lasted for only twenty-six episodes in the 1963–1964 television season.
||| Messages exchanged during the IDE protocol module Protocol.IDE import Protocol.SExp import Data.List import Data.Maybe import public Libraries.Data.Span import public Protocol.IDE.Command as Protocol.IDE import public Protocol.IDE.Decoration as Protocol.IDE import public Protocol.IDE.Formatting as Protocol.IDE import public Protocol.IDE.FileContext as Protocol.IDE import public Protocol.IDE.Holes as Protocol.IDE import public Protocol.IDE.Result as Protocol.IDE import public Protocol.IDE.Highlight as Protocol.IDE %default total ------------------------------------------------------------------------ public export Highlighting : Type Highlighting = List (Span Properties) export SExpable a => SExpable (Span a) where toSExp (MkSpan start width ann) = SExpList [ IntegerAtom (cast start) , IntegerAtom (cast width) , toSExp ann ] export FromSExpable a => FromSExpable (Span a) where fromSExp (SExpList [ start , width , ann ]) = do pure $ MkSpan { start = !(fromSExp start) , length = !(fromSExp width) , property = !(fromSExp ann)} fromSExp _ = Nothing ------------------------------------------------------------------------ public export data ReplyPayload = OK Result Highlighting | HighlightSource (List SourceHighlight) | Error String Highlighting export SExpable ReplyPayload where toSExp (OK result hl) = SExpList (SymbolAtom "ok" :: toSExp result :: case hl of [] => [] _ => [SExpList (map toSExp hl)]) toSExp (HighlightSource hls) = SExpList [ SymbolAtom "ok" , SExpList [ SymbolAtom "highlight-source" , toSExp hls] ] toSExp (Error msg hl) = SExpList (SymbolAtom "error" :: toSExp msg :: case hl of [] => [] _ => [SExpList (map toSExp hl)]) -- Again, not the most efficient. Probably better to index by the -- expected return type in the future export FromSExpable ReplyPayload where fromSExp (SExpList [SymbolAtom "ok", result]) = do pure $ OK !(fromSExp result) [] fromSExp (SExpList [SymbolAtom "ok", result, hl]) = do pure $ OK !(fromSExp result) !(fromSExp hl) fromSExp (SExpList [ SymbolAtom "ok" , SExpList [ SymbolAtom "highlight-source" , hls] ]) = do pure $ HighlightSource !(fromSExp hls) fromSExp (SExpList [SymbolAtom "error", msg]) = do pure $ Error !(fromSExp msg) [] fromSExp (SExpList [SymbolAtom "error", msg, hl]) = do pure $ Error !(fromSExp msg) !(fromSExp hl) fromSExp _ = Nothing public export data Reply = ProtocolVersion Int Int | Immediate ReplyPayload Integer | Intermediate ReplyPayload Integer | WriteString String Integer | SetPrompt String Integer | Warning FileContext String Highlighting Integer export SExpable Reply where toSExp (ProtocolVersion maj min) = toSExp (SymbolAtom "protocol-version", maj, min) toSExp ( Immediate payload id) = SExpList [SymbolAtom "return", toSExp payload, toSExp id] toSExp (Intermediate payload id) = SExpList [SymbolAtom "output", toSExp payload, toSExp id] toSExp (WriteString str id) = SExpList [SymbolAtom "write-string", toSExp str, toSExp id] toSExp (SetPrompt str id) = SExpList [SymbolAtom "set-prompt" , toSExp str, toSExp id] toSExp (Warning fc str spans id) = SExpList [SymbolAtom "warning", SExpList $ toSExp fc.file :: toSExp (fc.range.startLine, fc.range.startCol) :: toSExp (fc.range.endLine , fc.range.endCol ) :: toSExp str :: case spans of [] => [] _ => [SExpList (map toSExp spans)] , toSExp id] export FromSExpable Reply where fromSExp (SExpList [SymbolAtom "protocol-version", major, minor]) = do Just $ ProtocolVersion !(fromSExp major) !(fromSExp minor) fromSExp (SExpList [SymbolAtom "return", payload, iden]) = do Just $ Immediate !(fromSExp 
payload) !(fromSExp iden) fromSExp (SExpList [SymbolAtom "output", payload, iden]) = do Just $ Intermediate !(fromSExp payload) !(fromSExp iden) fromSExp (SExpList [SymbolAtom "write-string", str, iden]) = do Just $ WriteString !(fromSExp str) !(fromSExp iden) fromSExp (SExpList [SymbolAtom "set-prompt", str, iden]) = do Just $ SetPrompt !(fromSExp str) !(fromSExp iden) fromSExp (SExpList [SymbolAtom "warning" , SExpList [filename, SExpList [startLine, startCol] , SExpList [endLine , endCol ] , str] , iden]) = do pure $ Warning (MkFileContext { file = !(fromSExp filename) , range = MkBounds { startLine = !(fromSExp startLine) , startCol = !(fromSExp startCol) , endLine = !(fromSExp endLine) , endCol = !(fromSExp endCol)} }) !(fromSExp str) [] !(fromSExp iden) fromSExp (SExpList [SymbolAtom "warning" , SExpList [filename, SExpList [startLine, startCol] , SExpList [endLine , endCol ] , str, hl] , iden]) = do pure $ Warning (MkFileContext { file = !(fromSExp filename) , range = MkBounds { startLine = !(fromSExp startLine) , startCol = !(fromSExp startCol) , endLine = !(fromSExp endLine) , endCol = !(fromSExp endCol)} }) !(fromSExp str) !(fromSExp hl) !(fromSExp iden) fromSExp _ = Nothing public export data Request = Cmd IDECommand export SExpable Request where toSExp (Cmd cmd) = toSExp cmd export FromSExpable Request where fromSExp cmd = do pure $ Cmd !(fromSExp cmd)
Two components of a topological space are equal if and only if they intersect.
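A hedged formal sketch of this fact (Lean 4 / Mathlib style; the import path and the names `connectedComponent` and `connectedComponent_eq` are assumptions about the library, with the signatures used below):

```lean
import Mathlib.Topology.Connected.Basic

-- Connected components partition the space, so two components that
-- share a point must coincide.
example {X : Type*} [TopologicalSpace X] (x y : X)
    (h : (connectedComponent x ∩ connectedComponent y).Nonempty) :
    connectedComponent x = connectedComponent y := by
  obtain ⟨z, hzx, hzy⟩ := h
  exact (connectedComponent_eq hzx).trans (connectedComponent_eq hzy).symm
```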
from gym.utils import seeding class Space(object): """Defines the observation and action spaces, so you can write generic code that applies to any Env. For example, you can choose a random action. WARNING - Custom observation & action spaces can inherit from the `Space` class. However, most use-cases should be covered by the existing space classes (e.g. `Box`, `Discrete`, etc...), and container classes (`Tuple` & `Dict`). Note that parametrized probability distributions (through the `sample()` method), and batching functions (in `gym.vector.VectorEnv`), are only well-defined for instances of spaces provided in gym by default. Moreover, some implementations of Reinforcement Learning algorithms might not handle custom spaces properly. Use custom spaces with care. """ def __init__(self, shape=None, dtype=None, seed=None): import numpy as np # takes about 300-400ms to import, so we load lazily self._shape = None if shape is None else tuple(shape) self.dtype = None if dtype is None else np.dtype(dtype) self._np_random = None if seed is not None: self.seed(seed) @property def np_random(self): """Lazily seed the rng since this is expensive and only needed if sampling from this space. """ if self._np_random is None: self.seed() return self._np_random @property def shape(self): """Return the shape of the space as an immutable property""" return self._shape def sample(self): """Randomly sample an element of this space. Can be uniform or non-uniform sampling based on boundedness of space.""" raise NotImplementedError def seed(self, seed=None): """Seed the PRNG of this space.""" self._np_random, seed = seeding.np_random(seed) return [seed] def contains(self, x): """ Return boolean specifying if x is a valid member of this space """ raise NotImplementedError def __contains__(self, x): return self.contains(x) def __setstate__(self, state): # Don't mutate the original state state = dict(state) # Allow for loading of legacy states. # See: # https://github.com/openai/gym/pull/2397 -- shape # https://github.com/openai/gym/pull/1913 -- np_random # if "shape" in state: state["_shape"] = state["shape"] del state["shape"] if "np_random" in state: state["_np_random"] = state["np_random"] del state["np_random"] # Update our state self.__dict__.update(state) def to_jsonable(self, sample_n): """Convert a batch of samples from this space to a JSONable data type.""" # By default, assume identity is JSONable return sample_n def from_jsonable(self, sample_n): """Convert a JSONable data type to a batch of samples from this space.""" # By default, assume identity is JSONable return sample_n
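# ---------------------------------------------------------------------------
# A minimal sketch of a custom space built on the base class above.
# (Illustrative only; `UnitInterval` is a hypothetical example, and, as the
# class docstring warns, most use-cases should prefer the built-in spaces
# such as `Box` or `Discrete`.)
# ---------------------------------------------------------------------------

import numpy as np


class UnitInterval(Space):
    """Scalar observations drawn uniformly from the closed interval [0, 1]."""

    def __init__(self, seed=None):
        super().__init__(shape=(), dtype=np.float64, seed=seed)

    def sample(self):
        # `np_random` lazily seeds the PRNG on first use (see property above).
        return self.np_random.uniform(0.0, 1.0)

    def contains(self, x):
        return np.isscalar(x) and 0.0 <= x <= 1.0


# Usage: sampling always yields a member of the space.
space = UnitInterval(seed=0)
assert space.sample() in space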
// // Copyright (c) 2019 Vinnie Falco ([email protected]) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // Official repository: https://github.com/boostorg/json // #ifndef BOOST_JSON_IMPL_ERROR_IPP #define BOOST_JSON_IMPL_ERROR_IPP #include <boost/json/error.hpp> BOOST_JSON_NS_BEGIN error_code make_error_code(error e) { struct codes : error_category { const char* name() const noexcept override { return "boost.json"; } std::string message(int ev) const override { switch(static_cast<error>(ev)) { default: case error::syntax: return "syntax error"; case error::extra_data: return "extra data"; case error::incomplete: return "incomplete JSON"; case error::exponent_overflow: return "exponent overflow"; case error::too_deep: return "too deep"; case error::illegal_leading_surrogate: return "illegal leading surrogate"; case error::illegal_trailing_surrogate: return "illegal trailing surrogate"; case error::expected_hex_digit: return "expected hex digit"; case error::expected_utf16_escape: return "expected utf16 escape"; case error::object_too_large: return "object too large"; case error::array_too_large: return "array too large"; case error::key_too_large: return "key too large"; case error::string_too_large: return "string too large"; case error::exception: return "got exception"; case error::not_number: return "not a number"; case error::not_exact: return "not exact"; case error::test_failure: return "test failure"; } } error_condition default_error_condition( int ev) const noexcept override { switch(static_cast<error>(ev)) { default: return {ev, *this}; case error::syntax: case error::extra_data: case error::incomplete: case error::exponent_overflow: case error::too_deep: case error::illegal_leading_surrogate: case error::illegal_trailing_surrogate: case error::expected_hex_digit: case error::expected_utf16_escape: case error::object_too_large: case error::array_too_large: case error::key_too_large: case error::string_too_large: case error::exception: return condition::parse_error; case error::not_number: case error::not_exact: return condition::assign_error; } } }; static codes const cat{}; return error_code{static_cast< std::underlying_type<error>::type>(e), cat}; } error_condition make_error_condition(condition c) { struct codes : error_category { const char* name() const noexcept override { return "boost.json"; } std::string message(int cv) const override { switch(static_cast<condition>(cv)) { default: case condition::parse_error: return "A JSON parse error occurred"; case condition::assign_error: return "An error occurred during assignment"; } } }; static codes const cat{}; return error_condition{static_cast< std::underlying_type<condition>::type>(c), cat}; } BOOST_JSON_NS_END #endif
// Copyright (c) 2005 - 2015 Marc de Kamps // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF // USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #define BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> #include <TwoDLib.hpp> #include <vector> #include <iostream> using namespace std; using namespace TwoDLib; BOOST_AUTO_TEST_CASE(TriangleCreationTest) { vector<double> vec_v(3); vector<double> vec_w(3); vec_v[0] = 0.; vec_v[1] = 1.; vec_v[2] = 0.; vec_w[0] = 0.; vec_w[1] = 0.; vec_w[2] = 1.; Triangle tri(vec_v, vec_w); BOOST_REQUIRE( tri.SignedArea() == 0.5); BOOST_REQUIRE( tri.IsClockwise() == -1); } BOOST_AUTO_TEST_CASE(TriangleInsideTest) { vector<double> vec_v(3); vector<double> vec_w(3); vec_v[0] = 0.; vec_v[1] = 1.; vec_v[2] = 0.; vec_w[0] = 0.; vec_w[1] = 0.; vec_w[2] = 1.; Triangle tri(vec_v, vec_w); Point p1(0.1,0.1); BOOST_REQUIRE( tri.IsInside(p1) == 1); Point p2(1.0,1.0); BOOST_REQUIRE( tri.IsInside(p2) == 0); }
(* Title: ZF/ex/misc.thy Author: Lawrence C Paulson, Cambridge University Computer Laboratory Copyright 1993 University of Cambridge Composition of homomorphisms, Pastre's examples, ... *) section\<open>Miscellaneous ZF Examples\<close> theory misc imports ZF begin subsection\<open>Various Small Problems\<close> text\<open>The singleton problems are much harder in HOL.\<close> lemma singleton_example_1: "\<forall>x \<in> S. \<forall>y \<in> S. x \<subseteq> y \<Longrightarrow> \<exists>z. S \<subseteq> {z}" by blast lemma singleton_example_2: "\<forall>x \<in> S. \<Union>S \<subseteq> x \<Longrightarrow> \<exists>z. S \<subseteq> {z}" \<comment> \<open>Variant of the problem above.\<close> by blast lemma "\<exists>!x. f (g(x)) = x \<Longrightarrow> \<exists>!y. g (f(y)) = y" \<comment> \<open>A unique fixpoint theorem --- \<open>fast\<close>/\<open>best\<close>/\<open>auto\<close> all fail.\<close> apply (erule ex1E, rule ex1I, erule subst_context) apply (rule subst, assumption, erule allE, rule subst_context, erule mp) apply (erule subst_context) done text\<open>A weird property of ordered pairs.\<close> lemma "b\<noteq>c \<Longrightarrow> \<langle>a,b\<rangle> \<inter> \<langle>a,c\<rangle> = \<langle>a,a\<rangle>" by (simp add: Pair_def Int_cons_left Int_cons_right doubleton_eq_iff, blast) text\<open>These two are cited in Benzmueller and Kohlhase's system description of LEO, CADE-15, 1998 (page 139-143) as theorems LEO could not prove.\<close> lemma "(X = Y \<union> Z) \<longleftrightarrow> (Y \<subseteq> X \<and> Z \<subseteq> X \<and> (\<forall>V. Y \<subseteq> V \<and> Z \<subseteq> V \<longrightarrow> X \<subseteq> V))" by (blast intro!: equalityI) text\<open>the dual of the previous one\<close> lemma "(X = Y \<inter> Z) \<longleftrightarrow> (X \<subseteq> Y \<and> X \<subseteq> Z \<and> (\<forall>V. V \<subseteq> Y \<and> V \<subseteq> Z \<longrightarrow> V \<subseteq> X))" by (blast intro!: equalityI) text\<open>trivial example of term synthesis: apparently hard for some provers!\<close> schematic_goal "a \<noteq> b \<Longrightarrow> a:?X \<and> b \<notin> ?X" by blast text\<open>Nice blast benchmark. Proved in 0.3s; old tactics can't manage it!\<close> lemma "\<forall>x \<in> S. \<forall>y \<in> S. x \<subseteq> y \<Longrightarrow> \<exists>z. S \<subseteq> {z}" by blast text\<open>variant of the benchmark above\<close> lemma "\<forall>x \<in> S. \<Union>(S) \<subseteq> x \<Longrightarrow> \<exists>z. S \<subseteq> {z}" by blast (*Example 12 (credited to Peter Andrews) from W. Bledsoe. A Maximal Method for Set Variables in Automatic Theorem-proving. In: J. Hayes and D. Michie and L. Mikulich, eds. Machine Intelligence 9. Ellis Horwood, 53-100 (1979). *) lemma "(\<forall>F. {x} \<in> F \<longrightarrow> {y} \<in> F) \<longrightarrow> (\<forall>A. x \<in> A \<longrightarrow> y \<in> A)" by best text\<open>A characterization of functions suggested by Tobias Nipkow\<close> lemma "r \<in> domain(r)->B \<longleftrightarrow> r \<subseteq> domain(r)*B \<and> (\<forall>X. r `` (r -`` X) \<subseteq> X)" by (unfold Pi_def function_def, best) subsection\<open>Composition of homomorphisms is a Homomorphism\<close> text\<open>Given as a challenge problem in R. 
Boyer et al., Set Theory in First-Order Logic: Clauses for Gödel's Axioms, JAR 2 (1986), 287-327\<close> text\<open>collecting the relevant lemmas\<close> declare comp_fun [simp] SigmaI [simp] apply_funtype [simp] (*Force helps prove conditions of rewrites such as comp_fun_apply, since rewriting does not instantiate Vars.*) lemma "(\<forall>A f B g. hom(A,f,B,g) = {H \<in> A->B. f \<in> A*A->A \<and> g \<in> B*B->B \<and> (\<forall>x \<in> A. \<forall>y \<in> A. H`(f`\<langle>x,y\<rangle>) = g`<H`x,H`y>)}) \<longrightarrow> J \<in> hom(A,f,B,g) \<and> K \<in> hom(B,g,C,h) \<longrightarrow> (K O J) \<in> hom(A,f,C,h)" by force text\<open>Another version, with meta-level rewriting\<close> lemma "(\<And>A f B g. hom(A,f,B,g) \<equiv> {H \<in> A->B. f \<in> A*A->A \<and> g \<in> B*B->B \<and> (\<forall>x \<in> A. \<forall>y \<in> A. H`(f`\<langle>x,y\<rangle>) = g`<H`x,H`y>)}) \<Longrightarrow> J \<in> hom(A,f,B,g) \<and> K \<in> hom(B,g,C,h) \<longrightarrow> (K O J) \<in> hom(A,f,C,h)" by force subsection\<open>Pastre's Examples\<close> text\<open>D Pastre. Automatic theorem proving in set theory. Artificial Intelligence, 10:1--27, 1978. Previously, these were done using ML code, but blast manages fine.\<close> lemmas compIs [intro] = comp_surj comp_inj comp_fun [intro] lemmas compDs [dest] = comp_mem_injD1 comp_mem_surjD1 comp_mem_injD2 comp_mem_surjD2 lemma pastre1: "\<lbrakk>(h O g O f) \<in> inj(A,A); (f O h O g) \<in> surj(B,B); (g O f O h) \<in> surj(C,C); f \<in> A->B; g \<in> B->C; h \<in> C->A\<rbrakk> \<Longrightarrow> h \<in> bij(C,A)" by (unfold bij_def, blast) lemma pastre3: "\<lbrakk>(h O g O f) \<in> surj(A,A); (f O h O g) \<in> surj(B,B); (g O f O h) \<in> inj(C,C); f \<in> A->B; g \<in> B->C; h \<in> C->A\<rbrakk> \<Longrightarrow> h \<in> bij(C,A)" by (unfold bij_def, blast) lemma pastre4: "\<lbrakk>(h O g O f) \<in> surj(A,A); (f O h O g) \<in> inj(B,B); (g O f O h) \<in> inj(C,C); f \<in> A->B; g \<in> B->C; h \<in> C->A\<rbrakk> \<Longrightarrow> h \<in> bij(C,A)" by (unfold bij_def, blast) lemma pastre5: "\<lbrakk>(h O g O f) \<in> inj(A,A); (f O h O g) \<in> surj(B,B); (g O f O h) \<in> inj(C,C); f \<in> A->B; g \<in> B->C; h \<in> C->A\<rbrakk> \<Longrightarrow> h \<in> bij(C,A)" by (unfold bij_def, blast) lemma pastre6: "\<lbrakk>(h O g O f) \<in> inj(A,A); (f O h O g) \<in> inj(B,B); (g O f O h) \<in> surj(C,C); f \<in> A->B; g \<in> B->C; h \<in> C->A\<rbrakk> \<Longrightarrow> h \<in> bij(C,A)" by (unfold bij_def, blast) end
{-# OPTIONS --without-K --safe #-} module Tools.Bool where open import Data.Bool using (Bool; true; false; _∧_; if_then_else_) public
[STATEMENT] lemma distint_verts_subtree: assumes "is_subtree (Node r xs) t" and "t1 \<in> fst ` fset xs" shows "distinct (r @ Dtree.root t1)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. distinct (r @ dtree.root t1) [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. distinct (r @ dtree.root t1) [PROOF STEP] have "(Dtree.root t1) \<in> dverts t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. dtree.root t1 \<in> dverts t [PROOF STEP] using dtree.set_sel(1) assms dverts_subtree_subset [PROOF STATE] proof (prove) using this: dtree.root ?a \<in> dverts ?a is_subtree (Node r xs) t t1 \<in> fst ` fset xs is_subtree ?x ?y \<Longrightarrow> dverts ?x \<subseteq> dverts ?y goal (1 subgoal): 1. dtree.root t1 \<in> dverts t [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: dtree.root t1 \<in> dverts t goal (1 subgoal): 1. distinct (r @ dtree.root t1) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: dtree.root t1 \<in> dverts t [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: dtree.root t1 \<in> dverts t goal (1 subgoal): 1. distinct (r @ dtree.root t1) [PROOF STEP] using verts_distinct assms(1) dverts_subtree_subset child_disjoint_root[OF assms] [PROOF STATE] proof (prove) using this: dtree.root t1 \<in> dverts t ?v \<in> dverts t \<Longrightarrow> distinct ?v is_subtree (Node r xs) t is_subtree ?x ?y \<Longrightarrow> dverts ?x \<subseteq> dverts ?y set r \<inter> set (dtree.root t1) = {} goal (1 subgoal): 1. distinct (r @ dtree.root t1) [PROOF STEP] by force [PROOF STATE] proof (state) this: distinct (r @ dtree.root t1) goal: No subgoals! [PROOF STEP] qed
# Functions as Real-valued Circuits ## Base Case: Single Gate in the Circuit ### Single Multiply Gate The first thing we are going to compute is a simple function of two variables $F = f(X,Y)=X*Y$. The circuit takes two real-valued inputs $X$ and $Y$, computes the product $X*Y$ defined by the function $f$ and stores it in the variable $F$. ```python def f(X,Y): return X*Y ``` ```python F = f(-2,3) print (f'The output F is: {F}') ``` The output F is: -6 ### <font color=blue>*How should one tweak the inputs $(X,Y)$ slightly to increase the output of the function __f__?*</font> ### Strategy #1 - Numerical Gradient The partial derivative of the function $f$ with respect to $X$ can be computed as: \begin{align} \frac{\partial f (X,Y)}{\partial X} = \lim_{h\to 0} \frac{f(X+h,Y)-f(X,Y)}{h} \\ \end{align} This can be simulated in code by choosing $h$ to be a very small number: ```python h = 0.0001 ``` #### Computing a partial derivative with respect to $X$ ```python X = -2; Y = 3 ``` ```python X_derivative = (f(X+h, Y)-f(X, Y))/h print (f'the derivative in respect to X is: {X_derivative}') ``` the derivative in respect to X is: 3.00000000000189 A positive derivative value indicates that $X$ should be increased in order to increase $f$, let's try that: ```python print (f'the original output is: {f(-2,3)}') # increase X by a small amount, for example 0.2 print (f'the new output is: {f(-1.8,3)}') ``` the original output is: -6 the new output is: -5.4 $-5.4$ is larger than $-6$, it works! #### Computing a partial derivative with respect to $Y$ ```python Y_derivative = (f(X, Y+h)-f(X, Y))/h print (f'the derivative in respect to Y is: {Y_derivative}') ``` the derivative in respect to Y is: -2.0000000000042206 A negative derivative value indicates that $Y$ should be decreased in order to increase $f$, let's try that: ```python print (f'the original output is: {f(-2,3)}') # decrease Y by a small amount, for example 0.1 print (f'the new output is: {f(-2,2.9)}') ``` the original output is: -6 the new output is: -5.8 $-5.8$ is larger than $-6$, it works! The __gradient__ of a function is made up of all the partial derivatives of this function concatenated in a vector: \begin{align} \nabla f(X,Y)=\left[\frac{\partial f (X,Y)}{\partial X},\frac{\partial f (X,Y)}{\partial Y}\right] \end{align} #### Gradually maximizing a function by using derivatives In order to gradually maximize the function $f$ (step-by-step) towards our desired result, we need to update our parameters. This is done by adding to the parameter's value the value of its partial derivative. To achieve this gradually in small steps, we multiply the value of the partial derivative by a small number (the step size). ```python step_size = 0.01 F = f(X,Y) print (f'The original output F is: {F}') ``` The original output F is: -6 ```python X = X + step_size * X_derivative Y = Y + step_size * Y_derivative print (f'X is: {X}, Y is: {Y}') ``` X is: -1.969999999999981, Y is: 2.979999999999958 ```python F_new = f(X,Y) print (f'old output: {F}\nnew output: {F_new}') ``` old output: -6 new output: -5.87059999999986 The new output is larger than the old, thanks to partial derivatives. This approach, however, is still expensive because we need to compute the circuit's output as we tweak every input value independently by a small amount. ### Strategy #2 - Analytic Gradient We can use calculus to compute partial derivatives of the function $f(X,Y)$.
\begin{align} \frac{\partial F}{\partial X}=\frac{\partial f (X,Y)}{\partial X} = \lim_{h\to 0} \frac{f(X+h,Y)-f(X,Y)}{h} \\ \end{align} A partial derivative of $F=f(X,Y)$ in respect to $X$ is: \begin{align*} \frac{\partial F}{\partial X}=\frac{\partial f (X,Y)}{\partial X} &= \lim_{h\to 0} \frac{f(X+h,Y)-f(X,Y)}{h} \\\\ &=\lim_{h\to 0}\frac{(X+h)Y -XY}{h} \\ &=\lim_{h\to 0}\frac{XY+Yh-XY}{h} \\ &=\lim_{h\to 0}\frac{Yh}{h} \\ \frac{\partial F}{\partial X}=\frac{\partial f (X,Y)}{\partial X}&=Y \end{align*} A partial derivative of $F=f(X,Y)$ in respect to $Y$ is: \begin{align*} \frac{\partial F}{\partial Y}=\frac{\partial f (X,Y)}{\partial Y} &= \lim_{h\to 0} \frac{f(X,Y+h)-f(X,Y)}{h} \\\\ &=\lim_{h\to 0}\frac{X(Y+h) -XY}{h} \\ &=\lim_{h\to 0}\frac{XY+Xh-XY}{h} \\ &=\lim_{h\to 0}\frac{Xh}{h} \\ \frac{\partial F}{\partial Y}=\frac{\partial f (X,Y)}{\partial Y} &=X \end{align*} Here are both partial derivatives: \begin{align*} \frac{\partial F}{\partial X}=Y ; \frac{\partial F}{\partial Y}=X \\ \end{align*} We can represent this as a gradient: \begin{align} \nabla f(X,Y)=\left[Y,X\right] \end{align} We can now use this information to increase the output of the function __f__: ```python X = -2; Y = 3 F = f(X,Y) print (f'the output F is: {F}') ``` the output F is: -6 ```python X_gradient = Y Y_gradient = X print (f'X-gradient: {X_gradient} \nY-gradient: {Y_gradient}') ``` X-gradient: 3 Y-gradient: -2 ```python step_size = 0.001 X = X + step_size * X_gradient Y = Y + step_size * Y_gradient print (f'X is now: {X}, \nY is now: {Y}') ``` X is now: -1.997, Y is now: 2.998 ```python F_new = f(X,Y) print (f'old output: {F}\nnew output: {F_new}') ``` old output: -6 new output: -5.987006000000001 The new output $-5.987006$ is larger than the old: $-6$. *** ### Single Add Gate The second function we're going to compute is $G=g(X,Y)=X+Y.$ The circuit takes two real-valued inputs $X$ and $Y$ and computes the sum $X+Y$. ```python def g(X,Y): return X+Y ``` ```python X = -2; Y = 3 G = g(X,Y) print (f'The output is: {G}') ``` The output is: 1 As we did before, we can use calculus to compute partial derivatives for the function $G=g(X,Y)$: \begin{align} \frac{\partial G}{\partial X}=\frac{\partial g (X,Y)}{\partial X} = \lim_{h\to 0} \frac{g(X+h,Y)-g(X,Y)}{h} \\ \end{align} A partial derivative of $G=g(X,Y)$ in respect to $X$ is: \begin{align*} \frac{\partial G}{\partial X}=\frac{\partial g (X,Y)}{\partial X} &= \lim_{h\to 0} \frac{g(X+h,Y)-g(X,Y)}{h} \\\\ &=\lim_{h\to 0}\frac{X+h+Y -X-Y}{h} \\ \frac{\partial G}{\partial X}=\frac{\partial g (X,Y)}{\partial X}&=\lim_{h\to 0}\frac{h}{h} =1 \\ \end{align*} A partial derivative of $G=g(X,Y)$ in respect to $Y$ is: \begin{align*} \frac{\partial G}{\partial Y}=\frac{\partial g (X,Y)}{\partial Y} &= \lim_{h\to 0} \frac{g(X,Y+h)-g(X,Y)}{h} \\\\ &=\lim_{h\to 0}\frac{X+Y+h -X-Y}{h} \\ \frac{\partial G}{\partial Y}=\frac{\partial g (X,Y)}{\partial Y}&=\lim_{h\to 0}\frac{h}{h} =1 \\ \end{align*} Both partial derivatives $\frac{\partial G}{\partial X}$ and $\frac{\partial G}{\partial Y}$ in this case are equal to $1$.
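As a quick numerical sanity check (a sketch reusing the recipe from Strategy #1; `h` and `g` are as defined above), both partial derivatives of __g__ do come out as approximately $1$:

```python
# Numerical check of the analytic result: both partials of g are ~1.
h = 0.0001
X = -2; Y = 3
print((g(X + h, Y) - g(X, Y)) / h)  # ~1.0, matches dG/dX = 1
print((g(X, Y + h) - g(X, Y)) / h)  # ~1.0, matches dG/dY = 1
```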
We can use this information to maximize the function __g__: ```python X_gradient = 1 Y_gradient = 1 print (f'X-gradient: {X_gradient} \nY-gradient: {Y_gradient}') ``` X-gradient: 1 Y-gradient: 1 ```python step_size = 0.01 X = X + step_size * X_gradient Y = Y + step_size * Y_gradient print (f'X is: {X} \nY is: {Y}') ``` X is: -1.99 Y is: 3.01 ```python F_new = g(X,Y) print (f'old output: {G}\nnew output: {F_new}') ``` old output: 1 new output: 1.0199999999999998 ## Recursive Case: Circuits with Multiple Gates The expression we are computing now is $M = m(X,Y,Z)=(X+Y)*Z$. ```python def m(X,Y,Z): return (X+Y)*Z ``` ```python X = -2; Y = 5; Z = -4 ``` This is equal to $M=m(-2,5,-4)=(-2+5)*(-4)=3*(-4)=-12$ ```python M = m(X,Y,Z) print (f'the output M is: {M}') ``` the output M is: -12 As we did before, we can use calculus to derive partial derivatives for the function $M=m(X,Y,Z)$. \begin{align} \frac{\partial M}{\partial X}=\frac{\partial m (X,Y,Z)}{\partial X} = \lim_{h\to 0} \frac{m(X+h,Y,Z)-m(X,Y,Z)}{h} \\ \end{align} A partial derivative of $M=m(X,Y,Z)$ in respect to $X$ is: \begin{align*} \frac{\partial M}{\partial X}=\frac{\partial m(X,Y,Z)}{\partial X} &= \lim_{h\to 0} \frac{m(X+h,Y,Z)-m(X,Y,Z)}{h} \\\\ &=\lim_{h\to 0}\frac{(X+h+Y)*Z -(X+Y)*Z}{h} \\ &=\lim_{h\to 0}\frac{ZX+Zh+ZY-ZX-ZY}{h} \\ \frac{\partial M}{\partial X}=\frac{\partial m(X,Y,Z)}{\partial X}&=\lim_{h\to 0}\frac{Zh}{h} =Z \\ \end{align*} Similarly, the partial derivative of $M=m(X,Y,Z)$ in respect to $Y$ is: \begin{align*} \frac{\partial M}{\partial Y}=\frac{\partial m(X,Y,Z)}{\partial Y} &= \lim_{h\to 0} \frac{m(X,Y+h,Z)-m(X,Y,Z)}{h} \\\\ &=\lim_{h\to 0}\frac{(X+Y+h)*Z -(X+Y)*Z}{h} \\ &=\lim_{h\to 0}\frac{ZX+ZY+Zh-ZX-ZY}{h} \\ \frac{\partial M}{\partial Y}=\frac{\partial m(X,Y,Z)}{\partial Y}&=\lim_{h\to 0}\frac{Zh}{h} =Z \\ \end{align*} A partial derivative of $M=m(X,Y,Z)$ in respect to $Z$ is: \begin{align*} \frac{\partial M}{\partial Z}=\frac{\partial m(X,Y,Z)}{\partial Z} &= \lim_{h\to 0} \frac{m(X,Y,Z+h)-m(X,Y,Z)}{h} \\\\ &=\lim_{h\to 0}\frac{(X+Y)*(Z+h) -(X+Y)*Z}{h} \\ &=\lim_{h\to 0}\frac{XZ+Xh+YZ+Yh-XZ-YZ}{h} \\ &=\lim_{h\to 0}\frac{Xh+Yh}{h} \\ &=\lim_{h\to 0}\frac{h(X+Y)}{h}\\ \frac{\partial M}{\partial Z}=\frac{\partial m(X,Y,Z)}{\partial Z}&=X+Y \\ \end{align*} Here are all three partial derivatives: \begin{align*} \frac{\partial M}{\partial X}=Z ; \frac{\partial M}{\partial Y}=Z ;\frac{\partial M}{\partial Z}=X+Y \\ \end{align*} We can represent this as a gradient: \begin{align} \nabla m(X,Y,Z)=\left[Z,Z,X+Y\right] \end{align} We can now use this information to maximize the output of the function __m__: ```python X_gradient = Z Y_gradient = Z Z_gradient = X+Y print (f'X-gradient: {X_gradient} \nY-gradient: {Y_gradient} \nZ-gradient: {Z_gradient}') ``` X-gradient: -4 Y-gradient: -4 Z-gradient: 3 ```python step_size = 0.01 X = X + step_size * X_gradient Y = Y + step_size * Y_gradient Z = Z + step_size * Z_gradient print (f'X is: {X} \nY is: {Y} \nZ is: {Z}') ``` X is: -2.04 Y is: 4.96 Z is: -3.97 ```python M_new = m(X,Y,Z) print (f'old output: {M}\nnew output: {M_new}') ``` old output: -12 new output: -11.5924 *** ### Backpropagation Instead of working with the function $m(X,Y,Z)=(X+Y)*Z$, we can simplify the computation by composing two new simpler functions: <br>$G=g(X,Y)=X+Y$, and <br>$F=f(G,Z)=G*Z$, into: <br>$F=f(g(X,Y),Z)$ Here we can apply the chain rule for differentiation, because $F$ is a function of $G$ and $G$ is a function of $X$ and $Y$.<br> So instead of computing $\frac{\partial M}{\partial X}$,
$\frac{\partial M}{\partial Y}$ and $\frac{\partial M}{\partial Z}$ directly, which gets more complicated as expressions grow more complex, we compute $\frac{\partial F}{\partial X}$, $\frac{\partial F}{\partial Y}$ and $\frac{\partial F}{\partial Z}$, which can be decomposed:

\begin{align*}
\frac{\partial F}{\partial X}=\frac{\partial F}{\partial G}\frac{\partial G}{\partial X}
\end{align*}

Here $\frac{\partial F}{\partial G}$ is a simple multiplication gate, whose derivative we have already computed: $\frac{\partial F}{\partial G}=\frac{\partial f(G,Z)}{\partial G}=Z$.

Also $\frac{\partial G}{\partial X}$ is a simple addition gate, whose derivative we have already computed: $\frac{\partial G}{\partial X}=\frac{\partial g(X,Y)}{\partial X}=1$, thus:

\begin{align*}
\frac{\partial F}{\partial X}=\frac{\partial F}{\partial G}\frac{\partial G}{\partial X} = Z*1 = Z
\end{align*}

***

The same applies when computing the partial $\frac{\partial F}{\partial Y}$:

\begin{align*}
\frac{\partial F}{\partial Y}=\frac{\partial F}{\partial G}\frac{\partial G}{\partial Y} = Z*1 = Z
\end{align*}

***

The partial derivative $\frac{\partial F}{\partial Z}$ does not need to be decomposed:

\begin{align*}
\frac{\partial F}{\partial Z}=G=X+Y
\end{align*}

We can now use this information to maximize the output of the function __m_decomposed__:

```python
def m_decomposed(X,Y,Z):
    G = g(X,Y)
    F = f(G,Z)
    return F
```

```python
X = -2; Y = 5; Z = -4
```

```python
F = m_decomposed(X,Y,Z)
print (f'the output is: {F}')
```

    the output is: -12

```python
X_gradient = Z
Y_gradient = Z
Z_gradient = X+Y
print (f'X-gradient: {X_gradient}, \nY-gradient: {Y_gradient}, \nZ-gradient: {Z_gradient}.')
```

    X-gradient: -4, 
    Y-gradient: -4, 
    Z-gradient: 3.

Here we can observe that in the backward pass the multiplication gate swaps its forward-pass input values: what used to be $(3,-4)$ in the forward pass becomes $(-4,3)$ in the backward pass. The addition gate, on the other hand, simply passes the incoming gradient through to its inputs unchanged.

```python
step_size = 0.01
X = X + step_size * X_gradient
Y = Y + step_size * Y_gradient
Z = Z + step_size * Z_gradient
print (f'X is: {X}\nY is: {Y}\nZ is: {Z}')
```

    X is: -2.04
    Y is: 4.96
    Z is: -3.97

```python
F_new = m(X,Y,Z)
print (f'old output: {F}\nnew output: {F_new}')
```

    old output: -12
    new output: -11.5924

***

### Example: More complex functions

Here is a seemingly complicated function:

\begin{align}
l(A,B,C,X,Y)&= \frac{1}{1+e^{-(AX+BY+C)}}\\
\text{or equivalently}\quad l(A,B,C,X,Y)&= \sigma(AX+BY+C)
\end{align}

The function $\sigma$ is called the *sigmoid function*:

\begin{align}
\sigma(x)= \frac{1}{1+e^{-x}}
\end{align}

and it used to be very common in machine learning. The derivative of the sigmoid function is:

\begin{align}
\frac{d\sigma(x)}{dx}= \sigma(x) * (1-\sigma(x))
\end{align}

which means that once we compute the final activation $F=\sigma(AX+BY+C)$, we can simply calculate the derivative as $F*(1-F)$.

We can compute the forward pass of this function without a problem; however, directly computing the partial derivatives $\frac{\partial F}{\partial A}$, $\frac{\partial F}{\partial B}$, $\frac{\partial F}{\partial C}$, ... could be tricky. It is much better to use the chain rule and compose multiple functions together.
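Before decomposing $l$, we can numerically confirm the sigmoid derivative identity stated above. This is a minimal check of our own (it defines `sigmoid` locally, anticipating the definition given in the next step):

```python
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# check that d(sigma)/dx equals sigma(x) * (1 - sigma(x)) at a sample point
x, h = 0.5, 1e-6
numeric  = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)  # central difference
analytic = sigmoid(x) * (1 - sigmoid(x))
print(numeric, analytic)  # both are approximately 0.2350
```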
We can create four simple functions (the first two are instances of the multiply gate $f$ from before; $k$ is a three-way add gate):

\begin{align*}
G&=f(A,X)=A*X \\
H&=f(B,Y)=B*Y \\
K&=k(G,H,C)=G+H+C \\
F&=\sigma(K)= \frac{1}{1+e^{-K}}
\end{align*}

and compose them as:

\begin{align*}
F=\sigma(k(f(A,X),f(B,Y),C))
\end{align*}

This composition is easy to visualize as a circuit diagram.

Let's compute all the partial derivatives by using the chain rule:

\begin{align*}
\frac{\partial F}{\partial A}&=\frac{\partial F}{\partial K}*\frac{\partial K}{\partial G}*\frac{\partial G}{\partial A} \\
&= F(1-F)*1*X \\
&= XF(1-F)
\end{align*}

\begin{align*}
\frac{\partial F}{\partial X}&=\frac{\partial F}{\partial K}*\frac{\partial K}{\partial G}*\frac{\partial G}{\partial X} \\
&= F(1-F)*1*A \\
&= AF(1-F)
\end{align*}

By following the exact same procedure for $B$, $Y$, and $C$ we get:

\begin{align*}
\frac{\partial F}{\partial B}&=YF(1-F)\\
\frac{\partial F}{\partial Y}&=BF(1-F)\\
\frac{\partial F}{\partial C}&=F(1-F)
\end{align*}

We can represent this as a gradient (ordered as $A,B,C,X,Y$):

\begin{align}
\nabla l(A,B,C,X,Y)=\left[XF(1-F),YF(1-F),F(1-F),AF(1-F),BF(1-F)\right]
\end{align}

We can now use this information to maximize the output of the function __l__:

```python
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))
```

```python
def l(A,B,C,X,Y):
    G = f(A,X)
    H = f(B,Y)
    K = G + H + C
    F = sigmoid(K)
    return F
```

```python
A = 1.0; B = 2.0; C = -3.0; X = -1.0; Y = 3.0
```

```python
F = l(A,B,C,X,Y)
print (f'the output F is: {F}')
```

    the output F is: 0.8807970779778823

Since every partial derivative involves the factor $F(1-F)$, we compute it first as `F_K`:

```python
gradient_end = 1  # the gradient flowing in from above (dF/dF = 1)
F_K = (F * (1 - F)) * gradient_end
print (f'F_K = {F_K}')
```

    F_K = 0.10499358540350662

```python
A_gradient = X*F_K
B_gradient = Y*F_K
C_gradient = F_K
X_gradient = A*F_K
Y_gradient = B*F_K
print (f'A-gradient: {A_gradient} \nX-gradient: {X_gradient} \nB-gradient: {B_gradient} \nY-gradient: {Y_gradient}\nC-gradient: {C_gradient}')
```

    A-gradient: -0.10499358540350662 
    X-gradient: 0.10499358540350662 
    B-gradient: 0.31498075621051985 
    Y-gradient: 0.20998717080701323
    C-gradient: 0.10499358540350662

```python
step_size = 0.01
A = A + step_size * A_gradient
B = B + step_size * B_gradient
C = C + step_size * C_gradient
X = X + step_size * X_gradient
Y = Y + step_size * Y_gradient
print (f'A is: {A}, \nB is: {B}, \nC is: {C}, \nX is: {X}, \nY is: {Y}')
```

    A is: 0.998950064145965, 
    B is: 2.0031498075621053, 
    C is: -2.9989500641459648, 
    X is: -0.998950064145965, 
    Y is: 3.00209987170807

```python
F_new = l(A,B,C,X,Y)
print (f'old output: {F}\nnew output: {F_new}')
```

    old output: 0.8807970779778823
    new output: 0.8825501816218984

The new output is higher than the old one!
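As a final sketch of our own (not part of the original notebook), the single update above can be repeated in a loop; because each small step moves the inputs along the gradient, the output $F$ keeps climbing toward $1$:

```python
# repeat the gradient-ascent step on l; assumes f, sigmoid, and l from above
A, B, C, X, Y = 1.0, 2.0, -3.0, -1.0, 3.0
step_size = 0.01
for _ in range(100):
    F = l(A, B, C, X, Y)
    F_K = F * (1 - F)  # shared factor F*(1-F)
    # compute all gradients before applying any update
    A_g, B_g, C_g, X_g, Y_g = X*F_K, Y*F_K, F_K, A*F_K, B*F_K
    A += step_size * A_g
    B += step_size * B_g
    C += step_size * C_g
    X += step_size * X_g
    Y += step_size * Y_g
print(f'after 100 steps: {l(A,B,C,X,Y)}')  # noticeably above the initial 0.8807...
```

Computing all five gradients before applying any update keeps every step consistent with the gradient at the current point, rather than mixing old and new values.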
A function $f$ is continuous on the closure of a set $S$ if and only if for every $x \in \overline{S}$ and every $\epsilon > 0$, there exists a $\delta > 0$ such that for all $y \in S$ with $|y - x| < \delta$, we have $|f(y) - f(x)| < \epsilon$.
[STATEMENT] lemma vdunion_vsingleton: "(\<Coprod>\<^sub>\<circ>i\<in>\<^sub>\<circ>set{c}. A i) = set {c} \<times>\<^sub>\<circ> A c" [PROOF STATE] proof (prove) goal (1 subgoal): 1. VSigma (set {c}) A = set {c} \<times>\<^sub>\<circ> A c [PROOF STEP] by auto
import filter_world.level3 --hide open set --hide namespace filters --hide localized "notation `P` := principal" in filters --hide /- # Level 4: The meet of a pair of filters -/ def meet_set' {X : Type*} (V F : filter X) := {t | ∃ (v ∈ V) (f ∈ F), v ∩ f ⊆ t} /- Lemma The collection of subsets defined before is a filter. -/ lemma is_filter_meet {X : Type} (V F : filter X): is_filter (meet_set' V F) := begin fconstructor, { exact ⟨univ, V.univ_sets, univ, F.univ_sets, (univ ∩ univ).subset_univ⟩ }, { rintros A B ⟨v, hv, f, hf, H⟩ hAB, exact ⟨v, hv, f, hf, subset.trans H hAB⟩ }, { rintros A B ⟨v₁, hv₁, f₁, hf₁, H₁⟩ ⟨v₂, hv₂, f₂, hf₂, H₂⟩, have : v₁ ∩ v₂ ∩ (f₁ ∩ f₂) = v₁ ∩ f₁ ∩ (v₂ ∩ f₂), by rwa [← inter_assoc, inter_assoc v₁, inter_comm v₂, ← inter_assoc, ← inter_assoc], obtain hvf := inter_subset_inter H₁ H₂, exact ⟨v₁ ∩ v₂, V.inter_sets hv₁ hv₂, f₁ ∩ f₂, F.inter_sets hf₁ hf₂, by rwa this⟩, } end def meet {α : Type*} (V F : filter α) : filter α := --hide { sets := {t | ∃ (v f : set α), v ∈ V ∧ f ∈ F ∧ v ∩ f ⊆ t }, --hide univ_sets := ⟨univ, univ, V.univ_sets, F.univ_sets, (univ ∩ univ).subset_univ⟩, --hide sets_of_superset := (λ A B ⟨v, f, hv, hf, H⟩ hAB, ⟨v, f, hv, hf, subset.trans H hAB⟩), --hide inter_sets := --hide begin --hide rintros A B ⟨v₁, f₁, hv₁, hf₁, H₁⟩ ⟨v₂, f₂, hv₂, hf₂, H₂⟩, --hide have : v₁ ∩ v₂ ∩ (f₁ ∩ f₂) = v₁ ∩ f₁ ∩ (v₂ ∩ f₂), --hide by rwa [← inter_assoc, inter_assoc v₁, inter_comm v₂, ← inter_assoc, ← inter_assoc], --hide obtain hvf := inter_subset_inter H₁ H₂, --hide exact ⟨v₁ ∩ v₂, f₁ ∩ f₂, V.inter_sets hv₁ hv₂, F.inter_sets hf₁ hf₂, by rwa this⟩ --hide end } --hide end filters --hide