Dataset: AI4M. Column "text", string lengths 0 to 3.34M.
State Before: m n k l : ℕ P Q : ℕ → Prop inst✝ : DecidablePred P ⊢ Nat.findGreatest P k = 0 ↔ ∀ ⦃n : ℕ⦄, 0 < n → n ≤ k → ¬P n State After: no goals Tactic: simp [findGreatest_eq_iff]
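A minimal, self-contained Lean sketch of the statement this state/tactic record corresponds to (the goal and the simp lemma are copied from the record; the fully qualified name Nat.findGreatest_eq_iff and the blanket import are assumptions about the Mathlib setup):

import Mathlib

example {k : ℕ} {P : ℕ → Prop} [DecidablePred P] :
    Nat.findGreatest P k = 0 ↔ ∀ ⦃n : ℕ⦄, 0 < n → n ≤ k → ¬P n := by
  -- the recorded tactic, with the lemma name qualified
  simp [Nat.findGreatest_eq_iff]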
/* For more information, please see: http://software.sci.utah.edu The MIT License Copyright (c) 2015 Scientific Computing and Imaging Institute, University of Utah. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ ///@brief ///@file DenseMatrixMultiplication.h #ifndef CORE_DATATYPES_DENSEMATRIXMULTIPLICATION_H #define CORE_DATATYPES_DENSEMATRIXMULTIPLICATION_H #if defined(HAVE_CBLAS) #if defined(__APPLE__) #include <vecLib/cblas.h> #else extern "C"{ #include <cblas.h> } #endif #endif namespace SCIRun { template <typename T> void DenseMatrixGeneric<T>::mult(const ColumnMatrix& x, ColumnMatrix& b, index_type beg, index_type end, int spVec) const { ASSERTEQ(x.nrows(), this->ncols_); ASSERTEQ(b.nrows(), this->nrows_); if (beg == -1) beg = 0; if (end == -1) end = this->nrows_; #if defined(HAVE_CBLAS) double ALPHA = 1.0; double BETA = 0.0; cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, (end-beg), 1, this->ncols_, ALPHA, dataptr_+(beg*this->ncols_), this->ncols_, x.get_data_pointer(), 1, BETA, b.get_data_pointer()+beg, 1); #else double* xdata = x.get_data_pointer(); double* bdata = b.get_data_pointer(); size_type m8 = (this->ncols_)/8; size_type m = (this->ncols_)-m8*8; index_type i, j; if(!spVec) { for (i=beg; i<end; i++) { double sum=0; double* row=data[i]; double* xd=xdata; for (j=0; j<m8; j++) { sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; } for (j=0; j<m; j++) { sum+=(*row)*(*xd); row++; xd++; } (*bdata)=sum; bdata++; } } else { for (i=beg; i<end; i++) b[i]=0; for (j=0; j<this->ncols_; j++) { if (x[j]) for (i=beg; i<end; i++) { b[i]+=data[i][j]*x[j]; } } } #endif } template <typename T> void DenseMatrixGeneric<T>::multiply(ColumnMatrix& x, ColumnMatrix& b) const { index_type i, j; double* xdata = x.get_data_pointer(); double* bdata = b.get_data_pointer(); size_type m8 = (this->ncols_)/8; size_type m = (this->ncols_)-m8*8; for (i=0; i<this->nrows_; i++) { double sum=0; double* row = data[i]; double* xd = xdata; for (j=0; j<m8; j++) { sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; sum+=(*row)*(*xd); row++; xd++; } for (j=0; j<m; j++) { sum+=(*row)*(*xd); row++; xd++; } *bdata=sum; bdata++; } } template <typename T> void 
DenseMatrixGeneric<T>::mult_transpose(const ColumnMatrix& x, ColumnMatrix& b, index_type beg, index_type end, int spVec) const { // Compute At*x=b ASSERT(x.nrows() == this->nrows_); ASSERT(b.nrows() == this->ncols_); if (beg == -1) beg = 0; if (end == -1) end = this->ncols_; index_type i, j; if (!spVec) { for (i=beg; i<end; i++) { double sum=0; for (j=0; j<this->nrows_; j++) { sum+=data[j][i]*x[j]; } b[i]=sum; } } else { for (i=beg; i<end; i++) b[i]=0; for (j=0; j<this->nrows_; j++) if (x[j]) { double *row=data[j]; for (i=beg; i<end; i++) b[i]+=row[i]*x[j]; } } } template <typename T> DenseMatrix* DenseMatrixGeneric<T>::make_diagonal_from_column(const ColumnMatrix& column, size_type rows, size_type cols) { DenseMatrix* result = zero_matrix(rows, cols); for (size_type i = 0; i < column.nrows(); ++i) (*result)[i][i] = column[i]; return result; } } // End namespace SCIRun #endif
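A brief summary of what the two kernels above compute, read off the loops themselves rather than from any SCIRun documentation: for rows $i \in [\mathrm{beg}, \mathrm{end})$, mult forms $b_i = \sum_{j=0}^{\mathrm{ncols}-1} A_{ij} x_j$, while mult_transpose forms, for columns $i \in [\mathrm{beg}, \mathrm{end})$, $b_i = \sum_{j=0}^{\mathrm{nrows}-1} A_{ji} x_j$ (i.e. $A^{\mathsf{T}} x = b$, as the comment states), with the inner dot-product loop manually unrolled in blocks of eight when CBLAS is unavailable.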
function sct_merge_nii(flist,output) % sct_merge_nii(flist,output) % merge niftis in the 4th dimension. % sct_merge_nii(sct_tools_ls('*dwi.nii'),'DWI.nii.gz') --> always .nii.gz sct_unix(['fslmerge -t ' output ' ' strjoin(flist)])
Treatment of bleeding problems without removal of the uterus (TCRE), etc. Screening and treatment for cancer of the uterus, cervix and ovary. Breast lump and cancer screening (mammography) and treatment.
Require Export MicroBFTprops0. Require Export MicroBFTsubs. Require Export MicroBFTbreak. Require Export ComponentSM6. Section MicroBFTcount. Local Open Scope eo. Local Open Scope proc. Context { dtc : DTimeContext }. Context { microbft_context : MicroBFT_context }. Context { m_initial_keys : MicroBFT_initial_keys }. Context { u_initial_keys : USIG_initial_keys }. Context { usig_hash : USIG_hash }. Context { microbft_auth : MicroBFT_auth }. (* Lemma accepted_if_executed_previous_step : forall {eo : EventOrdering} (e : Event) (req : Request) (i : nat) (l : list name) (r : Rep) (s : MAIN_state) (s1 : USIG_state) (s2 : LOG_state), In (send_accept (accept req i) l) (M_output_ls_on_this_one_event (MicroBFTlocalSys_new r s s1 s2) e) -> i = S (cexec s). Proof. introv h. apply in_M_output_ls_on_this_one_event_implies in h; exrepnd; simpl in *. autorewrite with microbft comp in *. Time microbft_dest_msg Case; simpl in *; tcsp; ginv; repeat smash_microbft2; repndors; tcsp; inversion h0; subst; GC; eauto 4 with microbft. Qed. Lemma operation_inc_counter_ls_step : forall {eo : EventOrdering} (e : Event) (r : Request) (i : nat) (l : list name) (s : Rep) (ls : MicroBFTls), M_run_ls_before_event (MicroBFTlocalSys s) e = Some ls -> In (send_accept (accept r (S i)) l) (M_output_ls_on_this_one_event ls e) -> i = 0 \/ exists r' l' e' ls', e' ⊏ e /\ M_run_ls_before_event (MicroBFTlocalSys s) e' = Some ls' /\ In (send_accept (accept r' i) l') (M_output_ls_on_this_one_event ls' e'). Proof. introv eqls out. applydup M_run_ls_before_event_ls_is_microbft in eqls; exrepnd; subst. applydup accepted_if_executed_previous_step in out; ginv. clear r l out. revert s s0 s1 s2 eqls. induction e as [e ind] using predHappenedBeforeInd;[]; introv eqls. rewrite M_run_ls_before_event_unroll in eqls. destruct (dec_isFirst e) as [d|d]; ginv. { inversion eqls; subst; GC; simpl; tcsp. } apply map_option_Some in eqls; exrepnd; rev_Some. applydup M_run_ls_before_event_ls_is_microbft in eqls1; exrepnd; subst. dup eqls1 as eqbef. rename eqls1 into eqbef_backup. eapply ind in eqbef; eauto 3 with eo;[]. apply map_option_Some in eqls0; exrepnd; simpl in *; rev_Some. autorewrite with microbft comp in *. Time microbft_dest_msg Case; repeat (autorewrite with microbft comp in *; simpl in *; smash_microbft2); try (complete (clear eqbef_backup; repndors; tcsp; []; right; exrepnd; microbft_finish_eexists)); [|]. { Case "Commit". right. applydup invalid_commit_false_implies in Heqx as w. apply valid_commit_implies_executed_prior in w. apply executed_prior_counter_implies_eq_S in w. exists (commit2request c) [MicroBFT_replica s] (local_pred e) (MicroBFTlocalSys_new s s3 s1 s5). dands; eauto 3 with eo. Time unfold M_output_ls_on_this_one_event; simpl; repeat (allrw; simpl); repeat (autorewrite with microbft comp in *; simpl in *; smash_microbft2); try (complete (left; try congruence)). } { Case "Commit". right. applydup invalid_commit_false_implies in Heqx as w. apply valid_commit_implies_executed_prior in w. apply executed_prior_counter_implies_eq_S in w. exists (commit2request c) [MicroBFT_replica s] (local_pred e) (MicroBFTlocalSys_new s s3 s4 s5). dands; eauto 3 with eo. Time unfold M_output_ls_on_this_one_event; simpl; repeat (allrw; simpl); repeat (autorewrite with microbft comp in *; simpl in *; smash_microbft2); try (complete (left; try congruence)). } Qed. 
Lemma operation_inc_counter_ls : forall {eo : EventOrdering} (e : Event) (r : Request) (i1 i2 : nat) (l : list name) (s : Rep) (ls : MicroBFTls), M_run_ls_before_event (MicroBFTlocalSys s) e = Some ls -> In (send_accept (accept r i2) l) (M_output_ls_on_this_one_event ls e) -> i1 < i2 -> 0 < i1 -> exists r' l' e' ls', e' ⊏ e /\ M_run_ls_before_event (MicroBFTlocalSys s) e' = Some ls' /\ In (send_accept (accept r' i1) l') (M_output_ls_on_this_one_event ls' e'). Proof. intros eo e r i1 i2; revert e r. induction i2; introv eqls out lti lti0; try omega;[]. apply lt_n_Sm_le in lti. eapply operation_inc_counter_ls_step in out; eauto. repndors; subst; try omega. exrepnd. apply le_lt_or_eq in lti; repndors; subst; try (complete microbft_finish_eexists);[]. eapply IHi2 in out1; eauto; try omega. exrepnd. exists r'0 l'0 e'0 ls'0; dands; auto; eauto 3 with eo. Qed. Lemma operation_inc_counter : forall {eo : EventOrdering} (e : Event) (r : Request) (i1 i2 : nat) (l : list name), is_replica e -> In (send_accept (accept r i2) l) (M_output_sys_on_event MicroBFTsys e) -> i1 < i2 -> 0 < i1 -> exists r' l' e', e' ⊏ e /\ In (send_accept (accept r' i1) l') (M_output_sys_on_event MicroBFTsys e'). Proof. introv isr h lti lti0. unfold M_output_sys_on_event in *. unfold MicroBFTsys, is_replica in *; exrepnd. rewrite isr0 in *; simpl in *. apply M_output_ls_on_event_as_run in h; exrepnd. eapply operation_inc_counter_ls in h0; eauto. exrepnd. applydup local_implies_loc in h2 as eqloc. exists r' l' e'; dands; auto. rewrite eqloc. rewrite isr0. apply M_output_ls_on_event_as_run. eexists; dands; eauto. Qed. Lemma accepted_counter_positive : forall {eo : EventOrdering} (e : Event) (r : Request) (i : nat) (l : list name), is_replica e -> In (send_accept (accept r i) l) (M_output_sys_on_event MicroBFTsys e) -> 0 < i. Proof. introv isrep out. unfold M_output_sys_on_event in *. unfold MicroBFTsys, is_replica in *; exrepnd. rewrite isrep0 in *; simpl in *. apply M_output_ls_on_event_implies_run in out; exrepnd. applydup M_run_ls_before_event_ls_is_microbft in out1; exrepnd; subst. eapply accepted_if_executed_previous_step in out0; subst; omega. Qed. Hint Resolve accepted_counter_positive : microbft. *) (*Lemma M_output_ls_on_input_is_log_new_implies : forall u r v o, M_output_ls_on_input (LOGlocalSys u) (log_new r) = (LOGlocalSys v, o) -> in_log r v. Proof. introv out. unfold M_output_ls_on_input in out; simpl in *. unfold M_run_smat_on_inputs in out; simpl in *. unfold M_run_update_on_inputs in out; simpl in *. unfold M_break in *; simpl in *; ginv. inversion out; auto; simpl; tcsp. Qed.*) Lemma invalid_request_false_implies_ui2rep_eq : forall R r s, invalid_commit R r s = false -> ui2rep (commit_ui r) = MicroBFT_primary. Proof. introv inv; unfold invalid_commit, valid_commit, is_primary in *; smash_microbft_2. Qed. Hint Resolve invalid_request_false_implies_ui2rep_eq : microbft. Lemma invalid_request_false_implies_not_primary : forall R r s, invalid_commit R r s = false -> not_primary R = true. Proof. introv inv; unfold invalid_commit, valid_commit, is_primary in *; smash_microbft_2. Qed. Hint Resolve invalid_request_false_implies_not_primary : microbft. 
(* This uses compositional reasoning, but using [LOG_comp]'s spec defined in [M_output_ls_on_input_is_committed_implies] *) Lemma accepted_counter_if_received_UI_primary : forall {eo : EventOrdering} (e : Event) (R : MicroBFT_node) (r : nat) (i : nat) (l : list name), In (send_accept (accept r i) l) (M_output_ls_on_event (MicroBFTlocalSys R) e) -> exists (s : MAIN_state) (s1 : USIG_state) (s2 : LOG_state) (ui : UI) (rq : Commit), M_run_ls_on_event (MicroBFTlocalSys R) e = Some (MicroBFTlocalSys_new R s s1 s2) /\ in_log rq s2 /\ commit_n rq = r /\ commit_ui rq = ui /\ ui2counter ui = i /\ ui2rep ui = MicroBFT_primary /\ not_primary R = true. Proof. introv h. apply M_output_ls_on_event_as_run in h; exrepnd. rename ls' into ls. rewrite M_run_ls_on_event_unroll2; allrw; simpl. applydup M_run_ls_before_event_ls_is_microbft in h1; exrepnd; subst. apply in_M_output_ls_on_this_one_event_implies in h0; exrepnd; simpl in *; microbft_simp. unfold M_run_ls_on_this_one_event; simpl; allrw; simpl. unfold M_run_ls_on_input_ls, M_run_ls_on_input. unfold statefund_nm in *; simpl in *. autorewrite with microbft comp in *. Time microbft_dest_msg Case; simpl in *; tcsp; ginv; repeat smash_microbft_2; ginv. eexists; eexists; eexists; eexists; eexists; dands; try reflexivity; eauto 3 with microbft; tcsp. Qed. End MicroBFTcount. (* Hint Resolve accepted_counter_positive : microbft. *)
Formal statement is: lemma i_even_power [simp]: "\<i> ^ (n * 2) = (-1) ^ n" Informal statement is: $i^{2n} = (-1)^n$.
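A one-line justification of the informal statement (standard complex arithmetic, added here rather than taken from the record): since $i^2 = -1$, we have $i^{n \cdot 2} = (i^2)^n = (-1)^n$.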
{-# OPTIONS --guardedness --cubical #-} module Issue2799 where open import Agda.Primitive.Cubical open import Agda.Builtin.Cubical.Path record Stream (A : Set) : Set where coinductive constructor _,_ field head : A tail : Stream A open Stream mapS : ∀ {A B} → (A → B) → Stream A → Stream B head (mapS f xs) = f (head xs) tail (mapS f xs) = mapS f (tail xs) mapS-id : ∀ {A} {xs : Stream A} → mapS (λ x → x) xs ≡ xs head (mapS-id {xs = xs} i) = head xs tail (mapS-id {xs = xs} i) = mapS-id {xs = tail xs} i
Load "tact_nsl2". Theorem pi1_pi2: phi4 ~ phi24. unfold phi4, phi24. assert( (ostomsg t25) # O). repeat unf. simpl. simpl. (* apply RESTR_rev with (ml1:= [t15; t14; t13; t12; msg (pk (N 2)); msg (pk (N 1))]) (ml2:= [t25; t14; t13; t12; msg (pk (N 2)); msg (pk (N 1))]). (*assert( (ostomsg t25) # O). *) repeat unf. simpl. *) repeat unf; simpl. Ltac aply_andBcomm m1 := match goal with |[|- context[ ?B & (EQ_M ?M m1) ] ] => rewrite andB_comm with (b1:= B) (b2:= (EQ_M M m1)) end. repeat aply_andBcomm (nc 3). pose proof (EQ_BRmsg_msg'). Ltac aply_eqbr B m5 := match goal with | [|- context [(if_then_else_M ((EQ_M ?M1 m5) & B) ?M3 ?M4)] ] => rewrite EQ_BRmsg_msg' with (m1:= M1) (m2:= m5) (m:= M1) (b:= B) (m3:= M3) (m4:=M4) end. aply_eqbr (EQ_M (to x2) (i 1)) (nc 3). simpl. repeat rewrite EQ_BRmsg_msg' with (m1 := (pi1 (dec x2 (sk (N 1)))) ) (m2:= (nc 3)) (m:= (pi1 (dec x2 (sk (N 1)))) ) (b:= (EQ_M (to x2) (i 1))) (m3:= (if_then_else_M ((((EQ_M (reveal x3) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (pi1 (dec x2 (sk (N 1)))) (if_then_else_M (EQ_M (reveal x3) (i 2)) O (if_then_else_M (EQ_M (to x3) (i 2)) (enc (pi1 (dec x3 (sk (N 2))), (nc 4, pk (N 2))) (pi2 (dec x3 (sk (N 2)))) (sr 6)) O)))). simpl. repeat rewrite EQ_BRmsg_msg' with (m1 := (pi1 (dec x2 (sk (N 1)))) ) (m2:= (nc 3)) (m:= (pi1 (dec x2 (sk (N 1)))) ) (b:= (EQ_M (to x2) (i 1))) (m3:= (if_then_else_M ((((EQ_M (reveal x3) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (if_then_else_M (EQ_M (reveal x4) (i 2)) O (if_then_else_M (EQ_M (to x4) (i 2)) (enc (pi1 (dec x4 (sk (N 2))), (nc 4, pk (N 2))) (pi2 (dec x4 (sk (N 2)))) (sr 9)) O)) (if_then_else_M (EQ_M (reveal x3) (i 2)) O (if_then_else_M (EQ_M (to x3) (i 2)) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x1) (i 2)) (pi1 (dec x1 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x2) (i 2)) (pi1 (dec x2 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 2)) (pi1 (dec x3 (sk (N 2)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (pi1 (dec x2 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new) (pi1 (dec x3 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x2) new) (pi1 (dec x3 (sk (N 1)))) O)))))) O)))). simpl. repeat rewrite EQ_BRmsg_msg' with (m1 := (pi1 (dec x2 (sk (N 1)))) ) (m2:= (nc 3)) (m:= (pi1 (dec x2 (sk (N 1)))) ) (b:= (EQ_M (to x3) (i 1))) (m3:= (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x1) (i 2)) (pi1 (dec x1 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x2) (i 2)) (pi1 (dec x2 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 2)) (pi1 (dec x3 (sk (N 2)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (pi1 (dec x2 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new) (pi1 (dec x3 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x2) new) (pi1 (dec x3 (sk (N 1)))) O))))))). 
simpl. repeat rewrite EQ_BRmsg_msg' with (m1 := (pi1 (dec x2 (sk (N 1)))) ) (m2:= (nc 3)) (m:= (pi1 (dec x2 (sk (N 1)))) ) (b:= (EQ_M (to x2) (i 1))) (m3:= (if_then_else_M ((((EQ_M (reveal x3) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (if_then_else_M (EQ_M (reveal x4) (i 2)) O (if_then_else_M (EQ_M (to x4) (i 2)) (enc (pi1 (dec x4 (sk (N 2))), (nc 4, pk (N 2))) (pi2 (dec x4 (sk (N 2)))) (sr 9)) O)) (if_then_else_M (EQ_M (reveal x3) (i 2)) O (if_then_else_M (EQ_M (to x3) (i 2)) (if_then_else_M (((((((EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 2))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new)) & (EQ_M (pi1 (dec x3 (sk (N 1)))) (nc 3))) & (EQ_M (pi1 (dec x2 (sk (N 2)))) (nc 3)) (nc 5) (if_then_else_M (((((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 2))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new)) & (EQ_M (pi1 (dec x3 (sk (N 1)))) (nc 3))) & (EQ_M (pi1 (dec x2 (sk (N 2)))) (nc 3)) (nc 5) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x1) (i 2)) (pi1 (dec x1 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x2) (i 2)) (pi1 (dec x2 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 2)) (pi1 (dec x3 (sk (N 2)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (pi1 (dec x2 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new) (pi1 (dec x3 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x2) new) (pi1 (dec x3 (sk (N 1)))) O)))))))) O)))). simpl. repeat rewrite EQ_BRmsg_msg' with (m1 := (pi1 (dec x2 (sk (N 1)))) ) (m2:= (nc 3)) (m:= (pi1 (dec x2 (sk (N 1)))) ) (b:= (EQ_M (to x3) (i 1))) (m3:= (if_then_else_M (((((((EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 2))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new)) & (EQ_M (pi1 (dec x3 (sk (N 1)))) (nc 3))) & (EQ_M (pi1 (dec x2 (sk (N 2)))) (nc 3)) (nc 5) (if_then_else_M (((((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 2))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new)) & (EQ_M (pi1 (dec x3 (sk (N 1)))) (nc 3))) & (EQ_M (pi1 (dec x2 (sk (N 2)))) (nc 3)) (nc 5) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x1) (i 2)) (pi1 (dec x1 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x2) (i 2)) (pi1 (dec x2 (sk (N 2)))) (if_then_else_M (EQ_M (reveal x4) (i 2)) & (EQ_M (to x3) (i 2)) (pi1 (dec x3 (sk (N 2)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x2) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x2) new))) & (EQ_M (act x1) new) (pi1 (dec x2 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x1) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x1) new) (pi1 (dec x3 (sk (N 1)))) (if_then_else_M ((((EQ_M (reveal x4) (i 1)) & (EQ_M (to x3) (i 1))) & (EQ_M (to x2) (i 1))) & (notb (EQ_M (act x3) new))) & (EQ_M (act x2) new) (pi1 (dec x3 (sk (N 1)))) O))))))))). simpl. (*assert((ostomsg t15) # (ostomsg t25)). simpl. unfold qb10_ss, qb01_ss. unfold qb11_s. unfold qa12. 
unfold qa02_s.*) (*qb20_s qb21*) apply IFBRANCH_M4 with (ml1:=[msg (pk (N 1)); msg (pk (N 2))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2))]); try reflexivity; simpl. apply IFBRANCH_M4 with (ml1:=[msg (pk (N 1)); msg (pk (N 2)) ; bol (EQ_M (reveal x1) (i 1))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)) ; bol (EQ_M (reveal x1) (i 1))]); try reflexivity; simpl. apply IFBRANCH_M4 with (ml1:=[msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2))]) ;try reflexivity; simpl. apply IFBRANCH_M3 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1))]); try reflexivity; simpl. apply IFBRANCH_M3 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1))]); try reflexivity; simpl. apply IFBRANCH_M3 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2))]); try reflexivity; simpl. apply IFBRANCH_M2 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1)))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1)))]); try reflexivity; simpl. 
apply IFBRANCH_M2 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse)]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse)]); try reflexivity; simpl. apply IFBRANCH_M2 with (ml1:= [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2))]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2))]); try reflexivity; simpl. 
apply IFBRANCH_M1 with (ml1:=[msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2)); bol (EQ_M (to (f mphi2)) (i 2)); msg (enc (pi1 (dec (f mphi2) (pi2 (k (N 2)))), (nc 4, pi1 (k (N 2)))) (pi2 (dec (f mphi2) (pi2 (k (N 2))))) (rs (N 1)))]) (ml2 :=[msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2)); bol (EQ_M (to (f mphi2)) (i 2)); msg (enc (pi1 (dec (f mphi2) (pi2 (k (N 2)))), (nc 4, pi1 (k (N 2)))) (pi2 (dec (f mphi2) (pi2 (k (N 2))))) (rs (N 1)))]); try reflexivity; simpl. Focus 2. 
apply IFBRANCH_M1 with (ml1:=[msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2)); bol (EQ_M (to (f mphi2)) (i 2)); msg (enc (pi1 (dec (f mphi2) (pi2 (k (N 2)))), (nc 4, pi1 (k (N 2)))) (pi2 (dec (f mphi2) (pi2 (k (N 2))))) (rs (N 1))); bol (if_then_else_B (EQ_M (reveal (f mphi3)) (i 2)) (EQ_M (to (f mphi0)) (i 2)) FAlse)]) (ml2 := [msg (pk (N 1)); msg (pk (N 2)); bol (EQ_M (reveal x1) (i 1)); bol (EQ_M (reveal x1) (i 2)); bol (EQ_M (to x1) (i 1)) & (EQ_M (act x1) new); msg (enc (nc 3, pk (N 1)) (pk (N 2)) (sr 1)); bol (EQ_M (reveal x2) (i 1)); bol (EQ_M (reveal x2) (i 2)); bol (EQ_M (pi1 (dec x2 (sk (N 1)))) (nc 3)) & (EQ_M (to x2) (i 1)); msg (enc (pi1 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (pi2 (pi2 (dec (f mphi1) (pi2 (k (N 1)))))) (rs (N 1))); bol (if_then_else_B (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (EQ_M (to (f mphi1)) (i 1)) FAlse) (EQ_M (to (f mphi0)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi0)) new) FAlse); bol (EQ_M (reveal (f mphi2)) (i 2)); bol (EQ_M (to (f mphi2)) (i 2)); msg (enc (pi1 (dec (f mphi2) (pi2 (k (N 2)))), (nc 4, pi1 (k (N 2)))) (pi2 (dec (f mphi2) (pi2 (k (N 2))))) (rs (N 1))); bol (if_then_else_B (EQ_M (reveal (f mphi3)) (i 2)) (EQ_M (to (f mphi0)) (i 2)) FAlse)]); try reflexivity; simpl. (* Ltac ifbr1 := match goal with |[|- (?L1 ++ (if_then_else_M ?B ?M1 ?M2)) ~ ?L2 ++ (if_then_else_M ?B1 ?M3 ?M4)] => pose proof(IFBRANCH_M1) (*apply IFBRANCH_M1 with (ml1:= L1) (ml2:= L2) (b:=B) (b':= B1); try reflexivity; simpl*) end. *) apply RESTR_rev with (ml1:= apply IFBRANCH_M1. aply_breq_same. repeat redg; repeat rewrite IFTFb. aply_breq_same. repeat rewrite andB_elm'' with (b1 := (EQ_M (to x1) (i 1)))(b2:= (EQ_M (act x1) new)). false_to_sesns_all. aply_breq. repeat redg; repeat rewrite IFTFb. false_to_sesns_all. aply_breq. repeat redg; repeat rewrite IFTFb. aply_breq. repeat redg; repeat rewrite IFTFb. aply_breq_same. repeat rewrite andB_elm'' with (b1 := (EQ_M (to (f mphi1)) (i 1)))(b2:= (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 1))))) (nc 3))). false_to_sesns_all. aply_breq. repeat redg; repeat rewrite IFTFb. pose proof(EQ_BRmsg_msg''). 
rewrite EQ_BRmsg_msg''' with (m1 := (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m2:= (nc 3)) (m:= (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m3:= (if_then_else_M (if_then_else_B (EQ_M (reveal (f mphi2)) (i 1)) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (if_then_else_M (EQ_M (reveal (f mphi3)) (i 2)) O (if_then_else_M (EQ_M (to (f mphi3)) (i 2)) (enc (pi1 (dec (f mphi3) (pi2 (k (N 2)))), (nc 4, pi1 (k (N 2)))) (pi2 (dec (f mphi3) (pi2 (k (N 2))))) (rs (N 1))) O)) (if_then_else_M (EQ_M (reveal (f mphi2)) (i 2)) O (if_then_else_M (EQ_M (to (f mphi2)) (i 2)) (if_then_else_M (if_then_else_B (EQ_M (reveal (f mphi3)) (i 2)) (EQ_M (to (f mphi2)) (i 2)) FAlse) (pi1 (dec (f mphi2) (pi2 (k (N 2))))) (if_then_else_M (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (if_then_else_B (EQ_M (act (f mphi1)) new) FAlse TRue) FAlse) (pi1 (dec (f mphi1) (pi2 (k (N 1))))) (if_then_else_M (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (EQ_M (to (f mphi2)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (if_then_else_M (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (EQ_M (to (f mphi2)) (i 1)) FAlse) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (EQ_M (act (f mphi1)) new) FAlse) (pi1 (dec (f mphi2) (pi2 (k (N 1))))) O)))) O)))) . simpl. repeat redg; repeat rewrite IFTFb. aply_breq. false_to_sesns_all. aply_breq. false_to_sesns_all. aply_breq. repeat redg; repeat rewrite IFTFb. repeat rewrite andB_elm'' with (b1 := (EQ_M (to (f mphi2)) (i 1)))(b2:= (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 1))))) (nc 3))). false_to_sesns_all. simpl. aply_breq. repeat redg; repeat rewrite IFTFb. rewrite EQ_BRmsg_msg''' with (m1 := (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m2:= (nc 3)) (m:= (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m3:= (if_then_else_M (EQ_M (reveal (f mphi3)) (i 2)) (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (if_then_else_M (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (pi1 (dec (f mphi2) (pi2 (k (N 1))))) O))). simpl. rewrite EQ_BRmsg_msg''' with (m1 := (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m2:= (nc 3)) (m:= (pi1 (dec (f mphi1) (pi2 (k (N 1))))) ) (m3:= (if_then_else_M (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi3)) (i 2)) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3)) FAlse) (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3)) FAlse) (nc 5) (if_then_else_M (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3)) FAlse) (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3)) FAlse) (nc 5) (if_then_else_M (EQ_M (reveal (f mphi3)) (i 2)) (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (if_then_else_M (if_then_else_B (EQ_M (reveal (f mphi3)) (i 1)) (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) FAlse) (pi1 (dec (f mphi2) (pi2 (k (N 1))))) O))))). simpl. aply_breq. false_to_sesns_all. simpl. rewrite andB_assoc with (b1:= (EQ_M (reveal (f mphi3)) (i 2))) (b2:= (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue)) (b3:= (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3))). 
rewrite andB_assoc with (b1:= (EQ_M (reveal (f mphi3)) (i 2))) (b2:= ((if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) & (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3)))) (b3:= (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3))). rewrite andB_elm'' with (b1:= (EQ_M (reveal (f mphi3)) (i 2))) (b2:= (((if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) & (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3))) & (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3))) ). false_to_sesns_all. simpl. aply_breq. repeat redg; repeat rewrite IFTFb. rewrite <- IFSAME_M with (b:= (if_then_else_B (if_then_else_B (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3)) FAlse) (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3)) FAlse)) at 1. repeat rewrite andB_elm'' with (b1:= (if_then_else_B (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue) (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3)) FAlse)) (b2:= (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3))). repeat rewrite andB_elm'' with (b1:= (if_then_else_B (EQ_M (act (f mphi2)) new) FAlse TRue)) (b2:= (EQ_M (pi1 (dec (f mphi2) (pi2 (k (N 1))))) (nc 3))). aply_breq. aply_breq. rewrite <- IFSAME_M with (b:= (EQ_M (pi1 (dec (f mphi1) (pi2 (k (N 2))))) (nc 3))) at 1. aply_breq. Focus 3. false_to_sesns_all. simpl. aply_breq. repeat redg; repeat rewrite IFTFb. reflexivity. simpl. aply_breq_same. assert(qa10_ss # qb10_ss). repeat unf. assert(qa01_ss # qb01_ss). repeat unf. apply breq_msgeq1'. simpl. aply_breq_same.
import generalisation_linter import algebra.associated import algebra.char_p import topology.metric_space.basic import algebra.ring import algebra.category.Group import algebra.group_power import algebra.algebra.basic #print is_unit_iff_dvd_one lemma atest (α : Type*) [comm_monoid α] : (1 : α) * 1 = 1 ∧ ∀ a b : α, a * b = b * a := ⟨mul_one _, mul_comm⟩ -- lemma atest' (α : Type*) [comm_semigroup α] [monoid α] : (1 : α) * 1 = 1 ∧ ∀ a b : α, a * b = b * a := ⟨mul_one _, mul_comm⟩ lemma btest (α : Type*) [ordered_ring α] (a b : α) : 0 ≤ 0 ∧ a * b + b = a * b + b := ⟨eq.le rfl, rfl⟩ lemma btest' (α : Type*) [ring α] [preorder α] (a b : α) : 0 ≤ 0 ∧ a * b + b = a * b + b := ⟨eq.le rfl, rfl⟩ variables (α : Type*) #print atest #print char_p.char_is_prime_of_two_le -- _inst_1: integral_domain ↝ no_zero_divisors semiring -- should be domain? #check preorder -- #print eval -- run_cmd do d ← get_decl `eval, -- cd ← dag_attr.get_cache, -- e ← get_env, -- trace $ find_gens' d cd e d.type d.value 0 "", -- return () -- run_cmd do e ← get_env, cd ← class_dag e, l← e.get `mem_orbit_self,trace l.value.binding_body.binding_body.binding_body.binding_body -- run_cmd do e ← get_env, cd ← class_dag e, l← e.get `mem_orbit_self, aa ← get_instance_chains `mul_action 0 l.value.binding_body.binding_body.binding_body.binding_body , trace $ cd.minimal_vertices aa --.lambda_body.app_fn.app_fn.app_arg.lambda_body.app_fn.app_arg.app_fn.lambda_body--find_gens' cd e l.type l.value 0 "" -- run_cmd do e ← get_env, -- cd ← class_dag e, -- l← e.get `algebra.of_semimodule', -- trace $ l.value.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.app_fn.app_fn.app_fn.app_arg.app_arg, -- -- aa ← is_instance_chain 2 l.value.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.app_fn.app_fn.app_fn.app_arg.app_arg, -- -- trace aa -- aa ← get_instance_chains `semimodule 2 l.value.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.binding_body.app_fn.app_fn.app_fn.app_arg.app_arg, -- trace aa, -- trace $ cd.minimal_vertices aa --.lambda_body.app_fn.app_fn.app_arg.lambda_body.app_fn.app_arg.app_fn.lambda_body--find_gens' cd e l.type l.value 0 ""
{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE DataKinds #-} -- For OkLC as a class {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# OPTIONS_GHC -Wall #-} {-# OPTIONS -Wno-type-defaults #-} {-# OPTIONS_GHC -Wno-missing-signatures #-} {-# OPTIONS_GHC -Wno-unused-imports #-} {-# OPTIONS_GHC -fsimpl-tick-factor=500 #-} {-# OPTIONS_GHC -dsuppress-idinfo #-} {-# OPTIONS_GHC -fdicts-strict #-} {-# OPTIONS_GHC -Wno-orphans #-} module Main where import Prelude hiding (unzip,zip,zipWith) -- (id,(.),curry,uncurry) import qualified Prelude as P import Data.Monoid (Sum(..)) import Data.Foldable (fold) import Control.Applicative (liftA2) import Control.Arrow (second) import Control.Monad ((<=<)) import Data.List (unfoldr) -- TEMP import Data.Complex (Complex) import GHC.Float (int2Double) import qualified ConCat.AltCat as A import ConCat.AltCat (toCcc,toCcc',unCcc,unCcc',conceal,(:**:)(..),Ok,Ok2,U2,equal) import ConCat.Rebox import ConCat.Circuit (GenBuses,(:>)) import ConCat.Syntactic (Syn,render) import ConCat.RunCircuit (run) type EC = Syn :**: (:>) runU2 :: U2 a b -> IO () runU2 = print {-# INLINE runU2 #-} type GO a b = (GenBuses a, Ok2 (:>) a b) runSyn :: Syn a b -> IO () runSyn syn = putStrLn ('\n' : render syn) {-# INLINE runSyn #-} runSynCirc :: GO a b => String -> EC a b -> IO () runSynCirc nm (syn :**: circ) = runSyn syn >> runCirc nm circ {-# INLINE runSynCirc #-} runCirc :: GO a b => String -> (a :> b) -> IO () runCirc nm circ = run nm [] circ {-# INLINE runCirc #-} add5 :: Double -> Double add5 x = x + 5 decide :: Int -> Bool decide 4 = True decide _ = False fix :: (a -> a) -> a fix f = let {x = f x} in x fac :: Int -> Int fac 1 = 1 fac n = n*fac(n-1) main :: IO () --main = print "hello world!" main = sequence_ [ putChar '\n' -- return () -- -- Circuit graphs , runSynCirc "add" $ toCcc $ (+) @Double , runSynCirc "add5" $ toCcc add5 , runSynCirc "add-uncurry" $ toCcc $ uncurry ((+) @Double) , runSynCirc "decide" $ toCcc $ decide -- , runSynCirc "fac" $ toCcc (fix (\rec n -> if n == 0 then 1 else n * rec (n-1)) :: Int -> Int) -- , runSynCirc "fac" $ toCcc $ fac -- , runSynCirc "dup" $ toCcc $ A.dup @(->) @Int -- , runSynCirc "fst" $ toCcc $ fst @R @R -- , runSynCirc "twice" $ toCcc $ twice @R -- , runSynCirc "sqr" $ toCcc $ sqr @R -- , runSynCirc "complex-mul" $ toCcc $ uncurry ((*) @C) -- , runSynCirc "magSqr" $ toCcc $ magSqr @R -- , runSynCirc "cosSinProd" $ toCcc $ cosSinProd @R -- , runSynCirc "xp3y" $ toCcc $ \ (x,y) -> x + 3 * y :: R -- , runSynCirc "horner" $ toCcc $ horner @R [1,3,5] -- , runSynCirc "cos-2xx" $ toCcc $ \ x -> cos (2 * x * x) :: R -- ]
Given that the Second Polish Republic was a multicultural state, German policies and propaganda also sought to create and encourage conflicts between ethnic groups, fueling tension between Poles and Jews, and between Poles and Ukrainians. In Łódź, the Germans forced Jews to help destroy a monument to a Polish hero, Tadeusz Kościuszko, and filmed them committing the act. Soon afterward, the Germans set fire to a Jewish synagogue and filmed Polish bystanders, portraying them in propaganda releases as a "vengeful mob." This divisive policy was reflected in the Germans' decision to destroy Polish education, while at the same time showing relative tolerance toward the Ukrainian school system. As the high-ranking Nazi official Erich Koch explained, "We must do everything possible so that when a Pole meets a Ukrainian, he will be willing to kill the Ukrainian and conversely, the Ukrainian will be willing to kill the Pole."
(* Title: Regular Algebras Author: Simon Foster, Georg Struth Maintainer: Simon Foster <s.foster at york.ac.uk> Georg Struth <g.struth at sheffield.ac.uk> *) section \<open>Dioids, Powers and Finite Sums\<close> theory Dioid_Power_Sum imports Kleene_Algebra.Dioid Kleene_Algebra.Finite_Suprema begin text \<open>We add a few facts about powers and finite sums---in fact, finite suprema---to an existing theory field for dioids.\<close> context dioid_one_zero begin lemma add_iso_r: "y \<le> z \<Longrightarrow> x + y \<le> x + z" using local.join.sup_mono by blast notation power ("_\<^bsup>_\<^esup>" [101,50] 100) lemma power_subdist: "x\<^bsup>n\<^esup> \<le> (x + y)\<^bsup>n\<^esup>" apply (induct n) apply simp using local.mult_isol_var local.power_Suc2 by auto lemma power_inductl_var: "x \<cdot> y \<le> y \<Longrightarrow> x\<^bsup>n\<^esup> \<cdot> y \<le> y" apply (induct n) apply simp by (metis (no_types, lifting) local.dual_order.trans local.mult_isol local.power_Suc2 mult_assoc) lemma power_inductr_var: "y \<cdot> x \<le> y \<Longrightarrow> y \<cdot> x\<^bsup>n\<^esup> \<le> y" by (induct n, metis eq_refl mult_oner power.simps(1), metis mult.assoc mult_isor order_refl order_trans power.simps(2) power_commutes) definition powsum :: "'a \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a" ("_\<^bsub>_\<^esub>\<^bsup>_\<^esup>" [101,50,50] 100) where "powsum x m n = sum ((^) x) {m..n + m}" lemmas powsum_simps = powsum_def atLeastAtMostSuc_conv numerals lemma powsum1 [simp]: "x\<^bsub>n\<^esub>\<^bsup>0\<^esup> = x\<^bsup>n\<^esup>" by (simp add:powsum_simps) lemma powsum2: "x\<^bsub>n\<^esub>\<^bsup>Suc m\<^esup> = x\<^bsub>n\<^esub>\<^bsup>m \<^esup>+ x\<^bsup>n+Suc m\<^esup>" proof- have "x\<^bsub>n\<^esub>\<^bsup>Suc m\<^esup> = sum ((^) x) {n..(Suc m)+n}" using powsum_def by blast also have "... = sum ((^) x) {n..m+n} + x\<^bsup>n+Suc m\<^esup>" by (simp add: ab_semigroup_add_class.add.commute atLeastAtMostSuc_conv local.join.sup_commute) finally show ?thesis by (simp add: powsum_def) qed lemma powsum_00 [simp]: "x\<^bsub>0\<^esub>\<^bsup>0 \<^esup>= 1" by (simp add: powsum_def) lemma powsum_01 [simp]: "x\<^bsub>0\<^esub>\<^bsup>1\<^esup> = 1 + x" by (simp add: powsum2) lemma powsum_10 [simp]: "x\<^bsub>1\<^esub>\<^bsup>0\<^esup> = x" by (simp add: powsum_simps) lemma powsum_split: "x\<^bsub>m\<^esub>\<^bsup>i+Suc n\<^esup> = x\<^bsub>m\<^esub>\<^bsup>i\<^esup> + x\<^bsub>m+Suc i\<^esub>\<^bsup>n\<^esup>" by (induct n, simp_all add:powsum_simps ac_simps) lemma powsum_split_var1: "x\<^bsub>0\<^esub>\<^bsup>n+1\<^esup> = 1 + x\<^bsub>1\<^esub>\<^bsup>n\<^esup>" proof - have "x\<^bsub>0\<^esub>\<^bsup>n + 1\<^esup> = x\<^bsub>0\<^esub>\<^bsup>0 + Suc n\<^esup>" by simp also have "... = x\<^bsub>0\<^esub>\<^bsup>0\<^esup> + x\<^bsub>0 + Suc 0\<^esub>\<^bsup>n\<^esup>" by (subst powsum_split, rule refl) also have "... 
= 1 + x\<^bsub>0 + Suc 0\<^esub>\<^bsup>n\<^esup>" by simp finally show ?thesis by simp qed lemma powsum_split_var2 [simp]: "x\<^bsup>m\<^esup> + x\<^bsub>0\<^esub>\<^bsup>m\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m\<^esup>" proof (induct m) case 0 show ?case by (metis add_idem' power_0 powsum_00) case (Suc n) show ?case by (simp add: add_commute powsum2) qed lemma powsum_split_var3: "x\<^bsub>0\<^esub>\<^bsup>m+Suc n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m \<^esup>+ x\<^bsub>0+Suc m\<^esub>\<^bsup>n\<^esup>" by (subst powsum_split, simp) lemma powsum_split_var4 [simp]: "x\<^bsub>0\<^esub>\<^bsup>m+n\<^esup> + x\<^bsub>m\<^esub>\<^bsup>n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m+n\<^esup>" proof (induct n) case 0 show ?case by (metis add_0_iff add_comm powsum1 powsum_split_var2) next case (Suc n) note hyp = this show ?case proof - have "x\<^bsub>0\<^esub>\<^bsup>m + Suc n\<^esup> + x\<^bsub>m\<^esub>\<^bsup>Suc n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m + n\<^esup> + x\<^bsup>Suc (m + n)\<^esup> + (x\<^bsub>m\<^esub>\<^bsup>n\<^esup> + x\<^bsup>m + Suc n\<^esup>)" by (auto simp add: powsum2) also have "... = (x\<^bsub>0\<^esub>\<^bsup>m + n \<^esup>+ x\<^bsub>m\<^esub>\<^bsup>n\<^esup>) + x\<^bsup>Suc (m + n)\<^esup> + x\<^bsup>m + Suc n\<^esup>" by (metis add.assoc add.left_commute) also have "... = x\<^bsup>Suc (m+n)\<^esup> + x\<^bsub>0\<^esub>\<^bsup>m+n\<^esup>" by (metis add_Suc_right add.assoc add.commute add_idem' hyp) also have "... = x\<^bsub>0\<^esub>\<^bsup>m + Suc n\<^esup>" by (simp add: add_commute powsum2) finally show ?thesis . qed qed lemma powsum_split_var6: "x\<^bsub>0\<^esub>\<^bsup>(Suc k)+Suc n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>Suc k \<^esup>+ x\<^bsub>0+Suc (Suc k)\<^esub>\<^bsup>n\<^esup>" by (metis powsum_split_var3) lemma powsum_ext: "x \<le> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup>" proof (induct n) case 0 show ?case by (metis One_nat_def local.join.sup_ge2 powsum_01) next case (Suc n) thus ?case by (auto simp add:less_eq_def powsum_simps, metis (lifting, no_types) add.left_commute) qed lemma powsum_one: "1 \<le> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup>" by (induct n, metis One_nat_def local.join.sup.cobounded1 powsum_01, metis (full_types) Suc_eq_plus1 local.join.sup.cobounded1 powsum_split_var1) lemma powsum_shift1: "x \<cdot> x\<^bsub>m\<^esub>\<^bsup>n\<^esup> = x\<^bsub>m+1\<^esub>\<^bsup>n\<^esup>" apply (induct n) apply (simp_all add: powsum_simps) apply (metis local.add_left_comm local.distrib_left powsum_def) done lemma powsum_shift: "x\<^bsup>k \<^esup>\<cdot> x\<^bsub>m\<^esub>\<^bsup>n\<^esup> = x\<^bsub>k+m\<^esub>\<^bsup>n\<^esup>" by (induct k, simp_all, metis Suc_eq_plus1 mult.assoc powsum_shift1) lemma powsum_prod_suc: "x\<^bsub>0\<^esub>\<^bsup>m \<^esup>\<cdot> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>Suc (m+n)\<^esup>" proof (induct m) case 0 show ?case by simp case (Suc m) note hyp = this show ?case proof - have "x\<^bsub>0\<^esub>\<^bsup>Suc m \<^esup>\<cdot> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m\<^esup> \<cdot> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup> + x\<^bsup>Suc m \<^esup>\<cdot> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup>" by (simp add: powsum2) also have "... = x\<^bsub>0\<^esub>\<^bsup>Suc (m + n)\<^esup> + x\<^bsup>Suc m \<^esup>\<cdot> x\<^bsub>0\<^esub>\<^bsup>Suc n\<^esup>" by (simp add:hyp) also have "... = x\<^bsub>0\<^esub>\<^bsup>Suc (m + n)\<^esup> + x\<^bsub>Suc m\<^esub>\<^bsup>Suc n\<^esup>" by (subst powsum_shift, simp) also have "... 
= x\<^bsub>0\<^esub>\<^bsup>Suc (m + n)\<^esup> + (x\<^bsub>Suc m\<^esub>\<^bsup>n\<^esup> + x\<^bsup>Suc m + Suc n\<^esup>)" by (simp add:powsum2) also have "... = x\<^bsub>0\<^esub>\<^bsup>Suc (m + n)\<^esup> + x\<^bsub>Suc m\<^esub>\<^bsup>n\<^esup> + x\<^bsup>Suc (Suc (m + n))\<^esup>" by (metis add_Suc_right add_Suc_shift add.assoc add.left_commute) also have "... = x\<^bsub>0\<^esub>\<^bsup>Suc (m + n)\<^esup> + x\<^bsup>Suc (Suc (m + n))\<^esup>" by (simp only: add_Suc_right[THEN sym] add_Suc_shift[THEN sym] powsum_split_var4) also have "... = x\<^bsub>0\<^esub>\<^bsup>Suc (Suc m + n)\<^esup>" by (simp add: powsum2) finally show ?thesis . qed qed lemma powsum_prod: "x\<^bsub>0\<^esub>\<^bsup>m \<^esup>\<cdot> x\<^bsub>0\<^esub>\<^bsup>n\<^esup> = x\<^bsub>0\<^esub>\<^bsup>m+n\<^esup>" by (cases n, simp, simp add: powsum_prod_suc) end end
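In conventional notation, read directly off the definition of powsum above, the sum-of-powers operator is $x_{m}^{(n)} = \mathrm{powsum}\ x\ m\ n = \sum_{i=m}^{m+n} x^{i}$; for example, $\mathrm{powsum}\ x\ 0\ 1 = x^0 + x^1 = 1 + x$, matching the lemma powsum_01.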
[GOAL] ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] cases subsingleton_or_nontrivial ι [GOAL] case inl ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Subsingleton ι ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] have : range b = univ := AffineSubspace.eq_univ_of_subsingleton_span_eq_top (subsingleton_range _) b.tot [GOAL] case inl ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Subsingleton ι this : range ↑b = univ ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] simp [this] [GOAL] case inr ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] haveI : FiniteDimensional ℝ E := b.finiteDimensional [GOAL] case inr ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι this : FiniteDimensional ℝ E ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] have : convexHull ℝ (range b) = ⋂ i, b.coord i ⁻¹' Ici 0 := by rw [b.convexHull_eq_nonneg_coord, setOf_forall]; rfl [GOAL] ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι this : FiniteDimensional ℝ E ⊢ ↑(convexHull ℝ) (range ↑b) = ⋂ (i : ι), ↑(coord b i) ⁻¹' Ici 0 [PROOFSTEP] rw [b.convexHull_eq_nonneg_coord, setOf_forall] [GOAL] ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι this : FiniteDimensional ℝ E ⊢ ⋂ (i : ι), {x | 0 ≤ ↑(coord b i) x} = ⋂ (i : ι), ↑(coord b i) ⁻¹' Ici 0 [PROOFSTEP] rfl [GOAL] case inr ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι this✝ : FiniteDimensional ℝ E this : ↑(convexHull ℝ) (range ↑b) = ⋂ (i : ι), ↑(coord b i) ⁻¹' Ici 0 ⊢ interior (↑(convexHull ℝ) (range ↑b)) = {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] ext [GOAL] case inr.h ι : Type u_1 E : Type u_2 inst✝² : Finite ι inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E b : AffineBasis ι ℝ E h✝ : Nontrivial ι this✝ : FiniteDimensional ℝ E this : ↑(convexHull ℝ) (range ↑b) = ⋂ (i : ι), ↑(coord b i) ⁻¹' Ici 0 x✝ : E ⊢ x✝ ∈ interior (↑(convexHull ℝ) (range ↑b)) ↔ x✝ ∈ {x | ∀ (i : ι), 0 < ↑(coord b i) x} [PROOFSTEP] simp only [this, interior_iInter, ← IsOpenMap.preimage_interior_eq_interior_preimage (isOpenMap_barycentric_coord b _) (continuous_barycentric_coord b _), interior_Ici, mem_iInter, mem_setOf_eq, mem_Ioi, mem_preimage] [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u hne : Set.Nonempty s h : AffineIndependent ℝ Subtype.val ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] obtain ⟨q, hq⟩ := hne [GOAL] case intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V 
P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] obtain ⟨ε, ε0, hεu⟩ := Metric.nhds_basis_closedBall.mem_iff.1 (hu.mem_nhds <| hsu hq) [GOAL] case intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] obtain ⟨t, ht₁, ht₂, ht₃⟩ := exists_subset_affineIndependent_affineSpan_eq_top h [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] let f : P → P := fun y => lineMap q y (ε / dist y q) [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] have hf : ∀ y, f y ∈ u := by refine' fun y => hεu _ simp only rw [Metric.mem_closedBall, lineMap_apply, dist_vadd_left, norm_smul, Real.norm_eq_abs, dist_eq_norm_vsub V y q, abs_div, abs_of_pos ε0, abs_of_nonneg (norm_nonneg _), div_mul_comm] exact mul_le_of_le_one_left ε0.le (div_self_le_one _) [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) ⊢ ∀ (y : P), f y ∈ u [PROOFSTEP] refine' fun y => hεu _ [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) y : P ⊢ f y ∈ Metric.closedBall q ε [PROOFSTEP] simp only [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) y : P ⊢ ↑(lineMap q y) (ε / dist y q) ∈ Metric.closedBall q ε 
[PROOFSTEP] rw [Metric.mem_closedBall, lineMap_apply, dist_vadd_left, norm_smul, Real.norm_eq_abs, dist_eq_norm_vsub V y q, abs_div, abs_of_pos ε0, abs_of_nonneg (norm_nonneg _), div_mul_comm] [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) y : P ⊢ ‖y -ᵥ q‖ / ‖y -ᵥ q‖ * ε ≤ ε [PROOFSTEP] exact mul_le_of_le_one_left ε0.le (div_self_le_one _) [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] have hεyq : ∀ (y) (_ : y ∉ s), ε / dist y q ≠ 0 := fun y hy => div_ne_zero ε0.ne' (dist_ne_zero.2 (ne_of_mem_of_not_mem hq hy).symm) [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] classical let w : t → ℝˣ := fun p => if hp : (p : P) ∈ s then 1 else Units.mk0 _ (hεyq (↑p) hp) refine' ⟨Set.range fun p : t => lineMap q p (w p : ℝ), _, _, _, _⟩ · intro p hp; use⟨p, ht₁ hp⟩; simp [hp] · rintro y ⟨⟨p, hp⟩, rfl⟩ by_cases hps : p ∈ s <;> simp only [hps, lineMap_apply_one, Units.val_mk0, dif_neg, dif_pos, not_false_iff, Units.val_one, Subtype.coe_mk] <;> [exact hsu hps; exact hf p] · exact (ht₂.units_lineMap ⟨q, ht₁ hq⟩ w).range · rw [affineSpan_eq_affineSpan_lineMap_units (ht₁ hq) w, ht₃] [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] let w : t → ℝˣ := fun p => if hp : (p : P) ∈ s then 1 else Units.mk0 _ (hεyq (↑p) hp) [GOAL] case intro.intro.intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u 
t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) ⊢ ∃ t, s ⊆ t ∧ t ⊆ u ∧ AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ t = ⊤ [PROOFSTEP] refine' ⟨Set.range fun p : t => lineMap q p (w p : ℝ), _, _, _, _⟩ [GOAL] case intro.intro.intro.intro.intro.intro.refine'_1 V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) ⊢ s ⊆ range fun p => ↑(lineMap q ↑p) ↑(w p) [PROOFSTEP] intro p hp [GOAL] case intro.intro.intro.intro.intro.intro.refine'_1 V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ s ⊢ p ∈ range fun p => ↑(lineMap q ↑p) ↑(w p) [PROOFSTEP] use⟨p, ht₁ hp⟩ [GOAL] case h V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ s ⊢ (fun p => ↑(lineMap q ↑p) ↑(w p)) { val := p, property := (_ : p ∈ t) } = p [PROOFSTEP] simp [hp] [GOAL] case intro.intro.intro.intro.intro.intro.refine'_2 V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) ⊢ (range fun p => ↑(lineMap q ↑p) ↑(w p)) ⊆ u [PROOFSTEP] rintro y ⟨⟨p, hp⟩, rfl⟩ [GOAL] case intro.intro.intro.intro.intro.intro.refine'_2.intro.mk V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : 
IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t ⊢ (fun p => ↑(lineMap q ↑p) ↑(w p)) { val := p, property := hp } ∈ u [PROOFSTEP] by_cases hps : p ∈ s <;> simp only [hps, lineMap_apply_one, Units.val_mk0, dif_neg, dif_pos, not_false_iff, Units.val_one, Subtype.coe_mk] <;> [exact hsu hps; exact hf p] [GOAL] case intro.intro.intro.intro.intro.intro.refine'_2.intro.mk V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t ⊢ (fun p => ↑(lineMap q ↑p) ↑(w p)) { val := p, property := hp } ∈ u [PROOFSTEP] by_cases hps : p ∈ s [GOAL] case pos V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t hps : p ∈ s ⊢ (fun p => ↑(lineMap q ↑p) ↑(w p)) { val := p, property := hp } ∈ u [PROOFSTEP] simp only [hps, lineMap_apply_one, Units.val_mk0, dif_neg, dif_pos, not_false_iff, Units.val_one, Subtype.coe_mk] [GOAL] case neg V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t hps : ¬p ∈ s ⊢ (fun p => ↑(lineMap q ↑p) ↑(w p)) { val := p, property := hp } ∈ u [PROOFSTEP] simp only [hps, lineMap_apply_one, Units.val_mk0, dif_neg, dif_pos, not_false_iff, Units.val_one, Subtype.coe_mk] [GOAL] case pos V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : 
P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t hps : p ∈ s ⊢ p ∈ u [PROOFSTEP] exact hsu hps [GOAL] case neg V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) p : P hp : p ∈ t hps : ¬p ∈ s ⊢ ↑(lineMap q p) (ε / dist p q) ∈ u [PROOFSTEP] exact hf p [GOAL] case intro.intro.intro.intro.intro.intro.refine'_3 V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) ⊢ AffineIndependent ℝ Subtype.val [PROOFSTEP] exact (ht₂.units_lineMap ⟨q, ht₁ hq⟩ w).range [GOAL] case intro.intro.intro.intro.intro.intro.refine'_4 V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P s u : Set P hu : IsOpen u hsu : s ⊆ u h : AffineIndependent ℝ Subtype.val q : P hq : q ∈ s ε : ℝ ε0 : 0 < ε hεu : Metric.closedBall q ε ⊆ u t : Set P ht₁ : s ⊆ t ht₂ : AffineIndependent ℝ fun p => ↑p ht₃ : affineSpan ℝ t = ⊤ f : P → P := fun y => ↑(lineMap q y) (ε / dist y q) hf : ∀ (y : P), f y ∈ u hεyq : ∀ (y : P), ¬y ∈ s → ε / dist y q ≠ 0 w : ↑t → ℝˣ := fun p => if hp : ↑p ∈ s then 1 else Units.mk0 (ε / dist (↑p) q) (_ : ε / dist (↑p) q ≠ 0) ⊢ affineSpan ℝ (range fun p => ↑(lineMap q ↑p) ↑(w p)) = ⊤ [PROOFSTEP] rw [affineSpan_eq_affineSpan_lineMap_units (ht₁ hq) w, ht₃] [GOAL] V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P u : Set P hu : IsOpen u hne : Set.Nonempty u ⊢ ∃ s x, AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ s = ⊤ [PROOFSTEP] rcases hne with ⟨x, hx⟩ [GOAL] case intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P u : Set P hu : IsOpen u x : P hx : x ∈ u ⊢ ∃ s x, AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ s = ⊤ [PROOFSTEP] rcases hu.exists_between_affineIndependent_span_eq_top (singleton_subset_iff.mpr hx) (singleton_nonempty _) (affineIndependent_of_subsingleton _ _) with ⟨s, -, hsu, hs⟩ [GOAL] case intro.intro.intro.intro V : Type u_1 P : Type u_2 inst✝³ : NormedAddCommGroup V inst✝² : NormedSpace ℝ V inst✝¹ : MetricSpace P inst✝ : NormedAddTorsor V P u : Set P hu : IsOpen u x : P hx : x ∈ u s : Set P hsu : s ⊆ u hs : AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ s = ⊤ ⊢ ∃ s x, AffineIndependent ℝ Subtype.val ∧ affineSpan ℝ s = ⊤ [PROOFSTEP] exact ⟨s, hsu, hs⟩ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : 
NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P ι : Type u_3 inst✝ : Fintype ι b : AffineBasis ι ℝ V ⊢ Finset.centroid ℝ Finset.univ ↑b ∈ interior (↑(convexHull ℝ) (range ↑b)) [PROOFSTEP] haveI := b.nonempty [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P ι : Type u_3 inst✝ : Fintype ι b : AffineBasis ι ℝ V this : Nonempty ι ⊢ Finset.centroid ℝ Finset.univ ↑b ∈ interior (↑(convexHull ℝ) (range ↑b)) [PROOFSTEP] simp only [b.interior_convexHull, mem_setOf_eq, b.coord_apply_centroid (Finset.mem_univ _), inv_pos, Nat.cast_pos, Finset.card_pos, Finset.univ_nonempty, forall_true_iff] [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V ⊢ Set.Nonempty (interior (↑(convexHull ℝ) s)) ↔ affineSpan ℝ s = ⊤ [PROOFSTEP] refine' ⟨affineSpan_eq_top_of_nonempty_interior, fun h => _⟩ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ ⊢ Set.Nonempty (interior (↑(convexHull ℝ) s)) [PROOFSTEP] obtain ⟨t, hts, b, hb⟩ := AffineBasis.exists_affine_subbasis h [GOAL] case intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Set V hts : t ⊆ s b : AffineBasis ↑t ℝ V hb : ↑b = Subtype.val ⊢ Set.Nonempty (interior (↑(convexHull ℝ) s)) [PROOFSTEP] suffices (interior (convexHull ℝ (range b))).Nonempty by rw [hb, Subtype.range_coe_subtype, setOf_mem_eq] at this refine' this.mono _ mono* [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Set V hts : t ⊆ s b : AffineBasis ↑t ℝ V hb : ↑b = Subtype.val this : Set.Nonempty (interior (↑(convexHull ℝ) (range ↑b))) ⊢ Set.Nonempty (interior (↑(convexHull ℝ) s)) [PROOFSTEP] rw [hb, Subtype.range_coe_subtype, setOf_mem_eq] at this [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Set V hts : t ⊆ s b : AffineBasis ↑t ℝ V hb : ↑b = Subtype.val this : Set.Nonempty (interior (↑(convexHull ℝ) t)) ⊢ Set.Nonempty (interior (↑(convexHull ℝ) s)) [PROOFSTEP] refine' this.mono _ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Set V hts : t ⊆ s b : AffineBasis ↑t ℝ V hb : ↑b = Subtype.val this : Set.Nonempty (interior (↑(convexHull ℝ) t)) ⊢ interior (↑(convexHull ℝ) t) ⊆ interior (↑(convexHull ℝ) s) [PROOFSTEP] mono* [GOAL] case intro.intro.intro V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Set V hts : t ⊆ s b : AffineBasis ↑t ℝ V hb : ↑b = Subtype.val ⊢ Set.Nonempty (interior (↑(convexHull ℝ) (range ↑b))) [PROOFSTEP] lift t to Finset V using b.finite_set [GOAL] case intro.intro.intro.intro V : Type u_1 
P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V h : affineSpan ℝ s = ⊤ t : Finset V hts : ↑t ⊆ s b : AffineBasis ↑↑t ℝ V hb : ↑b = Subtype.val ⊢ Set.Nonempty (interior (↑(convexHull ℝ) (range ↑b))) [PROOFSTEP] exact ⟨_, b.centroid_mem_interior_convexHull⟩ [GOAL] V : Type u_1 P : Type u_2 inst✝⁴ : NormedAddCommGroup V inst✝³ : NormedSpace ℝ V inst✝² : MetricSpace P inst✝¹ : NormedAddTorsor V P inst✝ : FiniteDimensional ℝ V s : Set V hs : Convex ℝ s ⊢ Set.Nonempty (interior s) ↔ affineSpan ℝ s = ⊤ [PROOFSTEP] rw [← interior_convexHull_nonempty_iff_affineSpan_eq_top, hs.convexHull_eq]
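/- Editorial sketch (not part of the extracted proof log above): a self-contained restatement of the last result the log proves, using only lemma names that already appear in the log (`interior_convexHull_nonempty_iff_affineSpan_eq_top`, and `Convex.convexHull_eq` via `hs.convexHull_eq`). The blanket `import Mathlib` is an assumption made so the snippet elaborates on its own; the proof mirrors the final [PROOFSTEP] verbatim. -/
import Mathlib

example {V : Type*} [NormedAddCommGroup V] [NormedSpace ℝ V] [FiniteDimensional ℝ V]
    {s : Set V} (hs : Convex ℝ s) :
    Set.Nonempty (interior s) ↔ affineSpan ℝ s = ⊤ := by
  rw [← interior_convexHull_nonempty_iff_affineSpan_eq_top, hs.convexHull_eq]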
section \<open>Elementary Group Constructions\<close> (* Title: HOL/Algebra/Elementary_Groups.thy Author: LC Paulson, ported from HOL Light *) theory Elementary_Groups imports Generated_Groups "HOL-Library.Infinite_Set" begin subsection\<open>Direct sum/product lemmas\<close> locale group_disjoint_sum = group G + AG: subgroup A G + BG: subgroup B G for G (structure) and A B begin lemma subset_one: "A \<inter> B \<subseteq> {\<one>} \<longleftrightarrow> A \<inter> B = {\<one>}" by auto lemma sub_id_iff: "A \<inter> B \<subseteq> {\<one>} \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = \<one> \<longrightarrow> x = \<one> \<and> y = \<one>)" (is "?lhs = ?rhs") proof - have "?lhs = (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> inv y = \<one> \<longrightarrow> x = \<one> \<and> inv y = \<one>)" proof (intro ballI iffI impI) fix x y assume "A \<inter> B \<subseteq> {\<one>}" "x \<in> A" "y \<in> B" "x \<otimes> inv y = \<one>" then have "y = x" using group.inv_equality group_l_invI by fastforce then show "x = \<one> \<and> inv y = \<one>" using \<open>A \<inter> B \<subseteq> {\<one>}\<close> \<open>x \<in> A\<close> \<open>y \<in> B\<close> by fastforce next assume "\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> inv y = \<one> \<longrightarrow> x = \<one> \<and> inv y = \<one>" then show "A \<inter> B \<subseteq> {\<one>}" by auto qed also have "\<dots> = ?rhs" by (metis BG.mem_carrier BG.subgroup_axioms inv_inv subgroup_def) finally show ?thesis . qed lemma cancel: "A \<inter> B \<subseteq> {\<one>} \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>B. \<forall>x'\<in>A. \<forall>y'\<in>B. x \<otimes> y = x' \<otimes> y' \<longrightarrow> x = x' \<and> y = y')" (is "?lhs = ?rhs") proof - have "(\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = \<one> \<longrightarrow> x = \<one> \<and> y = \<one>) = ?rhs" (is "?med = _") proof (intro ballI iffI impI) fix x y x' y' assume * [rule_format]: "\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = \<one> \<longrightarrow> x = \<one> \<and> y = \<one>" and AB: "x \<in> A" "y \<in> B" "x' \<in> A" "y' \<in> B" and eq: "x \<otimes> y = x' \<otimes> y'" then have carr: "x \<in> carrier G" "x' \<in> carrier G" "y \<in> carrier G" "y' \<in> carrier G" using AG.subset BG.subset by auto then have "inv x' \<otimes> x \<otimes> (y \<otimes> inv y') = inv x' \<otimes> (x \<otimes> y) \<otimes> inv y'" by (simp add: m_assoc) also have "\<dots> = \<one>" using carr by (simp add: eq) (simp add: m_assoc) finally have 1: "inv x' \<otimes> x \<otimes> (y \<otimes> inv y') = \<one>" . show "x = x' \<and> y = y'" using * [OF _ _ 1] AB by simp (metis carr inv_closed inv_inv local.inv_equality) next fix x y assume * [rule_format]: "\<forall>x\<in>A. \<forall>y\<in>B. \<forall>x'\<in>A. \<forall>y'\<in>B. x \<otimes> y = x' \<otimes> y' \<longrightarrow> x = x' \<and> y = y'" and xy: "x \<in> A" "y \<in> B" "x \<otimes> y = \<one>" show "x = \<one> \<and> y = \<one>" by (rule *) (use xy in auto) qed then show ?thesis by (simp add: sub_id_iff) qed lemma commuting_imp_normal1: assumes sub: "carrier G \<subseteq> A <#> B" and mult: "\<And>x y. 
\<lbrakk>x \<in> A; y \<in> B\<rbrakk> \<Longrightarrow> x \<otimes> y = y \<otimes> x" shows "A \<lhd> G" proof - have AB: "A \<subseteq> carrier G \<and> B \<subseteq> carrier G" by (simp add: AG.subset BG.subset) have "A #> x = x <# A" if x: "x \<in> carrier G" for x proof - obtain a b where xeq: "x = a \<otimes> b" and "a \<in> A" "b \<in> B" and carr: "a \<in> carrier G" "b \<in> carrier G" using x sub AB by (force simp: set_mult_def) have Ab: "A <#> {b} = {b} <#> A" using AB \<open>a \<in> A\<close> \<open>b \<in> B\<close> mult by (force simp: set_mult_def m_assoc subset_iff) have "A #> x = A <#> {a \<otimes> b}" by (auto simp: l_coset_eq_set_mult r_coset_eq_set_mult xeq) also have "\<dots> = A <#> {a} <#> {b}" using AB \<open>a \<in> A\<close> \<open>b \<in> B\<close> by (auto simp: set_mult_def m_assoc subset_iff) also have "\<dots> = {a} <#> A <#> {b}" by (metis AG.rcos_const AG.subgroup_axioms \<open>a \<in> A\<close> coset_join3 is_group l_coset_eq_set_mult r_coset_eq_set_mult subgroup.mem_carrier) also have "\<dots> = {a} <#> {b} <#> A" by (simp add: is_group carr group.set_mult_assoc AB Ab) also have "\<dots> = {x} <#> A" by (auto simp: set_mult_def xeq) finally show "A #> x = x <# A" by (simp add: l_coset_eq_set_mult) qed then show ?thesis by (auto simp: normal_def normal_axioms_def AG.subgroup_axioms is_group) qed lemma commuting_imp_normal2: assumes"carrier G \<subseteq> A <#> B" "\<And>x y. \<lbrakk>x \<in> A; y \<in> B\<rbrakk> \<Longrightarrow> x \<otimes> y = y \<otimes> x" shows "B \<lhd> G" proof (rule group_disjoint_sum.commuting_imp_normal1) show "group_disjoint_sum G B A" proof qed next show "carrier G \<subseteq> B <#> A" using BG.subgroup_axioms assms commut_normal commuting_imp_normal1 by blast qed (use assms in auto) lemma (in group) normal_imp_commuting: assumes "A \<lhd> G" "B \<lhd> G" "A \<inter> B \<subseteq> {\<one>}" "x \<in> A" "y \<in> B" shows "x \<otimes> y = y \<otimes> x" proof - interpret AG: normal A G using assms by auto interpret BG: normal B G using assms by auto interpret group_disjoint_sum G A B proof qed have * [rule_format]: "(\<forall>x\<in>A. \<forall>y\<in>B. \<forall>x'\<in>A. \<forall>y'\<in>B. x \<otimes> y = x' \<otimes> y' \<longrightarrow> x = x' \<and> y = y')" using cancel assms by (auto simp: normal_def) have carr: "x \<in> carrier G" "y \<in> carrier G" using assms AG.subset BG.subset by auto then show ?thesis using * [of x _ _ y] AG.coset_eq [rule_format, of y] BG.coset_eq [rule_format, of x] by (clarsimp simp: l_coset_def r_coset_def set_eq_iff) (metis \<open>x \<in> A\<close> \<open>y \<in> B\<close>) qed lemma normal_eq_commuting: assumes "carrier G \<subseteq> A <#> B" "A \<inter> B \<subseteq> {\<one>}" shows "A \<lhd> G \<and> B \<lhd> G \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = y \<otimes> x)" by (metis assms commuting_imp_normal1 commuting_imp_normal2 normal_imp_commuting) lemma (in group) hom_group_mul_rev: assumes "(\<lambda>(x,y). 
x \<otimes> y) \<in> hom (subgroup_generated G A \<times>\<times> subgroup_generated G B) G" (is "?h \<in> hom ?P G") and "x \<in> carrier G" "y \<in> carrier G" "x \<in> A" "y \<in> B" shows "x \<otimes> y = y \<otimes> x" proof - interpret P: group_hom ?P G ?h by (simp add: assms DirProd_group group_hom.intro group_hom_axioms.intro is_group) have xy: "(x,y) \<in> carrier ?P" by (auto simp: assms carrier_subgroup_generated generate.incl) have "x \<otimes> (x \<otimes> (y \<otimes> y)) = x \<otimes> (y \<otimes> (x \<otimes> y))" using P.hom_mult [OF xy xy] by (simp add: m_assoc assms) then have "x \<otimes> (y \<otimes> y) = y \<otimes> (x \<otimes> y)" using assms by simp then show ?thesis by (simp add: assms flip: m_assoc) qed lemma hom_group_mul_eq: "(\<lambda>(x,y). x \<otimes> y) \<in> hom (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = y \<otimes> x)" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs using hom_group_mul_rev AG.subset BG.subset by blast next assume R: ?rhs have subG: "generate G (carrier G \<inter> A) \<subseteq> carrier G" for A by (simp add: generate_incl) have *: "x \<otimes> u \<otimes> (y \<otimes> v) = x \<otimes> y \<otimes> (u \<otimes> v)" if eq [rule_format]: "\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = y \<otimes> x" and gen: "x \<in> generate G (carrier G \<inter> A)" "y \<in> generate G (carrier G \<inter> B)" "u \<in> generate G (carrier G \<inter> A)" "v \<in> generate G (carrier G \<inter> B)" for x y u v proof - have "u \<otimes> y = y \<otimes> u" by (metis AG.carrier_subgroup_generated_subgroup BG.carrier_subgroup_generated_subgroup carrier_subgroup_generated eq that(3) that(4)) then have "x \<otimes> u \<otimes> y = x \<otimes> y \<otimes> u" using gen by (simp add: m_assoc subsetD [OF subG]) then show ?thesis using gen by (simp add: subsetD [OF subG] flip: m_assoc) qed show ?lhs using R by (auto simp: hom_def carrier_subgroup_generated subsetD [OF subG] *) qed lemma epi_group_mul_eq: "(\<lambda>(x,y). x \<otimes> y) \<in> epi (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> A <#> B = carrier G \<and> (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = y \<otimes> x)" proof - have subGA: "generate G (carrier G \<inter> A) \<subseteq> A" by (simp add: AG.subgroup_axioms generate_subgroup_incl) have subGB: "generate G (carrier G \<inter> B) \<subseteq> B" by (simp add: BG.subgroup_axioms generate_subgroup_incl) have "(((\<lambda>(x, y). x \<otimes> y) ` (generate G (carrier G \<inter> A) \<times> generate G (carrier G \<inter> B)))) = ((A <#> B))" by (auto simp: set_mult_def generate.incl pair_imageI dest: subsetD [OF subGA] subsetD [OF subGB]) then show ?thesis by (auto simp: epi_def hom_group_mul_eq carrier_subgroup_generated) qed lemma mon_group_mul_eq: "(\<lambda>(x,y). x \<otimes> y) \<in> mon (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> A \<inter> B = {\<one>} \<and> (\<forall>x\<in>A. \<forall>y\<in>B. 
x \<otimes> y = y \<otimes> x)" proof - have subGA: "generate G (carrier G \<inter> A) \<subseteq> A" by (simp add: AG.subgroup_axioms generate_subgroup_incl) have subGB: "generate G (carrier G \<inter> B) \<subseteq> B" by (simp add: BG.subgroup_axioms generate_subgroup_incl) show ?thesis apply (auto simp: mon_def hom_group_mul_eq simp flip: subset_one) apply (simp_all (no_asm_use) add: inj_on_def AG.carrier_subgroup_generated_subgroup BG.carrier_subgroup_generated_subgroup) using cancel apply blast+ done qed lemma iso_group_mul_alt: "(\<lambda>(x,y). x \<otimes> y) \<in> iso (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> A \<inter> B = {\<one>} \<and> A <#> B = carrier G \<and> (\<forall>x\<in>A. \<forall>y\<in>B. x \<otimes> y = y \<otimes> x)" by (auto simp: iso_iff_mon_epi mon_group_mul_eq epi_group_mul_eq) lemma iso_group_mul_eq: "(\<lambda>(x,y). x \<otimes> y) \<in> iso (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> A \<inter> B = {\<one>} \<and> A <#> B = carrier G \<and> A \<lhd> G \<and> B \<lhd> G" by (simp add: iso_group_mul_alt normal_eq_commuting cong: conj_cong) lemma (in group) iso_group_mul_gen: assumes "A \<lhd> G" "B \<lhd> G" shows "(\<lambda>(x,y). x \<otimes> y) \<in> iso (subgroup_generated G A \<times>\<times> subgroup_generated G B) G \<longleftrightarrow> A \<inter> B \<subseteq> {\<one>} \<and> A <#> B = carrier G" proof - interpret group_disjoint_sum G A B using assms by (auto simp: group_disjoint_sum_def normal_def) show ?thesis by (simp add: subset_one iso_group_mul_eq assms) qed lemma iso_group_mul: assumes "comm_group G" shows "((\<lambda>(x,y). x \<otimes> y) \<in> iso (DirProd (subgroup_generated G A) (subgroup_generated G B)) G \<longleftrightarrow> A \<inter> B \<subseteq> {\<one>} \<and> A <#> B = carrier G)" proof (rule iso_group_mul_gen) interpret comm_group by (rule assms) show "A \<lhd> G" by (simp add: AG.subgroup_axioms subgroup_imp_normal) show "B \<lhd> G" by (simp add: BG.subgroup_axioms subgroup_imp_normal) qed end subsection\<open>The one-element group on a given object\<close> definition singleton_group :: "'a \<Rightarrow> 'a monoid" where "singleton_group a = \<lparr>carrier = {a}, monoid.mult = (\<lambda>x y. 
a), one = a\<rparr>" lemma singleton_group [simp]: "group (singleton_group a)" unfolding singleton_group_def by (auto intro: groupI) lemma singleton_abelian_group [simp]: "comm_group (singleton_group a)" by (metis group.group_comm_groupI monoid.simps(1) singleton_group singleton_group_def) lemma carrier_singleton_group [simp]: "carrier (singleton_group a) = {a}" by (auto simp: singleton_group_def) lemma (in group) hom_into_singleton_iff [simp]: "h \<in> hom G (singleton_group a) \<longleftrightarrow> h \<in> carrier G \<rightarrow> {a}" by (auto simp: hom_def singleton_group_def) declare group.hom_into_singleton_iff [simp] lemma (in group) id_hom_singleton: "id \<in> hom (singleton_group \<one>) G" by (simp add: hom_def singleton_group_def) subsection\<open>Similarly, trivial groups\<close> definition trivial_group :: "('a, 'b) monoid_scheme \<Rightarrow> bool" where "trivial_group G \<equiv> group G \<and> carrier G = {one G}" lemma trivial_imp_finite_group: "trivial_group G \<Longrightarrow> finite(carrier G)" by (simp add: trivial_group_def) lemma trivial_singleton_group [simp]: "trivial_group(singleton_group a)" by (metis monoid.simps(2) partial_object.simps(1) singleton_group singleton_group_def trivial_group_def) lemma (in group) trivial_group_subset: "trivial_group G \<longleftrightarrow> carrier G \<subseteq> {one G}" using is_group trivial_group_def by fastforce lemma (in group) trivial_group: "trivial_group G \<longleftrightarrow> (\<exists>a. carrier G = {a})" unfolding trivial_group_def using one_closed is_group by fastforce lemma (in group) trivial_group_alt: "trivial_group G \<longleftrightarrow> (\<exists>a. carrier G \<subseteq> {a})" by (auto simp: trivial_group) lemma (in group) trivial_group_subgroup_generated: assumes "S \<subseteq> {one G}" shows "trivial_group(subgroup_generated G S)" proof - have "carrier (subgroup_generated G S) \<subseteq> {\<one>}" using generate_empty generate_one subset_singletonD assms by (fastforce simp add: carrier_subgroup_generated) then show ?thesis by (simp add: group.trivial_group_subset) qed lemma (in group) trivial_group_subgroup_generated_eq: "trivial_group(subgroup_generated G s) \<longleftrightarrow> carrier G \<inter> s \<subseteq> {one G}" apply (rule iffI) apply (force simp: trivial_group_def carrier_subgroup_generated generate.incl) by (metis subgroup_generated_restrict trivial_group_subgroup_generated) lemma isomorphic_group_triviality1: assumes "G \<cong> H" "group H" "trivial_group G" shows "trivial_group H" using assms by (auto simp: trivial_group_def is_iso_def iso_def group.is_monoid Group.group_def bij_betw_def hom_one) lemma isomorphic_group_triviality: assumes "G \<cong> H" "group G" "group H" shows "trivial_group G \<longleftrightarrow> trivial_group H" by (meson assms group.iso_sym isomorphic_group_triviality1) lemma (in group_hom) kernel_from_trivial_group: "trivial_group G \<Longrightarrow> kernel G H h = carrier G" by (auto simp: trivial_group_def kernel_def) lemma (in group_hom) image_from_trivial_group: "trivial_group G \<Longrightarrow> h ` carrier G = {one H}" by (auto simp: trivial_group_def) lemma (in group_hom) kernel_to_trivial_group: "trivial_group H \<Longrightarrow> kernel G H h = carrier G" unfolding kernel_def trivial_group_def using hom_closed by blast subsection\<open>The additive group of integers\<close> definition integer_group where "integer_group = \<lparr>carrier = UNIV, monoid.mult = (+), one = (0::int)\<rparr>" lemma group_integer_group [simp]: "group integer_group" unfolding 
integer_group_def proof (rule groupI; simp) show "\<And>x::int. \<exists>y. y + x = 0" by presburger qed lemma carrier_integer_group [simp]: "carrier integer_group = UNIV" by (auto simp: integer_group_def) lemma one_integer_group [simp]: "\<one>\<^bsub>integer_group\<^esub> = 0" by (auto simp: integer_group_def) lemma mult_integer_group [simp]: "x \<otimes>\<^bsub>integer_group\<^esub> y = x + y" by (auto simp: integer_group_def) lemma inv_integer_group [simp]: "inv\<^bsub>integer_group\<^esub> x = -x" by (rule group.inv_equality [OF group_integer_group]) (auto simp: integer_group_def) lemma abelian_integer_group: "comm_group integer_group" by (rule group.group_comm_groupI [OF group_integer_group]) (auto simp: integer_group_def) lemma group_nat_pow_integer_group [simp]: fixes n::nat and x::int shows "pow integer_group x n = int n * x" by (induction n) (auto simp: integer_group_def algebra_simps) lemma group_int_pow_integer_group [simp]: fixes n::int and x::int shows "pow integer_group x n = n * x" by (simp add: int_pow_def2) lemma (in group) hom_integer_group_pow: "x \<in> carrier G \<Longrightarrow> pow G x \<in> hom integer_group G" by (rule homI) (auto simp: int_pow_mult) subsection\<open>Additive group of integers modulo n (n = 0 gives just the integers)\<close> definition integer_mod_group :: "nat \<Rightarrow> int monoid" where "integer_mod_group n \<equiv> if n = 0 then integer_group else \<lparr>carrier = {0..<int n}, monoid.mult = (\<lambda>x y. (x+y) mod int n), one = 0\<rparr>" lemma carrier_integer_mod_group: "carrier(integer_mod_group n) = (if n=0 then UNIV else {0..<int n})" by (simp add: integer_mod_group_def) lemma one_integer_mod_group[simp]: "one(integer_mod_group n) = 0" by (simp add: integer_mod_group_def) lemma mult_integer_mod_group[simp]: "monoid.mult(integer_mod_group n) = (\<lambda>x y. (x + y) mod int n)" by (simp add: integer_mod_group_def integer_group_def) lemma group_integer_mod_group [simp]: "group (integer_mod_group n)" proof - have *: "\<exists>y\<ge>0. 
y < int n \<and> (y + x) mod int n = 0" if "x < int n" "0 \<le> x" for x proof (cases "x=0") case False with that show ?thesis by (rule_tac x="int n - x" in exI) auto qed (use that in auto) show ?thesis apply (rule groupI) apply (auto simp: integer_mod_group_def Bex_def *, presburger+) done qed lemma inv_integer_mod_group[simp]: "x \<in> carrier (integer_mod_group n) \<Longrightarrow> m_inv(integer_mod_group n) x = (-x) mod int n" by (rule group.inv_equality [OF group_integer_mod_group]) (auto simp: integer_mod_group_def add.commute mod_add_right_eq) lemma pow_integer_mod_group [simp]: fixes m::nat shows "pow (integer_mod_group n) x m = (int m * x) mod int n" proof (cases "n=0") case False show ?thesis by (induction m) (auto simp: add.commute mod_add_right_eq distrib_left mult.commute) qed (simp add: integer_mod_group_def) lemma int_pow_integer_mod_group: "pow (integer_mod_group n) x m = (m * x) mod int n" proof - have "inv\<^bsub>integer_mod_group n\<^esub> (- (m * x) mod int n) = m * x mod int n" by (simp add: carrier_integer_mod_group mod_minus_eq) then show ?thesis by (simp add: int_pow_def2) qed lemma abelian_integer_mod_group [simp]: "comm_group(integer_mod_group n)" by (simp add: add.commute group.group_comm_groupI) lemma integer_mod_group_0 [simp]: "0 \<in> carrier(integer_mod_group n)" by (simp add: integer_mod_group_def) lemma integer_mod_group_1 [simp]: "1 \<in> carrier(integer_mod_group n) \<longleftrightarrow> (n \<noteq> 1)" by (auto simp: integer_mod_group_def) lemma trivial_integer_mod_group: "trivial_group(integer_mod_group n) \<longleftrightarrow> n = 1" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs by (simp add: trivial_group_def carrier_integer_mod_group set_eq_iff split: if_split_asm) (presburger+) next assume ?rhs then show ?lhs by (force simp: trivial_group_def carrier_integer_mod_group) qed subsection\<open>Cyclic groups\<close> lemma (in group) subgroup_of_powers: "x \<in> carrier G \<Longrightarrow> subgroup (range (\<lambda>n::int. x [^] n)) G" apply (auto simp: subgroup_def image_iff simp flip: int_pow_mult int_pow_neg) apply (metis group.int_pow_diff int_pow_closed is_group r_inv) done lemma (in group) carrier_subgroup_generated_by_singleton: assumes "x \<in> carrier G" shows "carrier(subgroup_generated G {x}) = (range (\<lambda>n::int. x [^] n))" proof show "carrier (subgroup_generated G {x}) \<subseteq> range (\<lambda>n::int. x [^] n)" proof (rule subgroup_generated_minimal) show "subgroup (range (\<lambda>n::int. x [^] n)) G" using assms subgroup_of_powers by blast show "{x} \<subseteq> range (\<lambda>n::int. x [^] n)" by clarify (metis assms int_pow_1 range_eqI) qed have x: "x \<in> carrier (subgroup_generated G {x})" using assms subgroup_generated_subset_carrier_subset by auto show "range (\<lambda>n::int. x [^] n) \<subseteq> carrier (subgroup_generated G {x})" proof clarify fix n :: "int" show "x [^] n \<in> carrier (subgroup_generated G {x})" by (simp add: x subgroup_int_pow_closed subgroup_subgroup_generated) qed qed definition cyclic_group where "cyclic_group G \<equiv> \<exists>x \<in> carrier G. subgroup_generated G {x} = G" lemma (in group) cyclic_group: "cyclic_group G \<longleftrightarrow> (\<exists>x \<in> carrier G. carrier G = range (\<lambda>n::int. x [^] n))" proof - have "\<And>x. \<lbrakk>x \<in> carrier G; carrier G = range (\<lambda>n::int. x [^] n)\<rbrakk> \<Longrightarrow> \<exists>x\<in>carrier G. 
subgroup_generated G {x} = G" by (rule_tac x=x in bexI) (auto simp: generate_pow subgroup_generated_def intro!: monoid.equality) then show ?thesis unfolding cyclic_group_def using carrier_subgroup_generated_by_singleton by fastforce qed lemma cyclic_integer_group [simp]: "cyclic_group integer_group" proof - have *: "int n \<in> generate integer_group {1}" for n proof (induction n) case 0 then show ?case using generate.simps by force next case (Suc n) then show ?case by simp (metis generate.simps insert_subset integer_group_def monoid.simps(1) subsetI) qed have **: "i \<in> generate integer_group {1}" for i proof (cases i rule: int_cases) case (nonneg n) then show ?thesis by (simp add: *) next case (neg n) then have "-i \<in> generate integer_group {1}" by (metis "*" add.inverse_inverse) then have "- (-i) \<in> generate integer_group {1}" by (metis UNIV_I group.generate_m_inv_closed group_integer_group integer_group_def inv_integer_group partial_object.select_convs(1) subsetI) then show ?thesis by simp qed show ?thesis unfolding cyclic_group_def by (rule_tac x=1 in bexI) (auto simp: carrier_subgroup_generated ** intro: monoid.equality) qed lemma nontrivial_integer_group [simp]: "\<not> trivial_group integer_group" using integer_mod_group_def trivial_integer_mod_group by presburger lemma (in group) cyclic_imp_abelian_group: "cyclic_group G \<Longrightarrow> comm_group G" apply (auto simp: cyclic_group comm_group_def is_group intro!: monoid_comm_monoidI) apply (metis add.commute int_pow_mult rangeI) done lemma trivial_imp_cyclic_group: "trivial_group G \<Longrightarrow> cyclic_group G" by (metis cyclic_group_def group.subgroup_generated_group_carrier insertI1 trivial_group_def) lemma (in group) cyclic_group_alt: "cyclic_group G \<longleftrightarrow> (\<exists>x. subgroup_generated G {x} = G)" proof safe fix x assume *: "subgroup_generated G {x} = G" show "cyclic_group G" proof (cases "x \<in> carrier G") case True then show ?thesis using \<open>subgroup_generated G {x} = G\<close> cyclic_group_def by blast next case False then show ?thesis by (metis "*" Int_empty_right Int_insert_right_if0 carrier_subgroup_generated generate_empty trivial_group trivial_imp_cyclic_group) qed qed (auto simp: cyclic_group_def) lemma (in group) cyclic_group_generated: "cyclic_group (subgroup_generated G {x})" using group.cyclic_group_alt group_subgroup_generated subgroup_generated2 by blast lemma (in group) cyclic_group_epimorphic_image: assumes "h \<in> epi G H" "cyclic_group G" "group H" shows "cyclic_group H" proof - interpret h: group_hom using assms by (simp add: group_hom_def group_hom_axioms_def is_group epi_def) obtain x where "x \<in> carrier G" and x: "carrier G = range (\<lambda>n::int. x [^] n)" and eq: "carrier H = h ` carrier G" using assms by (auto simp: cyclic_group epi_def) have "h ` carrier G = range (\<lambda>n::int. h x [^]\<^bsub>H\<^esub> n)" by (metis (no_types, lifting) \<open>x \<in> carrier G\<close> h.hom_int_pow image_cong image_image x) then show ?thesis using \<open>x \<in> carrier G\<close> eq h.cyclic_group by blast qed lemma isomorphic_group_cyclicity: "\<lbrakk>G \<cong> H; group G; group H\<rbrakk> \<Longrightarrow> cyclic_group G \<longleftrightarrow> cyclic_group H" by (meson ex_in_conv group.cyclic_group_epimorphic_image group.iso_sym is_iso_def iso_iff_mon_epi) end
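/- Editorial sketch, not part of the Isabelle theory above: its `integer_mod_group n` (addition modulo `n`) corresponds on the Lean/Mathlib side to `ZMod n`; that correspondence is assumed here for illustration only, since the Isabelle development itself never mentions `ZMod`. Two decidable checks of addition in ℤ/5ℤ follow. -/
import Mathlib

-- 3 + 4 = 2 (mod 5); both checks close by `decide`, since `ZMod 5` is a finite type
-- with decidable equality.
example : (3 : ZMod 5) + 4 = 2 := by decide
example : ∀ x : ZMod 5, x + (5 - x) = 0 := by decide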
/- Copyright (c) 2014 Parikshit Khanna. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro -/ import Lean import Mathlib.Init.Data.List.Instances import Mathlib.Init.Data.Nat.Basic /-! ## Definitions on Lists This file contains various definitions on `List`. It does not contain proofs about these definitions, those are contained in other files in `Mathlib.Data.List`. -/ namespace List /-- Split a list at an index. ``` splitAt 2 [a, b, c] = ([a, b], [c]) ``` -/ def splitAt : ℕ → List α → List α × List α | n+1, x :: xs => let (l, r) := splitAt n xs; (x :: l, r) | _, xs => ([], xs) /-- Split a list at an index. Ensures the left list always has the specified length by right padding with the provided default element. ``` splitAtD 2 [a, b, c] x = ([a, b], [c]) splitAtD 4 [a, b, c] x = ([a, b, c, x], []) ``` -/ def splitAtD : ℕ → List α → α → List α × List α | 0, xs, a => ([], xs) | n+1, [], a => let (l, r) := splitAtD n [] a; (a :: l, r) | n+1, x :: xs, a => let (l, r) := splitAtD n xs a; (x :: l, r) /-- An auxiliary function for `splitOnP`. -/ def splitOnPAux {α : Type u} (P : α → Prop) [DecidablePred P] : List α → (List α → List α) → List (List α) | [], f => [f []] | h :: t, f => if P h then f [] :: splitOnPAux P t id else splitOnPAux P t fun l => f (h :: l) /-- Split a list at every element satisfying a predicate. -/ def splitOnP {α : Type u} (P : α → Prop) [DecidablePred P] (l : List α) : List (List α) := splitOnPAux P l id /-- Split a list at every occurrence of an element. ``` [1,1,2,3,2,4,4].split_on 2 = [[1,1],[3],[4,4]] ``` -/ def splitOn {α : Type u} [DecidableEq α] (a : α) (as : List α) : List (List α) := as.splitOnP (· = a) /-- Apply a function to the nth tail of `l`. Returns the input without using `f` if the index is larger than the length of the List. ``` modifyNthTail f 2 [a, b, c] = [a, b] ++ f [c] ``` -/ @[simp] def modifyNthTail (f : List α → List α) : ℕ → List α → List α | 0, l => f l | n+1, [] => [] | n+1, a :: l => a :: modifyNthTail f n l /-- Apply `f` to the head of the list, if it exists. -/ @[simp] def modifyHead (f : α → α) : List α → List α | [] => [] | a :: l => f a :: l /-- Apply `f` to the nth element of the list, if it exists. -/ def modifyNth (f : α → α) : ℕ → List α → List α := modifyNthTail (modifyHead f) /-- Apply `f` to the last element of `l`, if it exists. -/ @[simp] def modifyLast (f : α → α) : List α → List α | [] => [] | [x] => [f x] | x :: xs => x :: modifyLast f xs /-- `insertNth n a l` inserts `a` into the list `l` after the first `n` elements of `l` ``` insertNth 2 1 [1, 2, 3, 4] = [1, 2, 1, 3, 4] ``` -/ def insertNth (n : ℕ) (a : α) : List α → List α := modifyNthTail (cons a) n /-- Take `n` elements from a list `l`. If `l` has less than `n` elements, append `n - length l` elements `x`. -/ def takeD : ∀ n : ℕ, List α → α → List α | 0, l, _ => [] | n+1, l, x => l.headD x :: takeD n l.tail x /-- Fold a function `f` over the list from the left, returning the list of partial results. ``` scanl (+) 0 [1, 2, 3] = [0, 1, 3, 6] ``` -/ def scanl (f : α → β → α) : α → List β → List α | a, [] => [a] | a, b :: l => a :: scanl f (f a b) l /-- Auxiliary definition used to define `scanr`. 
If `scanrAux f b l = (b', l')` then `scanr f b l = b' :: l'` -/ def scanrAux (f : α → β → β) (b : β) : List α → β × List β | [] => (b, []) | a :: l => let (b', l') := scanrAux f b l (f a b', b' :: l') /-- Fold a function `f` over the list from the right, returning the list of partial results. ``` scanr (+) 0 [1, 2, 3] = [6, 5, 3, 0] ``` -/ def scanr (f : α → β → β) (b : β) (l : List α) : List β := let (b', l') := scanrAux f b l b' :: l' /-- Given a function `f : α → β ⊕ γ`, `partitionMap f l` maps the list by `f` whilst partitioning the result into a pair of lists, `list β × list γ`, partitioning the `sum.inl _` into the left list, and the `sum.inr _` into the right List. `partitionMap (id : ℕ ⊕ ℕ → ℕ ⊕ ℕ) [inl 0, inr 1, inl 2] = ([0,2], [1])` -/ def partitionMap (f : α → β ⊕ γ) : List α → List β × List γ | [] => ([], []) | x :: xs => match f x with | Sum.inr r => Prod.map id (cons r) $ partitionMap f xs | Sum.inl l => Prod.map (cons l) id $ partitionMap f xs /-- `find p l` is the first element of `l` satisfying `p`, or `none` if no such element exists. -/ def find (p : α → Prop) [DecidablePred p] : List α → Option α | [] => none | a :: l => if p a then some a else find p l /-- Auxiliary definition for `foldlIdx`. -/ def foldlIdxAux (f : ℕ → α → β → α) : ℕ → α → List β → α | _, a, [] => a | i, a, b :: l => foldlIdxAux f (i+1) (f i a b) l /-- Fold a list from left to right as with `foldl`, but the combining function also receives each element's index. -/ def foldlIdx (f : ℕ → α → β → α) (a : α) (l : List β) : α := foldlIdxAux f 0 a l /-- Auxiliary definition for `foldrIdx`. -/ def foldrIdxAux (f : ℕ → α → β → β) : ℕ → β → List α → β | _, b, [] => b | i, b, a :: l => f i a (foldrIdxAux f (i+1) b l) /-- Fold a list from right to left as with `foldr`, but the combining function also receives each element's index. -/ def foldrIdx (f : ℕ → α → β → β) (b : β) (l : List α) : β := foldrIdxAux f 0 b l /-- `findIdxs p l` is the list of indexes of elements of `l` that satisfy `p`. -/ def findIdxs (p : α → Prop) [DecidablePred p] (l : List α) : List Nat := foldrIdx (fun i a is => if p a then i :: is else is) [] l /-- Returns the elements of `l` that satisfy `p` together with their indexes in `l`. The returned list is ordered by index. -/ def indexesValues (p : α → Prop) [DecidablePred p] (l : List α) : List (ℕ × α) := foldrIdx (fun i a l => if p a then (i, a) :: l else l) [] l /-- `indexesOf a l` is the list of all indexes of `a` in `l`. For example: indexesOf a [a, b, a, a] = [0, 2, 3] -/ def indexesOf [DecidableEq α] (a : α) : List α → List Nat := findIdxs (Eq a) /-- `lookmap` is a combination of `lookup` and `filterMap`. `lookmap f l` will apply `f : α → option α` to each element of the list, replacing `a → b` at the first value `a` in the list such that `f a = some b`. -/ def lookmap (f : α → Option α) : List α → List α | [] => [] | a :: l => match f a with | some b => b :: l | none => a :: lookmap f l /-- `countp p l` is the number of elements of `l` that satisfy `p`. -/ def countp (p : α → Prop) [DecidablePred p] : List α → Nat | [] => 0 | x :: xs => if p x then countp p xs + 1 else countp p xs /-- `count a l` is the number of occurrences of `a` in `l`. -/ def count [DecidableEq α] (a : α) : List α → Nat := countp (Eq a) /-- `isPrefix l₁ l₂`, or `l₁ <+: l₂`, means that `l₁` is a prefix of `l₂`, that is, `l₂` has the form `l₁ ++ t` for some `t`.
-/ def isPrefix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, l₁ ++ t = l₂ /-- `isSuffix l₁ l₂`, or `l₁ <:+ l₂`, means that `l₁` is a suffix of `l₂`, that is, `l₂` has the form `t ++ l₁` for some `t`. -/ def isSuffix (l₁ : List α) (l₂ : List α) : Prop := ∃ t, t ++ l₁ = l₂ /-- `isInfix l₁ l₂`, or `l₁ <:+: l₂`, means that `l₁` is a contiguous substring of `l₂`, that is, `l₂` has the form `s ++ l₁ ++ t` for some `s, t`. -/ def isInfix (l₁ : List α) (l₂ : List α) : Prop := ∃ s t, s ++ l₁ ++ t = l₂ infixl:50 " <+: " => isPrefix infixl:50 " <:+ " => isSuffix infixl:50 " <:+: " => isInfix /-- `inits l` is the list of initial segments of `l`. ``` inits [1, 2, 3] = [[], [1], [1, 2], [1, 2, 3]] ``` -/ @[simp] def inits : List α → List (List α) | [] => [[]] | a :: l => [] :: map (fun t => a :: t) (inits l) /-- `tails l` is the list of terminal segments of `l`. ``` tails [1, 2, 3] = [[1, 2, 3], [2, 3], [3], []] ``` -/ @[simp] def tails : List α → List (List α) | [] => [[]] | a :: l => (a :: l) :: tails l def sublists'Aux : List α → (List α → List β) → List (List β) → List (List β) | [], f, r => f [] :: r | a :: l, f, r => sublists'Aux l f (sublists'Aux l (f ∘ cons a) r) /-- `sublists' l` is the list of all (non-contiguous) sublists of `l`. It differs from `sublists` only in the order of appearance of the sublists; `sublists'` uses the first element of the list as the MSB, `sublists` uses the first element of the list as the LSB. ``` sublists' [1, 2, 3] = [[], [3], [2], [2, 3], [1], [1, 3], [1, 2], [1, 2, 3]] ``` -/ def sublists' (l : List α) : List (List α) := sublists'Aux l id [] def sublistsAux : List α → (List α → List β → List β) → List β | [], f => [] | a :: l, f => f [a] (sublistsAux l fun ys r => f ys (f (a :: ys) r)) /-- `sublists l` is the list of all (non-contiguous) sublists of `l`; cf. `sublists'` for a different ordering. ``` sublists [1, 2, 3] = [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]] ``` -/ def sublists (l : List α) : List (List α) := [] :: sublistsAux l cons def sublistsAux₁ : List α → (List α → List β) → List β | [], f => [] | a :: l, f => f [a] ++ sublistsAux₁ l fun ys => f ys ++ f (a :: ys) section Forall₂ variable {r : α → β → Prop} {p : γ → δ → Prop} /-- `Forall₂ R l₁ l₂` means that `l₁` and `l₂` have the same length, and whenever `a` is the nth element of `l₁`, and `b` is the nth element of `l₂`, then `R a b` is satisfied. -/ inductive Forall₂ (R : α → β → Prop) : List α → List β → Prop | nil : Forall₂ R [] [] | cons {a b l₁ l₂} : R a b → Forall₂ R l₁ l₂ → Forall₂ R (a :: l₁) (b :: l₂) attribute [simp] Forall₂.nil end Forall₂ /-- Auxiliary definition used to define `transpose`. `transposeAux l L` takes each element of `l` and appends it to the start of each element of `L`. `transposeAux [a, b, c] [l₁, l₂, l₃] = [a::l₁, b::l₂, c::l₃]` -/ def transposeAux : List α → List (List α) → List (List α) | [], ls => ls | a :: i, [] => [a] :: transposeAux i [] | a :: i, l :: ls => (a :: l) :: transposeAux i ls /-- transpose of a list of lists, treated as a matrix. ``` transpose [[1, 2], [3, 4], [5, 6]] = [[1, 3, 5], [2, 4, 6]] ``` -/ def transpose : List (List α) → List (List α) | [] => [] | l :: ls => transposeAux l (transpose ls) /-- List of all sections through a list of lists. A section of `[L₁, L₂, ..., Lₙ]` is a list whose first element comes from `L₁`, whose second element comes from `L₂`, and so on. 
-/ def sections : List (List α) → List (List α) | [] => [[]] | l :: L => (sections L).bind fun s => l.map fun a => a :: s /-- `erasep p l` removes the first element of `l` satisfying the predicate `p`. -/ def erasep (p : α → Prop) [DecidablePred p] : List α → List α | [] => [] | a :: l => if p a then l else a :: erasep p l /-- `extractp p l` returns a pair of an element `a` of `l` satisfying the predicate `p`, and `l`, with `a` removed. If there is no such element `a` it returns `(none, l)`. -/ def extractp (p : α → Prop) [DecidablePred p] : List α → Option α × List α | [] => (none, []) | a :: l => if p a then (some a, l) else let (a', l') := extractp p l (a', a :: l') /-- `revzip l` returns a list of pairs of the elements of `l` paired with the elements of `l` in reverse order. ``` revzip [1,2,3,4,5] = [(1, 5), (2, 4), (3, 3), (4, 2), (5, 1)] ``` -/ def revzip (l : List α) : List (α × α) := zip l l.reverse /-- `product l₁ l₂` is the list of pairs `(a, b)` where `a ∈ l₁` and `b ∈ l₂`. ``` product [1, 2] [5, 6] = [(1, 5), (1, 6), (2, 5), (2, 6)] ``` -/ def product (l₁ : List α) (l₂ : List β) : List (α × β) := l₁.bind $ fun a => l₂.map $ Prod.mk a /-- `sigma l₁ l₂` is the list of dependent pairs `(a, b)` where `a ∈ l₁` and `b ∈ l₂ a`. ``` sigma [1, 2] (λ_, [(5 : ℕ), 6]) = [(1, 5), (1, 6), (2, 5), (2, 6)] ``` -/ protected def sigma {σ : α → Type _} (l₁ : List α) (l₂ : ∀ a, List (σ a)) : List (Σ a, σ a) := l₁.bind $ fun a => (l₂ a).map $ Sigma.mk a /-- Auxiliary definition used to define `ofFn`. `ofFnAux f m h l` returns the first `m` elements of `ofFn f` appended to `l` -/ def ofFnAux {n} (f : Fin n → α) : ∀ m, m ≤ n → List α → List α | 0, h, l => l | m+1, h, l => ofFnAux f m (Nat.le_of_lt h) (f ⟨m, h⟩ :: l) /-- `ofFn f` with `f : fin n → α` returns the list whose ith element is `f i` ``` ofFn f = [f 0, f 1, ... , f(n - 1)] ``` -/ def ofFn {n} (f : Fin n → α) : List α := ofFnAux f n (Nat.le_refl _) [] /-- `ofFnNthVal f i` returns `some (f i)` if `i < n` and `none` otherwise. -/ def ofFnNthVal {n} (f : Fin n → α) (i : ℕ) : Option α := if h : i < n then some (f ⟨i, h⟩) else none /-- `disjoint l₁ l₂` means that `l₁` and `l₂` have no elements in common. -/ def disjoint (l₁ l₂ : List α) : Prop := ∀ ⦃a⦄, a ∈ l₁ → a ∈ l₂ → False section Pairwise variable (R : α → α → Prop) -- ././Mathport/Syntax/Translate/Basic.lean:452:2: warning: expanding binder collection (a' «expr ∈ » l) /-- `Pairwise R l` means that all the elements with earlier indexes are `R`-related to all the elements with later indexes. Pairwise R [1, 2, 3] ↔ R 1 2 ∧ R 1 3 ∧ R 2 3 For example if `R = (≠)` then it asserts `l` has no duplicates, and if `R = (<)` then it asserts that `l` is (strictly) sorted. -/ inductive Pairwise : List α → Prop | nil : Pairwise [] | cons : ∀ {a : α} {l : List α}, (∀ a' ∈ l, R a a') → Pairwise l → Pairwise (a :: l) end Pairwise -- ././Mathport/Syntax/Translate/Basic.lean:452:2: warning: expanding binder collection (y «expr ∈ » IH) /-- `pwFilter R l` is a maximal sublist of `l` which is `Pairwise R`. `pwFilter (≠)` is the erase duplicates function (cf. `eraseDup`), and `pwFilter (<)` finds a maximal increasing subsequence in `l`. For example, pwFilter (<) [0, 1, 5, 2, 6, 3, 4] = [0, 1, 2, 3, 4] -/ def pwFilter (R : α → α → Prop) [DecidableRel R] : List α → List α | [] => [] | x :: xs => let IH := pwFilter R xs if ∀ y ∈ IH, R x y then x :: IH else IH section Chain variable (R : α → α → Prop) /-- `Chain R a l` means that `R` holds between adjacent elements of `a::l`.
``` Chain R a [b, c, d] ↔ R a b ∧ R b c ∧ R c d ``` -/ inductive Chain : α → List α → Prop | nil {a : α} : Chain a [] | cons : ∀ {a b : α} {l : List α}, R a b → Chain b l → Chain a (b :: l) /-- `Chain' R l` means that `R` holds between adjacent elements of `l`. ``` Chain' R [a, b, c, d] ↔ R a b ∧ R b c ∧ R c d ``` -/ def Chain' : List α → Prop | [] => True | a :: l => Chain R a l end Chain /-- `Nodup l` means that `l` has no duplicates, that is, any element appears at most once in the List. It is defined as `Pairwise (≠)`. -/ def Nodup : List α → Prop := Pairwise (· ≠ ·) /-- `eraseDup l` removes duplicates from `l` (taking only the first occurrence). Defined as `pwFilter (≠)`. eraseDup [1, 0, 2, 2, 1] = [0, 2, 1] -/ def eraseDup [DecidableEq α] : List α → List α := pwFilter (· ≠ ·) /-- `range' s n` is the list of numbers `[s, s+1, ..., s+n-1]`. It is intended mainly for proving properties of `range` and `iota`. -/ @[simp] def range' : ℕ → ℕ → List ℕ | s, 0 => [] | s, n+1 => s :: range' (s+1) n /-- Drop `none`s from a list, and replace each remaining `some a` with `a`. -/ def reduceOption {α} : List (Option α) → List α := List.filterMap id /-- `ilast' x xs` returns the last element of `xs` if `xs` is non-empty; it returns `x` otherwise -/ @[simp] def ilast' {α} : α → List α → α | a, [] => a | a, b :: l => ilast' b l /-- `last' xs` returns the last element of `xs` if `xs` is non-empty; it returns `none` otherwise -/ @[simp] def last' {α} : List α → Option α | [] => none | [a] => some a | b :: l => last' l /-- `rotate l n` rotates the elements of `l` to the left by `n` ``` rotate [0, 1, 2, 3, 4, 5] 2 = [2, 3, 4, 5, 0, 1] ``` -/ def rotate (l : List α) (n : ℕ) : List α := let (l₁, l₂) := List.splitAt (n % l.length) l l₂ ++ l₁ /-- rotate' is the same as `rotate`, but slower. Used for proofs about `rotate`-/ def rotate' : List α → ℕ → List α | [], n => [] | l, 0 => l | a :: l, n+1 => rotate' (l ++ [a]) n def mmap {m : Type u → Type v} [Monad m] {α β} (f : α → m β) : List α → m (List β) | [] => pure [] | h :: t => return (← f h) :: (← mmap f t) def mmap' {m : Type → Type v} [Monad m] {α β} (f : α → m β) : List α → m Unit | [] => pure () | h :: t => f h *> t.mmap' f /-- Filters and maps elements of a list -/ def mmapFilter {m : Type → Type v} [Monad m] {α β} (f : α → m (Option β)) : List α → m (List β) | [] => pure [] | h :: t => do let b ← f h let t' ← t.mmapFilter f pure $ match b with | none => t' | some x => x :: t' /-- `mmapUpperTriangle f l` calls `f` on all elements in the upper triangular part of `l × l`. That is, for each `e ∈ l`, it will run `f e e` and then `f e e'` for each `e'` that appears after `e` in `l`. ``` mmapUpperTriangle f [1, 2, 3] = return [← f 1 1, ← f 1 2, ← f 1 3, ← f 2 2, ← f 2 3, ← f 3 3] ``` -/ def mmapUpperTriangle {m} [Monad m] {α β : Type u} (f : α → α → m β) : List α → m (List β) | [] => pure [] | h :: t => return (← f h h) :: (← t.mmap (f h)) ++ (← t.mmapUpperTriangle f) /-- `mmap'Diag f l` calls `f` on all elements in the upper triangular part of `l × l`. That is, for each `e ∈ l`, it will run `f e e` and then `f e e'` for each `e'` that appears after `e` in `l`. 
``` mmap'Diag f [1, 2, 3] = do f 1 1; f 1 2; f 1 3; f 2 2; f 2 3; f 3 3 ``` -/ def mmap'Diag {m} [Monad m] {α} (f : α → α → m Unit) : List α → m Unit | [] => return () | h :: t => do f h h; t.mmap' (f h); t.mmap'Diag f protected def traverse {F : Type u → Type v} [Applicative F] {α β} (f : α → F β) : List α → F (List β) | [] => pure [] | x :: xs => cons <$> f x <*> List.traverse f xs /-- `getRest l l₁` returns `some l₂` if `l = l₁ ++ l₂`. If `l₁` is not a prefix of `l`, returns `none` -/ def getRest [DecidableEq α] : List α → List α → Option (List α) | l, [] => some l | [], _ => none | x :: l, y :: l₁ => if x = y then getRest l l₁ else none /-- `List.slice n m xs` removes a slice of length `m` at index `n` in list `xs`. -/ def slice {α} : ℕ → ℕ → List α → List α | 0, n, xs => xs.drop n | n+1, m, [] => [] | n+1, m, x :: xs => x :: slice n m xs /-- Left-biased version of `List.map₂`. `map₂Left' f as bs` applies `f` to each pair of elements `aᵢ ∈ as` and `bᵢ ∈ bs`. If `bs` is shorter than `as`, `f` is applied to `none` for the remaining `aᵢ`. Returns the results of the `f` applications and the remaining `bs`. ``` map₂Left' prod.mk [1, 2] ['a'] = ([(1, some 'a'), (2, none)], []) map₂Left' prod.mk [1] ['a', 'b'] = ([(1, some 'a')], ['b']) ``` -/ @[simp] def map₂Left' (f : α → Option β → γ) : List α → List β → List γ × List β | [], bs => ([], bs) | a :: as, [] => ((a :: as).map fun a => f a none, []) | a :: as, b :: bs => let r := map₂Left' f as bs; (f a (some b) :: r.1, r.2) /-- Right-biased version of `List.map₂`. `map₂Right' f as bs` applies `f` to each pair of elements `aᵢ ∈ as` and `bᵢ ∈ bs`. If `as` is shorter than `bs`, `f` is applied to `none` for the remaining `bᵢ`. Returns the results of the `f` applications and the remaining `as`. ``` map₂Right' prod.mk [1] ['a', 'b'] = ([(some 1, 'a'), (none, 'b')], []) map₂Right' prod.mk [1, 2] ['a'] = ([(some 1, 'a')], [2]) ``` -/ def map₂Right' (f : Option α → β → γ) (as : List α) (bs : List β) : List γ × List α := map₂Left' (flip f) bs as /-- Left-biased version of `List.zip`. `zipLeft' as bs` returns the list of pairs `(aᵢ, bᵢ)` for `aᵢ ∈ as` and `bᵢ ∈ bs`. If `bs` is shorter than `as`, the remaining `aᵢ` are paired with `none`. Also returns the remaining `bs`. ``` zipLeft' [1, 2] ['a'] = ([(1, some 'a'), (2, none)], []) zipLeft' [1] ['a', 'b'] = ([(1, some 'a')], ['b']) zipLeft' = map₂Left' prod.mk ``` -/ def zipLeft' : List α → List β → List (α × Option β) × List β := map₂Left' Prod.mk /-- Right-biased version of `List.zip`. `zipRight' as bs` returns the list of pairs `(aᵢ, bᵢ)` for `aᵢ ∈ as` and `bᵢ ∈ bs`. If `as` is shorter than `bs`, the remaining `bᵢ` are paired with `none`. Also returns the remaining `as`. ``` zipRight' [1] ['a', 'b'] = ([(some 1, 'a'), (none, 'b')], []) zipRight' [1, 2] ['a'] = ([(some 1, 'a')], [2]) zipRight' = map₂Right' prod.mk ``` -/ def zipRight' : List α → List β → List (Option α × β) × List α := map₂Right' Prod.mk /-- Left-biased version of `List.map₂`. `map₂Left f as bs` applies `f` to each pair `aᵢ ∈ as` and `bᵢ ‌∈ bs`. If `bs` is shorter than `as`, `f` is applied to `none` for the remaining `aᵢ`. ``` map₂Left prod.mk [1, 2] ['a'] = [(1, some 'a'), (2, none)] map₂Left prod.mk [1] ['a', 'b'] = [(1, some 'a')] map₂Left f as bs = (map₂Left' f as bs).fst ``` -/ @[simp] def map₂Left (f : α → Option β → γ) : List α → List β → List γ | [], _ => [] | a :: as, [] => (a :: as).map fun a => f a none | a :: as, b :: bs => f a (some b) :: map₂Left f as bs /-- Right-biased version of `List.map₂`. 
`map₂Right f as bs` applies `f` to each pair `aᵢ ∈ as` and `bᵢ ‌∈ bs`. If `as` is shorter than `bs`, `f` is applied to `none` for the remaining `bᵢ`. ``` map₂Right prod.mk [1, 2] ['a'] = [(some 1, 'a')] map₂Right prod.mk [1] ['a', 'b'] = [(some 1, 'a'), (none, 'b')] map₂Right f as bs = (map₂Right' f as bs).fst ``` -/ def map₂Right (f : Option α → β → γ) (as : List α) (bs : List β) : List γ := map₂Left (flip f) bs as /-- Left-biased version of `List.zip`. `zipLeft as bs` returns the list of pairs `(aᵢ, bᵢ)` for `aᵢ ∈ as` and `bᵢ ∈ bs`. If `bs` is shorter than `as`, the remaining `aᵢ` are paired with `none`. ``` zipLeft [1, 2] ['a'] = [(1, some 'a'), (2, none)] zipLeft [1] ['a', 'b'] = [(1, some 'a')] zipLeft = map₂Left prod.mk ``` -/ def zipLeft : List α → List β → List (α × Option β) := map₂Left Prod.mk /-- Right-biased version of `List.zip`. `zipRight as bs` returns the list of pairs `(aᵢ, bᵢ)` for `aᵢ ∈ as` and `bᵢ ∈ bs`. If `as` is shorter than `bs`, the remaining `bᵢ` are paired with `none`. ``` zipRight [1, 2] ['a'] = [(some 1, 'a')] zipRight [1] ['a', 'b'] = [(some 1, 'a'), (none, 'b')] zipRight = map₂Right prod.mk ``` -/ def zipRight : List α → List β → List (Option α × β) := map₂Right Prod.mk /-- If all elements of `xs` are `some xᵢ`, `allSome xs` returns the `xᵢ`. Otherwise it returns `none`. ``` allSome [some 1, some 2] = some [1, 2] allSome [some 1, none ] = none ``` -/ def allSome : List (Option α) → Option (List α) | [] => some [] | some a :: as => cons a <$> allSome as | none :: as => none /-- `fillNones xs ys` replaces the `none`s in `xs` with elements of `ys`. If there are not enough `ys` to replace all the `none`s, the remaining `none`s are dropped from `xs`. ``` fillNones [none, some 1, none, none] [2, 3] = [2, 1, 3] ``` -/ def fillNones {α} : List (Option α) → List α → List α | [], _ => [] | some a :: as, as' => a :: fillNones as as' | none :: as, [] => as.reduceOption | none :: as, a :: as' => a :: fillNones as as' /-- `takeList as ns` extracts successive sublists from `as`. For `ns = n₁ ... nₘ`, it first takes the `n₁` initial elements from `as`, then the next `n₂` ones, etc. It returns the sublists of `as` -- one for each `nᵢ` -- and the remaining elements of `as`. If `as` does not have at least as many elements as the sum of the `nᵢ`, the corresponding sublists will have less than `nᵢ` elements. ``` takeList ['a', 'b', 'c', 'd', 'e'] [2, 1, 1] = ([['a', 'b'], ['c'], ['d']], ['e']) takeList ['a', 'b'] [3, 1] = ([['a', 'b'], []], []) ``` -/ def takeList {α} : List α → List ℕ → List (List α) × List α | xs, [] => ([], xs) | xs, n :: ns => let ⟨xs₁, xs₂⟩ := xs.splitAt n let ⟨xss, rest⟩ := takeList xs₂ ns (xs₁ :: xss, rest) /-- Auxliary definition used to define `toChunks`. `toChunksAux n xs i` returns `(xs.take i, (xs.drop i).toChunks (n+1))`, that is, the first `i` elements of `xs`, and the remaining elements chunked into sublists of length `n+1`. -/ def toChunksAux {α} (n : ℕ) : List α → ℕ → List α × List (List α) | [], i => ([], []) | x :: xs, 0 => let (l, L) := toChunksAux n xs n ([], (x :: l) :: L) | x :: xs, i+1 => let (l, L) := toChunksAux n xs i (x :: l, L) /-- `xs.toChunks n` splits the list into sublists of size at most `n`, such that `(xs.toChunks n).join = xs`. 
``` [1, 2, 3, 4, 5, 6, 7, 8].toChunks 10 = [[1, 2, 3, 4, 5, 6, 7, 8]] [1, 2, 3, 4, 5, 6, 7, 8].toChunks 3 = [[1, 2, 3], [4, 5, 6], [7, 8]] [1, 2, 3, 4, 5, 6, 7, 8].toChunks 2 = [[1, 2], [3, 4], [5, 6], [7, 8]] [1, 2, 3, 4, 5, 6, 7, 8].toChunks 0 = [[1, 2, 3, 4, 5, 6, 7, 8]] ``` -/ def toChunks {α} : ℕ → List α → List (List α) | _, [] => [] | 0, xs => [xs] | n+1, x :: xs => let (l, L) := toChunksAux n xs n (x :: l) :: L /-! We add some n-ary versions of `List.zipWith` for functions with more than two arguments. These can also be written in terms of `List.zip` or `List.zipWith`. For example, `zipWith₃ f xs ys zs` could also be written as `zipWith id (zipWith f xs ys) zs` or as `(zip xs $ zip ys zs).map $ λ ⟨x, y, z⟩, f x y z`. -/ /-- Ternary version of `List.zipWith`. -/ def zipWith₃ (f : α → β → γ → δ) : List α → List β → List γ → List δ | x :: xs, y :: ys, z :: zs => f x y z :: zipWith₃ f xs ys zs | _, _, _ => [] /-- Quaternary version of `List.zipWith`. -/ def zipWith₄ (f : α → β → γ → δ → ε) : List α → List β → List γ → List δ → List ε | x :: xs, y :: ys, z :: zs, u :: us => f x y z u :: zipWith₄ f xs ys zs us | _, _, _, _ => [] /-- Quinary version of `List.zipWith`. -/ def zipWith₅ (f : α → β → γ → δ → ε → ζ) : List α → List β → List γ → List δ → List ε → List ζ | x :: xs, y :: ys, z :: zs, u :: us, v :: vs => f x y z u v :: zipWith₅ f xs ys zs us vs | _, _, _, _, _ => [] /-- An auxiliary function for `List.mapWithPrefixSuffix`. -/ def mapWithPrefixSuffixAux {α β} (f : List α → α → List α → β) : List α → List α → List β | prev, [] => [] | prev, h :: t => f prev h t :: mapWithPrefixSuffixAux f (prev.concat h) t /-- `List.mapWithPrefixSuffix f l` maps `f` across a list `l`. For each `a ∈ l` with `l = pref ++ [a] ++ suff`, `a` is mapped to `f pref a suff`. Example: if `f : list ℕ → ℕ → list ℕ → β`, `List.mapWithPrefixSuffix f [1, 2, 3]` will produce the list `[f [] 1 [2, 3], f [1] 2 [3], f [1, 2] 3 []]`. -/ def mapWithPrefixSuffix {α β} (f : List α → α → List α → β) (l : List α) : List β := mapWithPrefixSuffixAux f [] l /-- `List.mapWithComplement f l` is a variant of `List.mapWithPrefixSuffix` that maps `f` across a list `l`. For each `a ∈ l` with `l = pref ++ [a] ++ suff`, `a` is mapped to `f a (pref ++ suff)`, i.e., the list input to `f` is `l` with `a` removed. Example: if `f : ℕ → list ℕ → β`, `List.mapWithComplement f [1, 2, 3]` will produce the list `[f 1 [2, 3], f 2 [1, 3], f 3 [1, 2]]`. -/ def mapWithComplement {α β} (f : α → List α → β) : List α → List β := mapWithPrefixSuffix fun pref a suff => f a (pref ++ suff) end List
section{*Properties for Proving the Abstract Translation Algorithm*} theory HBDTranslationProperties imports ExtendedHBDAlgebra Diagrams begin context BaseOperationVars begin lemma io_diagram_fb_perm_eq: "io_diagram A \<Longrightarrow> fb_perm_eq A" proof (simp add: fb_perm_eq_def, safe) fix x assume [simp]: "perm x (VarFB A)" assume [simp]: "io_diagram A" have [simp]: "perm (VarFB A) x" by (simp add: perm_sym) have [simp]: "set (VarFB A) \<inter> set (InFB A) = {}" by (simp add: VarFB_def Var_def InFB_def) have [simp]: "set (VarFB A) \<inter> set (OutFB A) = {}" by (simp add: VarFB_def Var_def OutFB_def) have [simp]: "perm (Out A) (VarFB A @ OutFB A)" by (metis OutFB_def VarFB_def Var_def \<open>io_diagram A\<close> diff_inter_left perm_switch_aux_c perm_sym io_diagram_def) have [simp]: "set x \<subseteq> set (VarFB A) \<union> set (OutFB A)" using \<open>perm x (VarFB A)\<close> perm_set_eq by blast have [simp]: "distinct x" using \<open>perm x (VarFB A)\<close> by (metis VarFB_def Var_def \<open>perm (VarFB A) x\<close> \<open>io_diagram A\<close> dist_perm distinct_inter io_diagram_def) have "set x \<inter> set (InFB A) = {}" using \<open>perm x (VarFB A)\<close> by (metis Diff_disjoint InFB_def perm_diff_eq set_diff) have [simp]: "set (In A) \<subseteq> set (VarFB A) \<union> set (InFB A)" by (simp add: InFB_def set_diff) have [simp]: "set x \<inter> set (InFB A) = {}" by (simp add: \<open>set x \<inter> set (InFB A) = {}\<close>) have [simp]: "TI (Trs A) = TVs (In A)" using \<open>io_diagram A\<close> io_diagram_distinct(3) by blast have [simp]: "TO (Trs A) = TVs (Out A)" using \<open>io_diagram A\<close> io_diagram_distinct(4) by blast have [simp]: "distinct (Out A)" using \<open>io_diagram A\<close> io_diagram_distinct(2) by blast have "(fb ^^ length (VarFB A)) ([VarFB A @ InFB A \<leadsto> In A] oo Trs A oo [Out A \<leadsto> VarFB A @ OutFB A]) = (fb ^^ length (VarFB A)) ([x @ InFB A \<leadsto> VarFB A @ InFB A] oo ([VarFB A @ InFB A \<leadsto> In A] oo Trs A oo [Out A \<leadsto> VarFB A @ OutFB A]) oo [VarFB A @ OutFB A \<leadsto> x @ OutFB A])" by (subst fb_perm, simp_all add: fbtype_def) also have "... = (fb ^^ length (VarFB A)) ( ([x @ InFB A \<leadsto> VarFB A @ InFB A] oo [VarFB A @ InFB A \<leadsto> In A]) oo Trs A oo ([Out A \<leadsto> VarFB A @ OutFB A] oo [VarFB A @ OutFB A \<leadsto> x @ OutFB A]))" by (simp add: comp_assoc) also have "... 
= (fb ^^ length (VarFB A)) ( ([x @ InFB A \<leadsto> In A]) oo Trs A oo ([Out A \<leadsto> x @ OutFB A]))" apply (subgoal_tac "[x @ InFB A \<leadsto> VarFB A @ InFB A] oo [VarFB A @ InFB A \<leadsto> In A] = [x @ InFB A \<leadsto> In A]") apply simp apply (subgoal_tac "[Out A \<leadsto> VarFB A @ OutFB A] oo [VarFB A @ OutFB A \<leadsto> x @ OutFB A] = [Out A \<leadsto> x @ OutFB A]") apply simp by (simp_all add: switch_comp) finally show "(fb ^^ length (VarFB A)) ([VarFB A @ InFB A \<leadsto> In A] oo Trs A oo [Out A \<leadsto> VarFB A @ OutFB A]) = (fb ^^ length (VarFB A)) ([x @ InFB A \<leadsto> In A] oo Trs A oo [Out A \<leadsto> x @ OutFB A])" by simp qed theorem FeedbackSerial: "io_diagram A \<Longrightarrow> io_diagram B \<Longrightarrow> set (In A) \<inter> set (In B) = {} (*required*) \<Longrightarrow> set (Out A) \<inter> set (Out B) = {} \<Longrightarrow> FB (A ||| B) = FB (FB (A) ;; FB (B))" apply (rule FeedbackSerial_Feedbackless, simp_all) apply (rule io_diagram_fb_perm_eq) by (simp add: io_diagram_Parallel) lemmas fb_perm_sym = fb_perm [THEN sym] declare length_TVs [simp del] declare [[simp_trace_depth_limit=40]] lemma in_out_equiv_FB: "io_diagram B \<Longrightarrow> in_out_equiv A B \<Longrightarrow> in_out_equiv (FB A) (FB B)" apply (rule in_out_equiv_FB_less, simp_all) apply (rule io_diagram_fb_perm_eq) using in_out_equiv_io_diagram by blast end end
subroutine computepdfuncertainty(PDFarray,PDFcentral, & PDFperror,PDFnerror,PDFerror) c--- Routine to compute PDF uncertainty given an array of results c--- for different PDF uncertainty sets (PDFarray). c--- Returns central value (PDFcentral), positive and negative c--- excursions (PDFperror, PDFnerror) and symmetric uncertainty (PDFerror). c--- Note that the appropriate method for computing uncertainties c--- is chosen according to the value of "PDFname" used in LHAPDF c--- (as a result the logic may need to be updated in the future, c--- it is current as of 9/2013) c--- c--- This routine is similar to the one provided natively in LHAPDFv5.8.5 c--- onwards ("uncertainties.f", written by Graeme Watt) implicit none include 'lhapdf.f' include 'PDFerrors.f' integer j double precision PDFarray(0:1000),PDFcentral,PDFperror,PDFnerror, & PDFerror,sum1,sum2,PDFMCav,PDFMCer logical first data first/.true./ save first c--- NNPDF: just compute average and standard deviation if ((index(PDFname,'NNPDF') .gt. 0) .or. & (index(PDFname,'nnpdf') .gt. 0)) then if (first) then write(6,*)'****************************************************' write(6,*)'* Using MC prescription for PDF uncertainties, *' write(6,*)'* appropriate for NNPDF sets *' write(6,*)'* *' write(6,*)'* (for details and references, see Eqn. (158) *' write(6,*)'* in Appendix B of arXiv:0808.1231 [hep-ph]) *' write(6,*)'****************************************************' first=.false. endif sum1=0d0 sum2=0d0 do j=1,maxPDFsets if (PDFarray(j) .ne. 0.) then sum1=sum1+PDFarray(j) sum2=sum2+PDFarray(j)**2d0 endif enddo PDFMCav = sum1/maxPDFsets PDFMCer = dsqrt(sum2/maxPDFsets - PDFMCav**2d0) PDFperror=PDFMCer PDFnerror=PDFMCer PDFerror=PDFMCer PDFcentral=PDFMCav return endif c--- Alekhin et al: Hessian approach (symmetric) if ((index(PDFname,'ABM') .gt. 0) .or. & (index(PDFname,'ABKM') .gt. 0) .or. & (index(PDFname,'A02M') .gt. 0).or. & (index(PDFname,'abm') .gt. 0) .or. & (index(PDFname,'abkm') .gt. 0) .or. & (index(PDFname,'a02m') .gt. 0)) then if (first) then write(6,*)'****************************************************' write(6,*)'* Using symmetric Hessian prescription for PDF *' write(6,*)'* uncertainties, appropriate for Alekhin et al. *' write(6,*)'****************************************************' first=.false. endif PDFerror=0d0 do j=1,maxPDFsets if (PDFarray(j) .ne. 0.) then PDFerror=PDFerror+(PDFarray(j)-PDFarray(0))**2d0 endif enddo PDFerror=dsqrt(PDFerror) PDFperror=PDFerror PDFnerror=PDFerror PDFcentral=PDFarray(0) return endif c--- everyone else (CTEQ and MSTW): Hessian approach (asymmetric) if (first) then write(6,*)'****************************************************' write(6,*)'* Using asymmetric Hessian prescription for PDF *' write(6,*)'* uncertainties, appropriate for CTEQ, MSTW *' write(6,*)'* *' write(6,*)'* (see, for example Eqn. (43) of *' write(6,*)'* J.Campbell, J.Huston, W.J.Stirling, *' write(6,*)'* Rep. Prog. Phys. 70 (2007) 89) *' write(6,*)'****************************************************' first=.false. endif PDFperror=0d0 PDFnerror=0d0 PDFerror=0d0 do j=1,maxPDFsets-1,2 if (PDFarray(j) .ne. 0.) then PDFperror=PDFperror+max(0d0, & PDFarray(j)-PDFarray(0),PDFarray(j+1)-PDFarray(0))**2 PDFnerror=PDFnerror+max(0d0, & PDFarray(0)-PDFarray(j),PDFarray(0)-PDFarray(j+1))**2 PDFerror=PDFerror+(PDFarray(j)-PDFarray(j+1))**2 endif enddo PDFerror=0.5d0*dsqrt(PDFerror) PDFperror=dsqrt(PDFperror) PDFnerror=dsqrt(PDFnerror) PDFcentral=PDFarray(0) return return end
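To make the three prescriptions easier to compare side by side, here is a minimal illustrative sketch in Python. It is not part of LHAPDF or the routine above; the function and variable names (`pdf_uncertainty`, `pdf_values`) are hypothetical, `pdf_values[0]` is assumed to hold the central member as in the Fortran code, and the guard against unfilled (zero) entries is omitted for brevity.

```python
import math

def pdf_uncertainty(pdf_values, scheme):
    """Return (central, +err, -err, symmetric err) for a list of PDF-member
    predictions, following the same three prescriptions as the routine above."""
    central = pdf_values[0]
    members = pdf_values[1:]
    if scheme == "nnpdf":                      # Monte Carlo replicas: mean and std. dev.
        mean = sum(members) / len(members)
        var = sum(v * v for v in members) / len(members) - mean * mean
        err = math.sqrt(max(var, 0.0))
        return mean, err, err, err
    if scheme == "symmetric_hessian":          # Alekhin et al. (ABM/ABKM/A02M)
        err = math.sqrt(sum((v - central) ** 2 for v in members))
        return central, err, err, err
    # asymmetric Hessian (CTEQ, MSTW): members come in +/- eigenvector pairs
    plus = minus = sym = 0.0
    for up, dn in zip(members[0::2], members[1::2]):
        plus += max(0.0, up - central, dn - central) ** 2
        minus += max(0.0, central - up, central - dn) ** 2
        sym += (up - dn) ** 2
    return central, math.sqrt(plus), math.sqrt(minus), 0.5 * math.sqrt(sym)
```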
Beautifully located 10 marla corner plot with 3 marla of extra land in Bahria Town Phase 7 for sale. Bahria Town Phase 7 is a fully developed area close to the G-T road and DHA Phase 1. It is an ideal location for both residence and investment. Prices have already risen considerably here, and there is still further potential for investment.
State Before: x : ℝ h0x : 0 < x hxp : x < π hx2 : ¬x ≤ 2 ⊢ 2 + 2 = 4 State After: no goals Tactic: norm_num
\section{Design Decisions and Challenges} \label{sec:background} In Presto, we make several design choices to build a highly robust and scalable system that provides near optimal load balancing without requiring changes to the transport layer or switch hardware. We now discuss our design decisions. \subsection{Design Decisions} \tightparagraph{Load Balancing in the Soft Edge} A key design decision in Presto is to implement the functionality in the soft edge (\ie, the vSwitch and hypervisor) of the network. %should we motivate why not to do it in hardware? %A current trend in datacenter design is to utilize network equipment from %original design manufacturers (ODMs) in order to simplify and customize %the network. This has been reported to significantly reduce costs and improve %network performance~\cite{aws-peek}. %Motivation here is that network is becoming very simple, and functionalities %are being moved to an intelligent edge. Examples are VMWare/NSDI, Fabric, NFV in vSwitch, %SDNs/OpenFlow, % Given recent advancements in this space\eric{what advancements? can we be more specific % in order to provide better motivation?}, we believe the soft edge is the best % place to deploy new network functions, such as load balancing, in a scalable and % distributed manner.\eric{is this a new position? vmware nsdi paper...} The vSwitch occupies a unique position in the networking stack in that it can easily modify packets without requiring any changes to customer VMs or transport layers. Functionality built into the vSwitch can be made aware of the underlying hardware offload features presented by the NIC and OS, meaning it can be fast. Furthermore, an open, software-based approach prevents extra hardware cost and vendor lock-in, and allows for simplified network management. These criteria are important for providers today~\cite{aws-peek}. Thanks to projects like Open vSwitch, soft-switching platforms are now fast, mature, open source, adopted widely, remotely configurable, SDN-enabled, and feature-rich~\cite{ovs-edge,nv-mtd,pfaff2015design}. Presto is built on these platforms. \tightparagraph{Reactive vs Proactive Load Balancing} The second major design decision in Presto is to use a proactive approach to congestion management. Bursty behavior can create transient congestion that must be reacted to before switch buffers overflow to prevent loss (timescales range from 100s of $\mu$s to around 4 ms~\cite{planck}). This requirement renders most of the centralized reactive schemes ineffective as they are often too slow to react to any but the largest network events,~\eg{}, link failures. %Not reacting to transient congestion can increase tail latencies. Furthermore, centralized schemes can hurt performance when rerouting flows using stale information. %By reacting on a different scale than the congestion, centralized schemes may reroute flows %on stale information, which can hurt performance. Distributed reactive schemes like MPTCP~\cite{mptcp} and CONGA~\cite{conga} can respond to congestion at faster timescales, but have a high barrier to deployment. Furthermore, distributed reactive schemes must take great care to avoid oscillations. Presto takes a proactive, correct-by-design approach to congestion management. That is, if small, near-uniform portions of traffic are equally balanced over a symmetric network topology, then the load-balancing can remain agnostic to congestion and leave congestion control to the higher layers of the networking stack. %then we don't need to %be reactive to congestion. 
Presto is only reactive to network events such as link failures. Fortunately, the larger timescales of reactive feedback loops are sufficient in these scenarios. \tightparagraph{Load Balancing Granularity} ECMP has been shown to be ineffective at load balancing the network, and thus many schemes advocate load balancing at a finer granularity than a flow~\cite{drb,conga,juniper-vcf,packetspray}. A key factor impacting the choice of granularity is operating at high speed. %and ensuring suitable application level performance. %Implementing fine-grained, near-uniform load balancing in 10+ Gbps networks %is difficult. Operating at 10+ Gbps incurs great computational overhead, and therefore host-based load balancing schemes must be fast, light-weight and take advantage of optimizations provided in the networking stack. For example, per-packet load balancing techniques~\cite{drb} cannot be employed at the network edge because TSO does not work on a per-packet basis. TSO, commonly supported in OSes and NICs, allows for large TCP segments (typically 64 KB in size) to be passed down the networking stack to the NIC. The NIC breaks the segments into MTU-sized packets and copies and computes header data, such as sequence numbers and checksums. When TSO is disabled, a host incurs 100\% utilization of one CPU core and can only achieve around 5.5 Gbps~\cite{bullettrains}. Therefore, per-packet schemes are unlikely to scale to fast networks without hardware support. Limiting overhead by increasing the MTU is difficult because VMs, switches, and routers must all be configured appropriately, and traffic leaving the datacenter must use normal 1500 byte packets. Furthermore, per-packet schemes~\cite{drb,packetspray} are likely to introduce significant reordering into the network. %Achieving line rate at 10 Gbps is nontrivial because dealing %with so many 1500 byte MTU-sized packets at varying layers %of the networking stack causes significant computational overhead. %Therefore, modern operating systems and network adapters have many %optimizations to help burden the load. %On the sender side, TCP Segmentation Offload (TSO)~\footnote{Generically known as large segment offload or generic segmentation offload} %is designed to allow the TCP/IP stack to deal with large TCP segments. Segments, up to 64 KB in size, are passed %from the application layer all the way down to the NIC, which in turn breaks the large segment down into 1500 byte packets. %The NIC copies and calculates the header information, such as checksums and sequence numbers. %This allows the computational burden to be substainally lessened, and therefore rates of 10+ %Gbps can be achieved. With TSO disabled, achievable 10 Gbps throughput drops to around 5.5 Gbps~\cite{bullettrains}. \begin{figure}[!t] \centering \includegraphics[width=0.5\textwidth]{presto/figures/flowlets/histo.pdf} \caption{Stacked histogram of flowlet sizes (in MB) for a 1 GB {\tt scp} file transfer. We vary the number of {\tt nuttcp}~\cite{nuttcp} background flows and denote them as {\em Competing Flows}. The size of each flowlet is shown within each bar, and flowlets are created whenever there is a 500 $\mu$s delay between segments. The top 10 flowlet sizes are shown here. We also analyzed the results of a 1 GB {\tt nuttcp}, {\tt ftp}, and a simple custom client/server transfer and found them to be similar. } \label{micro_flowlet_size} \end{figure} %\aditya{the following two paras don't flow well. 
they don't make a clear case for why flowlets is a bad idea and TSO segment level switching is a good idea. if reordering is the 100us flowlets' big problem then why not use our receiver-side reordering tricks with 100us flowlets? also it is not clear how were are overcoming reordering simply by relying on TSO segment switching} %\eric{Rough estimates from our experiments with 100$\mu$s: ~90\% of flowlet sizes are 114KB or less with flowlets. ~00.1\% of flowlets are %larger than 1 MB, with the largest ranging from 2.1-20.5MB. Some thoughts: (i) 100 $\mu$s flowlets can still have flowlet sizes larger %than switch buffers, which can cause congestion/loss when collision occur, (ii) given that flowlet with 100 $\mu$s does not prevent reordering, %then why should we use flowlets at all? (iii) flowlets were really meant to have inactivity timers larger than the max difference in latency %over any two paths, and buffer latency at one switch alone is ~4ms, so the use of flowlets on these small time scales is fundamentally %flawed, (iv) flowlets are sensitive to traffic demand at sender, (v) flowlets are non-uniform in size, (vi) flowlets could break small %flows over multiple paths. Using TSO segment ensures: (i) small, uniform units of load-balancing, which means (ii) we are indenpendent %of traffic demand, (iii) collisions are not a problem b/c TSO size is smaller than buffer size, (iv) most small flows are routed %over the same path, (v) we do not impose too much computational overhead on sender/receiver and (vi) we still need to solve reordering.} % Rough outline for next two paragraphs % Problem with flowlets: % 1. Sensitive to traffic patterns at the sender % a. In practice, we find this means the distribution of flowlet sizes is not uniform, and has a tail % b. These tails can still experience hash collisions, albeit less often. % i. congestion: lower throughput and to longer mice tail latencies % 2. Needlessly break down small flows into several flowlets % a. Especially early in connection: 100us, 50 KB mice flows broken into 4-5 flowlets % 3. Designed to be robust to reordering, but difficult to tune % Another possibility is to load balance on flowlets~\cite{conga,juniper-vcf}. A flow is comprised of a series of bursts, and a flowlet is created when the inter-arrival time between two packets in a flow exceeds a threshold inactivity timer. In practice, inactivity timer values are between 100-500 $\mu$s~\cite{conga}. These values intend to strike a good balance between load balancing on a sub-flow level and acting as a buffer to limit reordering between flowlets. Flowlets are derived from traffic patterns at the sender, and in practice this means the distribution of flowlet sizes is not uniform. To analyze flowlet sizes, a simple experiment is shown in Figure~\ref{micro_flowlet_size}. We connect a sender and a receiver to a single switch and start an {\tt scp} transfer designed to emulate an elephant flow. Meanwhile, other senders are hooked up to the same switch and send to the same receiver. We vary the number of these competing flows and show a stacked histogram of the top 10 flowlet sizes for a 1 GB {\tt scp} transfer with a 500 $\mu$s inactivity timer. The graph shows flowlet sizes can be quite large, with more than half the transfer being attributed to a single flowlet for up to 3 competing flows. 
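For concreteness, the following is a minimal sketch, in Python and purely illustrative (the function name and packet representation are assumptions, not part of any implementation discussed here), of how an inactivity timer delimits flowlets in this kind of analysis:

```python
def split_into_flowlets(packets, inactivity_timeout=500e-6):
    """Group one flow's packets into flowlets: a new flowlet starts whenever
    the gap between consecutive packets exceeds the inactivity timeout.
    `packets` is a chronologically ordered list of (timestamp_s, size_bytes)."""
    flowlet_sizes = []
    last_ts = None
    for ts, size in packets:
        if last_ts is None or ts - last_ts > inactivity_timeout:
            flowlet_sizes.append(0)    # inactivity gap: start a new flowlet
        flowlet_sizes[-1] += size
        last_ts = ts
    return flowlet_sizes
```

Because the splits are driven entirely by the sender's transmission gaps, the resulting flowlet sizes inherit whatever burstiness the sender exhibits, which is exactly the non-uniformity the histogram above illustrates.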
Using a smaller inactivity timer, such 100$\mu$s, helps (90\% of flowlet sizes are 114KB or less), but does not prevent a long tail: 0.1\% of flowlets are larger than 1 MB, with the largest ranging from 2.1-20.5 MB. Collisions on large flowlet sizes can lead to congestion. The second problem with flowlets is that small inactivity thresholds, such as 100 $\mu$s, can lead to significant reordering. Not only does this impact TCP performance (profiled in Section~\ref{sec:micro}), but it also needlessly breaks small flows into several flowlets. With only one flow in the network, we found a 50 KB mice flow was broken into 4-5 flowlets on average. Small flows typically do not need to be load balanced on a sub-flow level and need not be exposed to reordering. %Another possibility is to load balance on flowlets~\cite{conga,juniper-vcf}. A flow is typically comprised of a series of bursts, and each burst is defined as a flowlet. By monitoring the inter-arrival time of packets in a flow, one can easily define an inactivity timer to seperate flowlets. In practice, intactivity timer values are between 100-500 $\mu$s~\cite{conga}. These values are intended to strike a good balance between creating enough opportunities to load balance on a sub-flow level and also ensure the reordering is limited at the destination due to the time buffer naturally incurred between flowlets. We find, however, that it is difficult to strike a balance between achieving fine-grained, near-optimal load balancing and robustness against reordering. We perform a simple experiment in Figure~\ref{micro_flowlet_size}. We connect a sender and a receiver to a single switch and start a transfer over an application designed to emulate an elephant flow ({\tt scp}). Meanwhile, we also hook up other senders to the same switch and have them send to the same reciever. We vary the number of competing flows and show a stacked histogram of the top 10 flowlet sizes for a 1 GB scp transfer with a 500 $\mu$s inactivity timer. \eric{Competing flows use nuttcp.} The graph shows flowlet sizes can be large, which means hash collisions can still occur on large flowlets. Using a smaller timeout, such as 100 $\mu$s, creates smaller flowlets, but as we show in Section XXX, suffers from severe reordering that greatly reduces throughput and hurts applications. \eric{mention creates congestion, which leads to lower thorughput and increase mice FCT latency} % What do we want in sub-flow load balancing? % A. Want to move toward idealized ECMP: uniform sub-flow load balancing without the tail % B. Units should be as small as possible for fine-grained load balancing, but % not so small to be inefficient (TSO) or break small flows into parts % C. Independent of traffic patterns on sender % As a result, we settle on... The shortcomings of the previous approaches lead us to reconsider on what granularity load balancing should occur. %We take motivation from a best-case ECMP scenario. Ideally, sub-flow load balancing should be done on near uniform sizes. %independent of traffic patterns on the sender to avoid long tails. Also, the unit of load balancing should be small to allow for fine-grained load balancing, but not so small as to break small flows into many pieces or as to be a significant computational burden. As a result, we propose load balancing on 64 KB units of data we call {\em flowcells}. Flowcells have a number of advantages. 
First, the maximum segment size supported by TSO is 64 KB, so flowcells provide a natural interface to high speed optimizations provided by the NIC and OS and can scale to fast networking speeds. Second, an overwhelming fraction of mice flows are less than 64 KB in size and thus do not have to worry about reordering~\cite{benson10,vl2,kandula2009nature}. Last, since most bytes in datacenter networks originate from elephant flows~\cite{kandula2009nature,benson10,dctcp}, this ensures that a significant portion of datacenter traffic is routed on uniform sizes. While promising, this approach must combat reordering to be effective. Essentially we make a trade-off: %we provide line rate load balancing in the most effective %manner as to avoid congestion and then handle reordering head-on at the receiver. the sender avoids congestion by providing fine-grained, near-uniform load balancing, and the receiver handles reordering to maintain line-rate. %We highlight the challenges of this approach %in the next subsection and provide a design to mitigate reordering problems in Section~\ref{sec:design}. %In order to obtain fine-grained, near-optimal load balancing, we should stripe on a granularity %that is indendpent of traffic patterns, near-uniform in size, and as small as possible while still %scaling to fast network speeds. %Therefore, we argue the TSO segment~\keqiang{what about saying maximum TSO segment size (64KB), each TSO segment's size is bounded by maximum TSO size} %is the natural granularity in which to load balance. Doing so %provides several benefits. First, TSO segments are small and near-uniform in size, so an %effective load-balancing scheme should be able to closely track the optimal case of per-packet %load balancing, but without the additional computational overhead. %Second, the TSO engine in the NIC will ensure that all packets created from a TSO segment will contain the same %header information. We show in Section XXX how this is important to deal with reordering because %we can easily impart metadata on all packets within a segment that help us distinguish loss from %ordering~\keqiang{all the packets within the same flowcell contain the same flowcell id. %"all packets created from a TSO segment will contain the same %header information" is fine but a flowcell can contrain several TSO segments depending on TSO segment size}. %Last, small flows less than 64 KB in size will actually be routed over the %same path in the network, meaning a very large fraction of mice flows will not be routed on a subflow %level and thus do not have to worry about reordering~\cite{benson10,vl2,kandula2009nature}~\keqiang{Around 90\% of datacenter flows' sizes are smaller than 64KB~\cite{benson10}, %meaning the overwhelming majority is load balanced like ECMP and we only need to engineering the left 10\%}. %\eric{need to mention that we can combine segments as long as not above 64 KB, so not really %per TSO segment. helps in small flows.} %While promising, this approach has a major challenge: reordering. We highlight these challenges %in the next subsection and provide a design to mitigate the problems in Section~\ref{sec:design}. \tightparagraph{Per-Hop vs End-to-End Multipathing} The last design consideration is whether multipathing should be done on a local, per-hop level (\eg{}, ECMP), or on a global, end-to-end level. 
In Presto, we choose the latter: pre-configured end-to-end paths are allocated in the network and path selection (and thus multipathing) is realized by having the network edge place flowcells onto these paths. Presto can be used to load-balance in an ECMP style per-hop manner, but the choice of end-to-end multipathing provides additional benefits due to greater control of how flowcells are mapped to paths. Per-hop multipathing can be inefficient under asymmetric topologies~\cite{wcmp}, and load-balancing on a global end-to-end level can allow for weighted scheduling at the vSwitch to rebalance traffic. This is especially important when failure occurs. The second benefit is flowcells can be assigned over multiple paths very evenly by iterating over paths in a round-robin, rather than randomized, fashion. %As we show %in Section~\ref{sec:micro}, randomization in per-hop multipathing can lead to "unluckiness" where %multiple flowcells get sent to the same link over a small timescale by multiple flows. This transient congestion %can lead to increased buffer occupancy and higher delays in the network. These benefits can %fundamentally be provided at the per-hop level (\cite{wcmp} handles asymmetry), but require changes to networking firmware. %These considerations motivate us to utilize end-to-end multipathing, but Presto can also %use per-hop multipathing when conveinent. \subsection{Reordering Challenges} %The above design decisions in Presto cause following main challenges: Due to the impact of fine-grained, flowcell-based load balancing, Presto must account for reordering. Here, we highlight reordering challenges. The next section shows how Presto deals with these concerns. %\tightparagraph{Soft Edge Distributed Load Balancing} %There are two main challenges in implementing load balancing at the soft edge. First, the implementation %must scale to fast networking speeds because networking at 10+ Gbps can have significant overhead if not carefully %considered. Therefore, in order to achieve line rate, great care must be taken to ensure that load balancing %schemes are light-weight, simple and can take advantage of optimizations provided by the NIC and OS. %The second major problem is how to load balance in a distributed fashion at the vSwitches in such %a way that the load balancing performs well globally. %Nodes must ensure they are spreading %their traffic equally throughout the network, but in a low-overhead fashion that does not require detailed %topographical information about the network, real-time traffic matrices, or strict coordination %with other senders. %\eric{several things to add: in presto, we can just make dumb edge decisions and not have to worry %about (i) the traffic patterns, that is who else is sending, (ii) topology asymmetry. Basically, %we want to highight that the vSwitch shouldn't require a lot of detailed network-wide information, %but should be able to somehow still load balance in a way that performs very well globally.} \tightparagraph{Reordering's Impact on TCP} The impact of reordering on TCP is well-studied~\cite{leung2007overview,paxson1997end}. Duplicate acknowledgments caused by reordering can cause TCP to move to a more conservative sender state and reduce the sender's congestion window. Relying on parameter tuning, such as adjusting the DUP-ACK threshold, is not ideal because increasing the DUP-ACK threshold increases the time to recover from real loss. 
Other TCP settings such as Forward Acknowledgement (FACK) assume un-acked bytes in the SACK are lost and degrade performance under reordering. A scheme that introduces reordering should not rely on careful configuration of TCP parameters because (i) it is hard to find a single set of parameters that work effectively over multiple scenarios and (ii) datacenter tenants should not be forced to constantly tune their networking stacks. Finally, many reordering-robust variants of TCP have been proposed~\cite{rr-tcp,blanton2002making,tcp-pr}, but as we will show, GRO becomes ineffective under reordering. Therefore, reordering should be handled below the transport layer. \tightparagraph{Computational Bottleneck of Reordering} Akin to TSO, Generic Receive Offload (GRO) mitigates the computational burden of receiving 1500 byte packets at 10 Gbps. GRO is implemented in the kernel of the hypervisor, and its handler is called directly by the NIC driver. It is responsible for aggregating packets into larger segments that are pushed up to OVS and the TCP/IP stack. GRO is implemented in the Linux kernel and is used even without virtualization. Similar functionality can be found in Windows (RSC~\cite{ms-rsc}) and hardware (LRO~\cite{grossman2005large}). Because modern CPUs use aggressive prefetching, the cost of receiving TCP data is now dominated by per-packet, rather than per-byte, operations. As shown by Menon~\cite{optimize-tcp-receive}, the majority of this overhead comes from buffer management and other routines not related to protocol processing, and therefore significant computational overhead can be avoided by aggregating "raw" packets from the NIC into a single {\tt sk\_buff}. %\footnote{Refer to~\cite{linuxgro,optimize-tcp-receive} for detailed study and explanation} Essentially, spending a few cycles to aggregate packets within GRO creates less segments for TCP and prevents having to use substantially more cycles at higher layers in the networking stack. Refer to~\cite{linuxgro,optimize-tcp-receive} for detailed study and explanation. To better understand the problems reordering causes, a brief description of the TCP receive chain in Linux follows. First, interrupt coalescing allows the NIC to create an interrupt for a batch of packets~\cite{mogul1997eliminating,understanding-linux-network}, which prompts the driver to poll the packets into an aggregation queue. Next, the driver invokes the GRO handler, located in the kernel, which {\em merges} the packets into larger segments. The merging continues, possibly across many polling events, until a segment reaches a threshold size, a certain age, or cannot be combined with the incoming packet. Then, the combined, larger segment is {\em pushed up} to the rest of the TCP/IP networking stack. The GRO process is done on a per-flow level. With GRO disabled, throughput drops to around 5.7-7.1 Gbps and CPU utilization spikes to 100\% (Section~\ref{sec:micro} and~\cite{bullettrains}). Receive offload algorithms, whether in hardware (LRO)~\cite{grossman2005large,open-lro} or in software (GRO), are usually {\em stateless} to make them fast: no state is kept beyond the segment being merged. %\begin{figure}[!htb] % \centering % \includegraphics[width=0.45\textwidth]{presto/figures/gro-design/gro.pdf} % \caption{GRO design. 
FIX ME!} % \label{gro-design} %\end{figure} \begin{figure}[!t] \centering \includegraphics[width=0.5\textwidth]{presto/figures/gro-design/gro-break.pdf} \caption{GRO pushes up small segments ($S_i$) during reordering.} \label{gro-break} \end{figure} We now uncover how GRO breaks down in the face of reordering. Figure~\ref{gro-break} shows the impact of reordering on GRO. Reordering does not allow the segment to grow: each reordered packet cannot be merged with the existing segment, and thus the previously created segment must be pushed up. With extreme reordering, GRO is effectively disabled because small MTU-sized segments are constantly pushed up. This causes (i) severe computational overhead and (ii) TCP to be exposed to significant amounts of reordering. We term this the {\em small segment flooding} problem. Determining where to combat the reordering problem has not previously taken the small segment flooding problem into account. Using a reordering buffer to deal with reordered packets is a common solution (\eg{}, works like~\cite{drb} re-sort out-of-order packets in a shim layer below TCP), but a buffer implemented above GRO cannot prevent small segment flooding. Implementing a buffer below GRO means that the NIC must be changed, which is (i) expensive and cumbersome to update and (ii) unlikely to help combat reordering over multiple interrupts. In our system, the buffer is implemented in the GRO layer itself. We argue this is a natural location because GRO can directly control segment sizes while simultaneously limiting the impact of reordering. Furthermore, GRO can still be applied on packets pushed up from LRO, which means hardware doesn't have to be modified or made complex. Implementing a better GRO algorithm has multiple challenges. The algorithm should be light-weight to scale to fast networking speeds. Furthermore, an ideal scheme should be able to distinguish loss from reordering. When a gap in sequence numbers is detected (\eg{}, when $P_5$ is received after $P_2$ in Figure~\ref{gro-break}), it is not obvious if this gap is caused from loss or reordering. If the gap is due to reordering, GRO should not push segments up in order to try to wait to receive the missing gap and merge the missing packets into a preestablished segment. If the gap is due to loss, however, then GRO should immediately push up the segments to allow TCP to react to the loss as fast as possible. Ideally, an updated GRO algorithm should ensure TCP does not perform any worse than a scheme with no reordering. Finally, the scheme should adapt to prevailing network conditions, traffic patterns and application demands.
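To make the trade-off concrete, here is a deliberately simplified, illustrative sketch in Python of a per-flow resequencing buffer that holds out-of-order packets briefly and flushes them upward once a gap has persisted long enough to look like loss. It is not Presto's GRO modification (which, as discussed, operates inside the GRO layer and can exploit per-flowcell metadata); all names and the timeout value are assumptions made for illustration only.

```python
import heapq

class ReorderBuffer:
    """Toy per-flow resequencing buffer sitting below TCP: in-order packets
    are delivered immediately, out-of-order packets are held so they can be
    merged once the missing gap arrives, and a flush timeout bounds how long
    a gap may be treated as reordering before it is assumed to be loss."""

    def __init__(self, flush_timeout=1e-3):
        self.expected_seq = 0      # next in-order sequence number expected
        self.held = []             # min-heap of (seq, length) for held packets
        self.flush_timeout = flush_timeout
        self.gap_since = None      # time at which the current gap opened

    def receive(self, seq, length, now):
        delivered = []
        if seq == self.expected_seq:            # in order: deliver right away
            self.expected_seq += length
            delivered.append((seq, length))
        else:                                   # gap: hold the packet
            heapq.heappush(self.held, (seq, length))
            if self.gap_since is None:
                self.gap_since = now
        # drain held packets that have become in-order
        while self.held and self.held[0][0] == self.expected_seq:
            s, l = heapq.heappop(self.held)
            self.expected_seq += l
            delivered.append((s, l))
        # gap persisted too long: assume loss, flush everything so TCP can react
        if self.held and now - self.gap_since > self.flush_timeout:
            while self.held:
                delivered.append(heapq.heappop(self.held))
            self.expected_seq = max(self.expected_seq,
                                    max(s + l for s, l in delivered))
            self.gap_since = None
        if not self.held:
            self.gap_since = None
        return delivered
```

The point of the sketch is only the decision it encodes: a gap is first treated as reordering (packets are held so larger segments can still be formed), and only after a bounded delay is it treated as loss and pushed up, so TCP neither sees spurious reordering nor waits unnecessarily to react to a real drop.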
-- f1 : (a:Type) -> (x:a) -> (y:x) -> x -- f1 a x y = y hej : (tyOp : List Type -> Type) -> Type hej tyOp = tyOp [Int, Bool] hej2 : List Type -> Type hej2 (t :: ts) = t test21 : Type test21 = hej2 [Type] -- test21 == Type test22 : Type test22 = hej2 [Int] -- test22 == Int
using Distributions # Checks output from chernikova function for some input matrix # defines a specified finitely generated cone. That is, # { x: Ax ≥ 0 } == { B y : y ≥ 0 } # # Arguments: # A::Matrix - constraint matrix (rows correspond to individual constraints) # B::Matrix - matrix of cones rays (columns correspond to individual rays) function test_chernikova(A::Matrix{T1}, B::Matrix{T2}) where {T1<:Union{Integer, Rational}, T2<:Integer} rays = Cones.norm_cols(B) chern_rays = chernikova(A) @test Cones.check_columns_same(rays, chern_rays) end # Simple two rays example println("\tExample 1...") A = [-1//5 4//5; 4//5 -1//5] B = [1 4; 4 1] test_chernikova(A,B) # Redundant constraints println("\tExample 2...") A = [[1 1 0]; [1 0 1]; [1 1 1]; [1 1 1]; [0 1 1]] B = [1 0 0; 0 1 0; 0 0 1] test_chernikova(A,B) # Empty set example println("\tExample 3...") A = [-1 -1 -1] B = Array{Int}(undef, 3, 0) test_chernikova(A, B) # Last two examples are broken... # One ray example println("\tExample 4...") A = [[1 -1]; [-1 1]] B = reshape([1, 1], 2, 1) test_chernikova(A, B) # Example from paper # "Algorithm for finding a general formula for the non-negative solutions of a system of linear inequalities" # by Chernikova (1964) println("\tExample 5...") A = [[3 -4 1 0]; [2 0 4 -1]; [-4 -7 2 4]; [-1 0 20 2]; [6 -5 -4 2]] B = [[1, 0, 0, 2] [0, 0, 1, 4] [0, 1, 4, 16] [0, 0, 2, 4] [0, 32, 128, 336] [6, 0, 0, 6] [21, 12, 0, 42] [52, 40, 4, 120] [20, 0, 32, 4] [24080, 25760, 30800, 53760]] test_chernikova(A, B)
module Data.Time.LocalTime import Data.So import Data.Rational import public Data.Time.LocalTime.TimeZone import public Data.Time.LocalTime.TimeOfDay import public Data.Time.LocalTime.CalendarDiffTime import public Data.Time.LocalTime.Internal.LocalTime import Data.Time.Calendar.Days import Data.Time.Calendar.Gregorian import Data.Time.Clock.DiffTime import Data.Time.Clock.UTCTime import Data.Time.Clock %default total -- -------------------------------------------------------------------------- ||| addLocalTime a b = a + b export addLocalTime : NominalDiffTime -> LocalTime -> LocalTime addLocalTime x = utcToLocalTime utc . addUTCTime x . localTimeToUTC utc ||| diffLocalTime a b = a - b export diffLocalTime : LocalTime -> LocalTime -> NominalDiffTime diffLocalTime a b = diffUTCTime (localTimeToUTC utc a) (localTimeToUTC utc b) ||| Get the local time of a UT1 time on a particular meridian (in degrees, positive is East). export ut1ToLocalTime : Rational -> UniversalTime -> Maybe LocalTime ut1ToLocalTime long date = let localTime = date.modJulianDate + long / 360 in do localMJD <- floor localTime dt <- dayFractionToTimeOfDay (localTime - cast localMJD) pure $ MkLocalTime (ModifiedJulianDay localMJD) dt ||| Get the UT1 time of a local time on a particular meridian (in degrees, positive is East). export localTimeToUT1 : Rational -> LocalTime -> UniversalTime localTimeToUT1 long lt = ModJulianDate (fromInteger lt.localDay.modifiedJulianDay + (timeOfDayToDayFraction lt.localTimeOfDay) - (long / 360)) public export Show UniversalTime where show t = show (ut1ToLocalTime 0 t) -- -------------------------------------------------------------------------- -- vim: tw=80 sw=2 expandtab :
import topology.instances.real open filter real open_locale topological_space variable a : ℕ → ℝ theorem part_a : ¬ (∀ l : ℝ, tendsto a at_top (𝓝 l)) ↔ tendsto a at_top at_top := begin sorry end theorem part_c : (∀ R > 0, ∃ N : ℕ, ∀ n ≥ N, a n > R) ↔ (tendsto a at_top at_top) := begin sorry end theorem part_d : ¬ (∀ L : ℝ, ∀ ε : ℝ, ∃ N : ℕ, ∀ n ≥ N, abs (a n - L) > ε) ↔ (tendsto a at_top at_top) := begin sorry end theorem part_e : (∀ ε > 0, ∃ N : ℕ, ∀ n ≥ N, a n > 1 / ε) ↔ (tendsto a at_top at_top) := begin sorry end theorem part_f : ¬ (∀ n : ℕ, a (n+1) > a n) ↔ (tendsto a at_top at_top) := begin sorry end theorem part_g : ¬ (∃ N : ℕ, ∀ R > 0, ∀ n ≥ N, a n > R) ↔ (tendsto a at_top at_top) := begin sorry end theorem part_h : ¬ (∀ R : ℝ, ∃ n : ℕ, a n > R) ↔ (tendsto a at_top at_top) := begin sorry end
State Before: α : Type u_1 inst✝¹ : CancelCommMonoidWithZero α inst✝ : DecidableRel fun x x_1 => x ∣ x_1 a : α ha : Finite a a ⊢ a ^ 1 ∣ a State After: no goals Tactic: simp State Before: α : Type u_1 inst✝¹ : CancelCommMonoidWithZero α inst✝ : DecidableRel fun x x_1 => x ∣ x_1 a : α ha : Finite a a x✝ : a ^ (1 + 1) ∣ a b : α hb : a = a ^ (1 + 1) * b ⊢ False State After: α : Type u_1 inst✝¹ : CancelCommMonoidWithZero α inst✝ : DecidableRel fun x x_1 => x ∣ x_1 a : α ha : Finite a a x✝ : a ^ (1 + 1) ∣ a b : α hb : 1 = 1 * (a * 1 * b) ⊢ False Tactic: rw [← mul_one a, pow_add, pow_one, mul_assoc, mul_assoc, mul_right_inj' (ne_zero_of_finite ha)] at hb State Before: α : Type u_1 inst✝¹ : CancelCommMonoidWithZero α inst✝ : DecidableRel fun x x_1 => x ∣ x_1 a : α ha : Finite a a x✝ : a ^ (1 + 1) ∣ a b : α hb : 1 = 1 * (a * 1 * b) ⊢ False State After: no goals Tactic: exact mt isUnit_iff_dvd_one.2 (not_unit_of_finite ha) ⟨b, by simp_all⟩ State Before: α : Type u_1 inst✝¹ : CancelCommMonoidWithZero α inst✝ : DecidableRel fun x x_1 => x ∣ x_1 a : α ha : Finite a a x✝ : a ^ (1 + 1) ∣ a b : α hb : 1 = 1 * (a * 1 * b) ⊢ 1 = a * b State After: no goals Tactic: simp_all
[STATEMENT] lemma SUP_prod_alt_def: \<^marker>\<open>contributor \<open>Alessandro Coglio\<close>\<close> "Sup (f ` A) = (Sup ((fst \<circ> f) ` A), Sup((snd \<circ> f) ` A))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Sup (f ` A) = (Sup ((fst \<circ> f) ` A), Sup ((snd \<circ> f) ` A)) [PROOF STEP] by (simp add: Sup_prod_def image_image)
[GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝ : Fintype ι ⊢ stdSimplex ℝ ι ⊆ closedBall 0 1 [PROOFSTEP] intro f hf [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝ : Fintype ι f : ι → ℝ hf : f ∈ stdSimplex ℝ ι ⊢ f ∈ closedBall 0 1 [PROOFSTEP] rw [Metric.mem_closedBall, dist_pi_le_iff zero_le_one] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝ : Fintype ι f : ι → ℝ hf : f ∈ stdSimplex ℝ ι ⊢ ∀ (b : ι), dist (f b) (OfNat.ofNat 0 b) ≤ 1 [PROOFSTEP] intro x [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝ : Fintype ι f : ι → ℝ hf : f ∈ stdSimplex ℝ ι x : ι ⊢ dist (f x) (OfNat.ofNat 0 x) ≤ 1 [PROOFSTEP] rw [Pi.zero_apply, Real.dist_0_eq_abs, abs_of_nonneg <| hf.1 x] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝ : Fintype ι f : ι → ℝ hf : f ∈ stdSimplex ℝ ι x : ι ⊢ f x ≤ 1 [PROOFSTEP] exact (mem_Icc_of_mem_stdSimplex hf x).2 [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁸ : LinearOrderedRing 𝕜 inst✝⁷ : DenselyOrdered 𝕜 inst✝⁶ : TopologicalSpace 𝕜 inst✝⁵ : OrderTopology 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : TopologicalSpace E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ [x-[𝕜]y] ⊆ closure (openSegment 𝕜 x y) [PROOFSTEP] rw [segment_eq_image, openSegment_eq_image, ← closure_Ioo (zero_ne_one' 𝕜)] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁸ : LinearOrderedRing 𝕜 inst✝⁷ : DenselyOrdered 𝕜 inst✝⁶ : TopologicalSpace 𝕜 inst✝⁵ : OrderTopology 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : TopologicalSpace E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ (fun θ => (1 - θ) • x + θ • y) '' closure (Ioo 0 1) ⊆ closure ((fun θ => (1 - θ) • x + θ • y) '' Ioo 0 1) [PROOFSTEP] exact image_closure_subset_closure_image (by continuity) [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁸ : LinearOrderedRing 𝕜 inst✝⁷ : DenselyOrdered 𝕜 inst✝⁶ : TopologicalSpace 𝕜 inst✝⁵ : OrderTopology 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : TopologicalSpace E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ Continuous fun θ => (1 - θ) • x + θ • y [PROOFSTEP] continuity [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝¹¹ : LinearOrderedRing 𝕜 inst✝¹⁰ : DenselyOrdered 𝕜 inst✝⁹ : PseudoMetricSpace 𝕜 inst✝⁸ : OrderTopology 𝕜 inst✝⁷ : ProperSpace 𝕜 inst✝⁶ : CompactIccSpace 𝕜 inst✝⁵ : AddCommGroup E inst✝⁴ : TopologicalSpace E inst✝³ : T2Space E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ closure (openSegment 𝕜 x y) = [x-[𝕜]y] [PROOFSTEP] rw [segment_eq_image, openSegment_eq_image, ← closure_Ioo (zero_ne_one' 𝕜)] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝¹¹ : LinearOrderedRing 𝕜 inst✝¹⁰ : DenselyOrdered 𝕜 inst✝⁹ : PseudoMetricSpace 𝕜 inst✝⁸ : OrderTopology 𝕜 inst✝⁷ : ProperSpace 𝕜 inst✝⁶ : CompactIccSpace 𝕜 inst✝⁵ : AddCommGroup E inst✝⁴ : TopologicalSpace E inst✝³ : T2Space E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ closure ((fun θ => (1 - θ) • x + θ • y) '' Ioo 0 1) = (fun θ => (1 - θ) • x + θ • y) '' closure (Ioo 0 1) [PROOFSTEP] exact (image_closure_of_isCompact (bounded_Ioo _ _).isCompact_closure <| Continuous.continuousOn <| by continuity).symm [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝¹¹ : LinearOrderedRing 𝕜 inst✝¹⁰ : DenselyOrdered 𝕜 inst✝⁹ : PseudoMetricSpace 𝕜 inst✝⁸ : OrderTopology 𝕜 inst✝⁷ : ProperSpace 𝕜 inst✝⁶ : CompactIccSpace 𝕜 inst✝⁵ : AddCommGroup E inst✝⁴ : TopologicalSpace E inst✝³ : T2Space E inst✝² : ContinuousAdd E inst✝¹ : Module 𝕜 E inst✝ : ContinuousSMul 𝕜 E x y : E ⊢ Continuous fun θ => (1 - θ) 
• x + θ • y [PROOFSTEP] continuity [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s a b : 𝕜 ha : 0 < a hb : 0 ≤ b hab : a + b = 1 ⊢ interior (a • s) + closure (b • s) = interior (a • s) + b • s [PROOFSTEP] rw [isOpen_interior.add_closure (b • s)] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s a b : 𝕜 ha : 0 ≤ a hb : 0 < b hab : a + b = 1 ⊢ a • closure s + b • interior s ⊆ interior s [PROOFSTEP] rw [add_comm] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s a b : 𝕜 ha : 0 ≤ a hb : 0 < b hab : a + b = 1 ⊢ b • interior s + a • closure s ⊆ interior s [PROOFSTEP] exact hs.combo_interior_closure_subset_interior hb ha (add_comm a b ▸ hab) [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s a b : 𝕜 ha : 0 ≤ a hb : 0 < b hab : a + b = 1 ⊢ a • s + b • interior s ⊆ interior s [PROOFSTEP] rw [add_comm] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s a b : 𝕜 ha : 0 ≤ a hb : 0 < b hab : a + b = 1 ⊢ b • interior s + a • s ⊆ interior s [PROOFSTEP] exact hs.combo_interior_self_subset_interior hb ha (add_comm a b ▸ hab) [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ interior s hy : y ∈ closure s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] rintro _ ⟨a, b, ha, hb, hab, rfl⟩ [GOAL] case intro.intro.intro.intro.intro ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ interior s hy : y ∈ closure s a b : 𝕜 ha : 0 < a hb : 0 < b hab : a + b = 1 ⊢ a • x + b • y ∈ interior s [PROOFSTEP] exact hs.combo_interior_closure_mem_interior hx hy ha hb.le hab [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ closure s hy : y ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] rintro _ ⟨a, b, ha, hb, hab, rfl⟩ [GOAL] case intro.intro.intro.intro.intro ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ closure s hy : y ∈ interior s a b : 𝕜 ha : 0 < a hb : 0 < b hab : a + b = 1 ⊢ a • x + b • y ∈ interior s 
[PROOFSTEP] exact hs.combo_closure_interior_mem_interior hx hy ha.le hb hab [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ closure s hy : y ∈ interior s t : 𝕜 ht : t ∈ Ioc 0 1 ⊢ x + t • (y - x) ∈ interior s [PROOFSTEP] simpa only [sub_smul, smul_sub, one_smul, add_sub, add_comm] using hs.combo_interior_closure_mem_interior hy hx ht.1 (sub_nonneg.mpr ht.2) (add_sub_cancel'_right _ _) [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s x y : E hx : x ∈ closure s hy : x + y ∈ interior s t : 𝕜 ht : t ∈ Ioc 0 1 ⊢ x + t • y ∈ interior s [PROOFSTEP] simpa only [add_sub_cancel'] using hs.add_smul_sub_mem_interior' hx hy ht [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s ⊢ StrictConvex 𝕜 s [PROOFSTEP] refine' strictConvex_iff_openSegment_subset.2 _ [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s ⊢ Set.Pairwise s fun x y => openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] intro x hx y hy hne [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] by_cases hx' : x ∈ interior s [GOAL] case pos ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : x ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] exact hs.openSegment_interior_self_subset_interior hx' hy [GOAL] case neg ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] by_cases hy' : y ∈ interior s [GOAL] case pos ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E 
hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s hy' : y ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] exact hs.openSegment_self_interior_subset_interior hx hy' [GOAL] case neg ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s hy' : ¬y ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] rcases h ⟨hx, hx'⟩ ⟨hy, hy'⟩ hne with ⟨c, hc⟩ [GOAL] case neg.intro ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s hy' : ¬y ∈ interior s c : 𝕜 hc : ↑(lineMap x y) c ∈ interior s ⊢ openSegment 𝕜 x y ⊆ interior s [PROOFSTEP] refine' (openSegment_subset_union x y ⟨c, rfl⟩).trans (insert_subset_iff.2 ⟨hc, union_subset _ _⟩) [GOAL] case neg.intro.refine'_1 ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s hy' : ¬y ∈ interior s c : 𝕜 hc : ↑(lineMap x y) c ∈ interior s ⊢ openSegment 𝕜 x (↑(lineMap x y) c) ⊆ interior s case neg.intro.refine'_2 ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => ∃ c, ↑(lineMap x y) c ∈ interior s x : E hx : x ∈ s y : E hy : y ∈ s hne : x ≠ y hx' : ¬x ∈ interior s hy' : ¬y ∈ interior s c : 𝕜 hc : ↑(lineMap x y) c ∈ interior s ⊢ openSegment 𝕜 (↑(lineMap x y) c) y ⊆ interior s [PROOFSTEP] exacts [hs.openSegment_self_interior_subset_interior hx hc, hs.openSegment_interior_self_subset_interior hc hy] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => Set.Nonempty ([x-[𝕜]y] \ frontier s) ⊢ StrictConvex 𝕜 s [PROOFSTEP] refine' hs.strictConvex' <| h.imp_on fun x hx y hy _ => _ [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => Set.Nonempty ([x-[𝕜]y] \ frontier s) x : E hx : x ∈ s \ interior s y : E hy : y ∈ s \ interior s x✝ : x ≠ y ⊢ fun ⦃a b⦄ => Set.Nonempty ([a-[𝕜]b] \ frontier s) → ∃ c, ↑(lineMap a b) c ∈ interior s [PROOFSTEP] simp only [segment_eq_image_lineMap, ← self_diff_frontier] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : 
TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => Set.Nonempty ([x-[𝕜]y] \ frontier s) x : E hx : x ∈ s \ interior s y : E hy : y ∈ s \ interior s x✝ : x ≠ y ⊢ Set.Nonempty ((fun a => ↑(lineMap x y) a) '' Icc 0 1 \ frontier s) → ∃ c, ↑(lineMap x y) c ∈ s \ frontier s [PROOFSTEP] rintro ⟨_, ⟨⟨c, hc, rfl⟩, hcs⟩⟩ [GOAL] case intro.intro.intro.intro ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => Set.Nonempty ([x-[𝕜]y] \ frontier s) x : E hx : x ∈ s \ interior s y : E hy : y ∈ s \ interior s x✝ : x ≠ y c : 𝕜 hc : c ∈ Icc 0 1 hcs : ¬(fun a => ↑(lineMap x y) a) c ∈ frontier s ⊢ ∃ c, ↑(lineMap x y) c ∈ s \ frontier s [PROOFSTEP] refine' ⟨c, hs.segment_subset hx.1 hy.1 _, hcs⟩ [GOAL] case intro.intro.intro.intro ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁵ : LinearOrderedField 𝕜 inst✝⁴ : AddCommGroup E inst✝³ : Module 𝕜 E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousConstSMul 𝕜 E s : Set E hs : Convex 𝕜 s h : Set.Pairwise (s \ interior s) fun x y => Set.Nonempty ([x-[𝕜]y] \ frontier s) x : E hx : x ∈ s \ interior s y : E hy : y ∈ s \ interior s x✝ : x ≠ y c : 𝕜 hc : c ∈ Icc 0 1 hcs : ¬(fun a => ↑(lineMap x y) a) c ∈ frontier s ⊢ ↑(lineMap x y) c ∈ [x-[𝕜]y] [PROOFSTEP] exact (segment_eq_image_lineMap 𝕜 x y).symm ▸ mem_image_of_mem _ hc [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Set.Finite s ⊢ IsCompact (↑(convexHull ℝ) s) [PROOFSTEP] rw [hs.convexHull_eq_image] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Set.Finite s ⊢ IsCompact (↑(Finset.sum Finset.univ fun x => LinearMap.smulRight (LinearMap.proj x) ↑x) '' stdSimplex ℝ ↑s) [PROOFSTEP] apply (@isCompact_stdSimplex _ hs.fintype).image [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Set.Finite s ⊢ Continuous ↑(Finset.sum Finset.univ fun x => LinearMap.smulRight (LinearMap.proj x) ↑x) [PROOFSTEP] haveI := hs.fintype [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Set.Finite s this : Fintype ↑s ⊢ Continuous ↑(Finset.sum Finset.univ fun x => LinearMap.smulRight (LinearMap.proj x) ↑x) [PROOFSTEP] apply LinearMap.continuous_on_pi [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Convex ℝ s x : E hx : x ∈ interior s t : ℝ ht : 1 < t ⊢ closure s ⊆ ↑(homothety x t) '' interior s [PROOFSTEP] intro y hy [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Convex ℝ s x : E hx : x ∈ interior s t : ℝ ht : 1 < t y : E hy : y ∈ closure s ⊢ y ∈ ↑(homothety x t) '' 
interior s [PROOFSTEP] have hne : t ≠ 0 := (one_pos.trans ht).ne' [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Convex ℝ s x : E hx : x ∈ interior s t : ℝ ht : 1 < t y : E hy : y ∈ closure s hne : t ≠ 0 ⊢ y ∈ ↑(homothety x t) '' interior s [PROOFSTEP] refine' ⟨homothety x t⁻¹ y, hs.openSegment_interior_closure_subset_interior hx hy _, (AffineEquiv.homothetyUnitsMulHom x (Units.mk0 t hne)).apply_symm_apply y⟩ [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Convex ℝ s x : E hx : x ∈ interior s t : ℝ ht : 1 < t y : E hy : y ∈ closure s hne : t ≠ 0 ⊢ ↑(homothety x t⁻¹) y ∈ openSegment ℝ x y [PROOFSTEP] rw [openSegment_eq_image_lineMap, ← inv_one, ← inv_Ioi (zero_lt_one' ℝ), ← image_inv, image_image, homothety_eq_lineMap] [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hs : Convex ℝ s x : E hx : x ∈ interior s t : ℝ ht : 1 < t y : E hy : y ∈ closure s hne : t ≠ 0 ⊢ ↑(lineMap x y) t⁻¹ ∈ (fun x_1 => ↑(lineMap x y) x_1⁻¹) '' Ioi 1 [PROOFSTEP] exact mem_image_of_mem _ ht [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hconv : Convex ℝ s hne : Set.Nonempty s ⊢ IsPathConnected s [PROOFSTEP] refine' isPathConnected_iff.mpr ⟨hne, _⟩ [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hconv : Convex ℝ s hne : Set.Nonempty s ⊢ ∀ (x : E), x ∈ s → ∀ (y : E), y ∈ s → JoinedIn s x y [PROOFSTEP] intro x x_in y y_in [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hconv : Convex ℝ s hne : Set.Nonempty s x : E x_in : x ∈ s y : E y_in : y ∈ s ⊢ JoinedIn s x y [PROOFSTEP] have H := hconv.segment_subset x_in y_in [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hconv : Convex ℝ s hne : Set.Nonempty s x : E x_in : x ∈ s y : E y_in : y ∈ s H : [x-[ℝ]y] ⊆ s ⊢ JoinedIn s x y [PROOFSTEP] rw [segment_eq_image_lineMap] at H [GOAL] ι : Type u_1 𝕜 : Type u_2 E : Type u_3 inst✝⁴ : AddCommGroup E inst✝³ : Module ℝ E inst✝² : TopologicalSpace E inst✝¹ : TopologicalAddGroup E inst✝ : ContinuousSMul ℝ E s : Set E hconv : Convex ℝ s hne : Set.Nonempty s x : E x_in : x ∈ s y : E y_in : y ∈ s H : ↑(lineMap x y) '' Icc 0 1 ⊆ s ⊢ JoinedIn s x y [PROOFSTEP] exact JoinedIn.ofLine AffineMap.lineMap_continuous.continuousOn (lineMap_apply_zero _ _) (lineMap_apply_one _ _) H
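The tactic trace above finishes the proof that a nonempty convex subset of a real topological vector space is path-connected. As a usage illustration only, here is a minimal Lean 4 sketch of how that fact is typically invoked; it assumes Mathlib is available and that the lemma carries its usual Mathlib name `Convex.isPathConnected` (names and instance requirements may differ between Mathlib versions — the instance assumptions below are copied from the goal state above).

import Mathlib

-- A minimal usage sketch (assumes the Mathlib lemma name `Convex.isPathConnected`).
example {E : Type*} [AddCommGroup E] [Module ℝ E] [TopologicalSpace E]
    [TopologicalAddGroup E] [ContinuousSMul ℝ E] {s : Set E}
    (hconv : Convex ℝ s) (hne : s.Nonempty) : IsPathConnected s :=
  hconv.isPathConnected hne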
[STATEMENT] lemma uv_autoref[autoref_rules,param]: "(uv_lookup,(\<in>)) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" "(uv_empty,{}) \<in> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_set_bit,insert) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_reset_bit,op_set_delete) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_union,(\<union>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_inter,(\<inter>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_diff,(-)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel" "(uv_zeroes,op_set_isEmpty) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" "(uv_equal,(=)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" "(uv_subseteq,(\<subseteq>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" "(uv_subset,(\<subset>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" "(uv_disjoint,op_set_disjoint) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (((uv_lookup, (\<in>)) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel &&& (uv_empty, {}) \<in> \<langle>nat_rel\<rangle>uv_set_rel &&& (uv_set_bit, insert) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel) &&& (uv_reset_bit, op_set_delete) \<in> nat_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel &&& (uv_union, (\<union>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel &&& (uv_inter, (\<inter>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel) &&& ((uv_diff, (-)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel &&& (uv_zeroes, op_set_isEmpty) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel &&& (uv_equal, (=)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel) &&& (uv_subseteq, (\<subseteq>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel &&& (uv_subset, (\<subset>)) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel &&& (uv_disjoint, op_set_disjoint) \<in> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> \<langle>nat_rel\<rangle>uv_set_rel \<rightarrow> bool_rel [PROOF STEP] by (auto simp: uv_set_rel_def br_def simp: uv_memb_correct uv_empty_correct uv_insert_correct uv_delete_correct simp: uv_union_correct uv_inter_correct uv_diff_correct uv_isEmpty_correct simp: uv_equal_correct 
uv_subseteq_correct uv_subset_correct uv_disjoint_correct)
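The lemma above registers the bit-vector implementations (`uv_lookup`, `uv_union`, `uv_inter`, …) as refinements of the corresponding abstract set operations under `uv_set_rel`. Purely to illustrate that refinement pattern — this is not part of the Isabelle development — the following Lean 4 sketch restates the idea with hypothetical names (`bvSet`, `uvSetRel`, `unionRefines` are inventions for this sketch; Mathlib is assumed for `Set`).

import Mathlib

-- Abstraction function: the set of indices at which the bit-vector is set.
-- (Hypothetical name; this only mirrors the shape of `uv_set_rel` above.)
def bvSet (bv : List Bool) : Set Nat := {i | bv.getD i false = true}

-- The refinement relation: a concrete bit-vector represents an abstract set.
def uvSetRel (impl : List Bool) (abs : Set Nat) : Prop := bvSet impl = abs

-- The shape of a correctness statement such as the one for `uv_union`:
-- if the inputs refine `A` and `B`, the implementation refines `A ∪ B`.
def unionRefines (implUnion : List Bool → List Bool → List Bool) : Prop :=
  ∀ a b A B, uvSetRel a A → uvSetRel b B → uvSetRel (implUnion a b) (A ∪ B)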
theorem ex1 : (fun y => y + 0) = (fun x => 0 + x) := by
  funext x
  simp

theorem ex2 : (fun y x => y + x + 0) = (fun x y => y + x) := by
  funext x y
  rw [Nat.add_zero, Nat.add_comm]

theorem ex3 : (fun (x : Nat × Nat) => x.1 + x.2) = (fun (x : Nat × Nat) => x.2 + x.1) := by
  funext (a, b)
  show a + b = b + a
  rw [Nat.add_comm]

theorem ex4 : (fun (x : Nat × Nat) (y : Nat × Nat) => x.1 + y.2) = (fun (x : Nat × Nat) (z : Nat × Nat) => z.2 + x.1) := by
  funext (a, b) (c, d)
  show a + d = d + a
  rw [Nat.add_comm]

theorem ex5 : (fun (x : Id Nat) => x.succ + 0) = (fun (x : Id Nat) => 0 + x.succ) := by
  funext (x : Nat)
  have y := x + 1 -- if `(x : Nat)` is not used at `funext`, then `x+1` would fail to be elaborated since we don't have the instance `Add (Id Nat)`
  rw [Nat.add_comm]

theorem ex6 : (fun (x : Nat) y z => x + y + z) = (fun x y z => x + (y + z)) := by
  funext
  rw [Nat.add_assoc]
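The theorems above exercise the `funext` tactic with plain binders, tuple patterns, multiple arguments, and `Id`-wrapped types. For contrast, the same extensionality step can be taken with the term-level `funext` lemma from core Lean; the sketch below is an added illustration (not part of the original test file) and uses only core `Nat` lemmas.

-- Term-level counterpart of the tactic examples above: `funext` as a lemma.
theorem ex1' : (fun y : Nat => y + 0) = (fun y => y) :=
  funext fun y => Nat.add_zero y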
A sequence converges if and only if the sequence obtained by subtracting a constant from each term converges.
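Written out, the claim is that for a sequence (a n) and a constant c, (a n) converges exactly when (fun n => a n - c) does, with the limits differing by c. Below is a minimal Lean 4 sketch of one plausible way to phrase and prove this over ℝ; it is not the source theorem, and it assumes Mathlib's filter-based `Tendsto` API (`Tendsto.sub_const`, `Tendsto.add_const`), whose exact names may vary by Mathlib version.

import Mathlib

-- A sketch of the statement above: `a` converges iff `fun n => a n - c` does.
example {a : ℕ → ℝ} (c : ℝ) :
    (∃ L, Filter.Tendsto a Filter.atTop (nhds L)) ↔
      ∃ L, Filter.Tendsto (fun n => a n - c) Filter.atTop (nhds L) := by
  constructor
  · rintro ⟨L, hL⟩
    exact ⟨L - c, hL.sub_const c⟩
  · rintro ⟨L, hL⟩
    refine ⟨L + c, ?_⟩
    -- `a n - c + c` simplifies back to `a n`.
    simpa using hL.add_const c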
(* Title: Presburger-Automata/Presburger_Automata.thy Author: Markus Reiter and Stefan Berghofer, TU Muenchen, 2008-2009 *) theory Presburger_Automata imports DFS "HOL-Library.Nat_Bijection" begin section \<open>General automata\<close> definition "reach tr p as q = (q = foldl tr p as)" lemma reach_nil: "reach tr p [] p" by (simp add: reach_def) lemma reach_snoc: "reach tr p bs q \<Longrightarrow> reach tr p (bs @ [b]) (tr q b)" by (simp add: reach_def) lemma reach_nil_iff: "reach tr p [] q = (p = q)" by (auto simp add: reach_def) lemma reach_snoc_iff: "reach tr p (bs @ [b]) k = (\<exists>q. reach tr p bs q \<and> k = tr q b)" by (auto simp add: reach_def) lemma reach_induct [consumes 1, case_names Nil snoc, induct set: reach]: assumes "reach tr p w q" and "P [] p" and "\<And>k x y. \<lbrakk>reach tr p x k; P x k\<rbrakk> \<Longrightarrow> P (x @ [y]) (tr k y)" shows "P w q" using assms by (induct w arbitrary: q rule: rev_induct) (simp add: reach_def)+ lemma reach_trans: "\<lbrakk>reach tr p a r; reach tr r b q\<rbrakk> \<Longrightarrow> reach tr p (a @ b) q" by (simp add: reach_def) lemma reach_inj: "\<lbrakk>reach tr p a q; reach tr p a q'\<rbrakk> \<Longrightarrow> q = q'" by (simp add: reach_def) definition "accepts tr P s as = P (foldl tr s as)" locale Automaton = fixes trans :: "'a \<Rightarrow> 'b \<Rightarrow> 'a" and is_node :: "'a \<Rightarrow> bool" and is_alpha :: "'b \<Rightarrow> bool" assumes trans_is_node: "\<And>q a. \<lbrakk>is_node q; is_alpha a\<rbrakk> \<Longrightarrow> is_node (trans q a)" begin lemma steps_is_node: assumes "is_node q" and "list_all is_alpha w" shows "is_node (foldl trans q w)" using assms by (induct w arbitrary: q) (simp add: trans_is_node)+ lemma reach_is_node: "\<lbrakk>reach trans p w q; is_node p; list_all is_alpha w\<rbrakk> \<Longrightarrow> is_node q" by (simp add: steps_is_node reach_def) end section \<open>BDDs\<close> definition is_alph :: "nat \<Rightarrow> bool list \<Rightarrow> bool" where "is_alph n = (\<lambda>w. length w = n)" datatype 'a bdd = Leaf 'a | Branch "'a bdd" "'a bdd" for map: bdd_map primrec bddh :: "nat \<Rightarrow> 'a bdd \<Rightarrow> bool" where "bddh n (Leaf x) = True" | "bddh n (Branch l r) = (case n of 0 \<Rightarrow> False | Suc m \<Rightarrow> bddh m l \<and> bddh m r)" lemma bddh_ge: assumes "m \<ge> n" assumes "bddh n bdd" shows "bddh m bdd" using assms proof (induct bdd arbitrary: n m) case (Branch l r) then obtain v where V: "n = Suc v" by (cases n) simp+ show ?case proof (cases "n = m") case True with Branch show ?thesis by simp next case False with Branch have "\<exists>w. m = Suc w \<and> n \<le> w" by (cases m) simp+ then obtain w where W: "m = Suc w \<and> n \<le> w" .. with Branch V have "v \<le> w \<and> bddh v l \<and> bddh v r" by simp with Branch have "bddh w l \<and> bddh w r" by blast with W show ?thesis by simp qed qed simp abbreviation "bdd_all \<equiv> pred_bdd" fun bdd_lookup :: "'a bdd \<Rightarrow> bool list \<Rightarrow> 'a" where "bdd_lookup (Leaf x) bs = x" | "bdd_lookup (Branch l r) (b#bs) = bdd_lookup (if b then r else l) bs" lemma bdd_all_bdd_lookup: "\<lbrakk>bddh (length ws) bdd; bdd_all P bdd\<rbrakk> \<Longrightarrow> P (bdd_lookup bdd ws)" by (induct bdd ws rule: bdd_lookup.induct) simp+ lemma bdd_all_bdd_lookup_iff: "bddh n bdd \<Longrightarrow> bdd_all P bdd = (\<forall>ws. 
length ws = n \<longrightarrow> P (bdd_lookup bdd ws))" apply (rule iffI) apply (simp add: bdd_all_bdd_lookup) proof (induct bdd arbitrary: n) case Leaf thus ?case apply simp apply (erule mp) apply (rule_tac x="replicate n False" in exI, simp) done next case (Branch l r n) then obtain k where k: "n = Suc k" by (cases n) simp+ from Branch have R: "\<And>ws. length ws = n \<Longrightarrow> P (bdd_lookup (Branch l r) ws)" by simp have "\<And>ws. length ws = k \<Longrightarrow> P (bdd_lookup l ws) \<and> P (bdd_lookup r ws)" proof - fix ws :: "bool list" assume H: "length ws = k" with k have "length (False#ws) = n" by simp hence 1: "P (bdd_lookup (Branch l r) (False#ws))" by (rule R) from H k have "length (True#ws) = n" by simp hence "P (bdd_lookup (Branch l r) (True#ws))" by (rule R) with 1 show "P (bdd_lookup l ws) \<and> P (bdd_lookup r ws)" by simp qed with Branch k show ?case by auto qed lemma bdd_all_bdd_map: assumes "bdd_all P bdd" and "\<And>a. P a \<Longrightarrow> Q (f a)" shows "bdd_all Q (bdd_map f bdd)" using assms by (induct bdd) simp+ lemma bddh_bdd_map: shows "bddh n (bdd_map f bdd) = bddh n bdd" proof assume "bddh n (bdd_map f bdd)" thus "bddh n bdd" proof (induct bdd arbitrary: n) case (Branch l r n) then obtain k where "n = Suc k" by (cases n) simp+ with Branch show ?case by simp qed simp next assume "bddh n bdd" thus "bddh n (bdd_map f bdd)" proof (induct bdd arbitrary: n) case (Branch l r n) then obtain k where "n = Suc k" by (cases n) simp+ with Branch show ?case by simp qed simp qed lemma bdd_map_bdd_lookup: assumes "bddh (length ws) bdd" shows "bdd_lookup (bdd_map f bdd) ws = f (bdd_lookup bdd ws)" using assms by (induct bdd ws rule: bdd_lookup.induct) (auto simp add: bddh_bdd_map)+ fun bdd_binop :: "('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> 'a bdd \<Rightarrow> 'b bdd \<Rightarrow> 'c bdd" where "bdd_binop f (Leaf x) (Leaf y) = Leaf (f x y)" | "bdd_binop f (Branch l r) (Leaf y) = Branch (bdd_binop f l (Leaf y)) (bdd_binop f r (Leaf y))" | "bdd_binop f (Leaf x) (Branch l r) = Branch (bdd_binop f (Leaf x) l) (bdd_binop f (Leaf x) r)" | "bdd_binop f (Branch l\<^sub>1 r\<^sub>1) (Branch l\<^sub>2 r\<^sub>2) = Branch (bdd_binop f l\<^sub>1 l\<^sub>2) (bdd_binop f r\<^sub>1 r\<^sub>2)" lemma bddh_binop: "bddh n (bdd_binop f l r) = (bddh n l \<and> bddh n r)" by (induct f l r arbitrary: n rule: bdd_binop.induct) (auto split: nat.split_asm) lemma bdd_lookup_binop: "\<lbrakk>bddh (length bs) l; bddh (length bs) r\<rbrakk> \<Longrightarrow> bdd_lookup (bdd_binop f l r) bs = f (bdd_lookup l bs) (bdd_lookup r bs)" apply (induct f l r arbitrary: bs rule: bdd_binop.induct) apply simp apply (case_tac bs) apply simp+ apply (case_tac bs) apply simp+ apply (case_tac bs) apply simp+ done lemma bdd_all_bdd_binop: assumes "bdd_all P bdd" and "bdd_all Q bdd'" and "\<And>a b. \<lbrakk>P a; Q b\<rbrakk> \<Longrightarrow> R (f a b)" shows "bdd_all R (bdd_binop f bdd bdd')" using assms by (induct f bdd bdd' rule: bdd_binop.induct) simp+ lemma insert_list_idemp[simp]: "List.insert x (List.insert x xs) = List.insert x xs" by simp primrec add_leaves :: "'a bdd \<Rightarrow> 'a list \<Rightarrow> 'a list" where "add_leaves (Leaf x) xs = List.insert x xs" | "add_leaves (Branch b c) xs = add_leaves c (add_leaves b xs)" lemma add_leaves_bdd_lookup: "bddh n b \<Longrightarrow> (x \<in> set (add_leaves b xs)) = ((\<exists>bs. 
x = bdd_lookup b bs \<and> is_alph n bs) \<or> x \<in> set xs)" apply (induct b arbitrary: xs n) apply (auto split: nat.split_asm) apply (rule_tac x="replicate n arbitrary" in exI) apply (simp add: is_alph_def) apply (rule_tac x="True # bs" in exI) apply (simp add: is_alph_def) apply (rule_tac x="False # bs" in exI) apply (simp add: is_alph_def) apply (case_tac bs) apply (simp add: is_alph_def) apply (simp add: is_alph_def) apply (drule_tac x=list in spec) apply (case_tac a) apply simp apply simp apply (rule_tac x=list in exI) apply simp done lemma add_leaves_bdd_all_eq: "list_all P (add_leaves tr xs) \<longleftrightarrow> bdd_all P tr \<and> list_all P xs" by (induct tr arbitrary: xs) (auto simp add: list_all_iff) lemmas add_leaves_bdd_all_eq' = add_leaves_bdd_all_eq [where xs="[]", simplified, symmetric] lemma add_leaves_mono: "set xs \<subseteq> set ys \<Longrightarrow> set (add_leaves tr xs) \<subseteq> set (add_leaves tr ys)" by (induct tr arbitrary: xs ys) auto lemma add_leaves_binop_subset: "set (add_leaves (bdd_binop f b b') [f x y. x \<leftarrow> xs, y \<leftarrow> ys]) \<subseteq> (\<Union>x\<in>set (add_leaves b xs). \<Union>y\<in>set (add_leaves b' ys). {f x y})" (is "?A \<subseteq> ?B") proof - have "?A \<subseteq> (\<Union>x\<in>set (add_leaves b xs). f x ` set (add_leaves b' ys))" proof (induct f b b' arbitrary: xs ys rule: bdd_binop.induct) case (1 f x y xs ys) then show ?case by auto next case (2 f l r y xs ys) then show ?case apply auto apply (drule_tac ys1="[f x y. x \<leftarrow> add_leaves l xs, y \<leftarrow> List.insert y ys]" in rev_subsetD [OF _ add_leaves_mono]) apply auto apply (drule meta_spec, drule meta_spec, drule subsetD, assumption) apply simp done next case (3 f x l r xs ys) then show ?case apply auto apply (drule_tac ys1="[f x y. x \<leftarrow> List.insert x xs, y \<leftarrow> add_leaves l ys]" in rev_subsetD [OF _ add_leaves_mono]) apply auto apply (drule meta_spec, drule meta_spec, drule subsetD, assumption) apply simp done next case (4 f l\<^sub>1 r\<^sub>1 l\<^sub>2 r\<^sub>2 xs ys) then show ?case apply auto apply (drule_tac ys1="[f x y. x \<leftarrow> add_leaves l\<^sub>1 xs, y \<leftarrow> add_leaves l\<^sub>2 ys]" in rev_subsetD [OF _ add_leaves_mono]) apply simp apply (drule meta_spec, drule meta_spec, drule subsetD, assumption) apply simp done qed also have "(\<Union>x\<in>set (add_leaves b xs). f x ` set (add_leaves b' ys)) = ?B" by auto finally show ?thesis . qed section \<open>DFAs\<close> type_synonym bddtable = "nat bdd list" type_synonym astate = "bool list" type_synonym dfa = "bddtable \<times> astate" definition dfa_is_node :: "dfa \<Rightarrow> nat \<Rightarrow> bool" where "dfa_is_node A = (\<lambda>q. q < length (fst A))" definition wf_dfa :: "dfa \<Rightarrow> nat \<Rightarrow> bool" where "wf_dfa A n = (list_all (bddh n) (fst A) \<and> list_all (bdd_all (dfa_is_node A)) (fst A) \<and> length (snd A) = length (fst A) \<and> length (fst A) > 0)" definition dfa_trans :: "dfa \<Rightarrow> nat \<Rightarrow> bool list \<Rightarrow> nat" where "dfa_trans A q bs \<equiv> bdd_lookup (fst A ! q) bs" definition dfa_accepting :: "dfa \<Rightarrow> nat \<Rightarrow> bool" where "dfa_accepting A q = snd A ! q" locale aut_dfa = fixes A n assumes well_formed: "wf_dfa A n" sublocale aut_dfa < Automaton "dfa_trans A" "dfa_is_node A" "is_alph n" proof fix q a assume Q: "dfa_is_node A q" and A: "is_alph n a" hence QL: "q < length (fst A)" by (simp add: dfa_is_node_def) with well_formed A have H: "bddh (length a) (fst A ! 
q)" by (simp add: wf_dfa_def list_all_iff is_alph_def) from QL well_formed have "bdd_all (dfa_is_node A) (fst A ! q)" by (simp add: wf_dfa_def list_all_iff) with H show "dfa_is_node A (dfa_trans A q a)" by (simp add: dfa_trans_def bdd_all_bdd_lookup) qed context aut_dfa begin lemmas trans_is_node = trans_is_node lemmas steps_is_node = steps_is_node lemmas reach_is_node = reach_is_node end lemmas dfa_trans_is_node = aut_dfa.trans_is_node [OF aut_dfa.intro] lemmas dfa_steps_is_node = aut_dfa.steps_is_node [OF aut_dfa.intro] lemmas dfa_reach_is_node = aut_dfa.reach_is_node [OF aut_dfa.intro] abbreviation "dfa_steps A \<equiv> foldl (dfa_trans A)" abbreviation "dfa_accepts A \<equiv> accepts (dfa_trans A) (dfa_accepting A) 0" abbreviation "dfa_reach A \<equiv> reach (dfa_trans A)" lemma dfa_startnode_is_node: "wf_dfa A n \<Longrightarrow> dfa_is_node A 0" by (simp add: dfa_is_node_def wf_dfa_def) subsection \<open>Minimization\<close> primrec make_tr :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a list" where "make_tr f 0 i = []" | "make_tr f (Suc n) i = f i # make_tr f n (Suc i)" primrec fold_map_idx :: "(nat \<Rightarrow> 'c \<Rightarrow> 'a \<Rightarrow> 'c \<times> 'b) \<Rightarrow> nat \<Rightarrow> 'c \<Rightarrow> 'a list \<Rightarrow> 'c \<times> 'b list" where "fold_map_idx f i y [] = (y, [])" | "fold_map_idx f i y (x # xs) = (let (y', x') = f i y x in let (y'', xs') = fold_map_idx f (Suc i) y' xs in (y'', x' # xs'))" definition init_tr :: "dfa \<Rightarrow> bool list list" where "init_tr = (\<lambda>(bd,as). make_tr (\<lambda>i. make_tr (\<lambda>j. as ! i \<noteq> as ! j) i 0) (length bd - 1) 1)" definition tr_lookup :: "bool list list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool" where "tr_lookup = (\<lambda>T i j. (if i = j then False else if i > j then T ! (i - 1) ! j else T ! (j - 1) ! i))" fun check_eq :: "nat bdd \<Rightarrow> nat bdd \<Rightarrow> bool list list \<Rightarrow> bool" where "check_eq (Leaf i) (Leaf j) T = (\<not> tr_lookup T i j)" | "check_eq (Branch l r) (Leaf i) T = (check_eq l (Leaf i) T \<and> check_eq r (Leaf i) T)" | "check_eq (Leaf i) (Branch l r) T = (check_eq (Leaf i) l T \<and> check_eq (Leaf i) r T)" | "check_eq (Branch l1 r1) (Branch l2 r2) T = (check_eq l1 l2 T \<and> check_eq r1 r2 T)" definition iter :: "dfa \<Rightarrow> bool list list \<Rightarrow> bool \<times> bool list list" where "iter = (\<lambda>(bd,as) T. fold_map_idx (\<lambda>i. fold_map_idx (\<lambda>j c b. let b' = b \<or> \<not> check_eq (bd ! i) (bd ! j) T in (c \<or> b \<noteq> b', b')) 0) 1 False T)" definition count_tr :: "bool list list \<Rightarrow> nat" where "count_tr = foldl (foldl (\<lambda>y x. if x then y else Suc y)) 0" lemma fold_map_idx_fst_snd_eq: assumes f: "\<And>i c x. fst (f i c x) = (c \<or> x \<noteq> snd (f i c x))" shows "fst (fold_map_idx f i c xs) = (c \<or> xs \<noteq> snd (fold_map_idx f i c xs))" by (induct xs arbitrary: i c) (simp_all add: split_beta f) lemma foldl_mono: assumes f: "\<And>x y y'. y < y' \<Longrightarrow> f y x < f y' x" and y: "y < y'" shows "foldl f y xs < foldl f y' xs" using y by (induct xs arbitrary: y y') (simp_all add: f) lemma fold_map_idx_count: assumes f: "\<And>i c x y. fst (f i c x) = (c \<or> g y (snd (f i c x)) < (g y x::nat))" and f': "\<And>i c x y. g y (snd (f i c x)) \<le> g y x" and g: "\<And>x y y'. 
y < y' \<Longrightarrow> g y x < g y' x" shows "fst (fold_map_idx f i c xs) = (c \<or> foldl g y (snd (fold_map_idx f i c xs)) < foldl g y xs)" and "foldl g y (snd (fold_map_idx f i c xs)) \<le> foldl g y xs" proof (induct xs arbitrary: i c y) case (Cons x xs) { case 1 show ?case using f' [of y i c x, simplified le_eq_less_or_eq] by (auto simp add: split_beta Cons(1) [of _ _ "g y (snd (f i c x))"] f [of _ _ _ y] intro: less_le_trans foldl_mono g Cons) next case 2 show ?case using f' [of y i c x, simplified le_eq_less_or_eq] by (auto simp add: split_beta intro: order_trans less_imp_le intro!: foldl_mono g Cons) } qed simp_all lemma iter_count: assumes eq: "(b, T') = iter (bd, as) T" and b: "b" shows "count_tr T' < count_tr T" proof - let ?f = "fold_map_idx (\<lambda>i. fold_map_idx (\<lambda>j c b. let b' = b \<or> \<not> check_eq (bd ! i) (bd ! j) T in (c \<or> b \<noteq> b', b')) 0) (Suc 0) False T" from eq [symmetric] b have "fst ?f" by (auto simp add: iter_def) also have "fst ?f = (False \<or> count_tr (snd ?f) < count_tr T)" unfolding count_tr_def by (rule fold_map_idx_count foldl_mono | simp)+ finally show ?thesis by (simp add: eq [THEN arg_cong, of snd, simplified] iter_def) qed function fixpt :: "dfa \<Rightarrow> bool list list \<Rightarrow> bool list list" where "fixpt M T = (let (b, T2) = iter M T in if b then fixpt M T2 else T2)" by auto termination by (relation "measure (\<lambda>(M, T). count_tr T)") (auto simp: iter_count) lemma fixpt_True[simp]: "fst (iter M T) \<Longrightarrow> fixpt M T = fixpt M (snd (iter M T))" by (simp add: split_beta) lemma fixpt_False[simp]: "\<not> (fst (iter M T)) \<Longrightarrow> fixpt M T = T" by (simp add: split_beta iter_def fold_map_idx_fst_snd_eq) declare fixpt.simps [simp del] lemma fixpt_induct: assumes H: "\<And>M T. (fst (iter M T) \<Longrightarrow> P M (snd (iter M T))) \<Longrightarrow> P M T" shows "P M T" proof (induct M T rule: fixpt.induct) case (1 M T) show ?case by (rule H) (rule 1 [OF refl prod.collapse]) qed definition dist_nodes :: "dfa \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool" where "dist_nodes = (\<lambda>M n m p q. \<exists>w. length w = n \<and> list_all (is_alph m) w \<and> dfa_accepting M (dfa_steps M p w) \<noteq> dfa_accepting M (dfa_steps M q w))" definition wf_tr :: "dfa \<Rightarrow> bool list list \<Rightarrow> bool" where "wf_tr = (\<lambda>M T. length T = length (fst M) - 1 \<and> (\<forall>i < length T. length (T ! i) = i + 1))" lemma make_tr_len: "length (make_tr f n i) = n" by (induct n arbitrary: i) simp_all lemma make_tr_nth: "j < n \<Longrightarrow> make_tr f n i ! j = f (i + j)" by (induct n arbitrary: i j) (auto simp add: nth_Cons') lemma init_tr_wf: "wf_tr M (init_tr M)" by (simp add: init_tr_def wf_tr_def split_beta make_tr_len make_tr_nth) lemma fold_map_idx_len: "length (snd (fold_map_idx f i y xs)) = length xs" by (induct xs arbitrary: i y) (simp_all add: split_beta) lemma fold_map_idx_nth: "j < length xs \<Longrightarrow> snd (fold_map_idx f i y xs) ! j = snd (f (i + j) (fst (fold_map_idx f i y (take j xs))) (xs ! j))" by (induct xs arbitrary: i j y) (simp_all add: split_beta nth_Cons' take_Cons') lemma init_tr_dist_nodes: assumes "dfa_is_node M q" and "p < q" shows "tr_lookup (init_tr M) q p = dist_nodes M 0 v p q" proof - have 1: "dist_nodes M 0 v p q = (snd M ! p \<noteq> snd M ! q)" by (simp add: dist_nodes_def dfa_accepting_def) from assms have "tr_lookup (init_tr M) q p = (snd M ! p \<noteq> snd M ! 
q)" by (auto simp add: dfa_is_node_def init_tr_def tr_lookup_def make_tr_nth split_beta) with 1 show ?thesis by simp qed lemma dist_nodes_suc: "dist_nodes M (Suc n) v p q = (\<exists>bs. is_alph v bs \<and> dist_nodes M n v (dfa_trans M p bs) (dfa_trans M q bs))" proof assume "dist_nodes M (Suc n) v p q" then obtain w where W: "length w = Suc n" and L: "list_all (is_alph v) w" and A: "dfa_accepting M (dfa_steps M p w) \<noteq> dfa_accepting M (dfa_steps M q w)" unfolding dist_nodes_def by blast then obtain b bs where B: "w = b # bs" by (cases w) auto from A have A2: "dfa_accepting M (dfa_steps M (dfa_trans M p b) bs) \<noteq> dfa_accepting M (dfa_steps M (dfa_trans M q b) bs)" unfolding B by simp with W B L show "\<exists>bs. is_alph v bs \<and> dist_nodes M n v (dfa_trans M p bs) (dfa_trans M q bs)" by (auto simp: dist_nodes_def) next assume "\<exists>bs. is_alph v bs \<and> dist_nodes M n v (dfa_trans M p bs) (dfa_trans M q bs)" then obtain b bs where W: "length bs = n" and V: "is_alph v b" and V': "list_all (is_alph v) bs" and A: "dfa_accepting M (dfa_steps M (dfa_trans M p b) bs) \<noteq> dfa_accepting M (dfa_steps M (dfa_trans M q b) bs)" unfolding dist_nodes_def by blast hence "dfa_accepting M (dfa_steps M p (b # bs)) \<noteq> dfa_accepting M (dfa_steps M q (b # bs))" by simp moreover from W have "length (b # bs) = Suc n" by simp moreover from V V' have "list_all (is_alph v) (b # bs)" by simp ultimately show "dist_nodes M (Suc n) v p q" unfolding dist_nodes_def by blast qed lemma bdd_lookup_append: assumes "bddh n B" and "length bs \<ge> n" shows "bdd_lookup B (bs @ w) = bdd_lookup B bs" using assms proof (induct B bs arbitrary: n rule: bdd_lookup.induct) case (2 l r b bs n) then obtain n' where N: "n = Suc n'" by (cases n) simp+ with 2 show ?case by (cases b) auto qed simp+ lemma bddh_exists: "\<exists>n. bddh n B" proof (induct B) case (Branch l r) then obtain n m where L: "bddh n l" and R: "bddh m r" by blast with bddh_ge[of n "max n m" l] bddh_ge[of m "max n m" r] have "bddh (Suc (max n m)) (Branch l r)" by simp thus ?case by (rule exI) qed simp lemma check_eq_dist_nodes: assumes "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n < m. dist_nodes M n v p q)" and "m > 0" and "bdd_all (dfa_is_node M) l" and "bdd_all (dfa_is_node M) r" shows "(\<not> check_eq l r T) = (\<exists>bs. bddh (length bs) l \<and> bddh (length bs) r \<and> (\<exists>n < m. dist_nodes M n v (bdd_lookup l bs) (bdd_lookup r bs)))" using assms proof (induct l r T rule: check_eq.induct) case (1 i j T) have "i < j \<or> i = j \<or> i > j" by auto thus ?case by (elim disjE) (insert 1, auto simp: dist_nodes_def tr_lookup_def) next case (2 l r i T) hence IV1: "(\<not> check_eq l (Leaf i) T) = (\<exists>bs. bddh (length bs) l \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup l bs) (bdd_lookup (Leaf i) bs)))" by simp from 2 have IV2: "(\<not> check_eq r (Leaf i) T) = (\<exists>bs. bddh (length bs) r \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup r bs) (bdd_lookup (Leaf i) bs)))" by simp have "(\<not> check_eq (Branch l r) (Leaf i) T) = (\<not> check_eq l (Leaf i) T \<or> \<not> check_eq r (Leaf i) T)" by simp also have "\<dots> = (\<exists>bs. bddh (length bs) (Branch l r) \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m . 
dist_nodes M n v (bdd_lookup (Branch l r) bs) (bdd_lookup (Leaf i) bs)))" (is "(?L \<or> ?R) = ?E") proof assume "?L \<or> ?R" thus "?E" proof (elim disjE) assume "?L" then obtain bs where O: "bddh (length bs) l \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup l bs) (bdd_lookup (Leaf i) bs))" unfolding IV1 by blast from bddh_exists obtain k where B: "bddh k r" by blast with O have "bddh (length bs + k) r" and "bddh (length bs + k) l" and "bddh (length bs + k) (Leaf i)" by (simp add: bddh_ge[of k "length bs + k"] bddh_ge[of "length bs" "length bs + k"])+ with O have "bddh (length (False # bs @ replicate k False)) (Branch l r) \<and> bddh (length (False # bs @ replicate k False)) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l r) (False # bs @ replicate k False)) (bdd_lookup (Leaf i) (False # bs @ replicate k False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) next assume "?R" then obtain bs where O: "bddh (length bs) r \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup r bs) (bdd_lookup (Leaf i) bs))" unfolding IV2 by blast from bddh_exists obtain k where B: "bddh k l" by blast with O have "bddh (length bs + k) l" and "bddh (length bs + k) r" and "bddh (length bs + k) (Leaf i)" by (simp add: bddh_ge[of k "length bs + k"] bddh_ge[of "length bs" "length bs + k"])+ with O have "bddh (length (True # bs @ replicate k False)) (Branch l r) \<and> bddh (length (True # bs @ replicate k False)) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l r) (True # bs @ replicate k False)) (bdd_lookup (Leaf i) (True # bs @ replicate k False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) qed next assume "?E" then obtain bs where O: "bddh (length bs) (Branch l r) \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l r) bs) (bdd_lookup (Leaf i) bs))" by blast then obtain b br where B: "bs = b # br" by (cases bs) auto with O IV1 IV2 show "?L \<or> ?R" by (cases b) auto qed finally show ?case by simp next case (3 i l r T) hence IV1: "(\<not> check_eq (Leaf i) l T) = (\<exists>bs. bddh (length bs) l \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup l bs)))" by simp from 3 have IV2: "(\<not> check_eq (Leaf i) r T) = (\<exists>bs. bddh (length bs) r \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup r bs)))" by simp have "(\<not> check_eq (Leaf i) (Branch l r) T) = (\<not> check_eq (Leaf i) l T \<or> \<not> check_eq (Leaf i) r T)" by simp also have "\<dots> = (\<exists>bs. bddh (length bs) (Branch l r) \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m . dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup (Branch l r) bs)))" (is "(?L \<or> ?R) = ?E") proof assume "?L \<or> ?R" thus "?E" proof (elim disjE) assume "?L" then obtain bs where O: "bddh (length bs) l \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup l bs))" unfolding IV1 by blast from bddh_exists obtain k where B: "bddh k r" by blast with O have "bddh (length bs + k) r" and "bddh (length bs + k) l" and "bddh (length bs + k) (Leaf i)" by (simp add: bddh_ge[of k "length bs + k"] bddh_ge[of "length bs" "length bs + k"])+ with O have "bddh (length (False # bs @ replicate k False)) (Branch l r) \<and> bddh (length (False # bs @ replicate k False)) (Leaf i) \<and> (\<exists>n<m. 
dist_nodes M n v (bdd_lookup (Leaf i) (False # bs @ replicate k False)) (bdd_lookup (Branch l r) (False # bs @ replicate k False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) next assume "?R" then obtain bs where O: "bddh (length bs) r \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup r bs))" unfolding IV2 by blast from bddh_exists obtain k where B: "bddh k l" by blast with O have "bddh (length bs + k) l" and "bddh (length bs + k) r" and "bddh (length bs + k) (Leaf i)" by (simp add: bddh_ge[of k "length bs + k"] bddh_ge[of "length bs" "length bs + k"])+ with O have "bddh (length (True # bs @ replicate k False)) (Branch l r) \<and> bddh (length (True # bs @ replicate k False)) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) (True # bs @ replicate k False)) (bdd_lookup (Branch l r) (True # bs @ replicate k False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) qed next assume "?E" then obtain bs where O: "bddh (length bs) (Branch l r) \<and> bddh (length bs) (Leaf i) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Leaf i) bs) (bdd_lookup (Branch l r) bs))" by blast then obtain b br where B: "bs = b # br" by (cases bs) auto with O IV1 IV2 show "?L \<or> ?R" by (cases b) auto qed finally show ?case by simp next case (4 l1 r1 l2 r2 T) hence IV1: "(\<not> check_eq l1 l2 T) = (\<exists>bs. bddh (length bs) l1 \<and> bddh (length bs) l2 \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup l1 bs) (bdd_lookup l2 bs)))" by simp from 4 have IV2: "(\<not> check_eq r1 r2 T) = (\<exists>bs. bddh (length bs) r1 \<and> bddh (length bs) r2 \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup r1 bs) (bdd_lookup r2 bs)))" by simp have "(\<not> check_eq (Branch l1 r1) (Branch l2 r2) T) = (\<not> check_eq l1 l2 T \<or> \<not> check_eq r1 r2 T)" by simp also have "\<dots> = (\<exists>bs. bddh (length bs) (Branch l1 r1) \<and> bddh (length bs) (Branch l2 r2) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l1 r1) bs) (bdd_lookup (Branch l2 r2) bs)))" (is "(?L \<or> ?R) = (\<exists>bs. ?E bs)") proof assume "?L \<or> ?R" thus "\<exists>bs. ?E bs" proof (elim disjE) assume "?L" then obtain bs where O: "bddh (length bs) l1 \<and> bddh (length bs) l2 \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup l1 bs) (bdd_lookup l2 bs))" unfolding IV1 by blast from bddh_exists obtain k1 k2 where K1: "bddh k1 r1" and K2: "bddh k2 r2" by blast with O have "bddh (length bs + max k1 k2) l1" and "bddh (length bs + max k1 k2) l2" and "bddh (length bs + max k1 k2) r1" and "bddh (length bs + max k1 k2) r2" by (simp add: bddh_ge[of "length bs" "length bs + max k1 k2"] bddh_ge[of k1 "length bs + max k1 k2"] bddh_ge[of k2 "length bs + max k1 k2"])+ with O have "bddh (length (False # bs @ replicate (max k1 k2) False)) (Branch l1 r1) \<and> bddh (length (False # bs @ replicate (max k1 k2) False)) (Branch l2 r2) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l1 r1) (False # bs @ replicate (max k1 k2) False)) (bdd_lookup (Branch l2 r2) (False # bs @ replicate (max k1 k2) False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) next assume "?R" then obtain bs where O: "bddh (length bs) r1 \<and> bddh (length bs) r2 \<and> (\<exists>n<m. 
dist_nodes M n v (bdd_lookup r1 bs) (bdd_lookup r2 bs))" unfolding IV2 by blast from bddh_exists obtain k1 k2 where K1: "bddh k1 l1" and K2: "bddh k2 l2" by blast with O have "bddh (length bs + max k1 k2) l1" and "bddh (length bs + max k1 k2) l2" and "bddh (length bs + max k1 k2) r1" and "bddh (length bs + max k1 k2) r2" by (simp add: bddh_ge[of "length bs" "length bs + max k1 k2"] bddh_ge[of k1 "length bs + max k1 k2"] bddh_ge[of k2 "length bs + max k1 k2"])+ with O have "bddh (length (True # bs @ replicate (max k1 k2) False)) (Branch l1 r1) \<and> bddh (length (True # bs @ replicate (max k1 k2) False)) (Branch l2 r2) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (Branch l1 r1) (True # bs @ replicate (max k1 k2) False)) (bdd_lookup (Branch l2 r2) (True # bs @ replicate (max k1 k2) False)))" by (auto simp: bdd_lookup_append) thus ?thesis by (rule exI) qed next assume "\<exists>bs. ?E bs" then obtain bs where O: "?E bs" by blast then obtain b br where B: "bs = b # br" by (cases bs) auto with O IV1 IV2 show "?L \<or> ?R" by (cases b) auto qed finally show ?case by simp qed lemma iter_wf: "wf_tr M T \<Longrightarrow> wf_tr M (snd (iter M T))" by (simp add: wf_tr_def iter_def fold_map_idx_len fold_map_idx_nth split_beta) lemma fixpt_wf: "wf_tr M T \<Longrightarrow> wf_tr M (fixpt M T)" proof (induct M T rule: fixpt_induct) case (1 M T) show ?case proof (cases "fst (iter M T)") case True with 1 show ?thesis by (simp add: iter_wf) next case False with 1 show ?thesis by simp qed qed lemma list_split: assumes "n \<le> length bss" shows "\<exists>b bs. bss = b @ bs \<and> length b = n" using assms proof (induct bss arbitrary: n) case (Cons a as) show ?case proof (cases n) case (Suc n') with Cons have "\<exists>b bs. as = b @ bs \<and> length b = n'" by simp then obtain b bs where B: "as = b @ bs \<and> length b = n'" by blast with Suc Cons have "a # as = (a # b) @ bs \<and> length (a # b) = n" by simp thus ?thesis by blast qed simp qed simp lemma iter_dist_nodes: assumes "wf_tr M T" and "wf_dfa M v" and "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n < m. dist_nodes M n v p q)" and "m > 0" and "dfa_is_node M q" and "p < q" shows "tr_lookup (snd (iter M T)) q p = (\<exists>n < Suc m. dist_nodes M n v p q)" proof - from assms obtain m' where M': "m = Suc m'" by (cases m) simp+ have C: "(\<not> check_eq (fst M ! q) (fst M ! p) T) = (\<exists>n<m. dist_nodes M (Suc n) v p q)" proof assume "\<not> check_eq (fst M ! q) (fst M ! p) T" with assms have "\<exists>bs. bddh (length bs) (fst M ! q) \<and> bddh (length bs) (fst M ! p) \<and> (\<exists>n < m. dist_nodes M n v (bdd_lookup (fst M ! q) bs) (bdd_lookup (fst M ! p) bs))" by (simp add: check_eq_dist_nodes wf_dfa_def list_all_iff dfa_is_node_def) then obtain bs n bss where X: "bddh (length bs) (fst M ! q) \<and> bddh (length bs) (fst M ! p) \<and> n < m \<and> length bss = n \<and> list_all (is_alph v) bss \<and> dfa_accepting M (dfa_steps M (bdd_lookup (fst M ! q) bs) bss) \<noteq> dfa_accepting M (dfa_steps M (bdd_lookup (fst M ! p) bs) bss)" unfolding dist_nodes_def by blast from list_split[of v "bs @ replicate v False"] have "\<exists>b' bs'. bs @ replicate v False = b' @ bs' \<and> length b' = v" by simp then obtain b' bs' where V: "bs @ replicate v False = b' @ bs' \<and> length b' = v" by blast with X bdd_lookup_append[of "length bs" "fst M ! q" "bs" "replicate v False"] bdd_lookup_append[of "length bs" "fst M ! p" "bs" "replicate v False"] have 1: "dfa_accepting M (dfa_steps M (bdd_lookup (fst M ! 
q) (bs @ replicate v False)) bss) \<noteq> dfa_accepting M (dfa_steps M (bdd_lookup (fst M ! p) (bs @ replicate v False)) bss)" by simp from assms have "bddh v (fst M ! q) \<and> bddh v (fst M ! p)" by (simp add: wf_dfa_def dfa_is_node_def list_all_iff) with 1 V have "dfa_accepting M (dfa_steps M (dfa_trans M q b') bss) \<noteq> dfa_accepting M (dfa_steps M (dfa_trans M p b') bss)" by (auto simp: bdd_lookup_append dfa_trans_def) with X V have "is_alph v b' \<and> dist_nodes M n v (dfa_trans M p b') (dfa_trans M q b')" by (auto simp: dist_nodes_def is_alph_def) hence "dist_nodes M (Suc n) v p q" by (auto simp: dist_nodes_suc) with X show "\<exists>n<m. dist_nodes M (Suc n) v p q" by auto next assume "\<exists>n<m. dist_nodes M (Suc n) v p q" hence "\<exists>bs. \<exists>n<m. is_alph v bs \<and> dist_nodes M n v (dfa_trans M p bs) (dfa_trans M q bs)" by (auto simp: dist_nodes_suc) then obtain bs where X: "\<exists>n<m. is_alph v bs \<and> dist_nodes M n v (dfa_trans M p bs) (dfa_trans M q bs)" by blast hence BS: "length bs = v" by (auto simp: is_alph_def) with assms have "bddh (length bs) (fst M ! p) \<and> bddh (length bs) (fst M ! q)" by (simp add: wf_dfa_def dfa_is_node_def list_all_iff) with X have "bddh (length bs) (fst M ! p) \<and> bddh (length bs) (fst M ! q) \<and> (\<exists>n<m. dist_nodes M n v (bdd_lookup (fst M ! q) bs) (bdd_lookup (fst M ! p) bs))" by (auto simp: dfa_trans_def dist_nodes_def) moreover from assms have "bdd_all (dfa_is_node M) (fst M ! p) \<and> bdd_all (dfa_is_node M) (fst M ! q)" by (simp add: wf_dfa_def dfa_is_node_def list_all_iff) moreover note assms(3,4) ultimately show "\<not> check_eq (fst M ! q) (fst M ! p) T" by (auto simp: check_eq_dist_nodes) qed from assms have "tr_lookup (snd (iter M T)) q p = (if tr_lookup T q p then True else \<not> check_eq (fst M ! q) (fst M ! p) T)" by (auto simp add: iter_def wf_tr_def split_beta fold_map_idx_nth tr_lookup_def dfa_is_node_def) also have "\<dots> = (tr_lookup T q p \<or> \<not> check_eq (fst M ! q) (fst M ! p) T)" by simp also from assms C have "\<dots> = ((\<exists>n<m. dist_nodes M n v p q) \<or> (\<exists>n<m. dist_nodes M (Suc n) v p q))" by simp also have "\<dots> = (\<exists>n < m. dist_nodes M n v p q \<or> dist_nodes M (Suc n) v p q)" by auto also have "\<dots> = (\<exists>n < Suc m. dist_nodes M n v p q)" proof assume "\<exists>n<m. dist_nodes M n v p q \<or> dist_nodes M (Suc n) v p q" then obtain n where D: "dist_nodes M n v p q \<or> dist_nodes M (Suc n) v p q" and N: "n < m" by blast moreover from N have "n < Suc m" by simp ultimately show "\<exists>n < Suc m. dist_nodes M n v p q" by (elim disjE) blast+ next assume "\<exists>n < Suc m. dist_nodes M n v p q" then obtain n where N: "n < Suc m" and D: "dist_nodes M n v p q" by blast from N have "n < m \<or> n = m" by auto from this D M' show "\<exists>n<m. dist_nodes M n v p q \<or> dist_nodes M (Suc n) v p q" by auto qed finally show ?thesis by simp qed lemma fixpt_dist_nodes': assumes "wf_tr M T" and "wf_dfa M v" and "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n < m. dist_nodes M n v p q)" and "m > 0" and "dfa_is_node M q" and "p < q" shows "tr_lookup (fixpt M T) q p = (\<exists>n. dist_nodes M n v p q)" using assms proof (induct M T arbitrary: m rule: fixpt_induct) case (1 M T m) let ?T = "snd (iter M T)" show ?case proof (cases "fst (iter M T)") case True { fix p' q' assume H: "dfa_is_node M q' \<and> p' < q'" with 1 have "tr_lookup ?T q' p' = (\<exists>n < Suc m. 
dist_nodes M n v p' q')" by (simp only: iter_dist_nodes) } hence 2: "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup ?T q p = (\<exists>n < Suc m. dist_nodes M n v p q)" by simp moreover from 1 have "wf_tr M ?T" by (simp add: iter_wf) moreover note 1(3,6,7) 1(1)[of "Suc m"] True ultimately have "tr_lookup (fixpt M ?T) q p = (\<exists>n. dist_nodes M n v p q)" by simp with True show ?thesis by (simp add: Let_def split_beta) next case False then have F: "snd (iter M T) = T" by (simp add: iter_def fold_map_idx_fst_snd_eq split_beta) have C: "\<And>m'. \<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n < m' + m. dist_nodes M n v p q)" proof - fix m' show "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n < m' + m. dist_nodes M n v p q)" proof (induct m') case 0 with 1 show ?case by simp next case (Suc m') { fix p' q' assume H: "dfa_is_node M q'" and H2: "p' < q'" note 1(2,3) Suc moreover from Suc 1 have "0 < m' + m" by simp moreover note H H2 ultimately have "tr_lookup (snd (iter M T)) q' p' = (\<exists>n < Suc (m' + m). dist_nodes M n v p' q')" by (rule iter_dist_nodes) with F have "tr_lookup T q' p' = (\<exists>n < Suc m' + m. dist_nodes M n v p' q')" by simp } thus ?case by simp qed qed { fix p' q' assume H: "dfa_is_node M q' \<and> p' < q'" have "tr_lookup T q' p' = (\<exists>n. dist_nodes M n v p' q')" proof assume "tr_lookup T q' p'" with H C[of 0] show "\<exists>n. dist_nodes M n v p' q'" by auto next assume H': "\<exists>n. dist_nodes M n v p' q'" then obtain n where "dist_nodes M n v p' q'" by blast moreover have "n < Suc n + m" by simp ultimately have "\<exists>n' < Suc n + m. dist_nodes M n' v p' q'" by blast with H C[of "Suc n"] show "tr_lookup T q' p'" by simp qed } hence "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup T q p = (\<exists>n. dist_nodes M n v p q)" by simp with False \<open>dfa_is_node M q\<close> \<open>p < q\<close> show ?thesis by simp qed qed lemma fixpt_dist_nodes: assumes "wf_dfa M v" and "dfa_is_node M p" and "dfa_is_node M q" shows "tr_lookup (fixpt M (init_tr M)) p q = (\<exists>n. dist_nodes M n v p q)" proof - { fix p q assume H1: "p < q" and H2: "dfa_is_node M q" from init_tr_wf have "wf_tr M (init_tr M)" by simp moreover note assms(1) moreover { fix p' q' assume "dfa_is_node M q'" and "p' < q'" hence "tr_lookup (init_tr M) q' p' = dist_nodes M 0 v p' q'" by (rule init_tr_dist_nodes) also have "\<dots> = (\<exists>n < 1. dist_nodes M n v p' q')" by auto finally have "tr_lookup (init_tr M) q' p' = (\<exists>n<1. dist_nodes M n v p' q')" by simp } hence "\<forall>p q. dfa_is_node M q \<and> p < q \<longrightarrow> tr_lookup (init_tr M) q p = (\<exists>n<1. dist_nodes M n v p q)" by simp moreover note H1 H2 ultimately have "tr_lookup (fixpt M (init_tr M)) q p = (\<exists>n. 
dist_nodes M n v p q)" by (simp only: fixpt_dist_nodes'[of _ _ _ 1]) } with assms(2,3) show ?thesis by (auto simp: tr_lookup_def dist_nodes_def) qed primrec mk_eqcl' :: "nat option list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool list list \<Rightarrow> nat option list" where "mk_eqcl' [] i j l T = []" | "mk_eqcl' (x#xs) i j l T = (if tr_lookup T j i \<or> x \<noteq> None then x else Some l) # mk_eqcl' xs i (Suc j) l T" lemma mk_eqcl'_len: "length (mk_eqcl' xs i j l T) = length xs" by (induct xs arbitrary: j) simp+ function mk_eqcl :: "nat option list \<Rightarrow> nat list \<Rightarrow> nat \<Rightarrow> bool list list \<Rightarrow> nat list \<times> nat list" where "mk_eqcl [] zs i T = ([], zs)" | "mk_eqcl (None # xs) zs i T = (let (xs',zs') = mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T in (length zs # xs', zs'))" | "mk_eqcl (Some l # xs) zs i T = (let (xs',zs') = mk_eqcl xs zs (Suc i) T in (l # xs', zs'))" by pat_completeness auto termination by (lexicographic_order simp: mk_eqcl'_len) lemma mk_eqcl'_bound: assumes "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < l" and "x \<in> set (mk_eqcl' xs i j l T)" and "x = Some k" shows "k \<le> l" using assms proof (induct xs arbitrary: j) case (Cons y xs j) hence "x = y \<or> x = Some l \<or> x \<in> set (mk_eqcl' xs i (Suc j) l T)" by (cases "tr_lookup T j i \<or> y \<noteq> None") auto thus ?case proof (elim disjE) assume "x = y" hence "x \<in> set (y # xs)" by simp with Cons(2)[of x k] Cons(4) show ?thesis by simp qed (insert Cons, auto) qed simp lemma mk_eqcl'_nth': assumes "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < l" and "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + j) i\<rbrakk> \<Longrightarrow> xs ! i' = None" and "i < j" and "j' < length xs" shows "(mk_eqcl' xs i j l T ! j' = Some l) = (\<not> tr_lookup T (j' + j) i)" using assms proof (induct xs arbitrary: j j') case (Cons x xs j) have I1:"\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + Suc j) i\<rbrakk> \<Longrightarrow> xs ! i' = None" proof - fix i' assume H: "i' < length xs" "\<not> tr_lookup T (i' + Suc j) i" with Cons(3)[of "Suc i'"] show "xs ! i' = None" by simp qed have "j' = 0 \<or> j' > 0" by auto thus ?case proof (elim disjE) assume "j' > 0" then obtain j'' where J: "j' = Suc j''" by (cases j') simp+ from Cons(1)[of "Suc j" j''] I1 Cons(2,4,5) J show ?thesis by simp next assume H: "j' = 0" with Cons(3)[of 0] have "\<not> tr_lookup T j i \<Longrightarrow> x = None" by simp with Cons H show ?thesis by auto qed qed simp lemma mk_eqcl'_nth: assumes "\<And>i' j' k. \<lbrakk>i' < length xs; j' < length xs; xs ! i' = Some k\<rbrakk> \<Longrightarrow> (xs ! j' = Some k) = (\<not> tr_lookup T (i' + jj) (j' + jj))" and "\<And>a b c. \<lbrakk>a \<le> length T; b \<le> length T; c \<le> length T; \<not> tr_lookup T a b; \<not> tr_lookup T b c\<rbrakk> \<Longrightarrow> \<not> tr_lookup T a c" and "length xs + jj = length T + 1" and "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < l" and "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + jj) ii\<rbrakk> \<Longrightarrow> xs ! i' = None" and "ii < jj" and "i < length xs" and "mk_eqcl' xs ii jj l T ! i = Some m" and "j < length xs" shows "(mk_eqcl' xs ii jj l T ! 
j = Some m) = (\<not> tr_lookup T (i + jj) (j + jj))" using assms proof (induct xs arbitrary: jj i j) case Nil from Nil(7) have False by simp thus ?case by simp next case (Cons y xs jj i j) show ?case proof (cases i) case 0 show ?thesis proof (cases j) case 0 with \<open>i=0\<close> Cons(9) show ?thesis by (simp add: tr_lookup_def) next case (Suc j') from 0 Cons(5,9) have 1: "y = Some m \<and> m < l \<or> (y = None \<and> \<not> tr_lookup T jj ii \<and> m = l)" by (cases y, cases "tr_lookup T jj ii", auto) thus ?thesis proof (elim disjE) assume H: "y = Some m \<and> m < l" from Suc have "(mk_eqcl' (y # xs) ii jj l T ! j = Some m) = (mk_eqcl' xs ii (Suc jj) l T ! j' = Some m)" by simp also from H have "\<dots> = (xs ! j' = Some m)" proof (induct xs arbitrary: jj j') case (Cons a xs jj j') thus ?case by (cases j') simp+ qed simp also from Suc have "\<dots> = ((y # xs) ! j = Some m)" by simp also from Cons(2)[of i j m] Cons(8,10) Suc 0 H have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj))" by simp finally show ?thesis by simp next assume H: "y = None \<and> \<not> tr_lookup T jj ii \<and> m = l" with Suc have "(mk_eqcl' (y # xs) ii jj l T ! j = Some m) = (mk_eqcl' xs ii (Suc jj) l T ! j' = Some l)" by simp also have "\<dots> = (\<not> tr_lookup T (j' + Suc jj) ii)" proof (rule mk_eqcl'_nth') from Cons(5) show "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < l" by simp show "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + Suc jj) ii\<rbrakk> \<Longrightarrow> xs ! i' = None" proof - fix i' assume "i' < length xs" "\<not> tr_lookup T (i' + Suc jj) ii" with Cons(6)[of "Suc i'"] show "xs ! i' = None" by simp qed from Cons(7) show "ii < Suc jj" by simp from Cons(10) Suc show "j' < length xs" by simp qed also from Suc H 0 have "\<dots> = (\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii)" by (simp add: add.commute) also have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii)" proof assume H': "\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii" hence "\<not> tr_lookup T ii (j + jj)" by (auto simp: tr_lookup_def) with H' Cons(3)[of "i + jj" ii "j + jj"] Cons(4,7,8,10) show "\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii" by simp next assume H': "\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii" hence "\<not> tr_lookup T (j + jj) (i + jj)" by (auto simp: tr_lookup_def) with H' Cons(3)[of "j + jj" "i + jj" ii] Cons(4,7,8,10) show "\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii" by simp qed also from 0 H have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj))" by simp finally show ?thesis by simp qed qed next case (Suc i') show ?thesis proof (cases j) case 0 have "m \<le> l" proof (rule mk_eqcl'_bound) from Cons(5) show "\<And>x k. \<lbrakk>x \<in> set (y # xs); x = Some k\<rbrakk> \<Longrightarrow> k < l" by simp from Cons(8) have "i < length (mk_eqcl' (y # xs) ii jj l T)" by (simp add: mk_eqcl'_len) with Cons(9) have "\<exists>i < length (mk_eqcl' (y # xs) ii jj l T). mk_eqcl' (y # xs) ii jj l T ! i = Some m" by blast thus "Some m \<in> set (mk_eqcl' (y # xs) ii jj l T)" by (simp only: in_set_conv_nth) show "Some m = Some m" by simp qed hence "m < l \<or> m = l" by auto thus ?thesis proof (elim disjE) assume H: "m < l" with Cons(9) have I: "(y # xs) ! 
i = Some m" proof (induct ("y # xs") arbitrary: jj i) case (Cons a l jj i) thus ?case by (cases i) (auto, cases "tr_lookup T jj ii \<or> a \<noteq> None", simp+) qed simp from 0 H have "(mk_eqcl' (y # xs) ii jj l T ! j = Some m) = ((y#xs) ! j = Some m)" by (cases "tr_lookup T jj ii \<or> y \<noteq> None") simp+ also from Cons(8,10) I have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj))" by (rule Cons(2)) finally show ?thesis by simp next assume H: "m = l" from Cons(5,6,7,8) have "(mk_eqcl' (y # xs) ii jj l T ! i = Some l) = (\<not> tr_lookup T (i + jj) ii)" by (rule mk_eqcl'_nth') with H Cons(9) have I: "\<not> tr_lookup T (i + jj) ii" by simp with 0 H Cons(5) have "(mk_eqcl' (y # xs) ii jj l T ! j = Some m) = (\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii \<and> y = None)" by auto also from Cons(6)[of 0] 0 have "\<dots> = (\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii)" by auto also have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii)" proof assume H': "\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii" hence "\<not> tr_lookup T ii (j + jj)" by (auto simp: tr_lookup_def) with H' Cons(3)[of "i + jj" ii "j + jj"] Cons(4,7,8,10) show "\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii" by simp next assume H': "\<not> tr_lookup T (i + jj) (j + jj) \<and> \<not> tr_lookup T (i + jj) ii" hence "\<not> tr_lookup T (j + jj) (i + jj)" by (auto simp: tr_lookup_def) with H' Cons(3)[of "j + jj" "i + jj" ii] Cons(4,7,8,10) show "\<not> tr_lookup T (j + jj) ii \<and> \<not> tr_lookup T (i + jj) ii" by simp qed also from I have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj))" by simp finally show ?thesis by simp qed next case (Suc j') hence "(mk_eqcl' (y # xs) ii jj l T ! j = Some m) = (mk_eqcl' xs ii (Suc jj) l T ! j' = Some m)" by simp also have "\<dots> = (\<not> tr_lookup T (i' + Suc jj) (j' + Suc jj))" proof (rule Cons(1)) show "\<And>i' j' k. \<lbrakk>i' < length xs; j' < length xs; xs ! i' = Some k\<rbrakk> \<Longrightarrow> (xs ! j' = Some k) = (\<not> tr_lookup T (i' + Suc jj) (j' + Suc jj))" proof - fix i' j' k assume "i' < length xs" "j' < length xs" "xs ! i' = Some k" with Cons(2)[of "Suc i'" "Suc j'" k] show "(xs ! j' = Some k) = (\<not> tr_lookup T (i' + Suc jj) (j' + Suc jj))" by simp qed from Cons(3) show "\<And>a b c. \<lbrakk>a \<le> length T; b \<le> length T; c \<le> length T; \<not> tr_lookup T a b; \<not> tr_lookup T b c \<rbrakk> \<Longrightarrow> \<not> tr_lookup T a c" by blast from Cons(4) show "length xs + Suc jj = length T + 1" by simp from Cons(5) show "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < l" by simp show "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + Suc jj) ii\<rbrakk> \<Longrightarrow> xs ! i' = None" proof - fix i' assume "i' < length xs" "\<not> tr_lookup T (i' + Suc jj) ii" with Cons(6)[of "Suc i'"] show "xs ! i' = None" by simp qed from Cons(7) show "ii < Suc jj" by simp from Cons(8) \<open>i=Suc i'\<close> show "i' < length xs" by simp from Cons(9) \<open>i=Suc i'\<close> show "mk_eqcl' xs ii (Suc jj) l T ! i' = Some m" by simp from Cons(10) Suc show "j' < length xs" by simp qed also from Suc \<open>i=Suc i'\<close> have "\<dots> = (\<not> tr_lookup T (i + jj) (j + jj))" by simp finally show ?thesis by simp qed qed qed lemma mk_eqcl'_Some: assumes "i < length xs" and "xs ! i \<noteq> None" shows "mk_eqcl' xs ii j l T ! i = xs ! 
i" using assms proof (induct xs arbitrary: j i) case (Cons y xs j i) thus ?case by (cases i) auto qed simp lemma mk_eqcl'_Some2: assumes "i < length xs" and "k < l" shows "(mk_eqcl' xs ii j l T ! i = Some k) = (xs ! i = Some k)" using assms proof (induct xs arbitrary: j i) case (Cons y xs j i) thus ?case by (cases i) auto qed simp lemma mk_eqcl_fst_Some: assumes "i < length xs" and "k < length zs" shows "(fst (mk_eqcl xs zs ii T) ! i = k) = (xs ! i = Some k)" using assms proof (induct xs zs ii T arbitrary: i rule: mk_eqcl.induct) case (2 xs zs ii T i) thus ?case by (cases i) (simp add: split_beta mk_eqcl'_len mk_eqcl'_Some2)+ next case (3 l xs zs ii T i) thus ?case by (cases i) (simp add: split_beta)+ qed simp lemma mk_eqcl_len_snd: "length zs \<le> length (snd (mk_eqcl xs zs i T))" by (induct xs zs i T rule: mk_eqcl.induct) (simp add: split_beta)+ lemma mk_eqcl_len_fst: "length (fst (mk_eqcl xs zs i T)) = length xs" by (induct xs zs i T rule: mk_eqcl.induct) (simp add: split_beta mk_eqcl'_len)+ lemma mk_eqcl_set_snd: assumes "i \<notin> set zs" and "j > i" shows "i \<notin> set (snd (mk_eqcl xs zs j T))" using assms by (induct xs zs j T rule: mk_eqcl.induct) (auto simp: split_beta) lemma mk_eqcl_snd_mon: assumes "\<And>j1 j2. \<lbrakk>j1 < j2; j2 < length zs\<rbrakk> \<Longrightarrow> zs ! j1 < zs ! j2" and "\<And>x. x \<in> set zs \<Longrightarrow> x < i" and "j1 < j2" and "j2 < length (snd (mk_eqcl xs zs i T))" shows "snd (mk_eqcl xs zs i T) ! j1 < snd (mk_eqcl xs zs i T) ! j2" using assms proof (induct xs zs i T rule: mk_eqcl.induct) case (2 xs zs i T) have "\<And>j1 j2. \<lbrakk>j1 < j2; j2 < length (zs @ [i])\<rbrakk> \<Longrightarrow> (zs @ [i]) ! j1 < (zs @ [i]) ! j2" proof - fix j1 j2 assume H: "j1 < j2" "j2 < length (zs @ [i])" hence "j2 < length zs \<or> j2 = length zs" by auto from this H 2 show "(zs @ [i]) ! j1 < (zs @ [i]) ! j2" by (elim disjE) (simp add: nth_append)+ qed moreover have "\<And>x. x \<in> set (zs @ [i]) \<Longrightarrow> x < Suc i" proof - fix x assume "x \<in> set (zs @ [i])" hence "x \<in> set zs \<or> x = i" by auto with 2(3)[of x] show "x < Suc i" by auto qed moreover note 2(4) moreover from 2(5) have "j2 < length (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" by (simp add: split_beta) ultimately have "snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j1 < snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j2" by (rule 2(1)) thus ?case by (simp add: split_beta) next case (3 l xs zs i T) note 3(2) moreover have "\<And>x. x \<in> set zs \<Longrightarrow> x < Suc i" proof - fix x assume "x \<in> set zs" with 3(3)[of x] show "x < Suc i" by simp qed moreover note 3(4) moreover from 3(5) have "j2 < length (snd (mk_eqcl xs zs (Suc i) T))" by (simp add: split_beta) ultimately have "snd (mk_eqcl xs zs (Suc i) T) ! j1 < snd (mk_eqcl xs zs (Suc i) T) ! j2" by (rule 3(1)) thus ?case by (simp add: split_beta) qed simp lemma mk_eqcl_snd_nth: assumes "i < length zs" shows "snd (mk_eqcl xs zs j T) ! i = zs ! i" using assms by (induct xs zs j T rule: mk_eqcl.induct) (simp add: split_beta nth_append)+ lemma mk_eqcl_bound: assumes "\<And>x k. 
\<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < length zs" and "x \<in> set (fst (mk_eqcl xs zs ii T))" shows "x < length (snd (mk_eqcl xs zs ii T))" using assms proof (induct xs zs ii T rule: mk_eqcl.induct) case (2 xs zs i T) hence "x = length zs \<or> x \<in> set (fst (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" by (auto simp: split_beta) thus ?case proof (elim disjE) assume "x = length zs" hence "x < length (zs @ [i])" by simp also have "\<dots> \<le> length (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" by (simp only: mk_eqcl_len_snd) finally show ?thesis by (simp add: split_beta) next assume H: "x \<in> set (fst (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" have "\<And>x k. \<lbrakk>x \<in> set (mk_eqcl' xs i (Suc i) (length zs) T); x = Some k\<rbrakk> \<Longrightarrow> k < length (zs @ [i])" proof - fix x k assume H': "x \<in> set (mk_eqcl' xs i (Suc i) (length zs) T)" " x = Some k" { fix x' k' assume "x' \<in> set xs" "x' = Some k'" with 2 have "k' < length zs" by simp } from this H' have "k \<le> length zs" by (rule mk_eqcl'_bound) thus "k < length (zs @ [i])" by simp qed with H 2 show ?thesis by (simp add: split_beta) qed next case (3 l xs zs i T) hence "x = l \<or> x \<in> set (fst (mk_eqcl xs zs (Suc i) T))" by (auto simp: split_beta) thus ?case proof (elim disjE) assume "x = l" with 3 have "x < length zs" by simp also from 3 have "\<dots> \<le> length (snd (mk_eqcl (Some l # xs) zs i T))" by (simp only: mk_eqcl_len_snd) finally show ?thesis by simp next assume "x \<in> set (fst (mk_eqcl xs zs (Suc i) T))" with 3 have "x < length (snd (mk_eqcl xs zs (Suc i) T))" by simp thus ?thesis by (simp add: split_beta) qed qed simp lemma mk_eqcl_fst_snd: assumes "\<And>i. i < length zs \<Longrightarrow> zs ! i < length xs + ii \<and> (zs ! i \<ge> ii \<longrightarrow> xs ! (zs ! i - ii) = Some i)" and "\<And>j1 j2. \<lbrakk>j1 < j2; j2 < length zs\<rbrakk> \<Longrightarrow> zs ! j1 < zs ! j2" and "\<And>z. z \<in> set zs \<Longrightarrow> z < ii" and "i < length (snd (mk_eqcl xs zs ii T))" and "length xs + ii \<le> length T + 1" shows "snd (mk_eqcl xs zs ii T) ! i < length (fst (mk_eqcl xs zs ii T)) + ii \<and> (snd (mk_eqcl xs zs ii T) ! i \<ge> ii \<longrightarrow> fst (mk_eqcl xs zs ii T) ! (snd (mk_eqcl xs zs ii T) ! i - ii) = i)" using assms proof (induct xs zs ii T arbitrary: i rule: mk_eqcl.induct) case (1 zs ii T i) from 1(1)[of i] 1(4,5) show ?case by simp next case (2 xs zs i T j) have "\<And>i'. i' < length (zs @ [i]) \<Longrightarrow> (zs @ [i]) ! i' < length (mk_eqcl' xs i (Suc i) (length zs) T) + Suc i \<and> (Suc i \<le> (zs @ [i]) ! i' \<longrightarrow> mk_eqcl' xs i (Suc i) (length zs) T ! ((zs @ [i]) ! i' - Suc i) = Some i')" proof - fix i' assume "i' < length (zs @ [i])" hence "i' < length zs \<or> i' = length zs" by auto thus "(zs @ [i]) ! i' < length (mk_eqcl' xs i (Suc i) (length zs) T) + Suc i \<and> (Suc i \<le> (zs @ [i]) ! i' \<longrightarrow> mk_eqcl' xs i (Suc i) (length zs) T ! ((zs @ [i]) ! i' - Suc i) = Some i')" proof (elim disjE) assume H: "i' < length zs" with 2(2) have I: "zs ! i' < length (None # xs) + i \<and> (i \<le> zs ! i' \<longrightarrow> (None # xs) ! (zs ! i' - i) = Some i')" by simp with H have G1: "(zs @ [i]) ! i' < length (mk_eqcl' xs i (Suc i) (length zs) T) + Suc i" by (auto simp: mk_eqcl'_len nth_append) { assume H': "Suc i \<le> (zs @ [i]) ! i'" then obtain k where K: "(zs @ [i]) ! i' - i = Suc k" by (cases "(zs @ [i]) ! 
i' - i") simp+ hence K': "k = (zs @ [i]) ! i' - Suc i" by simp from K H' H I have "xs ! k = Some i'" by (simp add: nth_append) with K I H have "mk_eqcl' xs i (Suc i) (length zs) T ! k = Some i'" by (auto simp add: mk_eqcl'_Some nth_append) with K' have "mk_eqcl' xs i (Suc i) (length zs) T ! ((zs @ [i]) ! i' - Suc i) = Some i'" by simp } with G1 show ?thesis by simp qed simp qed moreover have "\<And>j1 j2. \<lbrakk>j1 < j2; j2 < length (zs @ [i])\<rbrakk> \<Longrightarrow> (zs @ [i]) ! j1 < (zs @ [i]) ! j2" proof - fix j1 j2 assume H: "j1 < j2" "j2 < length (zs @ [i])" hence "j2 < length zs \<or> j2 = length zs" by auto from this H 2(3)[of j1 j2] 2(4)[of "zs ! j1"] show "(zs @ [i]) ! j1 < (zs @ [i]) ! j2" by (elim disjE) (simp add: nth_append)+ qed moreover have "\<And>z. z \<in> set (zs @ [i]) \<Longrightarrow> z < Suc i" proof - fix z assume "z \<in> set (zs @ [i])" hence "z \<in> set zs \<or> z = i" by auto with 2(4)[of z] show "z < Suc i" by auto qed moreover from 2 have "j < length (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" by (simp add: split_beta) moreover from 2 have "length (mk_eqcl' xs i (Suc i) (length zs) T) + Suc i \<le> length T + 1" by (simp add: mk_eqcl'_len) ultimately have IV: "snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j < length (fst (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T)) + Suc i \<and> (Suc i \<le> snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j \<longrightarrow> fst (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j - Suc i) = j)" by (rule 2(1)) hence G1: "snd (mk_eqcl (None # xs) zs i T) ! j < length (fst (mk_eqcl (None # xs) zs i T)) + i" by (auto simp: split_beta) { assume "i \<le> snd (mk_eqcl (None # xs) zs i T) ! j" hence "i = snd (mk_eqcl (None # xs) zs i T) ! j \<or> Suc i \<le> snd (mk_eqcl (None # xs) zs i T) ! j" by auto hence "fst (mk_eqcl (None # xs) zs i T) ! (snd (mk_eqcl (None # xs) zs i T) ! j - i) = j" proof (elim disjE) assume H: "i = snd (mk_eqcl (None # xs) zs i T) ! j" define k where "k = length zs" hence K: "snd (mk_eqcl (None # xs) zs i T) ! k = i" by (simp add: mk_eqcl_snd_nth split_beta) { assume "j \<noteq> k" hence "j < k \<or> j > k" by auto hence "snd (mk_eqcl (None # xs) zs i T) ! j \<noteq> i" proof (elim disjE) assume H': "j < k" from k_def have "k < length (zs @ [i])" by simp also have "\<dots> \<le> length (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T))" by (simp only: mk_eqcl_len_snd) also have "\<dots> = length (snd (mk_eqcl (None # xs) zs i T))" by (simp add: split_beta) finally have K': "k < length (snd (mk_eqcl (None # xs) zs i T))" by simp from 2(3,4) H' this have "snd (mk_eqcl (None # xs) zs i T) ! j < snd (mk_eqcl (None # xs) zs i T) ! k" by (rule mk_eqcl_snd_mon) with K show ?thesis by simp next assume H': "j > k" from 2(3,4) H' 2(5) have "snd (mk_eqcl (None # xs) zs i T) ! k < snd (mk_eqcl (None # xs) zs i T) ! j" by (rule mk_eqcl_snd_mon) with K show ?thesis by simp qed } with H k_def have "j = length zs" by auto with H show ?thesis by (simp add: split_beta) next assume H: "Suc i \<le> snd (mk_eqcl (None # xs) zs i T) ! j" then obtain k where K: "snd (mk_eqcl (None # xs) zs i T) ! j - i = Suc k" by (cases "snd (mk_eqcl (None # xs) zs i T) ! j - i") simp+ hence K': "k = snd (mk_eqcl (None # xs) zs i T) ! 
j - Suc i" by simp from H IV have "fst (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! (snd (mk_eqcl (mk_eqcl' xs i (Suc i) (length zs) T) (zs @ [i]) (Suc i) T) ! j - Suc i) = j" by (auto simp: split_beta) with K' have "fst (mk_eqcl (None # xs) zs i T) ! Suc k = j" by (simp add: split_beta) with K show ?thesis by simp qed } with G1 show ?case by simp next case (3 l xs zs i T j) have 1: "snd (mk_eqcl (Some l # xs) zs i T) = snd (mk_eqcl xs zs (Suc i) T)" by (simp add: split_beta) have 2: "length (fst (mk_eqcl (Some l # xs) zs i T)) = length (Some l # xs)" by (simp add: split_beta mk_eqcl_len_fst) have "\<And>j. j < length zs \<Longrightarrow> zs ! j < length xs + Suc i \<and> (Suc i \<le> zs ! j \<longrightarrow> xs ! (zs ! j - Suc i) = Some j)" proof - fix j assume H: "j < length zs" with 3(2)[of j] have I: "zs ! j < length (Some l # xs) + i \<and> (i \<le> zs ! j \<longrightarrow> (Some l # xs) ! (zs ! j - i) = Some j)" by simp hence G1: "zs ! j < length xs + Suc i" and G2: "i \<le> zs ! j \<longrightarrow> (Some l # xs) ! (zs ! j - i) = Some j" by simp+ { assume H2: "Suc i \<le> zs ! j" then obtain k where K: "zs ! j - i = Suc k" by (cases "zs ! j - i") simp+ with H2 G2 have "xs ! k = Some j" by simp moreover from K have "k = zs ! j - Suc i" by simp ultimately have "xs ! (zs ! j - Suc i) = Some j" by simp } with G1 show "zs ! j < length xs + Suc i \<and> (Suc i \<le> zs ! j \<longrightarrow> xs ! (zs ! j - Suc i) = Some j)" by simp qed moreover note 3(3) moreover have "\<And>z. z \<in> set zs \<Longrightarrow> z < Suc i" proof - fix z assume "z \<in> set zs" with 3(4)[of z] show "z < Suc i" by simp qed moreover from 3(5) 1 have "j < length (snd (mk_eqcl xs zs (Suc i) T))" by simp moreover from 3 have "length xs + Suc i \<le> length T + 1" by simp ultimately have IV: "snd (mk_eqcl xs zs (Suc i) T) ! j < length (fst (mk_eqcl xs zs (Suc i) T)) + Suc i \<and> (Suc i \<le> snd (mk_eqcl xs zs (Suc i) T) ! j \<longrightarrow> fst (mk_eqcl xs zs (Suc i) T) ! (snd (mk_eqcl xs zs (Suc i) T) ! j - Suc i) = j)" by (rule 3(1)) with 1 have G1: "snd (mk_eqcl (Some l # xs) zs i T) ! j < length (fst (mk_eqcl (Some l # xs) zs i T)) + i" by (simp add: split_beta mk_eqcl_len_fst) { assume "i \<le> snd (mk_eqcl (Some l # xs) zs i T) ! j" hence "i = snd (mk_eqcl (Some l # xs) zs i T) ! j \<or> i < snd (mk_eqcl (Some l # xs) zs i T) ! j" by auto hence "fst (mk_eqcl (Some l # xs) zs i T) ! (snd (mk_eqcl (Some l # xs) zs i T) ! j - i) = j" proof (elim disjE) assume H: "i = snd (mk_eqcl (Some l # xs) zs i T) ! j" with 3 1 have "\<exists>j < length (snd (mk_eqcl xs zs (Suc i) T)). snd (mk_eqcl xs zs (Suc i) T) ! j = i" by auto hence T1: "i \<in> set (snd (mk_eqcl xs zs (Suc i) T))" by (simp only: in_set_conv_nth) from 3(4) have "i \<notin> set zs" by auto hence "i \<notin> set (snd (mk_eqcl xs zs (Suc i) T))" by (simp add: mk_eqcl_set_snd) with T1 show ?thesis by simp next assume H: "i < snd (mk_eqcl (Some l # xs) zs i T) ! j" from H obtain k where K: "snd (mk_eqcl (Some l # xs) zs i T) ! j - i = Suc k" by (cases "snd (mk_eqcl (Some l # xs) zs i T) ! j - i") simp+ hence K': "snd (mk_eqcl (Some l # xs) zs i T) ! j - Suc i = k" by simp from 1 H IV have "fst (mk_eqcl xs zs (Suc i) T) ! (snd (mk_eqcl xs zs (Suc i) T) ! j - Suc i) = j" by simp with K K' show ?thesis by (simp add: split_beta) qed } with G1 show ?case by simp qed lemma mk_eqcl_fst_nth: assumes "\<And>i j k. \<lbrakk>i < length xs; j < length xs; xs ! i = Some k\<rbrakk> \<Longrightarrow> (xs ! 
j = Some k) = (\<not> tr_lookup T (i + ii) (j + ii))" and "\<And>a b c. \<lbrakk>a \<le> length T; b \<le> length T; c \<le> length T; \<not> tr_lookup T a b; \<not> tr_lookup T b c\<rbrakk> \<Longrightarrow> \<not> tr_lookup T a c" and "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < length zs" and "length xs + ii = length T + 1" and "i < length xs" and "j < length xs" shows "(fst (mk_eqcl xs zs ii T) ! i = fst (mk_eqcl xs zs ii T) ! j) = (\<not> tr_lookup T (i + ii) (j + ii))" using assms proof (induct xs zs ii T arbitrary: i j rule: mk_eqcl.induct) case (1 zs ii T) thus ?case by simp next case (2 xs zs ii T) { fix i j assume H: "i < j" "j < length (None # xs)" then obtain j' where J: "j = Suc j'" by (cases j) simp+ have "(fst (mk_eqcl (None # xs) zs ii T) ! i = fst (mk_eqcl (None # xs) zs ii T) ! j) = (\<not> tr_lookup T (i + ii) (j + ii))" proof (cases i) case 0 with J have "(fst (mk_eqcl (None # xs) zs ii T) ! i = fst (mk_eqcl (None # xs) zs ii T) ! j) = (fst (mk_eqcl (mk_eqcl' xs ii (Suc ii) (length zs) T) (zs @ [ii]) (Suc ii) T) ! j' = length zs)" by (auto simp add: split_beta) also from H J have "\<dots> = (mk_eqcl' xs ii (Suc ii) (length zs) T ! j' = Some (length zs))" by (simp add: mk_eqcl_fst_Some mk_eqcl'_len) also have "\<dots> = (\<not> tr_lookup T (j' + Suc ii) ii)" proof - have "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < length zs" proof - fix x k assume "x \<in> set xs" "x = Some k" with 2(4)[of x k] show "k < length zs" by simp qed moreover have "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + Suc ii) ii\<rbrakk> \<Longrightarrow> xs ! i' = None" proof - fix i' assume H: "i' < length xs" "\<not> tr_lookup T (i' + Suc ii) ii" { assume H': "xs ! i' \<noteq> None" then obtain k where "xs ! i' = Some k" by (cases "xs ! i'") simp+ with 2(2)[of "Suc i'" 0 k] H have False by simp } thus "xs ! i' = None" by (cases "xs ! i'") simp+ qed moreover from H J have "ii < Suc ii" "j' < length xs" by simp+ ultimately show ?thesis by (rule mk_eqcl'_nth') qed also from J 0 have "\<dots> = (\<not> tr_lookup T (i + ii) (j + ii))" by (auto simp: tr_lookup_def) finally show ?thesis by simp next case (Suc i') have "\<And>i j k. \<lbrakk>i < length (mk_eqcl' xs ii (Suc ii) (length zs) T); j < length (mk_eqcl' xs ii (Suc ii) (length zs) T); mk_eqcl' xs ii (Suc ii) (length zs) T ! i = Some k\<rbrakk> \<Longrightarrow> (mk_eqcl' xs ii (Suc ii) (length zs) T ! j = Some k) = (\<not> tr_lookup T (i + Suc ii) (j + Suc ii))" proof - fix i j k assume H: "i < length (mk_eqcl' xs ii (Suc ii) (length zs) T)" "j < length (mk_eqcl' xs ii (Suc ii) (length zs) T)" "mk_eqcl' xs ii (Suc ii) (length zs) T ! i = Some k" { fix i' j' k assume "i' < length xs" "j' < length xs" "xs ! i' = Some k" with 2(2)[of "Suc i'" "Suc j'" k] have "(xs ! j' = Some k) = (\<not> tr_lookup T (i' + Suc ii) (j' + Suc ii))" by simp } moreover note 2(3) moreover from 2(5) have "length xs + Suc ii = length T + 1" by simp moreover { fix x k assume "x \<in> set xs" "x = Some k" with 2(4)[of x k] have "k < length zs" by simp } moreover have "\<And>i'. \<lbrakk>i' < length xs; \<not> tr_lookup T (i' + Suc ii) ii\<rbrakk> \<Longrightarrow> xs ! i' = None" proof - fix i' assume H': "i' < length xs" "\<not> tr_lookup T (i' + Suc ii) ii" { assume "xs ! i' \<noteq> None" then obtain k where K: "xs ! i' = Some k" by (cases "xs ! i'") simp+ with H' 2(2)[of "Suc i'" 0 k] have False by simp } thus "xs ! i' = None" by (cases "xs ! 
i' = None") simp+ qed moreover have "ii < Suc ii" by simp moreover from H have "i < length xs" by (simp add: mk_eqcl'_len) moreover note H(3) moreover from H have "j < length xs" by (simp add: mk_eqcl'_len) ultimately show "(mk_eqcl' xs ii (Suc ii) (length zs) T ! j = Some k) = (\<not> tr_lookup T (i + Suc ii) (j + Suc ii))" by (rule mk_eqcl'_nth) qed moreover note 2(3) moreover have "\<And>x k. \<lbrakk>x \<in> set (mk_eqcl' xs ii (Suc ii) (length zs) T); x = Some k\<rbrakk> \<Longrightarrow> k < length (zs @ [ii])" proof - fix x k assume H: "x \<in> set (mk_eqcl' xs ii (Suc ii) (length zs) T)" "x = Some k" { fix x k assume "x \<in> set xs" "x = Some k" with 2(4)[of x k] have "k < length zs" by simp } from this H have "k \<le> length zs" by (rule mk_eqcl'_bound) thus "k < length (zs @ [ii])" by simp qed moreover from 2(5) have "length (mk_eqcl' xs ii (Suc ii) (length zs) T) + Suc ii = length T + 1" by (simp add: mk_eqcl'_len) moreover from H Suc J have "i' < length (mk_eqcl' xs ii (Suc ii) (length zs) T)" "j' < length (mk_eqcl' xs ii (Suc ii) (length zs) T)" by (simp add: mk_eqcl'_len)+ ultimately have IV: "(fst (mk_eqcl (mk_eqcl' xs ii (Suc ii) (length zs) T) (zs @ [ii]) (Suc ii) T) ! i' = fst (mk_eqcl (mk_eqcl' xs ii (Suc ii) (length zs) T) (zs @ [ii]) (Suc ii) T) ! j') = (\<not> tr_lookup T (i' + Suc ii) (j' + Suc ii))" by (rule 2(1)) with Suc J show ?thesis by (simp add: split_beta) qed } note L = this have "i < j \<or> i = j \<or> i > j" by auto thus ?case proof (elim disjE) assume "i > j" with 2(6) L have "(fst (mk_eqcl (None # xs) zs ii T) ! j = fst (mk_eqcl (None # xs) zs ii T) ! i) = (\<not> tr_lookup T (i + ii) (j + ii))" by (auto simp: tr_lookup_def) thus ?thesis by auto qed (insert 2(7) L, simp add: tr_lookup_def)+ next case (3 l xs zs ii T i j) { fix i j assume H: "i < j" "j < length (Some l # xs)" then obtain j' where J: "j = Suc j'" by (cases j) simp+ have "(fst (mk_eqcl (Some l # xs) zs ii T) ! i = fst (mk_eqcl (Some l # xs) zs ii T) ! j) = (\<not> tr_lookup T (i + ii) (j + ii))" proof (cases i) case 0 with J have "(fst (mk_eqcl (Some l # xs) zs ii T) ! i = fst (mk_eqcl (Some l # xs) zs ii T) ! j) = (fst (mk_eqcl xs zs (Suc ii) T) ! j' = l)" by (auto simp add: split_beta) also from 3(4)[of "Some l" l] H J have "\<dots> = (xs ! j' = Some l)" by (simp add: mk_eqcl_fst_Some) also from J have "\<dots> = ((Some l # xs) ! j = Some l)" by simp also from H 0 3(2)[of i j l] have "\<dots> = (\<not> tr_lookup T (i + ii) (j + ii))" by simp finally show ?thesis by simp next case (Suc i') have "\<And>i j k. \<lbrakk>i < length xs; j < length xs; xs ! i = Some k\<rbrakk> \<Longrightarrow> (xs ! j = Some k) = (\<not> tr_lookup T (i + Suc ii) (j + Suc ii))" proof - fix i j k assume "i < length xs" "j < length xs" "xs ! i = Some k" with 3(2)[of "Suc i" "Suc j" k] show "(xs ! j = Some k) = (\<not> tr_lookup T (i + Suc ii) (j + Suc ii))" by simp qed moreover note 3(3) moreover have "\<And>x k. \<lbrakk>x \<in> set xs; x = Some k\<rbrakk> \<Longrightarrow> k < length zs" proof - fix x k assume "x \<in> set xs" "x = Some k" with 3(4)[of x k] show "k < length zs" by simp qed moreover from 3(5) H Suc J have "length xs + Suc ii = length T + 1" "i' < length xs" "j' < length xs" by simp+ ultimately have "(fst (mk_eqcl xs zs (Suc ii) T) ! i' = fst (mk_eqcl xs zs (Suc ii) T) ! 
j') = (\<not> tr_lookup T (i' + Suc ii) (j' + Suc ii))" by (rule 3(1)) with J Suc show ?thesis by (simp add: split_beta) qed } note L = this have "i < j \<or> i = j \<or> i > j" by auto thus ?case proof (elim disjE) assume "i > j" with 3(6) L have "(fst (mk_eqcl (Some l # xs) zs ii T) ! j = fst (mk_eqcl (Some l # xs) zs ii T) ! i) = (\<not> tr_lookup T (j + ii) (i + ii))" by simp thus ?thesis by (auto simp: tr_lookup_def) qed (insert 3(7) L, simp add: tr_lookup_def)+ qed definition min_dfa :: "dfa \<Rightarrow> dfa" where "min_dfa = (\<lambda>(bd, as). let (os, ns) = mk_eqcl (replicate (length bd) None) [] 0 (fixpt (bd, as) (init_tr (bd, as))) in (map (\<lambda>p. bdd_map (\<lambda>q. os ! q) (bd ! p)) ns, map (\<lambda>p. as ! p) ns))" definition eq_nodes :: "dfa \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool" where "eq_nodes = (\<lambda>M v p q. \<not> (\<exists>n. dist_nodes M n v p q))" lemma mk_eqcl_fixpt_fst_bound: assumes "dfa_is_node M i" shows "fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! i < length (snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))))" (is "fst ?M ! i < length (snd ?M)") proof - { fix x k assume H: "x \<in> set (replicate (length (fst M)) (None::nat option))" "x = Some k" hence "k < length []" by (cases "length (fst M) = 0") simp+ } moreover from assms have "fst ?M ! i \<in> set (fst ?M)" by (simp add: dfa_is_node_def mk_eqcl_len_fst) ultimately show ?thesis by (rule mk_eqcl_bound) qed lemma mk_eqcl_fixpt_fst_nth: assumes "wf_dfa M v" and "dfa_is_node M p" and "dfa_is_node M q" shows "(fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! p = fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! q) = eq_nodes M v p q" (is "(fst ?M ! p = fst ?M ! q) = eq_nodes M v p q") proof - have WF: "wf_tr M (fixpt M (init_tr M))" by (simp only: fixpt_wf init_tr_wf) have "(fst ?M ! p = fst ?M ! q) = (\<not> tr_lookup (fixpt M (init_tr M)) p q)" proof - { fix i j k assume H: "i < length (replicate (length (fst M)) None)" "j < length (replicate (length (fst M)) None)" "replicate (length (fst M)) None ! i = Some k" hence "(replicate (length (fst M)) None ! j = Some k) = (\<not> tr_lookup (fixpt M (init_tr M)) (i + 0) (j + 0))" by simp } moreover have "\<And>a b c. \<lbrakk>a \<le> length (fixpt M (init_tr M)); b \<le> length (fixpt M (init_tr M)); c \<le> length (fixpt M (init_tr M)); \<not> tr_lookup (fixpt M (init_tr M)) a b; \<not> tr_lookup (fixpt M (init_tr M)) b c\<rbrakk> \<Longrightarrow> \<not> tr_lookup (fixpt M (init_tr M)) a c" proof - fix a b c assume H': "a \<le> length (fixpt M (init_tr M))" "b \<le> length (fixpt M (init_tr M))" "c \<le> length (fixpt M (init_tr M))" "\<not> tr_lookup (fixpt M (init_tr M)) a b" "\<not> tr_lookup (fixpt M (init_tr M)) b c" { fix q assume H'': "q \<le> length (fixpt M (init_tr M))" from assms have "length (fst M) > 0" by (simp add: wf_dfa_def) then obtain m where M: "length (fst M) = Suc m" by (cases "length (fst M)") simp+ hence M': "m = length (fst M) - 1" by simp with H'' WF have "q \<le> m" by (simp add: wf_tr_def) with M have "q < length (fst M)" by simp } with H' have D: "dfa_is_node M a" "dfa_is_node M b" "dfa_is_node M c" by (auto simp: dfa_is_node_def) with H'(4,5) assms(1) have "\<not> (\<exists>n. dist_nodes M n v a b)" "\<not> (\<exists>n. dist_nodes M n v b c)" by (simp add: fixpt_dist_nodes[symmetric])+ hence "\<not> (\<exists>n. 
dist_nodes M n v a c)" by (auto simp: dist_nodes_def) with H' assms D show "\<not> tr_lookup (fixpt M (init_tr M)) a c" by (simp add: fixpt_dist_nodes[symmetric]) qed moreover have "\<And>x k. \<lbrakk>x \<in> set (replicate (length (fst M)) None); x = Some k\<rbrakk> \<Longrightarrow> k < length []" proof - fix x k assume "x \<in> set (replicate (length (fst M)) (None::nat option))" "x = Some k" thus "k < length []" by (cases "length (fst M) = 0") simp+ qed moreover from WF assms have "length (replicate (length (fst M)) None) + 0 = length (fixpt M (init_tr M)) + 1" by (simp add: wf_tr_def wf_dfa_def) moreover from assms have "p < length (replicate (length (fst M)) None)" "q < length (replicate (length (fst M)) None)" by (simp add: dfa_is_node_def)+ ultimately have "(fst ?M ! p = fst ?M ! q) = (\<not> tr_lookup (fixpt M (init_tr M)) (p+0) (q+0))" by (rule mk_eqcl_fst_nth) thus ?thesis by simp qed also from assms have "\<dots> = eq_nodes M v p q" by (simp only: fixpt_dist_nodes eq_nodes_def) finally show ?thesis by simp qed lemma mk_eqcl_fixpt_fst_snd_nth: assumes "i < length (snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))))" and "wf_dfa M v" shows "snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! i < length (fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M)))) \<and> fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! (snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! i) = i" (is "snd ?M ! i < length (fst ?M) \<and> fst ?M ! (snd ?M ! i) = i") proof - have "\<And>i. i < length [] \<Longrightarrow> [] ! i < length (replicate (length (fst M)) None) + 0 \<and> (0 \<le> [] ! i \<longrightarrow> replicate (length (fst M)) None ! ([] ! i - 0) = Some i)" by simp moreover have "\<And>j1 j2. \<lbrakk>j1 < j2; j2 < length []\<rbrakk> \<Longrightarrow> [] ! j1 < [] ! j2" by simp moreover have "\<And>z. z \<in> set [] \<Longrightarrow> z < 0" by simp moreover note assms(1) moreover have "length (replicate (length (fst M)) None) + 0 \<le> length (fixpt M (init_tr M)) + 1" proof - have WF: "wf_tr M (fixpt M (init_tr M))" by (simp only: init_tr_wf fixpt_wf) from assms have "length (fst M) > 0" by (simp add: wf_dfa_def) then obtain m where M:"length (fst M) = Suc m" by (cases "length (fst M)") simp+ hence M': "m = length (fst M) - 1" by simp with WF have "length (fixpt M (init_tr M)) = m" by (simp add: wf_tr_def) with M show ?thesis by simp qed ultimately have "snd ?M ! i < length (fst ?M) + 0 \<and> (0 \<le> snd ?M ! i \<longrightarrow> fst ?M ! (snd ?M ! 
i - 0) = i)" by (rule mk_eqcl_fst_snd) thus ?thesis by simp qed lemma eq_nodes_dfa_trans: assumes "eq_nodes M v p q" and "is_alph v bs" shows "eq_nodes M v (dfa_trans M p bs) (dfa_trans M q bs)" proof (rule ccontr) assume H: "\<not> eq_nodes M v (dfa_trans M p bs) (dfa_trans M q bs)" then obtain n w where "length w = n" "list_all (is_alph v) w" "dfa_accepting M (dfa_steps M (dfa_trans M p bs) w) \<noteq> dfa_accepting M (dfa_steps M (dfa_trans M q bs) w)" unfolding eq_nodes_def dist_nodes_def by blast with assms have "length (bs # w) = Suc n" "list_all (is_alph v) (bs # w)" "dfa_accepting M (dfa_steps M p (bs # w)) \<noteq> dfa_accepting M (dfa_steps M q (bs # w))" by simp+ hence "\<not> eq_nodes M v p q" unfolding eq_nodes_def dist_nodes_def by blast with assms show False by simp qed lemma mk_eqcl_fixpt_trans: assumes "wf_dfa M v" and "dfa_is_node M p" and "is_alph v bs" shows "dfa_trans (min_dfa M) (fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! p) bs = fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! (dfa_trans M p bs)" (is "dfa_trans (min_dfa M) (fst ?M ! p) bs = fst ?M ! (dfa_trans M p bs)") proof - let ?q = "snd ?M ! (fst ?M ! p)" from assms have I1: "?q < length (fst ?M)" "fst ?M ! ?q = fst ?M ! p" by (simp add: mk_eqcl_fixpt_fst_bound mk_eqcl_fixpt_fst_snd_nth)+ with assms have I2: "bddh (length bs) (fst M ! ?q)" by (simp add: mk_eqcl_len_fst wf_dfa_def list_all_iff is_alph_def) from I1 have I3: "dfa_is_node M ?q" by (simp add: mk_eqcl_len_fst dfa_is_node_def) with assms I1 have "eq_nodes M v p ?q" by (simp add: mk_eqcl_fixpt_fst_nth[symmetric]) with assms have "eq_nodes M v (dfa_trans M p bs) (dfa_trans M ?q bs)" by (simp add: eq_nodes_dfa_trans) with assms I3 have "fst ?M ! (dfa_trans M p bs) = fst ?M ! (dfa_trans M ?q bs)" by (simp add: dfa_trans_is_node mk_eqcl_fixpt_fst_nth) with assms I2 show ?thesis by (simp add: dfa_trans_def min_dfa_def split_beta mk_eqcl_fixpt_fst_bound bdd_map_bdd_lookup) qed lemma mk_eqcl_fixpt_steps: assumes "wf_dfa M v" and "dfa_is_node M p" and "list_all (is_alph v) w" shows "dfa_steps (min_dfa M) (fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! p) w = fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! (dfa_steps M p w)" (is "dfa_steps (min_dfa M) (fst ?M ! p) w = fst ?M ! (dfa_steps M p w)") using assms by (induct w arbitrary: p) (simp add: mk_eqcl_fixpt_trans dfa_trans_is_node)+ lemma mk_eqcl_fixpt_startnode: assumes "length (fst M) > 0" shows "length (snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M)))) > 0 \<and> fst (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! 0 = 0 \<and> snd (mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))) ! 0 = 0" (is "length (snd ?M) > 0 \<and> fst ?M ! 0 = 0 \<and> snd ?M ! 
0 = 0") proof - from assms obtain k where K: "length (fst M) = Suc k" by (cases "length (fst M)") simp+ from K have "length (snd ?M) = length (snd (mk_eqcl (mk_eqcl' (replicate k None) 0 (Suc 0) 0 (fixpt M (init_tr M))) [0] (Suc 0) (fixpt M (init_tr M))))" by (simp add: split_beta) also have "\<dots> \<ge> length [0::nat]" by (simp only: mk_eqcl_len_snd) finally have "length (snd ?M) > 0" by auto with K show ?thesis by (simp add: split_beta mk_eqcl_snd_nth) qed lemma min_dfa_wf: "wf_dfa M v \<Longrightarrow> wf_dfa (min_dfa M) v" proof - assume H: "wf_dfa M v" obtain bd as where "min_dfa M = (bd, as)" by (cases "min_dfa M") auto hence M: "bd = fst (min_dfa M)" "as = snd (min_dfa M)" by simp+ let ?M = "mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))" { fix x assume "x \<in> set bd" then obtain i where I: "i < length bd" "x = bd ! i" by (auto simp: in_set_conv_nth) with M H have "snd ?M ! i < length (fst ?M)" by (simp add: min_dfa_def split_beta mk_eqcl_fixpt_fst_snd_nth) hence N: "dfa_is_node M (snd ?M ! i)" by (simp add: mk_eqcl_len_fst dfa_is_node_def) with H have BH: "bddh v (fst M ! (snd ?M ! i))" by (simp add: wf_dfa_def list_all_iff dfa_is_node_def) from I M have BI: "bd ! i = bdd_map (\<lambda>q. fst ?M ! q) (fst M ! (snd ?M ! i))" by (simp add: split_beta min_dfa_def) with BH have G1: "bddh v (bd ! i)" by (simp add: bddh_bdd_map) from H N have "bdd_all (dfa_is_node M) (fst M ! (snd ?M ! i))" by (simp add: wf_dfa_def list_all_iff dfa_is_node_def) moreover { fix q assume "dfa_is_node M q" hence "fst ?M ! q < length (snd ?M)" by (simp add: mk_eqcl_fixpt_fst_bound) hence "dfa_is_node (min_dfa M) (fst ?M ! q)" by (simp add: dfa_is_node_def min_dfa_def split_beta) } ultimately have "bdd_all (dfa_is_node (min_dfa M)) (bdd_map (\<lambda>q. fst ?M ! q) (fst M ! (snd ?M ! i)))" by (simp add: bdd_all_bdd_map) with G1 BI I have "bddh v x \<and> bdd_all (dfa_is_node (min_dfa M)) x" by simp } hence G: "list_all (bddh v) bd \<and> list_all (bdd_all (dfa_is_node (min_dfa M))) bd" by (simp add: list_all_iff) from H have "length (fst M) > 0" by (simp add: wf_dfa_def) hence "length (snd ?M) > 0" by (auto simp only: mk_eqcl_fixpt_startnode) with G M show "wf_dfa (min_dfa M) v" by (simp add: wf_dfa_def min_dfa_def split_beta) qed lemma min_dfa_accept: assumes "wf_dfa M v" and "list_all (is_alph v) w" shows "dfa_accepts (min_dfa M) w = dfa_accepts M w" proof - let ?M = "mk_eqcl (replicate (length (fst M)) None) [] 0 (fixpt M (init_tr M))" from assms have "length (fst M) > 0" by (simp add: wf_dfa_def) hence SN: "length (snd ?M) > 0 \<and> fst ?M ! 0 = 0 \<and> snd ?M ! 0 = 0" by (auto simp only: mk_eqcl_fixpt_startnode) have D: "dfa_steps (min_dfa M) 0 w = fst ?M ! dfa_steps M 0 w" proof - from assms have "dfa_is_node M 0" by (simp add: wf_dfa_def dfa_is_node_def) moreover from SN have "dfa_steps (min_dfa M) 0 w = dfa_steps (min_dfa M) (fst ?M ! 0) w" by simp moreover note assms ultimately show ?thesis by (simp add: mk_eqcl_fixpt_steps) qed from assms have WF: "wf_dfa (min_dfa M) v" by (simp add: min_dfa_wf) hence "dfa_is_node (min_dfa M) 0" by (simp add: dfa_startnode_is_node) with WF assms have "dfa_is_node (min_dfa M) (dfa_steps (min_dfa M) 0 w)" by (simp add: dfa_steps_is_node) with D have DN: "dfa_is_node (min_dfa M) (fst ?M ! dfa_steps M 0 w)" by simp let ?q = "snd ?M ! (fst ?M ! dfa_steps M 0 w)" from assms have N: "dfa_is_node M (dfa_steps M 0 w)" by (simp add: dfa_steps_is_node dfa_startnode_is_node) with assms have I: "?q < length (fst ?M)" "fst ?M ! ?q = fst ?M ! 
dfa_steps M 0 w" by (simp add: mk_eqcl_fixpt_fst_bound mk_eqcl_fixpt_fst_snd_nth)+ hence "dfa_is_node M ?q" by (simp add: mk_eqcl_len_fst dfa_is_node_def) with assms N I have EQ: "eq_nodes M v (dfa_steps M 0 w) ?q" by (simp add: mk_eqcl_fixpt_fst_nth[symmetric]) have A: "dfa_accepting M (dfa_steps M 0 w) = dfa_accepting M ?q" proof (rule ccontr) assume H: "dfa_accepting M (dfa_steps M 0 w) \<noteq> dfa_accepting M ?q" hence "dist_nodes M 0 v (dfa_steps M 0 w) ?q" by (auto simp: dist_nodes_def) with EQ show False by (simp add: eq_nodes_def) qed from D have "dfa_accepts (min_dfa M) w = snd (min_dfa M) ! (fst ?M ! dfa_steps M 0 w)" by (simp add: accepts_def dfa_accepting_def) also from WF DN have "\<dots> = dfa_accepting M ?q" by (simp add: dfa_is_node_def wf_dfa_def min_dfa_def split_beta dfa_accepting_def) also from A have "\<dots> = dfa_accepts M w" by (simp add: accepts_def) finally show ?thesis by simp qed section \<open>NFAs\<close> type_synonym nbddtable = "bool list bdd list" type_synonym nfa = "nbddtable \<times> astate" definition nfa_is_node :: "nfa \<Rightarrow> bool list \<Rightarrow> bool" where "nfa_is_node A = (\<lambda>qs. length qs = length (fst A))" definition wf_nfa :: "nfa \<Rightarrow> nat \<Rightarrow> bool" where "wf_nfa A n = (list_all (bddh n) (fst A) \<and> list_all (bdd_all (nfa_is_node A)) (fst A) \<and> length (snd A) = length (fst A) \<and> length (fst A) > 0)" definition set_of_bv :: "bool list \<Rightarrow> nat set" where "set_of_bv bs = {i. i < length bs \<and> bs ! i}" fun bv_or :: "bool list \<Rightarrow> bool list \<Rightarrow> bool list" where "bv_or [] [] = []" | "bv_or (x # xs) (y # ys) = (x \<or> y) # (bv_or xs ys)" lemma bv_or_nth: assumes "length l = length r" assumes "i < length l" shows "bv_or l r ! i = (l ! i \<or> r ! i)" using assms proof (induct l r arbitrary: i rule: bv_or.induct) case (2 xx xss yy yss ii) have "ii = 0 \<or> ii > 0" by auto thus ?case proof (elim disjE) assume "ii > 0" then obtain j where J: "ii = Suc j" by (induct ii) simp+ with 2 show ?thesis by simp qed simp qed simp+ lemma bv_or_length: assumes "length l = length r" shows "length (bv_or l r) = length l" using assms by (induct l r rule: bv_or.induct) simp+ lemma bv_or_set_of_bv: assumes "nfa_is_node A p" and "nfa_is_node A q" shows "set_of_bv (bv_or p q) = set_of_bv p \<union> set_of_bv q" using assms by (auto simp: nfa_is_node_def set_of_bv_def bv_or_length bv_or_nth) lemma bv_or_is_node: "\<lbrakk>nfa_is_node A p; nfa_is_node A q\<rbrakk> \<Longrightarrow> nfa_is_node A (bv_or p q)" by (simp add: bv_or_length nfa_is_node_def) fun subsetbdd where "subsetbdd [] [] bdd = bdd" | "subsetbdd (bdd' # bdds) (b # bs) bdd = (if b then subsetbdd bdds bs (bdd_binop bv_or bdd bdd') else subsetbdd bdds bs bdd)" definition nfa_emptybdd :: "nat \<Rightarrow> bool list bdd" where "nfa_emptybdd n = Leaf (replicate n False)" lemma bddh_subsetbdd: assumes "list_all (bddh l) (fst A)" and "bddh l bdd'" and "nfa_is_node A q" shows "bddh l (subsetbdd (fst A) q bdd')" using assms unfolding nfa_is_node_def by (induct ("fst A") q bdd' rule: subsetbdd.induct) (simp add: bddh_binop)+ lemma bdd_lookup_subsetbdd': assumes "length bdds = length q" and "\<forall>x \<in> set bdds. bddh (length ws) x" and "bddh (length ws) obdd" and "\<And>bs w. \<lbrakk>bs \<in> set bdds; length w = length ws\<rbrakk> \<Longrightarrow> length (bdd_lookup bs w) = c" and "\<And>w. length w = length ws \<Longrightarrow> length (bdd_lookup obdd w) = c" and "a < c" shows "bdd_lookup (subsetbdd bdds q obdd) ws ! 
a = ((\<exists>i < length q. q ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup obdd ws ! a)" using assms proof (induct bdds q obdd rule: subsetbdd.induct) case (2 bdd' bdds x xs bdd) show ?case proof (cases x) case True with 2 have H: "bdd_lookup (subsetbdd bdds xs (bdd_binop bv_or bdd bdd')) ws ! a = ((\<exists>i<length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup (bdd_binop bv_or bdd bdd') ws ! a)" by (simp add: bddh_binop bdd_lookup_binop bv_or_length) from 2 have "((\<exists>i < length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup (bdd_binop bv_or bdd bdd') ws ! a) = ((\<exists>i < length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> (bdd_lookup bdd' ws) ! a \<or> (bdd_lookup bdd ws) ! a)" by (auto simp: bdd_lookup_binop bv_or_nth) also have "\<dots> = ((\<exists>i < Suc (length xs). (True # xs) ! i \<and> bdd_lookup ((bdd' # bdds) ! i) ws ! a) \<or> bdd_lookup bdd ws ! a)" (is "((\<exists>i. ?P i) \<or> ?Q \<or> ?R) = ((\<exists>i. ?S i) \<or> ?R)") proof assume "(\<exists>i. ?P i) \<or> ?Q \<or> ?R" thus "(\<exists>i. ?S i) \<or> ?R" by (elim disjE) auto next assume "(\<exists>i. ?S i) \<or> ?R" thus "(\<exists>i. ?P i) \<or> ?Q \<or> ?R" proof (elim disjE) assume "\<exists>i. ?S i" then obtain i where I: "?S i" .. { assume "i = 0" with I have "?Q" by simp } { assume "i \<noteq> 0" then obtain j where "i = Suc j" by (cases i) simp+ with I have "\<exists>j. ?P j" by auto } with \<open>i=0 \<Longrightarrow> ?Q\<close> show ?thesis by (cases "i=0") simp+ qed simp qed finally have "((\<exists>i<length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup (bdd_binop bv_or bdd bdd') ws ! a) = ((\<exists>i<Suc (length xs). (True # xs) ! i \<and> bdd_lookup ((bdd' # bdds) ! i) ws ! a) \<or> bdd_lookup bdd ws ! a)" by simp with True H show ?thesis by simp next case False with 2 have H: "bdd_lookup (subsetbdd bdds xs bdd) ws ! a = ((\<exists>i < length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup bdd ws ! a)" by simp have "((\<exists>i<length xs. xs ! i \<and> bdd_lookup (bdds ! i) ws ! a) \<or> bdd_lookup bdd ws ! a) = ((\<exists>i<Suc (length xs). (False # xs) ! i \<and> bdd_lookup ((bdd' # bdds) ! i) ws ! a) \<or> bdd_lookup bdd ws ! a)" (is "((\<exists>i. ?S i) \<or> ?R) = ((\<exists>i. ?P i) \<or> ?R)") proof assume "(\<exists>i. ?S i) \<or> ?R" thus "(\<exists>i. ?P i) \<or> ?R" by (elim disjE) auto next assume "(\<exists>i. ?P i) \<or> ?R" thus "(\<exists>i. ?S i) \<or> ?R" proof (elim disjE) assume "\<exists>i. ?P i" then obtain i where "?P i" .. then obtain j where "i = Suc j" by (cases i) simp+ with \<open>?P i\<close> show ?thesis by auto qed simp qed with False H show ?thesis by simp qed qed simp+ lemma bdd_lookup_subsetbdd: assumes "wf_nfa N (length ws)" and "nfa_is_node N q" and "a < length (fst N)" shows "bdd_lookup (subsetbdd (fst N) q (nfa_emptybdd (length q))) ws ! a = (\<exists>i< length q. q ! i \<and> bdd_lookup (fst N ! i) ws ! a)" proof - { fix w :: "bool list" assume H: "length w = length ws" from assms have "\<forall>bd \<in> set (fst N). bdd_all (nfa_is_node N) bd" by (simp add: wf_nfa_def list_all_iff) moreover from assms have "\<forall>bd \<in> set (fst N). bddh (length ws) bd" by (simp add: wf_nfa_def list_all_iff) moreover note H ultimately have "\<forall>bd \<in> set (fst N). nfa_is_node N (bdd_lookup bd w)" by (simp add: bdd_all_bdd_lookup) } with assms have "bdd_lookup (subsetbdd (fst N) q (nfa_emptybdd (length q))) ws ! a = ((\<exists>i < length q. q ! 
i \<and> bdd_lookup (fst N ! i) ws ! a) \<or> bdd_lookup (nfa_emptybdd (length q)) ws ! a)" by (simp add: bdd_lookup_subsetbdd' nfa_is_node_def wf_nfa_def list_all_iff nfa_emptybdd_def) with assms show ?thesis by (auto simp: nfa_emptybdd_def nfa_is_node_def) qed definition nfa_trans :: "nfa \<Rightarrow> bool list \<Rightarrow> bool list \<Rightarrow> bool list" where "nfa_trans A qs bs = bdd_lookup (subsetbdd (fst A) qs (nfa_emptybdd (length qs))) bs" fun nfa_accepting' :: "bool list \<Rightarrow> bool list \<Rightarrow> bool" where "nfa_accepting' [] bs = False" | "nfa_accepting' as [] = False" | "nfa_accepting' (a # as) (b # bs) = (a \<and> b \<or> nfa_accepting' as bs)" definition nfa_accepting :: "nfa \<Rightarrow> bool list \<Rightarrow> bool" where "nfa_accepting A = nfa_accepting' (snd A)" lemma nfa_accepting'_set_of_bv: "nfa_accepting' l r = (set_of_bv l \<inter> set_of_bv r \<noteq> {})" proof - have nfa_accepting_help: "\<And>as q. nfa_accepting' as q = (\<exists>i. i < length as \<and> i < length q \<and> as ! i \<and> q ! i)" proof - fix as q show "nfa_accepting' as q = (\<exists>i < length as. i < length q \<and> as ! i \<and> q ! i)" proof (induct as q rule: nfa_accepting'.induct) case (3 a as q qs) thus ?case proof (cases "a\<and>q") case False with 3 have "nfa_accepting' as qs = (\<exists>i < length as. i < length qs \<and> as ! i \<and> qs ! i)" (is "?T = _") by simp also have "\<dots> = (\<exists>j < length as. j < length qs \<and> (a#as) ! Suc j \<and> (q#qs) ! Suc j)" by simp also have "\<dots> = (\<exists>j < length (a#as). j < length (q#qs) \<and> (a#as) ! j \<and> (q#qs) ! j)" (is "(\<exists>j. ?P j) = (\<exists>j. ?Q j)") proof assume "\<exists>j. ?P j" then obtain j where "?P j" .. hence "?Q (Suc j)" by simp thus "\<exists>j. ?Q j" by (rule exI) next assume "\<exists>j. ?Q j" then obtain j where J: "?Q j" .. with False obtain i where "j = Suc i" by (cases j) simp+ with J have "?P i" by simp thus "\<exists>i. ?P i" by (rule exI) qed also from False have "\<dots> = ((a\<and>q \<and> ?Q 0) \<or> (\<not> (a\<and>q) \<and> (\<exists>j. ?Q j)))" by auto also have "\<dots> = ((a \<and> q \<and> (\<exists>j. ?Q j)) \<or> (\<not>(a\<and>q) \<and> (\<exists>j. ?Q j)))" by auto also have "\<dots> = (\<exists>j. ?Q j)" by auto finally have "?T = (\<exists>j. ?Q j)" . with False show ?thesis by auto qed (auto simp: 3) qed simp+ qed hence "nfa_accepting' l r = (\<exists>i. i < length l \<and> i < length r \<and> l ! i \<and> r ! i)" by simp also have "\<dots> = (\<exists>i. i \<in> set_of_bv l \<and> i \<in> set_of_bv r)" by (auto simp: set_of_bv_def) also have "\<dots> = (set_of_bv l \<inter> set_of_bv r \<noteq> {})" by auto finally show ?thesis . 
qed lemma nfa_accepting_set_of_bv: "nfa_accepting A q = (set_of_bv (snd A) \<inter> set_of_bv q \<noteq> {})" by (simp add: nfa_accepting'_set_of_bv nfa_accepting_def) definition nfa_startnode :: "nfa \<Rightarrow> bool list" where "nfa_startnode A = (replicate (length (fst A)) False)[0:=True]" locale aut_nfa = fixes A n assumes well_formed: "wf_nfa A n" sublocale aut_nfa < Automaton "nfa_trans A" "nfa_is_node A" "is_alph n" proof fix q a assume Q: "nfa_is_node A q" and A: "is_alph n a" with well_formed have "bdd_all (nfa_is_node A) (subsetbdd (fst A) q (nfa_emptybdd (length q)))" by (simp add: wf_nfa_def bdd_all_is_node_subsetbdd) moreover from well_formed Q have "bddh n (subsetbdd (fst A) q (nfa_emptybdd (length q)))" by (simp add: wf_nfa_def nfa_emptybdd_def bddh_subsetbdd) with A have "bddh (length a) (subsetbdd (fst A) q (nfa_emptybdd (length q)))" by (simp add: is_alph_def) ultimately have "nfa_is_node A (bdd_lookup (subsetbdd (fst A) q (nfa_emptybdd (length q))) a)" by (simp add: bdd_all_bdd_lookup) then show "nfa_is_node A (nfa_trans A q a)" by (simp add: nfa_trans_def) qed context aut_nfa begin lemmas trans_is_node = trans_is_node lemmas steps_is_node = steps_is_node lemmas reach_is_node = reach_is_node end lemmas nfa_trans_is_node = aut_nfa.trans_is_node [OF aut_nfa.intro] lemmas nfa_steps_is_node = aut_nfa.steps_is_node [OF aut_nfa.intro] lemmas nfa_reach_is_node = aut_nfa.reach_is_node [OF aut_nfa.intro] abbreviation "nfa_steps A \<equiv> foldl (nfa_trans A)" abbreviation "nfa_accepts A \<equiv> accepts (nfa_trans A) (nfa_accepting A) (nfa_startnode A)" abbreviation "nfa_reach A \<equiv> reach (nfa_trans A)" lemma nfa_startnode_is_node: "wf_nfa A n \<Longrightarrow> nfa_is_node A (nfa_startnode A)" by (simp add: nfa_is_node_def wf_nfa_def nfa_startnode_def) section \<open>Automata Constructions\<close> subsection \<open>Negation\<close> definition negate_dfa :: "dfa \<Rightarrow> dfa" where "negate_dfa = (\<lambda>(t,a). (t, map Not a))" lemma negate_wf_dfa: "wf_dfa (negate_dfa A) l = wf_dfa A l" by (simp add: negate_dfa_def wf_dfa_def dfa_is_node_def split_beta) lemma negate_negate_dfa: "negate_dfa (negate_dfa A) = A" proof (induct A) case (Pair t a) thus ?case by (induct a) (simp add: negate_dfa_def)+ qed lemma dfa_accepts_negate: assumes "wf_dfa A n" and "list_all (is_alph n) bss" shows "dfa_accepts (negate_dfa A) bss = (\<not> dfa_accepts A bss)" proof - have "dfa_steps (negate_dfa A) 0 bss = dfa_steps A 0 bss" by (simp add: negate_dfa_def dfa_trans_def [abs_def] split_beta) moreover from assms have "dfa_is_node A (dfa_steps A 0 bss)" by (simp add: dfa_steps_is_node dfa_startnode_is_node) ultimately show ?thesis using assms by (simp add: accepts_def dfa_accepting_def wf_dfa_def dfa_is_node_def negate_dfa_def split_beta) qed subsection \<open>Product Automaton\<close> definition prod_succs :: "dfa \<Rightarrow> dfa \<Rightarrow> nat \<times> nat \<Rightarrow> (nat \<times> nat) list" where "prod_succs A B = (\<lambda>(i, j). add_leaves (bdd_binop Pair (fst A ! i) (fst B ! j)) [])" definition "prod_is_node A B = (\<lambda>(i, j). dfa_is_node A i \<and> dfa_is_node B j)" definition prod_invariant :: "dfa \<Rightarrow> dfa \<Rightarrow> nat option list list \<times> (nat \<times> nat) list \<Rightarrow> bool" where "prod_invariant A B = (\<lambda>(tab, ps). length tab = length (fst A) \<and> (\<forall>tab'\<in>set tab. length tab' = length (fst B)))" definition "prod_ins = (\<lambda>(i, j). \<lambda>(tab, ps). (tab[i := (tab ! 
i)[j := Some (length ps)]], ps @ [(i, j)]))" definition prod_memb :: "nat \<times> nat \<Rightarrow> nat option list list \<times> (nat \<times> nat) list \<Rightarrow> bool" where "prod_memb = (\<lambda>(i, j). \<lambda>(tab, ps). tab ! i ! j \<noteq> None)" definition prod_empt :: "dfa \<Rightarrow> dfa \<Rightarrow> nat option list list \<times> (nat \<times> nat) list" where "prod_empt A B = (replicate (length (fst A)) (replicate (length (fst B)) None), [])" definition prod_dfs :: "dfa \<Rightarrow> dfa \<Rightarrow> nat \<times> nat \<Rightarrow> nat option list list \<times> (nat \<times> nat) list" where "prod_dfs A B x = gen_dfs (prod_succs A B) prod_ins prod_memb (prod_empt A B) [x]" definition binop_dfa :: "(bool \<Rightarrow> bool \<Rightarrow> bool) \<Rightarrow> dfa \<Rightarrow> dfa \<Rightarrow> dfa" where "binop_dfa f A B = (let (tab, ps) = prod_dfs A B (0, 0) in (map (\<lambda>(i, j). bdd_binop (\<lambda>k l. the (tab ! k ! l)) (fst A ! i) (fst B ! j)) ps, map (\<lambda>(i, j). f (snd A ! i) (snd B ! j)) ps))" locale prod_DFS = fixes A B n assumes well_formed1: "wf_dfa A n" and well_formed2: "wf_dfa B n" sublocale prod_DFS < DFS "prod_succs A B" "prod_is_node A B" "prod_invariant A B" prod_ins prod_memb "prod_empt A B" apply unfold_locales apply (simp add: prod_memb_def prod_ins_def prod_invariant_def prod_is_node_def split_paired_all dfa_is_node_def) apply (case_tac "a = aa") apply (case_tac "b = ba") apply auto[3] apply (simp add: prod_memb_def prod_empt_def prod_is_node_def split_paired_all dfa_is_node_def) apply (insert well_formed1 well_formed2)[] apply (simp add: prod_is_node_def prod_succs_def split_paired_all dfa_is_node_def wf_dfa_def) apply (drule conjunct1 [OF conjunct2])+ apply (simp add: list_all_iff) apply (rule ballI) apply (simp add: split_paired_all) apply (drule subsetD [OF add_leaves_binop_subset [where xs="[]" and ys="[]", simplified]]) apply clarify apply (drule_tac x="fst A ! a" in bspec) apply simp apply (drule_tac x="fst B ! b" in bspec) apply simp apply (simp add: add_leaves_bdd_all_eq' list_all_iff) apply (simp add: prod_invariant_def prod_empt_def set_replicate_conv_if) apply (simp add: prod_is_node_def prod_invariant_def prod_memb_def prod_ins_def split_paired_all dfa_is_node_def) apply (rule ballI) apply (drule subsetD [OF set_update_subset_insert]) apply auto apply (simp add: prod_is_node_def dfa_is_node_def) done context prod_DFS begin lemma prod_dfs_eq_rtrancl: "prod_is_node A B x \<Longrightarrow> prod_is_node A B y \<Longrightarrow> prod_memb y (prod_dfs A B x) = ((x, y) \<in> (succsr (prod_succs A B))\<^sup>*)" by (unfold prod_dfs_def) (rule dfs_eq_rtrancl) lemma prod_dfs_bij: assumes x: "prod_is_node A B x" shows "(fst (prod_dfs A B x) ! i ! j = Some k \<and> dfa_is_node A i \<and> dfa_is_node B j) = (k < length (snd (prod_dfs A B x)) \<and> (snd (prod_dfs A B x) ! k = (i, j)))" proof - from x have "list_all (prod_is_node A B) [x]" by simp with empt_invariant have "(fst (dfs (prod_empt A B) [x]) ! i ! j = Some k \<and> dfa_is_node A i \<and> dfa_is_node B j) = (k < length (snd (dfs (prod_empt A B) [x])) \<and> (snd (dfs (prod_empt A B) [x]) ! 
k = (i, j)))" proof (induct rule: dfs_invariant) case base show ?case by (auto simp add: prod_empt_def dfa_is_node_def) next case (step S y) obtain y1 y2 where y: "y = (y1, y2)" by (cases y) show ?case proof (cases "y1 = i") case True show ?thesis proof (cases "y2 = j") case True with step y \<open>y1 = i\<close> show ?thesis by (auto simp add: prod_ins_def prod_memb_def split_beta nth_append prod_invariant_def prod_is_node_def dfa_is_node_def) next case False with step y \<open>y1 = i\<close> show ?thesis by (auto simp add: prod_ins_def prod_memb_def split_beta nth_append prod_invariant_def prod_is_node_def dfa_is_node_def) qed next case False with step y show ?thesis by (auto simp add: prod_ins_def prod_memb_def split_beta nth_append) qed qed then show ?thesis by (simp add: prod_dfs_def) qed lemma prod_dfs_mono: assumes z: "prod_invariant A B z" and xs: "list_all (prod_is_node A B) xs" and H: "fst z ! i ! j = Some k" shows "fst (gen_dfs (prod_succs A B) prod_ins prod_memb z xs) ! i ! j = Some k" using z xs apply (rule dfs_invariant) apply (rule H) apply (simp add: prod_ins_def prod_memb_def split_paired_all prod_is_node_def prod_invariant_def) apply (case_tac "aa = i") apply (case_tac "ba = j") apply (simp add: dfa_is_node_def)+ done lemma prod_dfs_start: "\<lbrakk>dfa_is_node A i; dfa_is_node B j\<rbrakk> \<Longrightarrow> fst (prod_dfs A B (i, j)) ! i ! j = Some 0" apply (simp add: prod_dfs_def empt prod_is_node_def gen_dfs_simps) apply (rule prod_dfs_mono) apply (rule ins_invariant) apply (simp add: prod_is_node_def dfa_is_node_def) apply (rule empt_invariant) apply (rule empt) apply (simp add: prod_is_node_def) apply (rule succs_is_node) apply (simp add: prod_is_node_def) apply (simp add: prod_ins_def prod_empt_def dfa_is_node_def) done lemma prod_dfs_inj: assumes x: "prod_is_node A B x" and i1: "dfa_is_node A i1" and i2: "dfa_is_node B i2" and j1: "dfa_is_node A j1" and j2: "dfa_is_node B j2" and i: "fst (prod_dfs A B x) ! i1 ! i2 = Some k" and j: "fst (prod_dfs A B x) ! j1 ! j2 = Some k" shows "(i1, i2) = (j1, j2)" proof - from x i1 i2 i have "k < length (snd (prod_dfs A B x)) \<and> snd (prod_dfs A B x) ! k = (i1, i2)" by (simp add: prod_dfs_bij [symmetric]) moreover from x j1 j2 j have "k < length (snd (prod_dfs A B x)) \<and> snd (prod_dfs A B x) ! k = (j1, j2)" by (simp add: prod_dfs_bij [symmetric]) ultimately show ?thesis by simp qed lemma prod_dfs_statetrans: assumes bs: "length bs = n" and i: "dfa_is_node A i" and j: "dfa_is_node B j" and s1: "dfa_is_node A s1" and s2: "dfa_is_node B s2" and k: "fst (prod_dfs A B (s1, s2)) ! i ! j = Some k" obtains k' where "fst (prod_dfs A B (s1, s2)) ! dfa_trans A i bs ! dfa_trans B j bs = Some k'" and "dfa_is_node A (dfa_trans A i bs)" and "dfa_is_node B (dfa_trans B j bs)" and "k' < length (snd (prod_dfs A B (s1, s2)))" proof - from i well_formed1 bs have h_tr1: "bddh (length bs) (fst A ! i)" by (simp add: wf_dfa_def dfa_is_node_def list_all_iff) from j well_formed2 bs have h_tr2: "bddh (length bs) (fst B ! j)" by (simp add: wf_dfa_def dfa_is_node_def list_all_iff) from i j k have "prod_memb (i, j) (prod_dfs A B (s1, s2))" by (simp add: prod_memb_def split_beta) then have "((s1, s2), (i, j)) \<in> (succsr (prod_succs A B))\<^sup>*" using i j s1 s2 by (simp add: prod_dfs_eq_rtrancl prod_is_node_def) moreover from h_tr1 h_tr2 have "(bdd_lookup (fst A ! i) bs, bdd_lookup (fst B ! j) bs) = bdd_lookup (bdd_binop Pair (fst A ! i) (fst B ! j)) bs" by (simp add: bdd_lookup_binop) with i j h_tr1 h_tr2 have "((i, j), (bdd_lookup (fst A ! 
i) bs, bdd_lookup (fst B ! j) bs)) \<in> succsr (prod_succs A B)" by (auto simp add: succsr_def prod_succs_def add_leaves_bdd_lookup [of "length bs"] bddh_binop is_alph_def) ultimately have "((s1, s2), (bdd_lookup (fst A ! i) bs, bdd_lookup (fst B ! j) bs)) \<in> (succsr (prod_succs A B))\<^sup>*" .. moreover from well_formed1 well_formed2 bs i j have "prod_is_node A B (bdd_lookup (fst A ! i) bs, bdd_lookup (fst B ! j) bs)" by (auto simp: prod_is_node_def bdd_all_bdd_lookup is_alph_def dfa_trans_is_node dfa_trans_def[symmetric]) moreover from i well_formed1 bs have s_tr1: "dfa_is_node A (dfa_trans A i bs)" by (simp add: is_alph_def dfa_trans_is_node) moreover from j well_formed2 bs have s_tr2: "dfa_is_node B (dfa_trans B j bs)" by (simp add: is_alph_def dfa_trans_is_node) ultimately have "\<exists>k'. fst (prod_dfs A B (s1, s2)) ! dfa_trans A i bs ! dfa_trans B j bs = Some k'" using s1 s2 by (simp add: prod_dfs_eq_rtrancl [symmetric] prod_memb_def split_beta prod_is_node_def dfa_trans_def) then obtain k' where k': "fst (prod_dfs A B (s1, s2)) ! dfa_trans A i bs ! dfa_trans B j bs = Some k'" .. from k' s_tr1 s_tr2 s1 s2 have "k' < length (snd (prod_dfs A B (s1, s2))) \<and> snd (prod_dfs A B (s1, s2)) ! k' = (dfa_trans A i bs, dfa_trans B j bs)" by (simp add: prod_dfs_bij [symmetric] prod_is_node_def) then have "k' < length (snd (prod_dfs A B (s1, s2)))" by simp with k' s_tr1 s_tr2 show ?thesis .. qed lemma binop_wf_dfa: "wf_dfa (binop_dfa f A B) n" proof - let ?dfa = "binop_dfa f A B" from well_formed1 well_formed2 have is_node_s1_s2: "prod_is_node A B (0, 0)" by (simp add: prod_is_node_def wf_dfa_def dfa_is_node_def) let ?tr = "map (\<lambda>(i,j). bdd_binop (\<lambda>k l. the (fst (prod_dfs A B (0, 0)) ! k ! l)) (fst A ! i) (fst B ! j)) (snd (prod_dfs A B (0,0)))" { fix i j assume ij: "(i, j) \<in> set (snd (prod_dfs A B (0, 0)))" then obtain k where k: "k < length (snd (prod_dfs A B (0, 0)))" "snd (prod_dfs A B (0, 0)) ! k = (i, j)" by (auto simp add: in_set_conv_nth) from conjI [OF k] obtain ij_k: "fst (prod_dfs A B (0,0)) ! i ! j = Some k" and i: "dfa_is_node A i" and j: "dfa_is_node B j" by (simp add: prod_dfs_bij [OF is_node_s1_s2, symmetric]) from well_formed1 i have bddh_tr1: "bddh n (fst A ! i)" and less_tr1: "bdd_all (dfa_is_node A) (fst A ! i)" by (simp add: wf_dfa_def list_all_iff dfa_is_node_def)+ from well_formed2 j have bddh_tr2: "bddh n (fst B ! j)" and less_tr2: "bdd_all (dfa_is_node B) (fst B ! j)" by (simp add: wf_dfa_def list_all_iff dfa_is_node_def)+ from bddh_tr1 bddh_tr2 have 1: "bddh n (bdd_binop (\<lambda>k l. the (fst (prod_dfs A B (0, 0)) ! k ! l)) (fst A ! i) (fst B ! j))" by (simp add: bddh_binop) have "\<forall>bs. length bs = n \<longrightarrow> the (fst (prod_dfs A B (0, 0)) ! dfa_trans A i bs ! dfa_trans B j bs) < length (snd (prod_dfs A B (0, 0)))" proof (intro strip) fix bs assume bs: "length (bs::bool list) = n" moreover note i j moreover from well_formed1 well_formed2 have "dfa_is_node A 0" and "dfa_is_node B 0" by (simp add: dfa_is_node_def wf_dfa_def)+ moreover note ij_k ultimately obtain m where "fst (prod_dfs A B (0, 0)) ! dfa_trans A i bs ! dfa_trans B j bs = Some m" and "m < length (snd (prod_dfs A B (0, 0)))" by (rule prod_dfs_statetrans) then show "the (fst (prod_dfs A B (0,0)) ! dfa_trans A i bs ! dfa_trans B j bs) < length (snd (prod_dfs A B (0,0)))" by simp qed with bddh_tr1 bddh_tr2 have 2: "bdd_all (\<lambda>q. q < length (snd (prod_dfs A B (0, 0)))) (bdd_binop (\<lambda>k l. the (fst (prod_dfs A B (0,0)) ! k ! l)) (fst A ! i) (fst B ! 
j))" by (simp add: bddh_binop bdd_lookup_binop bdd_all_bdd_lookup_iff[of n _ "\<lambda>x. x < length (snd (prod_dfs A B (0,0)))"] dfa_trans_def) note this 1 } hence 1: "list_all (bddh n) ?tr" and 2: "list_all (bdd_all (\<lambda>q. q < length ?tr)) ?tr" by (auto simp: split_paired_all list_all_iff) from well_formed1 well_formed2 have 3: "fst (prod_dfs A B (0, 0)) ! 0 ! 0 = Some 0" by (simp add: wf_dfa_def dfa_is_node_def prod_dfs_start) from is_node_s1_s2 have "(fst (prod_dfs A B (0,0)) ! 0 ! 0 = Some 0 \<and> dfa_is_node A 0 \<and> dfa_is_node B 0) = (0 < length (snd (prod_dfs A B (0,0))) \<and> snd (prod_dfs A B (0,0)) ! 0 = (0,0))" by (rule prod_dfs_bij) with 3 well_formed1 well_formed2 have "0 < length (snd (prod_dfs A B (0,0)))" by (simp add: wf_dfa_def dfa_is_node_def) with 1 2 3 show "wf_dfa (binop_dfa f A B) n" by (simp add: binop_dfa_def wf_dfa_def split_beta dfa_is_node_def) qed theorem binop_dfa_reachable: assumes bss: "list_all (is_alph n) bss" shows "(\<exists>m. dfa_reach (binop_dfa f A B) 0 bss m \<and> fst (prod_dfs A B (0, 0)) ! s\<^sub>1 ! s\<^sub>2 = Some m \<and> dfa_is_node A s\<^sub>1 \<and> dfa_is_node B s\<^sub>2) = (dfa_reach A 0 bss s\<^sub>1 \<and> dfa_reach B 0 bss s\<^sub>2)" proof - let ?tr = "map (\<lambda>(i, j). bdd_binop (\<lambda>k l. the (fst (prod_dfs A B (0,0)) ! k ! l)) (fst A ! i) (fst B ! j)) (snd (prod_dfs A B (0,0)))" have T: "?tr = fst (binop_dfa f A B)" by (simp add: binop_dfa_def split_beta) from well_formed1 well_formed2 have is_node_s1_s2: "prod_is_node A B (0, 0)" by (simp add: prod_is_node_def wf_dfa_def dfa_is_node_def) from well_formed1 well_formed2 have s1: "dfa_is_node A 0" and s2: "dfa_is_node B 0" by (simp add: dfa_is_node_def wf_dfa_def)+ from s1 s2 have start: "fst (prod_dfs A B (0,0)) ! 0 ! 0 = Some 0" by (rule prod_dfs_start) show "(\<exists>m. dfa_reach (binop_dfa f A B) 0 bss m \<and> fst (prod_dfs A B (0, 0)) ! s\<^sub>1 ! s\<^sub>2 = Some m \<and> dfa_is_node A s\<^sub>1 \<and> dfa_is_node B s\<^sub>2) = (dfa_reach A 0 bss s\<^sub>1 \<and> dfa_reach B 0 bss s\<^sub>2)" (is "(\<exists>m. ?lhs1 m \<and> ?lhs2 m \<and> ?lhs3 \<and> ?lhs4) = ?rhs" is "?lhs = _") proof assume "\<exists>m. ?lhs1 m \<and> ?lhs2 m \<and> ?lhs3 \<and> ?lhs4" then obtain m where lhs: "?lhs1 m" "?lhs2 m" "?lhs3" "?lhs4" by auto from lhs bss show ?rhs proof (induct arbitrary: s\<^sub>1 s\<^sub>2) case Nil from is_node_s1_s2 s1 s2 \<open>dfa_is_node A s\<^sub>1\<close> \<open>dfa_is_node B s\<^sub>2\<close> have "(0, 0) = (s\<^sub>1, s\<^sub>2)" using start \<open>fst (prod_dfs A B (0,0)) ! s\<^sub>1 ! s\<^sub>2 = Some 0\<close> by (rule prod_dfs_inj) moreover have "dfa_reach A 0 [] 0" by (rule reach_nil) moreover have "dfa_reach B 0 [] 0" by (rule reach_nil) ultimately show ?case by simp next case (snoc j bss bs s\<^sub>1 s\<^sub>2) then have "length bs = n" by (simp add: is_alph_def) moreover from binop_wf_dfa have "dfa_is_node (binop_dfa f A B) 0" by (simp add: dfa_is_node_def wf_dfa_def) with snoc binop_wf_dfa [of f] have "dfa_is_node (binop_dfa f A B) j" by (simp add: dfa_reach_is_node) then have j: "j < length (snd (prod_dfs A B (0,0)))" by (simp add: binop_dfa_def dfa_is_node_def split_beta) with prod_dfs_bij [OF is_node_s1_s2, of "fst (snd (prod_dfs A B (0,0)) ! j)" "snd (snd (prod_dfs A B (0,0)) ! j)"] have j_tr1: "dfa_is_node A (fst (snd (prod_dfs A B (0,0)) ! j))" and j_tr2: "dfa_is_node B (snd (snd (prod_dfs A B (0,0)) ! j))" and Some_j: "fst (prod_dfs A B (0,0)) ! fst (snd (prod_dfs A B (0,0)) ! j) ! snd (snd (prod_dfs A B (0,0)) ! 
j) = Some j" by auto note j_tr1 j_tr2 s1 s2 Some_j ultimately obtain k where k: "fst (prod_dfs A B (0,0)) ! dfa_trans A (fst (snd (prod_dfs A B (0, 0)) ! j)) bs ! dfa_trans B (snd (snd (prod_dfs A B (0, 0)) ! j)) bs = Some k" and s_tr1': "dfa_is_node A (dfa_trans A (fst (snd (prod_dfs A B (0,0)) ! j)) bs)" and s_tr2': "dfa_is_node B (dfa_trans B (snd (snd (prod_dfs A B (0,0)) ! j)) bs)" by (rule prod_dfs_statetrans) from well_formed1 well_formed2 j_tr1 j_tr2 snoc have lh: "bddh (length bs) (fst A ! fst (snd (prod_dfs A B (0,0)) ! j))" and rh: "bddh (length bs) (fst B ! snd (snd (prod_dfs A B (0,0)) ! j))" by (auto simp: wf_dfa_def dfa_is_node_def list_all_iff is_alph_def) from snoc(3)[unfolded dfa_trans_def binop_dfa_def Let_def split_beta fst_conv nth_map[OF j] bdd_lookup_binop[OF lh,OF rh], folded dfa_trans_def] k have "fst (prod_dfs A B (0,0)) ! s\<^sub>1 ! s\<^sub>2 = Some k" by simp with is_node_s1_s2 \<open>dfa_is_node A s\<^sub>1\<close> \<open>dfa_is_node B s\<^sub>2\<close> s_tr1' s_tr2' have "(s\<^sub>1, s\<^sub>2) = (dfa_trans A (fst (snd (prod_dfs A B (0,0)) ! j)) bs, dfa_trans B (snd (snd (prod_dfs A B (0,0)) ! j)) bs)" using k by (rule prod_dfs_inj) moreover from snoc Some_j j_tr1 j_tr2 have "dfa_reach A 0 bss (fst (snd (prod_dfs A B (0,0)) ! j))" by simp hence "dfa_reach A 0 (bss @ [bs]) (dfa_trans A (fst (snd (prod_dfs A B (0,0)) ! j)) bs)" by (rule reach_snoc) moreover from snoc Some_j j_tr1 j_tr2 have "dfa_reach B 0 bss (snd (snd (prod_dfs A B (0,0)) ! j))" by simp hence "dfa_reach B 0 (bss @ [bs]) (dfa_trans B (snd (snd (prod_dfs A B (0,0)) ! j)) bs)" by (rule reach_snoc) ultimately show "dfa_reach A 0 (bss @ [bs]) s\<^sub>1 \<and> dfa_reach B 0 (bss @ [bs]) s\<^sub>2" by simp qed next assume ?rhs hence reach: "dfa_reach A 0 bss s\<^sub>1" "dfa_reach B 0 bss s\<^sub>2" by simp_all then show ?lhs using bss proof (induct arbitrary: s\<^sub>2) case Nil with start s1 s2 show ?case by (auto intro: reach_nil simp: reach_nil_iff) next case (snoc j bss bs s\<^sub>2) from snoc(3) obtain s\<^sub>2' where reach_s2': "dfa_reach B 0 bss s\<^sub>2'" and s2': "s\<^sub>2 = dfa_trans B s\<^sub>2' bs" by (auto simp: reach_snoc_iff) from snoc(2) [OF reach_s2'] snoc(4) obtain m where reach_m: "dfa_reach (binop_dfa f A B) 0 bss m" and m: "fst (prod_dfs A B (0,0)) ! j ! s\<^sub>2' = Some m" and j: "dfa_is_node A j" and s2'': "dfa_is_node B s\<^sub>2'" by auto from snoc have "list_all (is_alph n) bss" by simp with binop_wf_dfa reach_m dfa_startnode_is_node[OF binop_wf_dfa] have m_less: "dfa_is_node (binop_dfa f A B) m" by (rule dfa_reach_is_node) from is_node_s1_s2 m j s2'' have m': "(m < length (snd (prod_dfs A B (0,0))) \<and> snd (prod_dfs A B (0,0)) ! m = (j, s\<^sub>2'))" by (simp add: prod_dfs_bij [symmetric]) with j s2'' have "dfa_is_node A (fst (snd (prod_dfs A B (0,0)) ! m))" "dfa_is_node B (snd (snd (prod_dfs A B (0,0)) ! m))" by simp_all with well_formed1 well_formed2 snoc have bddh: "bddh (length bs) (fst A ! fst (snd (prod_dfs A B (0,0)) ! m))" "bddh (length bs) (fst B ! snd (snd (prod_dfs A B (0,0)) ! m))" by (simp add: wf_dfa_def is_alph_def dfa_is_node_def list_all_iff)+ from snoc have "length bs = n" by (simp add: is_alph_def) then obtain k where k: "fst (prod_dfs A B (0,0)) ! dfa_trans A j bs ! 
dfa_trans B s\<^sub>2' bs = Some k" and s_tr1: "dfa_is_node A (dfa_trans A j bs)" and s_tr2: "dfa_is_node B (dfa_trans B s\<^sub>2' bs)" using j s2'' s1 s2 m by (rule prod_dfs_statetrans) show ?case apply (rule exI) apply (simp add: s2') apply (intro conjI) apply (rule reach_snoc) apply (rule reach_m) apply (cut_tac m_less) apply (simp add: dfa_trans_def binop_dfa_def split_beta dfa_is_node_def) apply (simp add: bddh bdd_lookup_binop split_beta) apply (simp add: dfa_trans_def[symmetric] m' k) apply (rule s_tr1) apply (rule s_tr2) done qed qed qed lemma binop_dfa_steps: assumes X: "list_all (is_alph n) bs" shows "snd (binop_dfa f A B) ! dfa_steps (binop_dfa f A B) 0 bs = f (snd A ! dfa_steps A 0 bs) (snd B ! dfa_steps B 0 bs)" (is "?as3 ! dfa_steps ?A 0 bs = ?rhs") proof - note 2 = dfa_startnode_is_node[OF well_formed1] note 5 = dfa_startnode_is_node[OF well_formed2] note B = dfa_startnode_is_node[OF binop_wf_dfa] define tab where "tab = fst (prod_dfs A B (0,0))" define ps where "ps = snd (prod_dfs A B (0,0))" from tab_def ps_def have prod: "prod_dfs A B (0,0) = (tab, ps)" by simp define s1 where "s1 = dfa_steps A 0 bs" define s2 where "s2 = dfa_steps B 0 bs" with s1_def have "dfa_reach A 0 bs s1" and "dfa_reach B 0 bs s2" by (simp add: reach_def)+ with X have "\<exists>m. dfa_reach ?A 0 bs m \<and> fst (prod_dfs A B (0, 0)) ! s1 ! s2 = Some m \<and> dfa_is_node A s1 \<and> dfa_is_node B s2" by (simp add: binop_dfa_reachable) with tab_def have "\<exists>m. dfa_reach ?A 0 bs m \<and> tab ! s1 ! s2 = Some m \<and> dfa_is_node A s1 \<and> dfa_is_node B s2" by simp then obtain m where R: "dfa_reach ?A 0 bs m" and M: "tab ! s1 ! s2 = Some m" and s1: "dfa_is_node A s1" and s2: "dfa_is_node B s2" by blast hence M': "m = dfa_steps ?A 0 bs" by (simp add: reach_def) from B X R binop_wf_dfa [of f] have mL: "dfa_is_node ?A m" by (simp add: dfa_reach_is_node) from 2 5 M s1 s2 have bij: "m < length (snd (prod_dfs A B (0, 0))) \<and> snd (prod_dfs A B (0, 0)) ! m = (s1, s2)" unfolding tab_def by (simp add: prod_dfs_bij[symmetric] prod_is_node_def) with mL have "snd (binop_dfa f A B) ! m = f (snd A ! s1) (snd B ! s2)" by (simp add: binop_dfa_def split_beta dfa_is_node_def) with M' s1_def s2_def show "snd ?A ! dfa_steps ?A 0 bs = f (snd A ! dfa_steps A 0 bs) (snd B ! 
dfa_steps B 0 bs)" by simp qed end lemma binop_wf_dfa: assumes A: "wf_dfa A n" and B: "wf_dfa B n" shows "wf_dfa (binop_dfa f A B) n" proof - from A B interpret prod_DFS A B n by unfold_locales show ?thesis by (rule binop_wf_dfa) qed theorem binop_dfa_accepts: assumes A: "wf_dfa A n" and B: "wf_dfa B n" and X: "list_all (is_alph n) bss" shows "dfa_accepts (binop_dfa f A B) bss = f (dfa_accepts A bss) (dfa_accepts B bss)" proof - from A B interpret prod_DFS A B n by unfold_locales from X show ?thesis by (simp add: accepts_def dfa_accepting_def binop_dfa_steps) qed definition and_dfa :: "dfa \<Rightarrow> dfa \<Rightarrow> dfa" where "and_dfa = binop_dfa (\<and>)" lemma and_wf_dfa: assumes "wf_dfa M n" and "wf_dfa N n" shows "wf_dfa (and_dfa M N) n" using assms by (simp add: and_dfa_def binop_wf_dfa) lemma and_dfa_accepts: assumes "wf_dfa M n" and "wf_dfa N n" and "list_all (is_alph n) bs" shows "dfa_accepts (and_dfa M N) bs = (dfa_accepts M bs \<and> dfa_accepts N bs)" using assms by (simp add: binop_dfa_accepts and_dfa_def) definition or_dfa :: "dfa \<Rightarrow> dfa \<Rightarrow> dfa" where "or_dfa = binop_dfa (\<or>)" lemma or_wf_dfa: assumes "wf_dfa M n" and "wf_dfa N n" shows "wf_dfa (or_dfa M N) n" using assms by (simp add: or_dfa_def binop_wf_dfa) lemma or_dfa_accepts: assumes "wf_dfa M n" and "wf_dfa N n" and "list_all (is_alph n) bs" shows "dfa_accepts (or_dfa M N) bs = (dfa_accepts M bs \<or> dfa_accepts N bs)" using assms by (simp add: binop_dfa_accepts or_dfa_def) definition imp_dfa :: "dfa \<Rightarrow> dfa \<Rightarrow> dfa" where "imp_dfa = binop_dfa (\<longrightarrow>)" lemma imp_wf_dfa: assumes "wf_dfa M n" and "wf_dfa N n" shows "wf_dfa (imp_dfa M N) n" using assms by (simp add: binop_wf_dfa imp_dfa_def) lemma imp_dfa_accepts: assumes "wf_dfa M n" and "wf_dfa N n" and "list_all (is_alph n) bs" shows "dfa_accepts (imp_dfa M N) bs = (dfa_accepts M bs \<longrightarrow> dfa_accepts N bs)" using assms by (auto simp add: binop_dfa_accepts imp_dfa_def) subsection \<open>Transforming DFAs to NFAs\<close> definition nfa_of_dfa :: "dfa \<Rightarrow> nfa" where "nfa_of_dfa = (\<lambda>(bdd,as). (map (bdd_map (\<lambda>q. (replicate (length bdd) False)[q:=True])) bdd, as))" lemma dfa2wf_nfa: assumes "wf_dfa M n" shows "wf_nfa (nfa_of_dfa M) n" proof - have "\<And>a. dfa_is_node M a \<Longrightarrow> nfa_is_node (nfa_of_dfa M) ((replicate (length (fst M)) False)[a:=True])" by (simp add: dfa_is_node_def nfa_is_node_def nfa_of_dfa_def split_beta) hence "\<And>bdd. bdd_all (dfa_is_node M) bdd \<Longrightarrow> bdd_all (nfa_is_node (nfa_of_dfa M)) (bdd_map (\<lambda>q. (replicate (length (fst M)) False)[q:=True]) bdd)" by (simp add: bdd_all_bdd_map) with assms have "list_all (bdd_all (nfa_is_node (nfa_of_dfa M))) (fst (nfa_of_dfa M))" by (simp add: list_all_iff split_beta nfa_of_dfa_def wf_dfa_def) with assms show ?thesis by (simp add: wf_nfa_def wf_dfa_def nfa_of_dfa_def split_beta list_all_iff bddh_bdd_map) qed lemma replicate_upd_inj: "\<lbrakk>q < n; (replicate n False)[q:=True] = (replicate n False)[p:=True]\<rbrakk> \<Longrightarrow> (q = p)" (is "\<lbrakk>_ ;?lhs = ?rhs\<rbrakk> \<Longrightarrow> _") proof - assume q: "q < n" and r: "?lhs = ?rhs" { assume "p \<noteq> q" with q have "?lhs ! q = True" by simp moreover from \<open>p \<noteq> q\<close> q have "?rhs ! 
q = False" by simp ultimately have "?lhs \<noteq> ?rhs" by auto } with r show "q = p" by auto qed lemma nfa_of_dfa_reach': assumes V: "wf_dfa M l" and X: "list_all (is_alph l) bss" and N: "n1 = (replicate (length (fst M)) False)[q:=True]" and Q: "dfa_is_node M q" and R: "nfa_reach (nfa_of_dfa M) n1 bss n2" shows "\<exists>p. dfa_reach M q bss p \<and> n2 = (replicate (length (fst M)) False)[p:=True]" proof - from R V X N Q show ?thesis proof induct case Nil hence "dfa_reach M q [] q" by (simp add: reach_nil) with Nil show ?case by auto next case (snoc j bss bs) hence N1: "nfa_is_node (nfa_of_dfa M) n1" by (simp add: nfa_is_node_def nfa_of_dfa_def split_beta) from snoc have V2: "wf_nfa (nfa_of_dfa M) l" by (simp add: dfa2wf_nfa) from snoc have "\<exists>p. dfa_reach M q bss p \<and> j = (replicate (length (fst M)) False)[p := True]" by simp then obtain p where PR: "dfa_reach M q bss p" and J: "j = (replicate (length (fst M)) False)[p:=True]" by blast hence JL: "nfa_is_node (nfa_of_dfa M) j" by (simp add: nfa_is_node_def nfa_of_dfa_def split_beta) from snoc PR have PL: "dfa_is_node M p" by (simp add: dfa_reach_is_node) with snoc JL have PL': "p < length j" by (simp add: nfa_is_node_def dfa_is_node_def nfa_of_dfa_def split_beta) define m where "m = dfa_trans M p bs" with snoc PR have MR: "dfa_reach M q (bss @ [bs]) m" by (simp add: reach_snoc) with snoc have mL: "dfa_is_node M m" by (simp add: dfa_reach_is_node) from V2 JL snoc have "nfa_is_node (nfa_of_dfa M) (nfa_trans (nfa_of_dfa M) j bs)" by (simp add: nfa_trans_is_node) hence L: "length (nfa_trans (nfa_of_dfa M) j bs) = length (fst M)" by (simp add: nfa_is_node_def nfa_of_dfa_def split_beta) have "nfa_trans (nfa_of_dfa M) j bs = (replicate (length (fst M)) False)[m := True]" (is "?lhs = ?rhs") proof (simp add: list_eq_iff_nth_eq L, intro strip) fix i assume H: "i < length (fst M)" show "nfa_trans (nfa_of_dfa M) j bs ! i = (replicate (length (fst M)) False)[m := True] ! i" (is "?lhs = ?rhs") proof assume lhs: "?lhs" from V2 snoc have "wf_nfa (nfa_of_dfa M) (length bs)" by (simp add: is_alph_def) moreover note JL moreover from H have IL: "i < length (fst (nfa_of_dfa M))" by (simp add: nfa_of_dfa_def split_beta) moreover from \<open>?lhs\<close> have "bdd_lookup (subsetbdd (fst (nfa_of_dfa M)) j (nfa_emptybdd (length j))) bs ! i" by (simp add: nfa_trans_def) ultimately have "\<exists>x < length j. j ! x \<and> bdd_lookup (fst (nfa_of_dfa M) ! x) bs ! i" by (simp add: bdd_lookup_subsetbdd) then obtain x where xl: "x < length j" and xj: "j ! x" and xs: "bdd_lookup (fst (nfa_of_dfa M) ! x) bs ! i" by blast with snoc J PL' have "x = p" by (cases "p = x") simp+ with xs PL snoc(3,4) m_def show "(replicate (length (fst M)) False)[m := True] ! i" by (simp add: nfa_of_dfa_def split_beta dfa_trans_def dfa_is_node_def wf_dfa_def is_alph_def bdd_map_bdd_lookup list_all_iff) next assume rhs: "?rhs" with H mL have "m = i" by (cases "m = i") (simp add: dfa_is_node_def)+ from PL snoc(3,4) m_def \<open>m = i\<close> H have "bdd_lookup (fst (nfa_of_dfa M) ! p) bs ! i" by (simp add: nfa_of_dfa_def split_beta dfa_is_node_def wf_dfa_def is_alph_def list_all_iff bdd_map_bdd_lookup dfa_trans_def) with PL' J have E: "\<exists>p < length j. j ! p \<and> bdd_lookup( fst (nfa_of_dfa M) ! p) bs ! 
i" by auto from snoc(4) V2 have V': "wf_nfa (nfa_of_dfa M) (length bs)" by (simp add: is_alph_def) from H have H': "i < length (fst (nfa_of_dfa M))" by (simp add: nfa_of_dfa_def split_beta) from H' V' E JL have "bdd_lookup (subsetbdd (fst (nfa_of_dfa M)) j (nfa_emptybdd (length j))) bs ! i" by (simp add: bdd_lookup_subsetbdd) thus "?lhs" by (simp add: nfa_trans_def) qed qed with MR show ?case by auto qed qed lemma nfa_of_dfa_reach: assumes V: "wf_dfa M l" and X: "list_all (is_alph l) bss" and N1: "n1 = (replicate (length (fst M)) False)[q:=True]" and N2: "n2 = (replicate (length (fst M)) False)[p:=True]" and Q: "dfa_is_node M q" shows "nfa_reach (nfa_of_dfa M) n1 bss n2 = dfa_reach M q bss p" proof assume "nfa_reach (nfa_of_dfa M) n1 bss n2" with assms have "\<exists>p. dfa_reach M q bss p \<and> n2 = (replicate (length (fst M)) False)[p := True]" by (simp add: nfa_of_dfa_reach') then obtain p' where R: "dfa_reach M q bss p'" and N2': "n2 = (replicate (length (fst M)) False)[p' := True]" by blast from V R Q X have "dfa_is_node M p'" by (simp add: dfa_reach_is_node) with N2 N2' have "p' = p" by (simp add: dfa_is_node_def replicate_upd_inj) with R show "dfa_reach M q bss p" by simp next assume H: "dfa_reach M q bss p" define n2' where "n2' = nfa_steps (nfa_of_dfa M) n1 bss" hence R': "nfa_reach (nfa_of_dfa M) n1 bss n2'" by (simp add: reach_def) with assms have "\<exists>p. dfa_reach M q bss p \<and> n2' = (replicate (length (fst M)) False)[p := True]" by (simp add: nfa_of_dfa_reach') then obtain p' where R: "dfa_reach M q bss p'" and N2': "n2' = (replicate (length (fst M)) False)[p' := True]" by blast with H have "p = p'" by (simp add: reach_inj) with N2' N2 have "n2 = n2'" by simp with R' show "nfa_reach (nfa_of_dfa M) n1 bss n2" by simp qed lemma nfa_accepting_replicate: assumes "q < length (fst N)" and "length (snd N) = length (fst N)" shows "nfa_accepting N ((replicate (length (fst N)) False)[q:=True]) = snd N ! q" proof - from assms have "set_of_bv ((replicate (length (fst N)) False)[q:=True]) = {q}" proof (auto simp: set_of_bv_def) fix x assume "x < length (fst N)" and "(replicate (length (fst N)) False)[q := True] ! x" with assms show "x = q" by (cases "x = q") simp+ qed hence "nfa_accepting N ((replicate (length (fst N)) False)[q:=True]) = (set_of_bv (snd N) \<inter> {q} \<noteq> {})" by (simp add: nfa_accepting_set_of_bv) also have "\<dots> = (q \<in> set_of_bv (snd N))" by auto also from assms have "\<dots> = snd N ! q" by (auto simp: set_of_bv_def) finally show ?thesis . 
qed lemma nfa_of_dfa_accepts: assumes V: "wf_dfa A n" and X: "list_all (is_alph n) bss" shows "nfa_accepts (nfa_of_dfa A) bss = dfa_accepts A bss" proof - from V have Q: "dfa_is_node A 0" by (simp add: dfa_startnode_is_node) have S: "nfa_startnode (nfa_of_dfa A) = (replicate (length (fst A)) False)[0:= True]" by (simp add: nfa_startnode_def nfa_of_dfa_def split_beta) define p where "p = dfa_steps A 0 bss" define n2 where "n2 = (replicate (length (fst A)) False)[p := True]" from p_def have PR: "dfa_reach A 0 bss p" by (simp add: reach_def) with p_def n2_def Q S X V have "nfa_reach (nfa_of_dfa A) (nfa_startnode (nfa_of_dfa A)) bss n2" by (simp add: nfa_of_dfa_reach) hence N2: "n2 = nfa_steps (nfa_of_dfa A) (nfa_startnode (nfa_of_dfa A)) bss" by (simp add: reach_def) from PR Q X V have "dfa_is_node A p" by (simp add: dfa_reach_is_node) hence "p < length (fst (nfa_of_dfa A))" by (simp add: dfa_is_node_def nfa_of_dfa_def split_beta) moreover from dfa2wf_nfa[OF V] have "length (snd (nfa_of_dfa A)) = length (fst (nfa_of_dfa A))" by (auto simp add: wf_nfa_def) moreover from n2_def have "n2 = (replicate (length (fst (nfa_of_dfa A))) False)[p := True]" by (simp add: nfa_of_dfa_def split_beta) ultimately have "nfa_accepting (nfa_of_dfa A) n2 = snd (nfa_of_dfa A) ! p" by (simp add: nfa_accepting_replicate) with N2 p_def show ?thesis by (simp add: accepts_def accepts_def dfa_accepting_def nfa_of_dfa_def split_beta) qed subsection \<open>Transforming NFAs to DFAs\<close> fun bddinsert :: "'a bdd \<Rightarrow> bool list \<Rightarrow> 'a \<Rightarrow> 'a bdd" where "bddinsert (Leaf a) [] x = Leaf x" | "bddinsert (Leaf a) (w#ws) x = (if w then Branch (Leaf a) (bddinsert (Leaf a) ws x) else Branch (bddinsert (Leaf a) ws x) (Leaf a))" | "bddinsert (Branch l r) (w#ws) x = (if w then Branch l (bddinsert r ws x) else Branch (bddinsert l ws x) r)" lemma bddh_bddinsert: assumes "bddh x b" and "length w \<ge> x" shows "bddh (length w) (bddinsert b w y)" using assms proof (induct b w y arbitrary: x rule: bddinsert.induct) case (2 aa ww wss yy xaa) have "bddh 0 (Leaf aa) \<and> 0 \<le> length wss" by simp with 2(1) 2(2) have "bddh (length wss) (bddinsert (Leaf aa) wss yy)" by (cases ww) blast+ with 2 show ?case by simp next case (3 ll rr ww wss yy xx) from 3(3) obtain y where Y: "Suc y = xx" by (cases xx) simp+ with 3 have 1: "bddh y rr \<and> bddh y ll \<and> y \<le> length wss" by auto show ?case proof (cases ww) case True with 1 3(1) have IV: "bddh (length wss) (bddinsert rr wss yy)" by blast with Y 3 have "y \<le> length wss" and "bddh y ll" by auto hence "bddh (length wss) ll" by (rule bddh_ge) with IV True show ?thesis by simp next case False with 1 3(2) have IV: "bddh (length wss) (bddinsert ll wss yy)" by blast with Y 3 have "y \<le> length wss" and "bddh y rr" by auto hence "bddh (length wss) rr" by (rule bddh_ge) with IV False show ?thesis by simp qed qed simp+ lemma bdd_lookup_bddinsert: assumes "bddh (length w) bd" and "length w = length v" shows "bdd_lookup (bddinsert bd w y) v = (if w = v then y else bdd_lookup bd v)" using assms proof (induct bd w y arbitrary: v rule: bddinsert.induct) case (2 aa ww wss xx vv) hence "\<exists>v vs. 
vv = v # vs" by (cases vv) simp+ then obtain v vs where V: "vv = v # vs" by blast with 2 have "length wss = length vs" by simp with 2 have IV: "bdd_lookup (bddinsert (Leaf aa) wss xx) vs = (if wss = vs then xx else bdd_lookup (Leaf aa) vs)" by (cases ww) simp+ have "bdd_lookup (bddinsert (Leaf aa) (ww # wss) xx) vv = bdd_lookup (if ww then (Branch (Leaf aa) (bddinsert (Leaf aa) wss xx)) else Branch (bddinsert (Leaf aa) wss xx) (Leaf aa)) vv" by simp also have "\<dots> = (if ww then bdd_lookup (Branch (Leaf aa) (bddinsert (Leaf aa) wss xx)) vv else bdd_lookup (Branch (bddinsert (Leaf aa) wss xx) (Leaf aa)) vv)" by simp also from V IV have "\<dots> = (if ww # wss = v # vs then bdd_lookup (bddinsert (Leaf aa) wss xx) vs else bdd_lookup (Leaf aa) vs)" by (cases ww) auto also from V IV have "\<dots> = (if ww # wss = vv then xx else bdd_lookup (Leaf aa) vs)" by auto finally show ?case by simp next case (3 ll rr ww wss xx vv) hence "\<exists>v vs. vv = v # vs" by (cases vv) simp+ then obtain v vs where V: "vv = v # vs" by blast show ?case proof (cases ww) case True with 3 V have IV: "bdd_lookup (bddinsert rr wss xx) vs = (if wss = vs then xx else bdd_lookup rr vs)" by simp with True 3 V show ?thesis by auto next case False with 3 V have IV: "bdd_lookup (bddinsert ll wss xx) vs = (if wss = vs then xx else bdd_lookup ll vs)" by simp with False 3 V show ?thesis by auto qed qed simp+ definition subset_succs :: "nfa \<Rightarrow> bool list \<Rightarrow> bool list list" where "subset_succs A qs = add_leaves (subsetbdd (fst A) qs (nfa_emptybdd (length qs))) []" definition subset_invariant :: "nfa \<Rightarrow> nat option bdd \<times> bool list list \<Rightarrow> bool" where "subset_invariant A = (\<lambda>(bdd, qss). bddh (length (fst A)) bdd)" definition "subset_ins qs = (\<lambda>(bdd, qss). (bddinsert bdd qs (Some (length qss)), qss @ [qs]))" definition subset_memb :: "bool list \<Rightarrow> nat option bdd \<times> bool list list \<Rightarrow> bool" where "subset_memb qs = (\<lambda>(bdd, qss). bdd_lookup bdd qs \<noteq> None)" definition subset_empt :: "nat option bdd \<times> bool list list" where "subset_empt = (Leaf None, [])" definition subset_dfs :: "nfa \<Rightarrow> bool list \<Rightarrow> nat option bdd \<times> bool list list" where "subset_dfs A x = gen_dfs (subset_succs A) subset_ins subset_memb subset_empt [x]" definition det_nfa :: "nfa \<Rightarrow> dfa" where "det_nfa A = (let (bdd, qss) = subset_dfs A (nfa_startnode A) in (map (\<lambda>qs. bdd_map (\<lambda>qs. the (bdd_lookup bdd qs)) (subsetbdd (fst A) qs (nfa_emptybdd (length qs)))) qss, map (\<lambda>qs. nfa_accepting A qs) qss))" locale subset_DFS = fixes A n assumes well_formed: "wf_nfa A n" lemma finite_list: "finite {xs::('a::finite) list. length xs = k}" apply (induct k) apply simp apply (subgoal_tac "{xs::('a::finite) list. length xs = Suc k} = (\<Union>x. Cons x ` {xs. 
length xs = k})") apply auto apply (case_tac x) apply auto done sublocale subset_DFS < DFS "subset_succs A" "nfa_is_node A" "subset_invariant A" subset_ins subset_memb subset_empt apply (unfold_locales) apply (simp add: nfa_is_node_def subset_invariant_def subset_memb_def subset_ins_def bdd_lookup_bddinsert split_beta) apply (simp add: nfa_is_node_def subset_memb_def subset_empt_def) apply (insert well_formed)[] apply (simp add: subset_succs_def add_leaves_bdd_all_eq bdd_all_is_node_subsetbdd wf_nfa_def) apply (simp add: subset_invariant_def subset_empt_def) apply (simp add: nfa_is_node_def subset_invariant_def subset_memb_def subset_ins_def split_paired_all) apply (subgoal_tac "length (fst A) = length x") apply (auto simp: bddh_bddinsert) apply (simp add: nfa_is_node_def) apply (rule finite_list) done context subset_DFS begin lemmas dfs_eq_rtrancl[folded subset_dfs_def] = dfs_eq_rtrancl lemma subset_dfs_bij: assumes H1: "nfa_is_node A q" and H2: "nfa_is_node A q0" shows "(bdd_lookup (fst (subset_dfs A q0)) q = Some v) = (v < length (snd (subset_dfs A q0)) \<and> (snd (subset_dfs A q0)) ! v = q)" proof - from assms have "list_all (nfa_is_node A) [q0]" by simp with empt_invariant show ?thesis using H1 unfolding subset_dfs_def proof (induct arbitrary: v q rule: dfs_invariant) case (step S x vv qq) obtain bd1 l1 where S: "S = (bd1, l1)" by (cases S) blast+ { assume "x \<in> set l1" hence "list_ex (\<lambda>l. l = x) l1" by (simp add: list_ex_iff) hence "\<exists>i < length l1. l1 ! i = x" by (simp add: list_ex_length) then obtain i where "i < length l1 \<and> l1 ! i = x" by blast with step S have "bdd_lookup bd1 x = Some i" by (simp add: nfa_is_node_def) with step S have "False" by (simp add: subset_memb_def) } hence X: "\<forall>i < length l1. l1 ! i \<noteq> x" by auto obtain bd2 l2 where S2: "subset_ins x S = (bd2, l2)" by (cases "subset_ins x S") blast+ with S have SS: "bd2 = bddinsert bd1 x (Some (length l1))" "l2 = l1 @ [x]" by (simp add: subset_ins_def)+ from step S H1 have "bdd_lookup (bddinsert bd1 x (Some (length l1))) qq = (if x = qq then Some (length l1) else bdd_lookup bd1 qq)" by (simp add: bdd_lookup_bddinsert subset_invariant_def nfa_is_node_def) with SS have "(bdd_lookup bd2 qq = Some vv) = (if x = qq then length l1 = vv else bdd_lookup bd1 qq = Some vv)" by simp also have "\<dots> = (x = qq \<and> length l1 = vv \<or> x \<noteq> qq \<and> bdd_lookup bd1 qq = Some vv)" by auto also have "\<dots> = (vv < length l2 \<and> l2 ! vv = qq)" proof (cases "x = qq") case True hence "(x = qq \<and> length l1 = vv \<or> x \<noteq> qq \<and> bdd_lookup bd1 qq = Some vv) = (x = qq \<and> length l1 = vv)" by simp also have "\<dots> = (vv < length l2 \<and> l2 ! vv = qq)" proof assume H: "vv < length l2 \<and> l2 ! vv = qq" show "x = qq \<and> length l1 = vv" proof (cases "vv = length l1") case False with H SS have "vv < length l1" by simp with SS have "l2 ! vv = l1 ! vv" by (simp add: nth_append) with False H SS \<open>x = qq\<close> have "vv < length l1 \<and> l1 ! vv = x" by auto with X show ?thesis by auto qed (simp add: True) qed (auto simp: SS) finally show ?thesis . next case False hence "(x = qq \<and> length l1 = vv \<or> x \<noteq> qq \<and> bdd_lookup bd1 qq = Some vv) = (x \<noteq> qq \<and> bdd_lookup bd1 qq = Some vv)" by simp also from step(4,5) S \<open>x\<noteq>qq\<close> have "\<dots> = (vv < length l1 \<and> l1 ! vv = qq)" by simp also from SS \<open>x\<noteq>qq\<close> have "\<dots> = (vv < length l2 \<and> l2 ! vv = qq)" by (simp add: nth_append) finally show ?thesis . 
qed finally show ?case by (simp add: S2) qed (simp add: subset_empt_def) qed lemma subset_dfs_start: assumes H: "nfa_is_node A q0" shows "bdd_lookup (fst (subset_dfs A q0)) q0 = Some 0" proof - obtain bd l where S: "subset_ins q0 subset_empt = (bd, l)" by (cases "subset_ins q0 subset_empt") blast+ from H have "\<not> subset_memb q0 subset_empt" by (simp add: empt) with H empt_invariant have I: "subset_invariant A (subset_ins q0 subset_empt)" by (simp add: ins_invariant) from H have "list_all (nfa_is_node A) (subset_succs A q0)" by (simp add: succs_is_node) with I have "bdd_lookup (fst (gen_dfs (subset_succs A) subset_ins subset_memb (subset_ins q0 subset_empt) (subset_succs A q0))) q0 = Some 0" proof (induct rule: dfs_invariant) case base thus ?case unfolding subset_ins_def subset_empt_def by (induct q0) simp+ next case (step S x) hence Q: "subset_memb q0 S" by (simp add: subset_memb_def split_beta) with step have "q0 \<noteq> x" by auto from step have I: "bddh (length (fst A)) (fst S)" by (simp add: subset_invariant_def split_beta) with H step \<open>q0\<noteq>x\<close> have V: "\<And>v. bdd_lookup (bddinsert (fst S) x v) q0 = bdd_lookup (fst S) q0" by (simp add: bdd_lookup_bddinsert nfa_is_node_def) with step show "bdd_lookup (fst (subset_ins x S)) q0 = Some 0" by (auto simp: subset_ins_def split_beta) qed thus ?thesis unfolding subset_dfs_def by (auto simp: nfa_is_node_def gen_dfs_simps subset_memb_def subset_empt_def) qed lemma subset_dfs_is_node: assumes "nfa_is_node A q0" shows "list_all (nfa_is_node A) (snd (subset_dfs A q0))" proof - from assms have "list_all (nfa_is_node A) [q0]" by simp with empt_invariant show ?thesis unfolding subset_dfs_def proof (induct rule: dfs_invariant) case base thus ?case by (simp add: subset_empt_def) next case (step S x) thus ?case by (simp add: subset_ins_def split_beta) qed qed lemma det_wf_nfa: shows "wf_dfa (det_nfa A) n" proof - obtain bt ls where BT: "subset_dfs A (nfa_startnode A) = (bt, ls)" by (cases "subset_dfs A (nfa_startnode A)") auto note Q = nfa_startnode_is_node[OF well_formed] from Q have N:"list_all (nfa_is_node A) (snd (subset_dfs A (nfa_startnode A)))" by (simp add: subset_dfs_is_node) with BT have L: "list_all (nfa_is_node A) ls" by simp have D: "det_nfa A = (map (\<lambda>q. bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) q (nfa_emptybdd (length q)))) ls, map (\<lambda>q. nfa_accepting A q) ls)" (is "_ = (?bdt, ?atbl)") unfolding det_nfa_def by (simp add: BT) from well_formed L have "list_all (\<lambda>q. bddh n (subsetbdd (fst A) q (nfa_emptybdd (length q)))) ls" by (induct ls) (simp add: bddh_subsetbdd wf_nfa_def nfa_emptybdd_def)+ hence "list_all (\<lambda>q. bddh n (bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) q (nfa_emptybdd (length q))))) ls" by (simp add: bddh_bdd_map) hence A: "list_all (bddh n) ?bdt" by (simp add: list_all_iff) { fix q assume "\<exists>i < length ls. ls ! i = q" then obtain i where len_i: "i < length ls" and i: "q = ls ! i" by blast from len_i i L have Q': "nfa_is_node A q" by (simp add: list_all_iff) then have "(bdd_lookup (fst (subset_dfs A (nfa_startnode A))) q = Some i) = (i < length (snd (subset_dfs A (nfa_startnode A))) \<and> snd (subset_dfs A (nfa_startnode A)) ! 
i = q)" using Q by (rule subset_dfs_bij) with BT len_i i have "bdd_lookup bt q = Some i" by simp with BT have "subset_memb q (subset_dfs A (nfa_startnode A))" by (simp add: subset_memb_def) with Q' Q have TR: "(nfa_startnode A,q) \<in> (succsr (subset_succs A))\<^sup>*" by (simp add: dfs_eq_rtrancl) { fix p assume P: "p \<in> set (subset_succs A q)" with TR have 3: "(nfa_startnode A,p) \<in> (succsr (subset_succs A))\<^sup>*" by (simp add: succsr_def rtrancl_into_rtrancl) from Q' have "list_all (nfa_is_node A) (subset_succs A q)" by (rule succs_is_node) with P have 4: "nfa_is_node A p" by (simp add: list_all_iff) with Q 3 have "subset_memb p (subset_dfs A (nfa_startnode A))" by (simp add: dfs_eq_rtrancl) with BT have "bdd_lookup bt p \<noteq> None" by (simp add: subset_memb_def) with BT obtain j where j: "bdd_lookup (fst (subset_dfs A (nfa_startnode A))) p = Some j" by (cases "bdd_lookup bt p") simp+ from 4 Q j have "j < length (snd (subset_dfs A (nfa_startnode A))) \<and> (snd (subset_dfs A (nfa_startnode A))) ! j = p" by (auto simp add: subset_dfs_bij) with j BT 4 have "\<exists>j. bdd_lookup bt p = Some j \<and> j < length ls" by auto } hence "\<forall>p \<in> set (subset_succs A q). \<exists>j. bdd_lookup bt p = Some j \<and> j < length ls" by auto hence "list_all (\<lambda>p. \<exists>j. bdd_lookup bt p = Some j \<and> j < length ls) (add_leaves (subsetbdd (fst A) q (nfa_emptybdd (length q))) [])" by (simp add: list_all_iff subset_succs_def) hence "bdd_all (\<lambda>p. \<exists>j. bdd_lookup bt p = Some j \<and> j < length ls) (subsetbdd (fst A) q (nfa_emptybdd (length q)))" by (simp add: add_leaves_bdd_all_eq) hence "bdd_all (\<lambda>l. l < length ls) (bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) q (nfa_emptybdd (length q))))" by (induct ("subsetbdd (fst A) q (nfa_emptybdd (length q))")) auto } then have "\<forall>x \<in> set ls. bdd_all (\<lambda>l. l < length ls) (bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) x (nfa_emptybdd (length x))))" by (simp add: in_set_conv_nth) hence "list_all (\<lambda>x. bdd_all (\<lambda>l. l < length ls) (bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) x (nfa_emptybdd (length x))))) ls" by (simp add: list_all_iff) hence B: "list_all (bdd_all (\<lambda>l. l < length ls)) (map (\<lambda>x. bdd_map (\<lambda>q. the (bdd_lookup bt q)) (subsetbdd (fst A) x (nfa_emptybdd (length x)))) ls)" by (simp add: list_all_iff) from well_formed have "bdd_lookup (fst (subset_dfs A (nfa_startnode A))) (nfa_startnode A) = Some 0" by (simp add: subset_dfs_start nfa_startnode_is_node) with well_formed have "0 < length (snd (subset_dfs A (nfa_startnode A)))" by (simp add: subset_dfs_bij nfa_startnode_is_node) with A B D BT show ?thesis by (simp add: wf_dfa_def det_nfa_def dfa_is_node_def) qed lemma nfa_reach_subset_memb: assumes R: "nfa_reach A q0 bss q" and Q0: "nfa_is_node A q0" and X: "list_all (is_alph n) bss" shows "subset_memb q (subset_dfs A q0)" proof - from assms well_formed have Q: "nfa_is_node A q" by (simp add: nfa_reach_is_node) from R X have "\<exists>bs. nfa_reach A q0 bs q \<and> list_all (is_alph n) bs" by auto with Q0 have "(q0,q) \<in> (succsr (subset_succs A))\<^sup>*" by (simp add: nfa_reach_rtrancl) with Q0 Q show ?thesis by (simp add: dfs_eq_rtrancl) qed lemma det_nfa_reach': fixes bd :: "nat option bdd" and ls :: "bool list list" assumes "subset_dfs A (nfa_startnode A) = (bd, ls)" (is "?subset_dfs = _") and "\<exists>bs. 
nfa_reach A (nfa_startnode A) bs q1 \<and> list_all (is_alph n) bs" and "q1 = ls ! i" and "q2 = ls ! j" and "i < length ls" and "j < length ls" and "list_all (is_alph n) bss" shows "nfa_reach A q1 bss q2 = (dfa_reach (det_nfa A) i bss j \<and> nfa_is_node A q2)" (is "_ = (dfa_reach ?M i bss j \<and> _)") proof assume "nfa_reach A q1 bss q2" from this assms show "dfa_reach ?M i bss j \<and> nfa_is_node A q2" proof (induct arbitrary: j) case (Nil j) with well_formed have Q0: "nfa_is_node A (nfa_startnode A)" by (simp add: nfa_startnode_is_node) from Nil obtain bs where "nfa_reach A (nfa_startnode A) bs q1" and "list_all (is_alph n) bs" by blast with well_formed Q0 Nil have Q1: "nfa_is_node A q1" by (simp add: nfa_reach_is_node) with Q0 have "\<And>v. (bdd_lookup (fst (?subset_dfs)) q1 = Some v) = (v < length (snd (?subset_dfs)) \<and> snd (?subset_dfs) ! v = q1)" by (simp add: subset_dfs_bij) with Nil(1) have 1: "\<And>v. (bdd_lookup bd q1 = Some v) = (v < length ls \<and> ls ! v = q1)" by simp from Nil 1 have "bdd_lookup bd q1 = Some i" by simp moreover from Nil 1 have "bdd_lookup bd q1 = Some j" by simp ultimately have "i = j" by simp have "dfa_reach ?M i [] i" by (simp add: reach_nil) with \<open>i=j\<close> Q1 show ?case by simp next case (snoc p bss bs j) note S_len = nfa_startnode_is_node[OF well_formed] from snoc obtain bss' where BSS':"nfa_reach A (nfa_startnode A) bss' q1" and BSS'L: "list_all (is_alph n) bss'" by blast with well_formed S_len have Q_len: "nfa_is_node A q1" by (simp add: nfa_reach_is_node) with well_formed snoc have P_len: "nfa_is_node A p" by (simp add: nfa_reach_is_node) from BSS' snoc have "nfa_reach A (nfa_startnode A) (bss' @ bss) p" by (simp add: reach_trans) moreover note S_len moreover from snoc BSS'L have "list_all (is_alph n) (bss' @ bss)" by simp ultimately have "subset_memb p ?subset_dfs" by (rule nfa_reach_subset_memb) hence "bdd_lookup (fst ?subset_dfs) p \<noteq> None" by (simp add: subset_memb_def split_beta) then obtain v where P: "bdd_lookup (fst ?subset_dfs) p = Some v" by (cases "bdd_lookup (fst ?subset_dfs) p") simp+ with P_len S_len have "v < length (snd (?subset_dfs)) \<and> snd ?subset_dfs ! v = p" by (simp add: subset_dfs_bij) with snoc have V: "v < length ls \<and> ls ! v = p" by simp with snoc P_len have R: "dfa_reach ?M i bss v \<and> nfa_is_node A p" by simp from snoc have BS: "is_alph n bs" by simp with well_formed P_len have Z: "nfa_is_node A (nfa_trans A p bs)" by (simp add: nfa_trans_is_node) with snoc have N: "nfa_is_node A (ls ! j)" by simp from snoc have "j < length (snd ?subset_dfs) \<and> snd ?subset_dfs ! j = ls ! j" by simp with N S_len have "bdd_lookup (fst ?subset_dfs) (ls ! j) = Some j" by (simp add: subset_dfs_bij) with snoc have J: "bdd_lookup bd (ls ! j) = Some j" by simp from snoc have BD: "fst ?M = map (\<lambda>q. bdd_map (\<lambda>q. the (bdd_lookup bd q)) (subsetbdd (fst A) q (nfa_emptybdd (length q)))) ls" by (simp add: det_nfa_def) with V have "fst ?M ! v = bdd_map (\<lambda>q. the (bdd_lookup bd q)) (subsetbdd (fst A) p (nfa_emptybdd (length p)))" by simp with well_formed BS P_len have "bdd_lookup (fst ?M ! v) bs = the (bdd_lookup bd (bdd_lookup (subsetbdd (fst A) p (nfa_emptybdd (length p))) bs))" by (auto simp add: bdd_map_bdd_lookup bddh_subsetbdd wf_nfa_def is_alph_def nfa_emptybdd_def) also from snoc J have "\<dots> = j" by (simp add: nfa_trans_def) finally have JJ: "bdd_lookup (fst ?M ! v) bs = j" . 
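(* Descriptive note on JJ (an informal gloss, assuming the surrounding abbreviations ?M = det_nfa A
   and bd, ls as obtained from subset_dfs): looking up the bit column bs in the determinised
   transition BDD at state v yields exactly the index j under which the successor subset-state
   nfa_trans A p bs is stored in ls.  This is how det_nfa simulates one NFA step by one DFA step. *)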
from R BS JJ have RR: "dfa_reach ?M i (bss @ [bs]) j" by (auto simp add: reach_snoc dfa_trans_def[symmetric]) with Z show ?case by simp qed next assume "dfa_reach ?M i bss j \<and> nfa_is_node A q2" hence "dfa_reach ?M i bss j" and "nfa_is_node A q2" by simp+ from this assms show "nfa_reach A q1 bss q2" proof (induct arbitrary: q2) case (snoc j bss bs q2) define v where "v = bdd_lookup (fst ?M ! j) bs" define qq where "qq = nfa_trans A (ls ! j) bs" from well_formed have Q0: "nfa_is_node A (nfa_startnode A)" by (simp add: nfa_startnode_is_node) from snoc have L: "length (fst ?M) = length ls" by (simp add: det_nfa_def) with snoc have "dfa_is_node ?M i" by (simp add: dfa_is_node_def) moreover note \<open>dfa_reach ?M i bss j\<close> moreover from snoc have "wf_dfa ?M n" by (simp add: det_wf_nfa) moreover from snoc have "list_all (is_alph n) bss" by simp ultimately have "dfa_is_node ?M j" by (simp add: dfa_reach_is_node) with L have J_len: "j < length ls" by (simp add: dfa_is_node_def) from Q0 have "list_all (nfa_is_node A) (snd ?subset_dfs)" by (rule subset_dfs_is_node) with snoc J_len have J: "nfa_is_node A (ls ! j)" by (simp add: list_all_iff) moreover note snoc(4,5,6) refl[of "ls!j"] snoc(8) J_len moreover from snoc have "list_all (is_alph n) bss" by simp ultimately have R: "nfa_reach A q1 bss (ls ! j)" by (rule snoc(2)) from snoc obtain bs' where R': "nfa_reach A (nfa_startnode A) bs' q1" and BS': "list_all (is_alph n) bs'" by blast with R have lsj: "nfa_reach A (nfa_startnode A) (bs' @ bss) (ls ! j)" by (simp add: reach_trans) hence "nfa_reach A (nfa_startnode A) ((bs' @ bss) @ [bs]) qq" unfolding qq_def by (rule reach_snoc) with well_formed snoc(10) Q0 BS' have M: "subset_memb qq ?subset_dfs" and QQ_len: "nfa_is_node A qq" by (simp add: nfa_reach_subset_memb nfa_reach_is_node)+ with snoc(4) have QQ: "bdd_lookup bd qq \<noteq> None" by (simp add: subset_memb_def) from well_formed snoc J have H: "bddh (length bs) (subsetbdd (fst A) (ls ! j) (nfa_emptybdd (length (ls ! j))))" by (simp add: bddh_subsetbdd wf_nfa_def nfa_emptybdd_def is_alph_def) from v_def have "v = bdd_lookup (fst ?M ! j) bs" by simp also from snoc(4) have "\<dots> = bdd_lookup (map (\<lambda>q. bdd_map (\<lambda>q. the (bdd_lookup bd q)) (subsetbdd (fst A) q (nfa_emptybdd (length q)))) ls ! j) bs" by (simp add: det_nfa_def) also from J_len have "\<dots> = bdd_lookup (bdd_map (\<lambda>q. the (bdd_lookup bd q)) (subsetbdd (fst A) (ls ! j) (nfa_emptybdd (length (ls ! j))))) bs" by simp also from H have "\<dots> = the (bdd_lookup bd (bdd_lookup (subsetbdd (fst A) (ls ! j) (nfa_emptybdd (length (ls ! j)))) bs))" by (simp add: bdd_map_bdd_lookup) also from qq_def have "\<dots> = the (bdd_lookup bd qq)" by (simp add: nfa_trans_def) finally have "v = the (bdd_lookup bd qq)" . with QQ have QQ': "bdd_lookup bd qq = Some v" by (cases "bdd_lookup bd qq") simp+ with snoc(4) have "bdd_lookup (fst ?subset_dfs) qq = Some v" by simp with QQ_len Q0 have "v < length (snd ?subset_dfs) \<and> (snd ?subset_dfs) ! v = qq" by (simp add: subset_dfs_bij) with snoc v_def have Q2: "qq = q2" by (simp add: dfa_trans_def) with R qq_def show "nfa_reach A q1 (bss @ [bs]) q2" by (simp add: reach_snoc) qed (simp add: reach_nil) qed lemma det_nfa_reach: fixes bd :: "nat option bdd" and ls :: "bool list list" assumes S: "subset_dfs A (nfa_startnode A) = (bd, ls)" (is "?subset_dfs = _") and Q1: "q1 = ls ! 
j" and J: "j < length ls" and X: "list_all (is_alph n) bss" shows "nfa_reach A (nfa_startnode A) bss q1 = dfa_reach (det_nfa A) 0 bss j" proof - note SL = nfa_startnode_is_node[OF well_formed] have "nfa_reach A (nfa_startnode A) [] (nfa_startnode A)" by (rule reach_nil) hence 1: "\<exists>b. nfa_reach A (nfa_startnode A) b (nfa_startnode A) \<and> list_all (is_alph n) b" by auto from SL have "bdd_lookup (fst ?subset_dfs) (nfa_startnode A) = Some 0" by (simp add: subset_dfs_start) with SL have "0 < length (snd ?subset_dfs) \<and> snd ?subset_dfs ! 0 = nfa_startnode A" by (simp add: subset_dfs_bij) with S have 2: "0 < length ls \<and> ls ! 0 = nfa_startnode A" by simp from S 1 Q1 J 2 X have T: "nfa_reach A (nfa_startnode A) bss q1 = (dfa_reach (det_nfa A) 0 bss j \<and> nfa_is_node A q1)" by (simp only: det_nfa_reach') from SL have "list_all (nfa_is_node A) (snd ?subset_dfs)" by (simp add: subset_dfs_is_node) with Q1 J S have "nfa_is_node A q1" by (simp add: list_all_iff) with T show ?thesis by simp qed lemma det_nfa_accepts: assumes X: "list_all (is_alph n) w" shows "dfa_accepts (det_nfa A) w = nfa_accepts A w" proof - note SL = nfa_startnode_is_node[OF well_formed] let ?q = "nfa_startnode A" let ?subset_dfs = "subset_dfs A (nfa_startnode A)" define bd where "bd = fst ?subset_dfs" define ls where "ls = snd ?subset_dfs" with bd_def have BD: "?subset_dfs = (bd,ls)" by simp define p where "p = nfa_steps A (nfa_startnode A) w" with well_formed X SL have P: "nfa_is_node A p" by (simp add: nfa_steps_is_node) from p_def have R: "nfa_reach A ?q w p" by (simp add: reach_def) with assms have "\<exists>bs. nfa_reach A ?q bs p \<and> list_all (is_alph n) bs" by auto with SL have "(?q, p) \<in> (succsr (subset_succs A))\<^sup>*" by (simp add: nfa_reach_rtrancl) with SL P have "subset_memb p ?subset_dfs" by (simp add: dfs_eq_rtrancl) with BD have "bdd_lookup bd p \<noteq> None" by (simp add: subset_memb_def) then obtain k where K: "bdd_lookup bd p = Some k" by (cases "bdd_lookup bd p") simp+ with SL P have K_len: "k < length ls \<and> ls ! k = p" unfolding bd_def ls_def by (simp add: subset_dfs_bij) with BD X R have "dfa_reach (det_nfa A) 0 w k" by (blast dest: det_nfa_reach) hence "k = dfa_steps (det_nfa A) 0 w" by (simp add: reach_def) hence "dfa_accepts (det_nfa A) w = snd (det_nfa A) ! k" by (simp add: accepts_def dfa_accepting_def) also from ls_def have "\<dots> = map (nfa_accepting A) ls ! k" by (simp add: det_nfa_def split_beta) also from K_len p_def have "\<dots> = nfa_accepts A w" by (simp add: accepts_def) finally show "dfa_accepts (det_nfa A) w = nfa_accepts A w" . 
qed end lemma det_wf_nfa: assumes A: "wf_nfa A n" shows "wf_dfa (det_nfa A) n" proof - from A interpret subset_DFS A n by unfold_locales show ?thesis by (rule det_wf_nfa) qed lemma det_nfa_accepts: assumes A: "wf_nfa A n" and w: "list_all (is_alph n) bss" shows "dfa_accepts (det_nfa A) bss = nfa_accepts A bss" proof - from A interpret subset_DFS A n by unfold_locales from w show ?thesis by (rule det_nfa_accepts) qed subsection \<open>Quantifiers\<close> fun quantify_bdd :: "nat \<Rightarrow> bool list bdd \<Rightarrow> bool list bdd" where "quantify_bdd i (Leaf q) = Leaf q" | "quantify_bdd 0 (Branch l r) = (bdd_binop bv_or l r)" | "quantify_bdd (Suc i) (Branch l r) = Branch (quantify_bdd i l) (quantify_bdd i r)" lemma bddh_quantify_bdd: assumes "bddh (Suc n) bdd" and "v \<le> n" shows "bddh n (quantify_bdd v bdd)" using assms by (induct v bdd arbitrary: n rule: quantify_bdd.induct) (auto simp: bddh_binop split: nat.splits) lemma quantify_bdd_is_node: assumes "bdd_all (nfa_is_node N) bdd" shows "bdd_all (nfa_is_node N) (quantify_bdd v bdd)" using assms by (induct v bdd rule: quantify_bdd.induct) (simp add: bdd_all_bdd_binop[of "nfa_is_node N" _ "nfa_is_node N" _ "nfa_is_node N" bv_or, OF _ _ bv_or_is_node])+ definition quantify_nfa :: "nat \<Rightarrow> nfa \<Rightarrow> nfa" where "quantify_nfa i = (\<lambda>(bdds, as). (map (quantify_bdd i) bdds, as))" lemma quantify_nfa_well_formed_aut: assumes "wf_nfa N (Suc n)" and "v \<le> n" shows "wf_nfa (quantify_nfa v N) n" proof - from assms have 1: "list_all (bddh (Suc n)) (fst N)" and 2: "list_all (bdd_all (nfa_is_node N)) (fst N)" by (simp add: wf_nfa_def)+ from 1 assms have 3: "list_all (bddh n) (fst (quantify_nfa v N))" by (simp add: quantify_nfa_def bddh_quantify_bdd list_all_iff split_beta) from 2 have "list_all (bdd_all (nfa_is_node N)) (fst (quantify_nfa v N))" by (simp add: quantify_bdd_is_node list_all_iff split_beta quantify_nfa_def) hence "list_all (bdd_all (nfa_is_node (quantify_nfa v N))) (fst (quantify_nfa v N))" by (simp add: quantify_nfa_def split_beta nfa_is_node_def) with 3 assms show ?thesis by (simp add: wf_nfa_def quantify_nfa_def split_beta) qed fun insertl :: "nat \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where "insertl i a [] = [a]" | "insertl 0 a bs = a # bs" | "insertl (Suc i) a (b # bs) = b # (insertl i a bs)" lemma insertl_len: "length (insertl n x vs) = Suc (length vs)" by (induct n x vs rule: insertl.induct) simp+ lemma insertl_0_eq: "insertl 0 x xs = x # xs" by (cases xs) simp_all lemma bdd_lookup_quantify_bdd_set_of_bv: assumes "length w = n" and "bddh (Suc n) bdd" and "bdd_all (nfa_is_node N) bdd" and "v \<le> n" shows "set_of_bv (bdd_lookup (quantify_bdd v bdd) w) = (\<Union>b. set_of_bv (bdd_lookup bdd (insertl v b w)))" using assms proof (induct v bdd arbitrary: n w rule: quantify_bdd.induct) case (2 l r w) hence N: "nfa_is_node N (bdd_lookup l w)" "nfa_is_node N (bdd_lookup r w)" by (simp add: bdd_all_bdd_lookup)+ have "set_of_bv (bdd_lookup (quantify_bdd 0 (Branch l r)) w) = set_of_bv (bdd_lookup (bdd_binop bv_or l r) w)" by simp also from 2 have "\<dots> = set_of_bv (bv_or (bdd_lookup l w) (bdd_lookup r w))" by (simp add: bdd_lookup_binop) also from N have "\<dots> = set_of_bv (bdd_lookup l w) \<union> set_of_bv (bdd_lookup r w)" by (simp add: bv_or_set_of_bv) also have "\<dots> = set_of_bv (bdd_lookup (Branch l r) (insertl 0 False w)) \<union> set_of_bv (bdd_lookup (Branch l r) (insertl 0 True w))" by (cases w) simp+ also have "\<dots> = (\<Union>b \<in> {True, False}. 
set_of_bv (bdd_lookup (Branch l r) (insertl 0 b w)))" by auto also have "\<dots> = (\<Union>b. set_of_bv (bdd_lookup (Branch l r) (insertl 0 b w)))" by blast finally show ?case . next case (3 n l r k w) then obtain j where J: "k = Suc j" by (cases k) simp+ with 3 obtain a as where W: "w = a # as" by (cases w) auto with 3 J show ?case by (cases a) simp+ qed simp lemma subsetbdd_set_of_bv: assumes "wf_nfa N (length ws)" and "nfa_is_node N q" shows "set_of_bv (bdd_lookup (subsetbdd (fst N) q (nfa_emptybdd (length q))) ws) = (\<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (fst N ! i) ws))" (is "set_of_bv ?q = _") proof (simp only: set_eq_iff, rule allI) fix x :: nat from assms have "bdd_all (nfa_is_node N) (subsetbdd (fst N) q (nfa_emptybdd (length q)))" by (simp add: wf_nfa_def bdd_all_is_node_subsetbdd) with assms have "nfa_is_node N ?q" by (simp add: wf_nfa_def bdd_all_bdd_lookup bddh_subsetbdd nfa_emptybdd_def) hence L: "length ?q = length (fst N)" by (simp add: nfa_is_node_def) { fix i assume H: "i < length (fst N)" with assms have "nfa_is_node N (bdd_lookup (fst N ! i) ws)" by (simp add: wf_nfa_def list_all_iff bdd_all_bdd_lookup) } with assms have I: "\<And>i. i < length q \<Longrightarrow> nfa_is_node N (bdd_lookup (fst N ! i) ws)" by (simp add: nfa_is_node_def) from L assms have "x \<in> set_of_bv ?q = (x < length (fst N) \<and> (\<exists>i \<in> set_of_bv q. bdd_lookup (fst N ! i) ws ! x \<and> i < length q))" by (auto simp add: set_of_bv_def bdd_lookup_subsetbdd) also from I have "\<dots> = (x \<in> (\<Union>i \<in> set_of_bv q. set_of_bv (bdd_lookup (fst N ! i) ws)))" by (auto simp: nfa_is_node_def set_of_bv_def) finally show "x \<in> set_of_bv ?q = (x \<in> (\<Union>i \<in> set_of_bv q. set_of_bv (bdd_lookup (fst N ! i) ws)))" . qed lemma nfa_trans_quantify_nfa: assumes "wf_nfa N (Suc n)" and "v \<le> n" and "is_alph n w" and "nfa_is_node N q" shows "set_of_bv (nfa_trans (quantify_nfa v N) q w) = (\<Union>b. set_of_bv (nfa_trans N q (insertl v b w)))" proof - from assms have V1: "wf_nfa (quantify_nfa v N) n" by (simp add: quantify_nfa_well_formed_aut) with assms have V2: "wf_nfa (quantify_nfa v N) (length w)" by (simp add: wf_nfa_def is_alph_def) from assms have N: "nfa_is_node (quantify_nfa v N) q" by (simp add: quantify_nfa_def wf_nfa_def split_beta nfa_is_node_def) { fix i assume H: "i \<in> set_of_bv q" with assms have "i < length (fst N)" by (simp add: nfa_is_node_def set_of_bv_def) with assms have "bddh (Suc n) (fst N ! i)" "bdd_all (nfa_is_node N) (fst N ! i)" by (simp add: wf_nfa_def list_all_iff)+ } with assms have I: "\<And>i. i \<in> set_of_bv q \<Longrightarrow> length w = n \<and> bddh (Suc n) (fst N ! i) \<and> bdd_all (nfa_is_node N) (fst N ! i) \<and> v \<le> n" by (simp add: is_alph_def) from assms have V3: "\<And>b. wf_nfa N (length (insertl v b w))" by (simp add: wf_nfa_def is_alph_def insertl_len) from N V2 have "set_of_bv (bdd_lookup (subsetbdd (fst (quantify_nfa v N)) q (nfa_emptybdd (length q))) w) = (\<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (fst (quantify_nfa v N) ! i) w))" by (simp add: subsetbdd_set_of_bv) also from assms have "\<dots> = (\<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (quantify_bdd v (fst N ! i)) w))" by (auto simp: quantify_nfa_def split_beta nfa_is_node_def set_of_bv_def) also have "\<dots> = (\<Union>i\<in>set_of_bv q. \<Union>b. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w)))" proof (simp only: set_eq_iff, rule allI) fix x have "x \<in> (\<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (quantify_bdd v (fst N ! 
i)) w)) = (\<exists>i\<in>set_of_bv q. x \<in> set_of_bv (bdd_lookup (quantify_bdd v (fst N ! i)) w))" by simp also have "\<dots> = ({i. i \<in> set_of_bv q \<and> x \<in> set_of_bv (bdd_lookup (quantify_bdd v (fst N ! i)) w)} \<noteq> {})" by auto also from I have "\<dots> = ({i. i \<in> set_of_bv q \<and> x \<in> (\<Union>b. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w)))} \<noteq> {})" by (auto simp: bdd_lookup_quantify_bdd_set_of_bv[of w n _ N]) also have "\<dots> = (\<exists>i\<in>set_of_bv q. x \<in> (\<Union>b. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w))))" by auto also have "\<dots> = (x \<in> (\<Union>i\<in>set_of_bv q. \<Union>b. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w))))" by simp finally show "(x \<in> (\<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (quantify_bdd v (fst N ! i)) w))) = (x \<in> (\<Union>i \<in> set_of_bv q. \<Union>b. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w))))" . qed also have "\<dots> = (\<Union>b. \<Union>i\<in>set_of_bv q. set_of_bv (bdd_lookup (fst N ! i) (insertl v b w)))" by auto also from V3 assms have "\<dots> = (\<Union>b. set_of_bv (bdd_lookup (subsetbdd (fst N) q (nfa_emptybdd (length q))) (insertl v b w)))" by (simp add: subsetbdd_set_of_bv) finally show ?thesis by (simp add: nfa_trans_def) qed fun insertll :: "nat \<Rightarrow> 'a list \<Rightarrow> 'a list list \<Rightarrow> 'a list list" where "insertll i [] [] = []" | "insertll i (a # as) (bs # bss) = insertl i a bs # insertll i as bss" lemma insertll_len2: assumes "list_all (is_alph n) vs" and "length x = length vs" shows "list_all (is_alph (Suc n)) (insertll k x vs)" using assms by (induct k x vs rule: insertll.induct) (auto simp: insertl_len is_alph_def)+ lemma insertll_append: assumes "length xs = length vs" shows "insertll k (xs @ [x]) (vs @ [v]) = insertll k xs vs @ [insertl k x v]" using assms by (induct k xs vs rule: insertll.induct) simp+ lemma UN_UN_lenset: "(\<Union>b. \<Union>x\<in>{x. length x = n}. M b x) = (\<Union>bs\<in>{x. length x = Suc n}. M (last bs) (butlast bs))" proof auto fix x b xa assume "x \<in> M b xa" hence "length (xa @ [b]) = Suc (length xa) \<and> x \<in> M (last (xa @ [b])) (butlast (xa @ [b]))" by simp thus "\<exists>bs. length bs = Suc (length xa) \<and> x \<in> M (last bs) (butlast bs)" .. next fix x bs assume "x \<in> M (last bs) (butlast bs)" and "length bs = Suc n" hence "length (butlast bs) = n \<and> x \<in> M (last bs) (butlast bs)" by simp thus "\<exists>b xa. length xa = n \<and> x \<in> M b xa" by blast qed lemma nfa_steps_quantify_nfa: assumes "wf_nfa N (Suc n)" and "list_all (is_alph n) w" and "nfa_is_node N q" and "v \<le> n" shows "set_of_bv (nfa_steps (quantify_nfa v N) q w) = (\<Union>xs \<in> {x. length x = length w}. set_of_bv (nfa_steps N q (insertll v xs w)))" using assms proof (induct w rule: rev_induct) case Nil thus ?case by simp next case (snoc x xs) hence "wf_nfa (quantify_nfa v N) n" by (simp add: quantify_nfa_well_formed_aut) moreover from snoc have "nfa_is_node (quantify_nfa v N) q" by (simp add: nfa_is_node_def quantify_nfa_def split_beta) moreover note snoc ultimately have "nfa_is_node (quantify_nfa v N) (nfa_steps (quantify_nfa v N) q xs)" by (simp add: nfa_steps_is_node[of _ n]) hence N: "nfa_is_node N (nfa_steps (quantify_nfa v N) q xs)" (is "nfa_is_node N ?q") by (simp add: nfa_is_node_def quantify_nfa_def split_beta) from snoc have "\<And>b. length (insertl v b x) = Suc n" by (simp add: insertl_len is_alph_def) with snoc have B: "\<And>b. 
wf_nfa N (length (insertl v b x))" by simp from snoc have IV: "set_of_bv (nfa_steps (quantify_nfa v N) q xs) = (\<Union>x \<in>{x. length x = length xs}. set_of_bv (nfa_steps N q (insertll v x xs)))" by simp { fix bs :: "bool list" assume H: "length bs = length xs" with snoc have "list_all (is_alph (Suc n)) (insertll v bs xs)" by (simp add: insertll_len2) with snoc have "nfa_is_node N (nfa_steps N q (insertll v bs xs))" by (simp add: nfa_steps_is_node) } note N2 = this have "set_of_bv (nfa_steps (quantify_nfa v N) q (xs @ [x])) = set_of_bv (nfa_steps (quantify_nfa v N) ?q [x])" by simp also have "\<dots> = set_of_bv (nfa_trans (quantify_nfa v N) ?q x)" by simp also from snoc N have "\<dots> = (\<Union>b. set_of_bv (nfa_trans N ?q (insertl v b x)))" by (simp add: nfa_trans_quantify_nfa) also have "\<dots> = (\<Union>b. set_of_bv (bdd_lookup (subsetbdd (fst N) ?q (nfa_emptybdd (length ?q))) (insertl v b x)))" by (simp add: nfa_trans_def) also from N B have "\<dots> = (\<Union>b. \<Union>i\<in>set_of_bv ?q. set_of_bv (bdd_lookup (fst N ! i) (insertl v b x)))" by (simp add: subsetbdd_set_of_bv) also from IV have "\<dots> = (\<Union>b. \<Union>i\<in>(\<Union>x\<in>{x. length x = length xs}. set_of_bv (nfa_steps N q (insertll v x xs))). set_of_bv (bdd_lookup (fst N ! i) (insertl v b x)))" by simp also have "\<dots> = (\<Union>b. \<Union>y\<in>{x. length x = length xs}. \<Union>i\<in>set_of_bv (nfa_steps N q (insertll v y xs)). set_of_bv (bdd_lookup (fst N ! i) (insertl v b x)))" by simp also have "\<dots> = (\<Union>bs\<in>{x. length x = Suc (length xs)}. \<Union>i\<in>set_of_bv (nfa_steps N q (insertll v (butlast bs) xs)). set_of_bv (bdd_lookup (fst N ! i) (insertl v (last bs) x)))" by (simp add: UN_UN_lenset) also from N2 B have "\<dots> = (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_trans N (nfa_steps N q (insertll v (butlast bs) xs)) (insertl v (last bs) x)))" (is "?L = ?R") by (simp add: subsetbdd_set_of_bv[folded nfa_trans_def]) also have "\<dots> = (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v (butlast bs) xs @ [insertl v (last bs) x])))" by simp also have "\<dots> = (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v (butlast bs @ [last bs]) (xs @ [x]))))" by (auto simp: insertll_append) also have "\<dots> = (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v bs (xs @ [x]))))" proof (rule set_eqI) fix xa have "(xa \<in> (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v (butlast bs @ [last bs]) (xs @ [x]))))) = (\<exists>bs \<in> {x. length x = Suc (length xs)}. bs \<noteq> [] \<and> xa \<in> set_of_bv (nfa_steps N q (insertll v (butlast bs @ [last bs]) (xs @ [x]))))" by auto also have "\<dots> = (\<exists>bs \<in> {x. length x = Suc (length xs)}. bs \<noteq> [] \<and> xa \<in> set_of_bv (nfa_steps N q (insertll v bs (xs @ [x]))))" by auto also have "\<dots> = (xa \<in> (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v bs (xs @ [x])))))" by auto finally show "(xa \<in> (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v (butlast bs @ [last bs]) (xs @ [x]))))) = (xa \<in> (\<Union>bs\<in>{x. length x = Suc (length xs)}. set_of_bv (nfa_steps N q (insertll v bs (xs @ [x])))))" . 
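(* Together with the surrounding calculation, this subproof finishes the snoc case: the set of
   states reached by quantify_nfa v N on xs @ [x] is the union, over all bit strings bs of
   matching length, of the states N reaches when the bits of bs are inserted at position v of
   the corresponding columns.  For the insertion itself, directly from the defining equations
   of insertl, e.g.  insertl 1 b [c0, c1] = [c0, b, c1]. *)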
qed finally show ?case by simp qed lemma nfa_accepts_quantify_nfa: assumes "wf_nfa A (Suc n)" and "i \<le> n" and "list_all (is_alph n) bss" shows "nfa_accepts (quantify_nfa i A) bss = (\<exists>bs. nfa_accepts A (insertll i bs bss) \<and> length bs = length bss)" proof - note Q0 = nfa_startnode_is_node[OF assms(1)] hence "nfa_is_node A (nfa_startnode (quantify_nfa i A))" by (simp add: nfa_startnode_def quantify_nfa_def split_beta) with assms have I: "set_of_bv (nfa_steps (quantify_nfa i A) (nfa_startnode (quantify_nfa i A)) bss) = (\<Union>bs \<in> {bs. length bs = length bss}. set_of_bv (nfa_steps A (nfa_startnode (quantify_nfa i A)) (insertll i bs bss)))" by (simp add: nfa_steps_quantify_nfa) have "nfa_accepts (quantify_nfa i A) bss = nfa_accepting (quantify_nfa i A) (nfa_steps (quantify_nfa i A) (nfa_startnode (quantify_nfa i A)) bss)" by (simp add: accepts_def) also have "\<dots> = (set_of_bv (snd (quantify_nfa i A)) \<inter> set_of_bv (nfa_steps (quantify_nfa i A) (nfa_startnode (quantify_nfa i A)) bss) \<noteq> {})" by (simp add: nfa_accepting_set_of_bv) also from I have "\<dots> = (set_of_bv (snd A) \<inter> (\<Union>bs \<in> {bs. length bs = length bss}. set_of_bv (nfa_steps A (nfa_startnode (quantify_nfa i A)) (insertll i bs bss))) \<noteq> {})" by (simp add: quantify_nfa_def split_beta) also have "\<dots> = ((\<Union>bs \<in> {bs. length bs = length bss}. set_of_bv (snd A) \<inter> set_of_bv (nfa_steps A (nfa_startnode (quantify_nfa i A)) (insertll i bs bss))) \<noteq> {})" by simp also have "\<dots> = (\<exists>bs \<in> {bs. length bs = length bss}. set_of_bv (snd A) \<inter> set_of_bv (nfa_steps A (nfa_startnode A) (insertll i bs bss)) \<noteq> {})" by (auto simp: nfa_startnode_def quantify_nfa_def split_beta) also have "\<dots> = (\<exists>bs. nfa_accepts A (insertll i bs bss) \<and> length bs = length bss)" by (auto simp: accepts_def nfa_accepting_set_of_bv) finally show ?thesis . qed subsection \<open>Right Quotient\<close> definition rquot_succs :: "nat bdd list \<times> bool list \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat list" where "rquot_succs M = (\<lambda>n x. [bdd_lookup (fst M ! x) (replicate n False)])" definition rquot_invariant :: "nat bdd list \<times> bool list \<Rightarrow> bool list \<Rightarrow> bool" where "rquot_invariant M = (\<lambda>l. length l = length (fst M))" definition "rquot_ins = (\<lambda>x l. l[x:=True])" definition rquot_memb :: "nat \<Rightarrow> bool list \<Rightarrow> bool" where "rquot_memb = (\<lambda>x l. l ! x)" definition rquot_empt :: "nat bdd list \<times> bool list \<Rightarrow> bool list" where "rquot_empt M = replicate (length (fst M)) False" definition "rquot_dfs M n x = gen_dfs (rquot_succs M n) rquot_ins rquot_memb (rquot_empt M) [x]" definition zeros :: "nat \<Rightarrow> nat \<Rightarrow> bool list list" where "zeros m n = replicate m (replicate n False)" lemma zeros_is_alpha: "list_all (is_alph v) (zeros n v)" by (induct n) (simp add: zeros_def is_alph_def)+ lemma zeros_rone: "zeros (Suc n) v = zeros n v @ zeros 1 v" by (simp add: zeros_def replicate_append_same) lemma zeros_len: "length (zeros n v) = n" by (simp add: zeros_def) lemma zeros_rtrancl: "(\<exists>n. dfa_reach M x (zeros n v) y) = ((x,y) \<in> (succsr (rquot_succs M v))\<^sup>*)" proof assume "\<exists>n. dfa_reach M x (zeros n v) y" then obtain n where N: "dfa_reach M x (zeros n v) y" .. define w where "w = zeros n v" hence W: "\<exists>n. 
w = zeros n v" by auto from w_def N have "dfa_reach M x w y" by simp from this W show "(x,y) \<in> (succsr (rquot_succs M v))\<^sup>*" proof induct case (snoc k ws y) then obtain n' where N': "ws @ [y] = zeros n' v" by blast have "length (ws @ [y]) > 0" by simp with N' have "n' > 0" by (simp add: zeros_len) then obtain n where NL: "n' = Suc n" by (cases n') simp+ hence "zeros n' v = zeros n v @ zeros 1 v" by (simp only: zeros_rone) also have "\<dots> = zeros n v @ [replicate v False]" by (simp add: zeros_def) finally have "zeros n' v = zeros n v @ [replicate v False]" . with N' have WS: "ws = zeros n v" "y = replicate v False" by auto hence "\<exists>n. ws = zeros n v" by auto with snoc have IV: "(x,k) \<in> (succsr (rquot_succs M v))\<^sup>*" by simp from WS have "dfa_trans M k y \<in> set (rquot_succs M v k)" by (simp add: rquot_succs_def dfa_trans_def) hence "(k, dfa_trans M k y) \<in> (succsr (rquot_succs M v))\<^sup>*" by (auto simp: succsr_def) with IV show ?case by simp qed simp next assume "(x,y) \<in> (succsr (rquot_succs M v))\<^sup>*" thus "\<exists>n. dfa_reach M x (zeros n v) y" proof induct case base have "dfa_reach M x (zeros 0 v) x" by (simp add: reach_nil zeros_def) thus "\<exists>n. dfa_reach M x (zeros n v) x" by (rule exI) next case (step y z) then obtain n where N: "dfa_reach M x (zeros n v) y" by blast with step have Z: "z = dfa_trans M y (replicate v False)" by (simp add: succsr_def rquot_succs_def dfa_trans_def) from N Z have "dfa_reach M x (zeros n v @ zeros 1 v) z" by (simp add: reach_snoc zeros_def) hence "dfa_reach M x (zeros (Suc n) v) z" by (simp only: zeros_rone) thus ?case by (rule exI) qed qed primrec map_index :: "('a \<Rightarrow> nat \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> nat \<Rightarrow> 'b list" where "map_index f [] n = []" | "map_index f (x#xs) n = f x n # map_index f xs (Suc n)" lemma map_index_len: "length (map_index f ls n) = length ls" by (induct ls arbitrary: n) simp+ lemma map_index_nth: assumes "i < length l" shows "map_index f l n ! i = f (l ! i) (n + i)" using assms proof (induct l arbitrary: n i) case (Cons a l n i) show ?case proof (cases "i = 0") case False then obtain j where J: "i = Suc j" by (cases i) simp+ with Cons show ?thesis by simp qed simp qed simp definition rquot :: "dfa \<Rightarrow> nat \<Rightarrow> dfa" where "rquot = (\<lambda>(bd, as) v. (bd, map_index (\<lambda>x n. 
nfa_accepting' as (rquot_dfs (bd, as) v n)) as 0))" lemma rquot_well_formed_aut: assumes "wf_dfa M n" shows "wf_dfa (rquot M n) n" using assms by (simp add: rquot_def split_beta wf_dfa_def map_index_len dfa_is_node_def) lemma rquot_node: "dfa_is_node (rquot M n) q = dfa_is_node M q" by (simp add: rquot_def dfa_is_node_def split_beta) lemma rquot_steps: "dfa_steps (rquot M n) x w = dfa_steps M x w" by (simp add: rquot_def dfa_trans_def [abs_def] split_beta) locale rquot_DFS = fixes A :: dfa and n :: nat assumes well_formed: "wf_dfa A n" sublocale rquot_DFS < DFS "rquot_succs A n" "dfa_is_node A" "rquot_invariant A" rquot_ins rquot_memb "rquot_empt A" proof (insert well_formed, unfold_locales) fix x y S assume "dfa_is_node A x" and "dfa_is_node A y" and "rquot_invariant A S" and "\<not> rquot_memb y S" thus "rquot_memb x (rquot_ins y S) = (x = y \<or> rquot_memb x S)" by (cases "x=y") (simp add: dfa_is_node_def rquot_invariant_def rquot_memb_def rquot_ins_def)+ qed (simp add: dfa_is_node_def rquot_memb_def rquot_empt_def rquot_succs_def rquot_invariant_def rquot_ins_def bounded_nat_set_is_finite[of _ "length (fst A)"] dfa_trans_is_node[unfolded dfa_trans_def dfa_is_node_def is_alph_def])+ context rquot_DFS begin lemma rquot_dfs_invariant: assumes "dfa_is_node A x" shows "rquot_invariant A (rquot_dfs A n x)" using assms well_formed unfolding rquot_dfs_def by (auto simp: dfs_invariant' empt_invariant) lemma dfa_reach_rquot: assumes "dfa_is_node A x" and "dfa_is_node A y" shows "rquot_memb y (rquot_dfs A n x) = (\<exists>m. dfa_reach A x (zeros m n) y)" proof - from assms have "rquot_memb y (rquot_dfs A n x) = ((x,y) \<in> (succsr (rquot_succs A n))\<^sup>*)" by (simp add: dfs_eq_rtrancl rquot_dfs_def) also have "\<dots> = (\<exists>m. dfa_reach A x (zeros m n) y)" by (simp add: zeros_rtrancl) finally show ?thesis . qed lemma rquot_accepting: assumes "dfa_is_node (rquot A n) q" shows "dfa_accepting (rquot A n) q = (\<exists>m. dfa_accepting A (dfa_steps A q (zeros m n)))" proof - from assms have Q: "dfa_is_node A q" by (simp add: rquot_node) with assms have "rquot_invariant A (rquot_dfs A n q)" by (simp add: rquot_dfs_invariant) hence L: "length (rquot_dfs A n q) = length (fst A)" by (simp add: rquot_invariant_def) have "nfa_accepting' (snd A) (rquot_dfs A n q) = (set_of_bv (snd A) \<inter> set_of_bv (rquot_dfs A n q) \<noteq> {})" by (simp add: nfa_accepting'_set_of_bv) also have "\<dots> = (\<exists>i. i < length (snd A) \<and> snd A ! i \<and> i < length (rquot_dfs A n q) \<and> rquot_dfs A n q ! i)" by (auto simp: set_of_bv_def) also from well_formed L have "\<dots> = (\<exists>i. dfa_is_node A i \<and> snd A ! i \<and> rquot_memb i (rquot_dfs A n q))" by (auto simp add: wf_dfa_def dfa_is_node_def rquot_memb_def) also have "\<dots> = ({i. dfa_is_node A i \<and> snd A ! i \<and> rquot_memb i (rquot_dfs A n q)} \<noteq> {})" by auto also from assms Q have "\<dots> = ({i. dfa_is_node A i \<and> snd A ! i \<and> (\<exists>m. dfa_reach A q (zeros m n) i)} \<noteq> {})" by (auto simp: dfa_reach_rquot) also have "\<dots> = ({i. \<exists>m. dfa_is_node A i \<and> snd A ! i \<and> i = dfa_steps A q (zeros m n)} \<noteq> {})" by (simp add: reach_def) also have "\<dots> = (\<exists>i m. dfa_is_node A i \<and> snd A ! i \<and> i = dfa_steps A q (zeros m n))" by auto also have "\<dots> = (\<exists>m. snd A ! dfa_steps A q (zeros m n))" proof assume "\<exists>m. snd A ! dfa_steps A q (zeros m n)" then obtain m where N: "snd A ! dfa_steps A q (zeros m n)" .. 
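(* For this direction it only remains to observe that the state reached from q
   after m all-zero columns is again a node of A, since zeros m n is a
   well-formed word over the width-n alphabet. *)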
from well_formed Q zeros_is_alpha[of n m] have "dfa_is_node A (dfa_steps A q (zeros m n))" by (simp add: dfa_steps_is_node) with N show "\<exists>i m. dfa_is_node A i \<and> snd A ! i \<and> i = dfa_steps A q (zeros m n)" by auto qed auto finally have "nfa_accepting' (snd A) (rquot_dfs A n q) = (\<exists>m. snd A ! dfa_steps A q (zeros m n))" . with well_formed assms show ?thesis by (simp add: dfa_accepting_def rquot_def split_beta dfa_is_node_def map_index_nth wf_dfa_def) qed end lemma rquot_accepts: assumes A: "wf_dfa A n" and "list_all (is_alph n) bss" shows "dfa_accepts (rquot A n) bss = (\<exists>m. dfa_accepts A (bss @ zeros m n))" proof - from A interpret rquot_DFS A n by unfold_locales from assms have V: "wf_dfa (rquot A n) n" by (simp add: rquot_well_formed_aut) hence "dfa_is_node (rquot A n) 0" by (simp add: dfa_startnode_is_node) with assms V have q: "dfa_is_node (rquot A n) (dfa_steps (rquot A n) 0 bss)" by (simp add: dfa_steps_is_node) have "dfa_accepts (rquot A n) bss = dfa_accepting (rquot A n) (dfa_steps (rquot A n) 0 bss)" by (simp add: accepts_def) also from assms q have "\<dots> = (\<exists>m. dfa_accepting A (dfa_steps A (dfa_steps A 0 bss) (zeros m n)))" by (simp add: rquot_accepting rquot_steps) also have "\<dots> = (\<exists>m. dfa_accepting A (dfa_steps A 0 (bss @ zeros m n)))" by simp also have "\<dots> = (\<exists>m. dfa_accepts A (bss @ zeros m n))" by (simp add: accepts_def) finally show ?thesis . qed subsection \<open>Diophantine Equations\<close> fun eval_dioph :: "int list \<Rightarrow> nat list \<Rightarrow> int" where "eval_dioph (k # ks) (x # xs) = k * int x + eval_dioph ks xs" | "eval_dioph ks xs = 0" lemma eval_dioph_mult: "eval_dioph ks xs * int n = eval_dioph ks (map (\<lambda>x. x * n) xs)" by(induct ks xs rule: eval_dioph.induct) (simp_all add: distrib_right) lemma eval_dioph_add_map: "eval_dioph ks (map f xs) + eval_dioph ks (map g xs) = eval_dioph ks (map (\<lambda>x. f x + g x) (xs::nat list))" proof (induct ks xs rule: eval_dioph.induct) case (1 k ks x xs) have "eval_dioph (k # ks) (map f (x # xs)) + eval_dioph (k # ks) (map g (x # xs)) = (k * int (f x) + k * int (g x)) + (eval_dioph ks (map f xs) + eval_dioph ks (map g xs))" by simp also have "\<dots> = (k * int (f x) + k * int (g x)) + eval_dioph ks (map (\<lambda>x. f x + g x) xs)" by (simp add: 1) finally show ?case by (simp add: ac_simps distrib_left) qed simp_all lemma eval_dioph_div_mult: "eval_dioph ks (map (\<lambda>x. x div n) xs) * int n + eval_dioph ks (map (\<lambda>x. x mod n) xs) = eval_dioph ks xs" by (simp add: eval_dioph_mult o_def eval_dioph_add_map) lemma eval_dioph_mod: "eval_dioph ks xs mod int n = eval_dioph ks (map (\<lambda>x. x mod n) xs) mod int n" proof (induct ks xs rule: eval_dioph.induct) case (1 k ks x xs) have "eval_dioph (k # ks) (x # xs) mod int n = ((k * int x) mod int n + eval_dioph ks xs mod int n) mod int n" by (simp add: mod_add_eq) also have "\<dots> = ((k * (int x mod int n)) mod int n + eval_dioph ks (map (\<lambda>x. x mod n) xs) mod int n) mod int n" by (simp add: 1 mod_mult_right_eq) finally show ?case by (simp add: zmod_int mod_add_eq) qed simp_all lemma eval_dioph_div_mod: "(eval_dioph ks xs = l) = (eval_dioph ks (map (\<lambda>x. x mod 2) xs) mod 2 = l mod 2 \<and> eval_dioph ks (map (\<lambda>x. x div 2) xs) = (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2)" (is "?l = ?r") proof assume eq: ?l then have "eval_dioph ks xs mod 2 = l mod 2" by simp with eval_dioph_mod [of _ _ 2] have eq': "eval_dioph ks (map (\<lambda>x. 
x mod 2) xs) mod 2 = l mod 2" by simp from eval_dioph_div_mult [symmetric, of ks xs 2] eq have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 + eval_dioph ks (map (\<lambda>x. x mod 2) xs) = l" by simp then have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 = l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)" by (simp add: eq_diff_eq) then have "(eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2) div 2 = (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2" by simp with eq' show ?r by simp next assume ?r (is "?r1 \<and> ?r2") then obtain eq1: ?r1 and eq2: ?r2 .. from eq1 have "(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs) mod 2) mod 2 = (l - l mod 2) mod 2" by simp then have "(l mod 2 - eval_dioph ks (map (\<lambda>x. x mod 2) xs) mod 2 mod 2) mod 2 = (l mod 2 - l mod 2 mod 2) mod 2" by (simp only: mod_diff_eq) then have eq1': "(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2 = 0" by (simp add: mod_diff_eq) from eq2 have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2 = (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2 * 2 + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2" by simp then have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2 = l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)" by simp with eq1' eval_dioph_div_mult [of _ 2] show ?l by (simp add: eq_diff_eq) qed lemma eval_dioph_ineq_div_mod: "(eval_dioph ks xs \<le> l) = (eval_dioph ks (map (\<lambda>x. x div 2) xs) \<le> (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2)" (is "?l = ?r") proof assume ?l with eval_dioph_div_mult [symmetric, of ks xs 2] have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 + eval_dioph ks (map (\<lambda>x. x mod 2) xs) \<le> l" by simp then have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 \<le> l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)" by (simp add: le_diff_eq) then have "(eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2) div 2 \<le> (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2" by (rule zdiv_mono1) simp then show ?r by simp next assume ?r have "eval_dioph ks xs \<le> eval_dioph ks xs + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2" by simp also { from \<open>?r\<close> have "eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 \<le> (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2 * 2" by simp also have "\<dots> = l - eval_dioph ks (map (\<lambda>x. x mod 2) xs) - (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2" by (simp add: eq_diff_eq) finally have "(eval_dioph ks (map (\<lambda>x. x div 2) xs) * 2 + eval_dioph ks (map (\<lambda>x. x mod 2) xs)) + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2 \<le> l" by simp with eval_dioph_div_mult [of _ 2] have "eval_dioph ks xs + (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2 \<le> l" by simp } finally show ?l . 
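(* A concrete instance of the equivalence just established (the numbers are
   chosen here only for illustration): for ks = [3, -2], xs = [5, 4], l = 7 we
   have eval_dioph ks xs = 3*5 - 2*4 = 7 = l; correspondingly the low bits give
   eval_dioph ks (map (\<lambda>x. x mod 2) xs) = 3*1 - 2*0 = 3, which agrees
   with 7 modulo 2, and the quotients give
   eval_dioph ks (map (\<lambda>x. x div 2) xs) = 3*2 - 2*2 = 2 = (7 - 3) div 2.
   This bitwise reformulation is what allows the automata constructed below to
   check the equation one bit column at a time. *)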
qed lemma sum_list_abs_ge_0: "(0::int) \<le> sum_list (map abs ks)" by (induct ks) simp_all lemma zmult_div_aux1: assumes b: "b \<noteq> 0" shows "(a - a mod b) div b = (a::int) div b" proof - from minus_mod_eq_mult_div [symmetric, of b a] have "(b * (a div b)) div b = (a - a mod b) div b" by simp with b show ?thesis by simp qed lemma zmult_div_aux2: assumes b: "b \<noteq> 0" shows "((a::int) - a mod b) mod b = 0" using b minus_mod_eq_mult_div [symmetric, of b a, symmetric] by simp lemma div_abs_eq: assumes mod: "(a::int) mod b = 0" and b: "0 < b" shows "\<bar>a div b\<bar> = \<bar>a\<bar> div b" proof (cases "0 \<le> a") case True with pos_imp_zdiv_nonneg_iff [OF b] show ?thesis by auto next from b have "b \<noteq> 0" by auto case False then have "a < 0" by auto have "\<bar>a div b\<bar> = - (a div b)" by (simp add: div_neg_pos_less0 [OF \<open>a < 0\<close> b] zabs_def) with abs_of_neg [OF \<open>a < 0\<close>] zdiv_zminus1_eq_if [OF \<open>b \<noteq> 0\<close>] mod show ?thesis by simp qed lemma add_div_trivial: "0 \<le> c \<Longrightarrow> c < b \<Longrightarrow> ((a::int) * b + c) div b = a" by (simp add: div_add1_eq div_pos_pos_trivial) lemma dioph_rhs_bound: "\<bar>(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> \<le> max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" proof - have "\<bar>(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> = \<bar>(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs) - (l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) mod 2) div 2\<bar>" (is "_ = \<bar>(_ - ?r) div 2\<bar>") by (simp add: zmult_div_aux1) also have "\<dots> = \<bar>l - eval_dioph ks (map (\<lambda>x. x mod 2) xs) - ?r\<bar> div 2" by (simp add: zmult_div_aux2 div_abs_eq) also have "\<bar>l - eval_dioph ks (map (\<lambda>x. x mod 2) xs) - ?r\<bar> \<le> \<bar>l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar> + \<bar>?r\<bar>" by (rule abs_triangle_ineq4) also have "\<bar>l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar> \<le> \<bar>l\<bar> + \<bar>eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar>" by (rule abs_triangle_ineq4) also have "\<bar>eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar> \<le> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" proof (induct ks xs rule: eval_dioph.induct) case (1 k ks x xs) have "\<bar>k * int (x mod 2) + eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar> \<le> \<bar>k * int (x mod 2)\<bar> + \<bar>eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar>" by (rule abs_triangle_ineq) also have "\<bar>k * int (x mod 2)\<bar> \<le> \<bar>k\<bar> * \<bar>int (x mod 2)\<bar>" by (simp add: abs_mult) also have "\<bar>int (x mod 2)\<bar> \<le> 1" by simp finally have "\<bar>k * int (x mod 2) + eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar> \<le> \<bar>k\<bar> + \<bar>eval_dioph ks (map (\<lambda>x. x mod 2) xs)\<bar>" by (auto simp add: mult_left_mono) with 1 show ?case by simp qed (simp_all add: sum_list_abs_ge_0) finally have ineq: "\<bar>(l - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> \<le> (\<bar>l\<bar> + (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + \<bar>?r\<bar>) div 2" by (simp add: zdiv_mono1) show ?thesis proof (cases "(\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) \<le> \<bar>l\<bar>") case True note ineq also from True have "(\<bar>l\<bar> + (\<Sum>k\<leftarrow>ks. 
\<bar>k\<bar>) + \<bar>?r\<bar>) div 2 \<le> (\<bar>l\<bar> * 2 + \<bar>?r\<bar>) div 2" by (simp add: zdiv_mono1) also have "\<dots> = \<bar>l\<bar>" by (simp add: add_div_trivial) finally show ?thesis by simp next case False note ineq also from False have "(\<bar>l\<bar> + (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + \<bar>?r\<bar>) div 2 \<le> ((\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) * 2 + \<bar>?r\<bar>) div 2" by (simp add: zdiv_mono1) also have "\<dots> = (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" by (simp add: add_div_trivial) finally show ?thesis by simp qed qed lemma dioph_rhs_invariant: assumes m: "\<bar>m\<bar> \<le> max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" shows "\<bar>(m - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> \<le> max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" proof (cases "(\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) \<le> \<bar>l\<bar>") case True have "\<bar>(m - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> \<le> max \<bar>m\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" by (rule dioph_rhs_bound) also from True m have "\<bar>m\<bar> \<le> \<bar>l\<bar>" by simp finally show ?thesis by simp next case False have "\<bar>(m - eval_dioph ks (map (\<lambda>x. x mod 2) xs)) div 2\<bar> \<le> max \<bar>m\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" by (rule dioph_rhs_bound) also from False m have "\<bar>m\<bar> \<le> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" by simp also have "max (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) \<le> max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>)" by simp finally show ?thesis by simp qed lemma bounded_int_set_is_finite: assumes S: "\<forall>(i::int)\<in>S. \<bar>i\<bar> < j" shows "finite S" proof (rule finite_subset) have "finite (int ` {n. n < nat j})" by (rule nat_seg_image_imp_finite [OF refl]) moreover have "finite ((\<lambda>n. - int n) ` {n. n < nat j})" by (rule nat_seg_image_imp_finite [OF refl]) ultimately show "finite (int ` {n. n < nat j} \<union> (\<lambda>n. - int n) ` {n. n < nat j})" by (rule finite_UnI) show "S \<subseteq> int ` {n. n < nat j} \<union> (\<lambda>n. - int n) ` {n. n < nat j}" proof fix i assume i: "i \<in> S" show "i \<in> int ` {n. n < nat j} \<union> (\<lambda>n. - int n) ` {n. n < nat j}" proof (cases "0 \<le> i") case True then have "i = int (nat i)" by simp moreover from i S have "nat i \<in> {n. n < nat j}" by auto ultimately have "i \<in> int ` {n. n < nat j}" by (rule image_eqI) then show ?thesis .. next case False then have "i = - int (nat (- i))" by simp moreover from i S have "nat (- i) \<in> {n. n < nat j}" by auto ultimately have "i \<in> (\<lambda>n. - int n) ` {n. n < nat j}" by (rule image_eqI) then show ?thesis .. qed qed qed primrec mk_nat_vecs :: "nat \<Rightarrow> nat list list" where "mk_nat_vecs 0 = [[]]" | "mk_nat_vecs (Suc n) = (let yss = mk_nat_vecs n in map (Cons 0) yss @ map (Cons 1) yss)" lemma mk_nat_vecs_bound: "\<forall>xs\<in>set (mk_nat_vecs n). \<forall>x\<in>set xs. x < 2" by (induct n) (auto simp add: Let_def) lemma mk_nat_vecs_mod_eq: "xs \<in> set (mk_nat_vecs n) \<Longrightarrow> map (\<lambda>x. x mod 2) xs = xs" apply (drule bspec [OF mk_nat_vecs_bound]) apply (induct xs) apply simp_all done definition "dioph_succs n ks m = List.map_filter (\<lambda>xs. 
if eval_dioph ks xs mod 2 = m mod 2 then Some ((m - eval_dioph ks xs) div 2) else None) (mk_nat_vecs n)" definition dioph_is_node :: "int list \<Rightarrow> int \<Rightarrow> int \<Rightarrow> bool" where "dioph_is_node ks l m = (\<bar>m\<bar> \<le> max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>))" definition dioph_invariant :: "int list \<Rightarrow> int \<Rightarrow> nat option list \<times> int list \<Rightarrow> bool" where "dioph_invariant ks l = (\<lambda>(is, js). length is = nat (2 * max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + 1))" definition "dioph_ins m = (\<lambda>(is, js). (is[int_encode m := Some (length js)], js @ [m]))" definition dioph_memb :: "int \<Rightarrow> nat option list \<times> int list \<Rightarrow> bool" where "dioph_memb m = (\<lambda>(is, js). is ! int_encode m \<noteq> None)" definition dioph_empt :: "int list \<Rightarrow> int \<Rightarrow> nat option list \<times> int list" where "dioph_empt ks l = (replicate (nat (2 * max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + 1)) None, [])" lemma int_encode_bound: "dioph_is_node ks l m \<Longrightarrow> int_encode m < nat (2 * max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + 1)" by (simp add: dioph_is_node_def int_encode_def sum_encode_def) arith interpretation dioph_dfs: DFS "dioph_succs n ks" "dioph_is_node ks l" "dioph_invariant ks l" dioph_ins dioph_memb "dioph_empt ks l" proof (standard, goal_cases) case (1 x y) then show ?case apply (simp add: dioph_memb_def dioph_ins_def split_beta dioph_invariant_def) apply (cases "x = y") apply (simp add: int_encode_bound) apply (simp add: inj_eq [OF inj_int_encode]) done next case 2 then show ?case by (simp add: dioph_memb_def dioph_empt_def int_encode_bound) next case 3 then show ?case apply (simp add: dioph_succs_def map_filter_def list_all_iff dioph_is_node_def) apply (rule allI impI)+ apply (erule subst [OF mk_nat_vecs_mod_eq]) apply (drule dioph_rhs_invariant) apply assumption done next case 4 then show ?case by (simp add: dioph_invariant_def dioph_empt_def) next case 5 then show ?case by (simp add: dioph_invariant_def dioph_ins_def split_beta) next case 6 then show ?case apply (rule bounded_int_set_is_finite [of _ "max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + 1"]) apply (rule ballI) apply (simp add: dioph_is_node_def) done qed definition "dioph_dfs n ks l = gen_dfs (dioph_succs n ks) dioph_ins dioph_memb (dioph_empt ks l) [l]" primrec make_bdd :: "(nat list \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> nat list \<Rightarrow> 'a bdd" where "make_bdd f 0 xs = Leaf (f xs)" | "make_bdd f (Suc n) xs = Branch (make_bdd f n (xs @ [0])) (make_bdd f n (xs @ [1]))" definition "eq_dfa n ks l = (let (is, js) = dioph_dfs n ks l in (map (\<lambda>j. make_bdd (\<lambda>xs. if eval_dioph ks xs mod 2 = j mod 2 then the (is ! int_encode ((j - eval_dioph ks xs) div 2)) else length js) n []) js @ [Leaf (length js)], map (\<lambda>j. 
j = 0) js @ [False]))" abbreviation (input) nat_of_bool :: "bool \<Rightarrow> nat" where "nat_of_bool \<equiv> of_bool" lemma nat_of_bool_bound: "nat_of_bool b < 2" by (cases b) simp_all lemma nat_of_bool_mk_nat_vecs: "length bs = n \<Longrightarrow> map nat_of_bool bs \<in> set (mk_nat_vecs n)" apply (induct n arbitrary: bs) apply simp apply (case_tac bs) apply simp apply (case_tac a) apply (simp_all add: Let_def) done lemma bdd_lookup_make_bdd: "length bs = n \<Longrightarrow> bdd_lookup (make_bdd f n xs) bs = f (xs @ map nat_of_bool bs)" apply (induct n arbitrary: bs xs) apply simp apply (case_tac bs) apply auto done primrec nat_of_bools :: "bool list \<Rightarrow> nat" where "nat_of_bools [] = 0" | "nat_of_bools (b # bs) = nat_of_bool b + 2 * nat_of_bools bs" primrec nats_of_boolss :: "nat \<Rightarrow> bool list list \<Rightarrow> nat list" where Nil: "nats_of_boolss n [] = replicate n 0" | Cons: "nats_of_boolss n (bs # bss) = map (\<lambda>(b, x). nat_of_bool b + 2 * x) (zip bs (nats_of_boolss n bss))" lemma nats_of_boolss_length: "list_all (is_alph n) bss \<Longrightarrow> length (nats_of_boolss n bss) = n" by (induct bss) (simp_all add: is_alph_def) lemma nats_of_boolss_mod2: assumes bs: "length bs = n" and bss: "list_all (is_alph n) bss" shows "map (\<lambda>x. x mod 2) (nats_of_boolss n (bs # bss)) = map nat_of_bool bs" proof - from bs bss have "map nat_of_bool (map fst (zip bs (nats_of_boolss n bss))) = map nat_of_bool bs" by (simp add: nats_of_boolss_length) then show ?thesis by (simp add: split_def o_def nat_of_bool_bound) qed lemma nats_of_boolss_div2: assumes bs: "length bs = n" and bss: "list_all (is_alph n) bss" shows "map (\<lambda>x. x div 2) (nats_of_boolss n (bs # bss)) = nats_of_boolss n bss" using bs bss by (simp add: split_def o_def nat_of_bool_bound nats_of_boolss_length) lemma zip_insertl: "length xs = length ys \<Longrightarrow> zip (insertl n x xs) (insertl n y ys) = insertl n (x, y) (zip xs ys)" by (induct n x xs arbitrary: ys rule: insertl.induct) (auto simp add: Suc_length_conv) lemma map_insertl: "map f (insertl i x xs) = insertl i (f x) (map f xs)" by (induct i x xs rule: insertl.induct) simp_all lemma insertl_replicate: "m \<le> n \<Longrightarrow> insertl m x (replicate n x) = x # replicate n x" apply (induct n arbitrary: m) apply simp apply (case_tac m) apply simp_all done lemma nats_of_boolss_insertll: "list_all (is_alph n) bss \<Longrightarrow> length bs = length bss \<Longrightarrow> i \<le> n \<Longrightarrow> nats_of_boolss (Suc n) (insertll i bs bss) = insertl i (nat_of_bools bs) (nats_of_boolss n bss)" by (induct i bs bss rule: insertll.induct) (simp_all add: zip_insertl nats_of_boolss_length insertll_len2 is_alph_def map_insertl insertl_replicate cong: conj_cong) lemma zip_replicate_map: "length xs = n \<Longrightarrow> zip (replicate n x) xs = map (Pair x) xs" apply (induct n arbitrary: xs) apply simp apply (case_tac xs) apply simp_all done lemma zip_replicate_mapr: "length xs = n \<Longrightarrow> zip xs (replicate n x) = map (\<lambda>y. (y, x)) xs" apply (induct n arbitrary: xs) apply simp apply (case_tac xs) apply simp_all done lemma zip_assoc: "map f (zip xs (zip ys zs)) = map (\<lambda>((x, y), z). f (x, (y, z))) (zip (zip xs ys) zs)" apply (induct xs arbitrary: ys zs) apply simp apply (case_tac ys) apply simp apply (case_tac zs) apply simp_all done lemma nats_of_boolss_append: "list_all (is_alph n) bss \<Longrightarrow> list_all (is_alph n) bss' \<Longrightarrow> nats_of_boolss n (bss @ bss') = map (\<lambda>(x, y). 
x + 2 ^ length bss * y) (zip (nats_of_boolss n bss) (nats_of_boolss n bss'))" by (induct bss) (auto simp add: nats_of_boolss_length zip_replicate_map o_def map_zip_map map_zip_map2 zip_assoc is_alph_def) lemma nats_of_boolss_zeros: "nats_of_boolss n (zeros m n) = replicate n 0" by (induct m) (simp_all add: zeros_def) declare nats_of_boolss.Cons [simp del] fun bools_of_nat :: "nat \<Rightarrow> nat \<Rightarrow> bool list" where "bools_of_nat k n = (if n = 0 then (if k = 0 then [] else False # bools_of_nat (k - 1) n) else (n mod 2 = 1) # bools_of_nat (k - 1) (n div 2))" lemma bools_of_nat_length: "k \<le> length (bools_of_nat k n)" apply (induct k n rule: bools_of_nat.induct) apply (case_tac "n = 0") apply (case_tac "k = 0") apply simp apply simp apply (subst bools_of_nat.simps) apply (simp del: bools_of_nat.simps) done lemma nat_of_bool_mod_eq: "nat_of_bool (n mod 2 = 1) = n mod 2" by (cases "n mod 2 = 1") simp_all lemma bools_of_nat_inverse: "nat_of_bools (bools_of_nat k n) = n" apply (induct k n rule: bools_of_nat.induct) apply (case_tac "n = 0") apply (case_tac "k = 0") apply simp apply simp apply (subst bools_of_nat.simps) apply (simp add: nat_of_bool_mod_eq [simplified] del: bools_of_nat.simps) done declare bools_of_nat.simps [simp del] lemma eval_dioph_replicate_0: "eval_dioph ks (replicate n 0) = 0" apply (induct n arbitrary: ks) apply simp apply (case_tac ks) apply simp_all done lemma dioph_dfs_bij: "(fst (dioph_dfs n ks l) ! int_encode i = Some k \<and> dioph_is_node ks l i) = (k < length (snd (dioph_dfs n ks l)) \<and> (snd (dioph_dfs n ks l) ! k = i))" proof - let ?dfs = "gen_dfs (dioph_succs n ks) dioph_ins dioph_memb (dioph_empt ks l) [l]" have "list_all (dioph_is_node ks l) [l]" by (simp add: dioph_is_node_def) with dioph_dfs.empt_invariant [of ks l] have "(fst ?dfs ! int_encode i = Some k \<and> dioph_is_node ks l i) = (k < length (snd ?dfs) \<and> (snd ?dfs ! k = i))" proof (induct rule: dioph_dfs.dfs_invariant) case base show ?case by (auto simp add: dioph_empt_def dioph_is_node_def int_encode_bound) next case (step S y) then show ?case by (cases "y = i") (auto simp add: dioph_ins_def dioph_memb_def dioph_is_node_def split_beta dioph_invariant_def int_encode_bound nth_append inj_eq [OF inj_int_encode]) qed then show ?thesis by (simp add: dioph_dfs_def) qed lemma dioph_dfs_mono: assumes z: "dioph_invariant ks l z" and xs: "list_all (dioph_is_node ks l) xs" and H: "fst z ! i = Some k" shows "fst (gen_dfs (dioph_succs n ks) dioph_ins dioph_memb z xs) ! i = Some k" using z xs H apply (rule dioph_dfs.dfs_invariant) apply (simp add: dioph_ins_def dioph_memb_def split_paired_all) apply (case_tac "i = int_encode x") apply simp_all done lemma dioph_dfs_start: "fst (dioph_dfs n ks l) ! 
int_encode l = Some 0" apply (simp add: dioph_dfs_def gen_dfs_simps dioph_dfs.empt dioph_is_node_def) apply (rule dioph_dfs_mono [of _ l]) apply (rule dioph_dfs.ins_invariant) apply (simp add: dioph_is_node_def) apply (rule dioph_dfs.empt_invariant) apply (simp add: dioph_dfs.empt dioph_is_node_def) apply (simp add: dioph_dfs.succs_is_node dioph_is_node_def) apply (simp add: dioph_ins_def dioph_empt_def int_encode_bound dioph_is_node_def) done lemma eq_dfa_error: "\<not> dfa_accepting (eq_dfa n ks l) (dfa_steps (eq_dfa n ks l) (length (snd (dioph_dfs n ks l))) bss)" apply (induct bss) apply (simp add: eq_dfa_def split_beta dfa_accepting_def nth_append) apply (simp add: eq_dfa_def split_beta nth_append dfa_trans_def) done lemma eq_dfa_accepting: "(l, m) \<in> (succsr (dioph_succs n ks))\<^sup>* \<Longrightarrow> list_all (is_alph n) bss \<Longrightarrow> dfa_accepting (eq_dfa n ks l) (dfa_steps (eq_dfa n ks l) (the (fst (dioph_dfs n ks l) ! int_encode m)) bss) = (eval_dioph ks (nats_of_boolss n bss) = m)" proof (induct bss arbitrary: m) case Nil have l: "dioph_is_node ks l l" by (simp add: dioph_is_node_def) with \<open>(l, m) \<in> (succsr (dioph_succs n ks))\<^sup>*\<close> have m: "dioph_is_node ks l m" by (rule dioph_dfs.succsr_is_node) with l Nil have "dioph_memb m (dioph_dfs n ks l)" by (simp add: dioph_dfs.dfs_eq_rtrancl dioph_dfs_def) then obtain k where k: "fst (dioph_dfs n ks l) ! int_encode m = Some k" by (auto simp add: dioph_memb_def) with m have "k < length (snd (dioph_dfs n ks l)) \<and> (snd (dioph_dfs n ks l) ! k = m)" by (simp add: dioph_dfs_bij [symmetric]) with k show ?case by (simp add: eval_dioph_replicate_0 dfa_accepting_def eq_dfa_def split_beta nth_append) next case (Cons bs bss) have l: "dioph_is_node ks l l" by (simp add: dioph_is_node_def) with \<open>(l, m) \<in> (succsr (dioph_succs n ks))\<^sup>*\<close> have m: "dioph_is_node ks l m" by (rule dioph_dfs.succsr_is_node) with l Cons have "dioph_memb m (dioph_dfs n ks l)" by (simp add: dioph_dfs.dfs_eq_rtrancl dioph_dfs_def) then obtain k where k: "fst (dioph_dfs n ks l) ! int_encode m = Some k" by (auto simp add: dioph_memb_def) with m have k': "k < length (snd (dioph_dfs n ks l)) \<and> (snd (dioph_dfs n ks l) ! k = m)" by (simp add: dioph_dfs_bij [symmetric]) show ?case proof (cases "eval_dioph ks (map nat_of_bool bs) mod 2 = m mod 2") case True with k' Cons have "bdd_lookup (fst (eq_dfa n ks l) ! k) bs = the (fst (dioph_dfs n ks l) ! int_encode ((m - eval_dioph ks (map nat_of_bool bs)) div 2))" by (simp add: eq_dfa_def split_beta nth_append bdd_lookup_make_bdd is_alph_def) moreover have "(l, (m - eval_dioph ks (map nat_of_bool bs)) div 2) \<in> (succsr (dioph_succs n ks))\<^sup>*" apply (rule rtrancl_into_rtrancl) apply (rule Cons) apply (simp add: dioph_succs_def succsr_def map_filter_def) apply (rule image_eqI [of _ _ "map nat_of_bool bs"]) using Cons apply (simp_all add: True nat_of_bool_mk_nat_vecs is_alph_def) done ultimately show ?thesis using True k k' Cons by (subst eval_dioph_div_mod) (simp add: nats_of_boolss_div2 nats_of_boolss_mod2 is_alph_def dfa_trans_def [abs_def]) next case False with k' Cons have "bdd_lookup (fst (eq_dfa n ks l) ! 
k) bs = length (snd (dioph_dfs n ks l))" by (simp add: eq_dfa_def split_beta nth_append bdd_lookup_make_bdd is_alph_def) with False k k' Cons show ?thesis by (subst eval_dioph_div_mod) (simp add: nats_of_boolss_div2 nats_of_boolss_mod2 is_alph_def dfa_trans_def eq_dfa_error) qed qed lemma eq_dfa_accepts: assumes bss: "list_all (is_alph n) bss" shows "dfa_accepts (eq_dfa n ks l) bss = (eval_dioph ks (nats_of_boolss n bss) = l)" by (simp add: accepts_def) (rule eq_dfa_accepting [of l l n ks, OF _ bss, simplified dioph_dfs_start, simplified]) lemma bddh_make_bdd: "bddh n (make_bdd f n xs)" by (induct n arbitrary: xs) simp_all lemma bdd_all_make_bdd: "bdd_all P (make_bdd f n xs) = (\<forall>ys\<in>set (mk_nat_vecs n). P (f (xs @ ys)))" by (induct n arbitrary: xs) (auto simp add: Let_def) lemma eq_wf_dfa: "wf_dfa (eq_dfa n ks l) n" proof - have "\<forall>x\<in>set (snd (dioph_dfs n ks l)). \<forall>ys\<in>set (mk_nat_vecs n). eval_dioph ks ys mod 2 = x mod 2 \<longrightarrow> the (fst (dioph_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2)) < Suc (length (snd (dioph_dfs n ks l)))" proof (intro ballI impI) fix x ys assume x: "x \<in> set (snd (dioph_dfs n ks l))" and ys: "ys \<in> set (mk_nat_vecs n)" and ys': "eval_dioph ks ys mod 2 = x mod 2" from x obtain k where k: "fst (dioph_dfs n ks l) ! int_encode x = Some k" and k': "dioph_is_node ks l x" by (auto simp add: in_set_conv_nth dioph_dfs_bij [symmetric]) from k have "dioph_memb x (dioph_dfs n ks l)" by (simp add: dioph_memb_def split_beta) moreover have ll: "dioph_is_node ks l l" by (simp add: dioph_is_node_def) ultimately have "(l, x) \<in> (succsr (dioph_succs n ks))\<^sup>*" using k' by (simp add: dioph_dfs.dfs_eq_rtrancl dioph_dfs_def) then have "(l, (x - eval_dioph ks ys) div 2) \<in> (succsr (dioph_succs n ks))\<^sup>*" apply (rule rtrancl_into_rtrancl) apply (simp add: succsr_def dioph_succs_def map_filter_def) apply (rule image_eqI [of _ _ ys]) apply (simp_all add: ys ys') done moreover from dioph_dfs.succs_is_node [OF k', of n] ys ys' have x': "dioph_is_node ks l ((x - eval_dioph ks ys) div 2)" by (auto simp add: dioph_succs_def map_filter_def list_all_iff) ultimately have "dioph_memb ((x - eval_dioph ks ys) div 2) (dioph_dfs n ks l)" by (simp add: dioph_dfs.dfs_eq_rtrancl dioph_dfs_def ll) then obtain k' where k': "fst (dioph_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2) = Some k'" by (auto simp add: dioph_memb_def) with x' have "k' < length (snd (dioph_dfs n ks l)) \<and> snd (dioph_dfs n ks l) ! k' = ((x - eval_dioph ks ys) div 2)" by (simp add: dioph_dfs_bij [symmetric]) with k' show "the (fst (dioph_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2)) < Suc (length (snd (dioph_dfs n ks l)))" by simp qed then show ?thesis by (simp add: eq_dfa_def split_beta wf_dfa_def dfa_is_node_def list_all_iff bddh_make_bdd bdd_all_make_bdd) qed subsection \<open>Diophantine Inequations\<close> definition "dioph_ineq_succs n ks m = map (\<lambda>xs. 
(m - eval_dioph ks xs) div 2) (mk_nat_vecs n)" interpretation dioph_ineq_dfs: DFS "dioph_ineq_succs n ks" "dioph_is_node ks l" "dioph_invariant ks l" dioph_ins dioph_memb "dioph_empt ks l" proof (standard, goal_cases) case (1 x y) then show ?case apply (simp add: dioph_memb_def dioph_ins_def split_beta dioph_invariant_def) apply (cases "x = y") apply (simp add: int_encode_bound) apply (simp add: inj_eq [OF inj_int_encode]) done next case 2 then show ?case by (simp add: dioph_memb_def dioph_empt_def int_encode_bound) next case 3 then show ?case apply (simp add: dioph_ineq_succs_def map_filter_def list_all_iff dioph_is_node_def) apply (rule ballI) apply (erule subst [OF mk_nat_vecs_mod_eq]) apply (drule dioph_rhs_invariant) apply assumption done next case 4 then show ?case by (simp add: dioph_invariant_def dioph_empt_def) next case 5 then show ?case by (simp add: dioph_invariant_def dioph_ins_def split_beta) next case 6 then show ?case apply (rule bounded_int_set_is_finite [of _ "max \<bar>l\<bar> (\<Sum>k\<leftarrow>ks. \<bar>k\<bar>) + 1"]) apply (rule ballI) apply (simp add: dioph_is_node_def) done qed definition "dioph_ineq_dfs n ks l = gen_dfs (dioph_ineq_succs n ks) dioph_ins dioph_memb (dioph_empt ks l) [l]" definition "ineq_dfa n ks l = (let (is, js) = dioph_ineq_dfs n ks l in (map (\<lambda>j. make_bdd (\<lambda>xs. the (is ! int_encode ((j - eval_dioph ks xs) div 2))) n []) js, map (\<lambda>j. 0 \<le> j) js))" lemma dioph_ineq_dfs_bij: "(fst (dioph_ineq_dfs n ks l) ! int_encode i = Some k \<and> dioph_is_node ks l i) = (k < length (snd (dioph_ineq_dfs n ks l)) \<and> (snd (dioph_ineq_dfs n ks l) ! k = i))" proof - let ?dfs = "gen_dfs (dioph_ineq_succs n ks) dioph_ins dioph_memb (dioph_empt ks l) [l]" have "list_all (dioph_is_node ks l) [l]" by (simp add: dioph_is_node_def) with dioph_dfs.empt_invariant [of ks l] have "(fst ?dfs ! int_encode i = Some k \<and> dioph_is_node ks l i) = (k < length (snd ?dfs) \<and> (snd ?dfs ! k = i))" proof (induct rule: dioph_ineq_dfs.dfs_invariant) case base show ?case by (auto simp add: dioph_empt_def dioph_is_node_def int_encode_bound) next case (step S y) then show ?case by (cases "y = i") (auto simp add: dioph_ins_def dioph_memb_def dioph_is_node_def split_beta dioph_invariant_def int_encode_bound nth_append inj_eq [OF inj_int_encode]) qed then show ?thesis by (simp add: dioph_ineq_dfs_def) qed lemma dioph_ineq_dfs_mono: assumes z: "dioph_invariant ks l z" and xs: "list_all (dioph_is_node ks l) xs" and H: "fst z ! i = Some k" shows "fst (gen_dfs (dioph_ineq_succs n ks) dioph_ins dioph_memb z xs) ! i = Some k" using z xs H apply (rule dioph_ineq_dfs.dfs_invariant) apply (simp add: dioph_ins_def dioph_memb_def split_paired_all) apply (case_tac "i = int_encode x") apply simp_all done lemma dioph_ineq_dfs_start: "fst (dioph_ineq_dfs n ks l) ! 
int_encode l = Some 0" apply (simp add: dioph_ineq_dfs_def gen_dfs_simps dioph_ineq_dfs.empt dioph_is_node_def) apply (rule dioph_ineq_dfs_mono [of _ l]) apply (rule dioph_ineq_dfs.ins_invariant) apply (simp add: dioph_is_node_def) apply (rule dioph_ineq_dfs.empt_invariant) apply (simp add: dioph_ineq_dfs.empt dioph_is_node_def) apply (simp add: dioph_ineq_dfs.succs_is_node dioph_is_node_def) apply (simp add: dioph_ins_def dioph_empt_def int_encode_bound dioph_is_node_def) done lemma ineq_dfa_accepts: assumes bss: "list_all (is_alph n) bss" shows "dfa_accepts (ineq_dfa n ks l) bss = (eval_dioph ks (nats_of_boolss n bss) \<le> l)" by (simp add: accepts_def) (rule ineq_dfa_accepting [of l l n ks, OF _ bss, simplified dioph_ineq_dfs_start, simplified]) lemma ineq_wf_dfa: "wf_dfa (ineq_dfa n ks l) n" proof - have "\<forall>x\<in>set (snd (dioph_ineq_dfs n ks l)). \<forall>ys\<in>set (mk_nat_vecs n). the (fst (dioph_ineq_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2)) < length (snd (dioph_ineq_dfs n ks l))" proof (intro ballI impI) fix x ys assume x: "x \<in> set (snd (dioph_ineq_dfs n ks l))" and ys: "ys \<in> set (mk_nat_vecs n)" from x obtain k where k: "fst (dioph_ineq_dfs n ks l) ! int_encode x = Some k" and k': "dioph_is_node ks l x" by (auto simp add: in_set_conv_nth dioph_ineq_dfs_bij [symmetric]) from k have "dioph_memb x (dioph_ineq_dfs n ks l)" by (simp add: dioph_memb_def split_beta) moreover have ll: "dioph_is_node ks l l" by (simp add: dioph_is_node_def) ultimately have "(l, x) \<in> (succsr (dioph_ineq_succs n ks))\<^sup>*" using k' by (simp add: dioph_ineq_dfs.dfs_eq_rtrancl dioph_ineq_dfs_def) then have "(l, (x - eval_dioph ks ys) div 2) \<in> (succsr (dioph_ineq_succs n ks))\<^sup>*" apply (rule rtrancl_into_rtrancl) apply (simp add: succsr_def dioph_ineq_succs_def) apply (rule image_eqI [of _ _ ys]) apply (simp_all add: ys) done moreover from dioph_ineq_dfs.succs_is_node [OF k', of n] ys have x': "dioph_is_node ks l ((x - eval_dioph ks ys) div 2)" by (simp add: dioph_ineq_succs_def list_all_iff) ultimately have "dioph_memb ((x - eval_dioph ks ys) div 2) (dioph_ineq_dfs n ks l)" by (simp add: dioph_ineq_dfs.dfs_eq_rtrancl dioph_ineq_dfs_def ll) then obtain k' where k': "fst (dioph_ineq_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2) = Some k'" by (auto simp add: dioph_memb_def) with x' have "k' < length (snd (dioph_ineq_dfs n ks l)) \<and> snd (dioph_ineq_dfs n ks l) ! k' = ((x - eval_dioph ks ys) div 2)" by (simp add: dioph_ineq_dfs_bij [symmetric]) with k' show "the (fst (dioph_ineq_dfs n ks l) ! int_encode ((x - eval_dioph ks ys) div 2)) < length (snd (dioph_ineq_dfs n ks l))" by simp qed moreover have "fst (dioph_ineq_dfs n ks l) ! 
int_encode l = Some 0 \<and> dioph_is_node ks l l" by (simp add: dioph_ineq_dfs_start dioph_is_node_def) then have "snd (dioph_ineq_dfs n ks l) \<noteq> []" by (simp add: dioph_ineq_dfs_bij) ultimately show ?thesis by (simp add: ineq_dfa_def split_beta wf_dfa_def dfa_is_node_def list_all_iff bddh_make_bdd bdd_all_make_bdd) qed section \<open>Presburger Arithmetic\<close> datatype pf = Eq "int list" int | Le "int list" int | And pf pf | Or pf pf | Imp pf pf | Forall pf | Exist pf | Neg pf type_synonym passign = "nat list" primrec eval_pf :: "pf \<Rightarrow> passign \<Rightarrow> bool" where "eval_pf (Eq ks l) xs = (eval_dioph ks xs = l)" | "eval_pf (Le ks l) xs = (eval_dioph ks xs \<le> l)" | "eval_pf (And p q) xs = (eval_pf p xs \<and> eval_pf q xs)" | "eval_pf (Or p q) xs = (eval_pf p xs \<or> eval_pf q xs)" | "eval_pf (Imp p q) xs = (eval_pf p xs \<longrightarrow> eval_pf q xs)" | "eval_pf (Forall p) xs = (\<forall>x. eval_pf p (x # xs))" | "eval_pf (Exist p) xs = (\<exists>x. eval_pf p (x # xs))" | "eval_pf (Neg p) xs = (\<not> eval_pf p xs)" function dfa_of_pf :: "nat \<Rightarrow> pf \<Rightarrow> dfa" where Eq: "dfa_of_pf n (Eq ks l) = eq_dfa n ks l" | Le: "dfa_of_pf n (Le ks l) = ineq_dfa n ks l" | And: "dfa_of_pf n (And p q) = and_dfa (dfa_of_pf n p) (dfa_of_pf n q)" | Or: "dfa_of_pf n (Or p q) = or_dfa (dfa_of_pf n p) (dfa_of_pf n q)" | Imp: "dfa_of_pf n (Imp p q) = imp_dfa (dfa_of_pf n p) (dfa_of_pf n q)" | Exist: "dfa_of_pf n (Exist p) = rquot (det_nfa (quantify_nfa 0 (nfa_of_dfa (dfa_of_pf (Suc n) p)))) n" | Forall: "dfa_of_pf n (Forall p) = dfa_of_pf n (Neg (Exist (Neg p)))" | Neg: "dfa_of_pf n (Neg p) = negate_dfa (dfa_of_pf n p)" by pat_completeness auto text \<open>Auxiliary measure function for termination proof\<close> primrec count_forall :: "pf \<Rightarrow> nat" where "count_forall (Eq ks l) = 0" | "count_forall (Le ks l) = 0" | "count_forall (And p q) = count_forall p + count_forall q" | "count_forall (Or p q) = count_forall p + count_forall q" | "count_forall (Imp p q) = count_forall p + count_forall q" | "count_forall (Exist p) = count_forall p" | "count_forall (Forall p) = 1 + count_forall p" | "count_forall (Neg p) = count_forall p" termination dfa_of_pf by (relation "measures [\<lambda>(n, pf). count_forall pf, \<lambda>(n, pf). 
size pf]") auto lemmas dfa_of_pf_induct = dfa_of_pf.induct [case_names Eq Le And Or Imp Exist Forall Neg] lemma dfa_of_pf_well_formed: "wf_dfa (dfa_of_pf n p) n" proof (induct n p rule: dfa_of_pf_induct) case (Eq n ks l) show ?case by (simp add: eq_wf_dfa) next case (Le n ks l) show ?case by (simp add: ineq_wf_dfa) next case (And n p q) then show ?case by (simp add: and_wf_dfa) next case (Or n p q) then show ?case by (simp add: or_wf_dfa) next case (Imp n p q) then show ?case by (simp add: imp_wf_dfa) next case (Neg n p) then show ?case by (simp add: negate_wf_dfa) next case (Exist n p) then show ?case by (simp add: rquot_well_formed_aut det_wf_nfa quantify_nfa_well_formed_aut dfa2wf_nfa) next case (Forall n p) then show ?case by simp qed lemma dfa_of_pf_correctness: "list_all (is_alph n) bss \<Longrightarrow> dfa_accepts (dfa_of_pf n p) bss = eval_pf p (nats_of_boolss n bss)" proof (induct n p arbitrary: bss rule: dfa_of_pf_induct) case (Eq n ks l) then show ?case by (simp add: eq_dfa_accepts) next case (Le n ks l) then show ?case by (simp add: ineq_dfa_accepts) next case (And n p q) then show ?case by (simp add: and_dfa_accepts [of _ n] dfa_of_pf_well_formed) next case (Or n p q) then show ?case by (simp add: or_dfa_accepts [of _ n] dfa_of_pf_well_formed) next case (Imp n p q) then show ?case by (simp add: imp_dfa_accepts [of _ n] dfa_of_pf_well_formed) next case (Neg n p) then show ?case by (simp add: dfa_accepts_negate [of _ n] dfa_of_pf_well_formed) next case (Exist n p) have "(\<exists>k bs. eval_pf p (nat_of_bools bs # nats_of_boolss n bss) \<and> length bs = length bss + k) = (\<exists>x. eval_pf p (x # nats_of_boolss n bss))" apply (rule iffI) apply (erule exE conjE)+ apply (erule exI) apply (erule exE) apply (rule_tac x="length (bools_of_nat (length bss) x) - length bss" in exI) apply (rule_tac x="bools_of_nat (length bss) x" in exI) apply (simp add: bools_of_nat_inverse bools_of_nat_length) done with Exist show ?case by (simp add: rquot_accepts det_wf_nfa quantify_nfa_well_formed_aut dfa2wf_nfa det_nfa_accepts [of _ n] zeros_is_alpha nfa_accepts_quantify_nfa [of _ n] nfa_of_dfa_accepts [of _ "Suc n"] insertll_len2 nats_of_boolss_insertll zeros_len nats_of_boolss_append nats_of_boolss_zeros zip_replicate_mapr nats_of_boolss_length o_def insertl_0_eq dfa_of_pf_well_formed cong: rev_conj_cong) next case (Forall n p) then show ?case by simp qed text \<open>The same with minimization after quantification.\<close> function dfa_of_pf' :: "nat \<Rightarrow> pf \<Rightarrow> dfa" where "dfa_of_pf' n (Eq ks l) = eq_dfa n ks l" | "dfa_of_pf' n (Le ks l) = ineq_dfa n ks l" | "dfa_of_pf' n (And p q) = and_dfa (dfa_of_pf' n p) (dfa_of_pf' n q)" | "dfa_of_pf' n (Or p q) = or_dfa (dfa_of_pf' n p) (dfa_of_pf' n q)" | "dfa_of_pf' n (Imp p q) = imp_dfa (dfa_of_pf' n p) (dfa_of_pf' n q)" | "dfa_of_pf' n (Exist p) = min_dfa (rquot (det_nfa (quantify_nfa 0 (nfa_of_dfa (dfa_of_pf' (Suc n) p)))) n)" | "dfa_of_pf' n (Forall p) = dfa_of_pf' n (Neg (Exist (Neg p)))" | "dfa_of_pf' n (Neg p) = negate_dfa (dfa_of_pf' n p)" by pat_completeness auto termination dfa_of_pf' by (relation "measures [\<lambda>(n, pf). count_forall pf, \<lambda>(n, pf). 
size pf]") auto lemmas dfa_of_pf'_induct = dfa_of_pf'.induct [case_names Eq Le And Or Imp Exist Forall Neg] lemma dfa_of_pf'_well_formed: "wf_dfa (dfa_of_pf' n p) n" proof (induct n p rule: dfa_of_pf'_induct) case (Eq n ks l) show ?case by (simp add: eq_wf_dfa) next case (Le n ks l) show ?case by (simp add: ineq_wf_dfa) next case (And n p q) then show ?case by (simp add: and_wf_dfa) next case (Or n p q) then show ?case by (simp add: or_wf_dfa) next case (Imp n p q) then show ?case by (simp add: imp_wf_dfa) next case (Neg n p) then show ?case by (simp add: negate_wf_dfa) next case (Exist n p) then show ?case by (simp add: rquot_well_formed_aut det_wf_nfa quantify_nfa_well_formed_aut dfa2wf_nfa min_dfa_wf) next case (Forall n p) then show ?case by simp qed lemma dfa_of_pf'_correctness: "list_all (is_alph n) bss \<Longrightarrow> dfa_accepts (dfa_of_pf' n p) bss = eval_pf p (nats_of_boolss n bss)" proof (induct n p arbitrary: bss rule: dfa_of_pf'_induct) case (Eq n ks l) then show ?case by (simp add: eq_dfa_accepts) next case (Le n ks l) then show ?case by (simp add: ineq_dfa_accepts) next case (And n p q) then show ?case by (simp add: and_dfa_accepts [of _ n] dfa_of_pf'_well_formed) next case (Or n p q) then show ?case by (simp add: or_dfa_accepts [of _ n] dfa_of_pf'_well_formed) next case (Imp n p q) then show ?case by (simp add: imp_dfa_accepts [of _ n] dfa_of_pf'_well_formed) next case (Neg n p) then show ?case by (simp add: dfa_accepts_negate [of _ n] dfa_of_pf'_well_formed) next case (Exist n p) have "(\<exists>k bs. eval_pf p (nat_of_bools bs # nats_of_boolss n bss) \<and> length bs = length bss + k) = (\<exists>x. eval_pf p (x # nats_of_boolss n bss))" apply (rule iffI) apply (erule exE conjE)+ apply (erule exI) apply (erule exE) apply (rule_tac x="length (bools_of_nat (length bss) x) - length bss" in exI) apply (rule_tac x="bools_of_nat (length bss) x" in exI) apply (simp add: bools_of_nat_inverse bools_of_nat_length) done with Exist show ?case by (simp add: rquot_accepts det_wf_nfa quantify_nfa_well_formed_aut dfa2wf_nfa det_nfa_accepts [of _ n] zeros_is_alpha nfa_accepts_quantify_nfa [of _ n] nfa_of_dfa_accepts [of _ "Suc n"] insertll_len2 nats_of_boolss_insertll zeros_len nats_of_boolss_append nats_of_boolss_zeros zip_replicate_mapr nats_of_boolss_length o_def insertl_0_eq dfa_of_pf'_well_formed min_dfa_accept [of _ n] min_dfa_wf rquot_well_formed_aut cong: rev_conj_cong) next case (Forall n p) then show ?case by simp qed end
(** Formal Reasoning About Programs <http://adam.chlipala.net/frap/> * Chapter 14: Separation Logic * Author: Adam Chlipala * License: https://creativecommons.org/licenses/by-nc-nd/4.0/ *) Require Import Frap Setoid Classes.Morphisms SepCancel. Set Implicit Arguments. Set Asymmetric Patterns. (** * Shared notations and definitions; main material starts afterward. *) Definition heap := fmap nat nat. Hint Extern 1 (_ <= _) => linear_arithmetic. Hint Extern 1 (@eq nat _ _) => linear_arithmetic. Ltac simp := repeat (simplify; subst; propositional; try match goal with | [ H : ex _ |- _ ] => invert H end); try linear_arithmetic. (** * Encore of last mixed-embedding language from last time *) (* Let's work with a variant of the imperative language from last time. *) Inductive loop_outcome acc := | Done (a : acc) | Again (a : acc). Inductive cmd : Set -> Type := | Return {result : Set} (r : result) : cmd result | Bind {result result'} (c1 : cmd result') (c2 : result' -> cmd result) : cmd result | Read (a : nat) : cmd nat | Write (a v : nat) : cmd unit | Loop {acc : Set} (init : acc) (body : acc -> cmd (loop_outcome acc)) : cmd acc | Fail {result} : cmd result (* But let's also add memory allocation and deallocation. *) | Alloc (numWords : nat) : cmd nat | Free (base numWords : nat) : cmd unit. Notation "x <- c1 ; c2" := (Bind c1 (fun x => c2)) (right associativity, at level 80). Notation "'for' x := i 'loop' c1 'done'" := (Loop i (fun x => c1)) (right associativity, at level 80). (* These helper functions respectively initialize a new span of memory and * remove a span of memory that the program is done with. *) Fixpoint initialize (h : heap) (base numWords : nat) : heap := match numWords with | O => h | S numWords' => initialize h base numWords' $+ (base + numWords', 0) end. Fixpoint deallocate (h : heap) (base numWords : nat) : heap := match numWords with | O => h | S numWords' => deallocate (h $- base) (base+1) numWords' end. (* Let's do the semantics a bit differently this time, falling back on classic * small-step operational semantics. *) Inductive step : forall A, heap * cmd A -> heap * cmd A -> Prop := | StepBindRecur : forall result result' (c1 c1' : cmd result') (c2 : result' -> cmd result) h h', step (h, c1) (h', c1') -> step (h, Bind c1 c2) (h', Bind c1' c2) | StepBindProceed : forall (result result' : Set) (v : result') (c2 : result' -> cmd result) h, step (h, Bind (Return v) c2) (h, c2 v) | StepLoop : forall (acc : Set) (init : acc) (body : acc -> cmd (loop_outcome acc)) h, step (h, Loop init body) (h, o <- body init; match o with | Done a => Return a | Again a => Loop a body end) | StepRead : forall h a v, h $? a = Some v -> step (h, Read a) (h, Return v) | StepWrite : forall h a v v', h $? a = Some v -> step (h, Write a v') (h $+ (a, v'), Return tt) | StepAlloc : forall h numWords a, (forall i, i < numWords -> h $? (a + i) = None) -> step (h, Alloc numWords) (initialize h a numWords, Return a) | StepFree : forall h a numWords, step (h, Free a numWords) (deallocate h a numWords, Return tt). Definition trsys_of (h : heap) {result} (c : cmd result) := {| Initial := {(h, c)}; Step := step (A := result) |}. (* Now let's get into the first distinctive feature of separation logic: an * assertion language that takes advantage of *pariality* of heaps. We give our * definitions inside a module, which will shortly be used as a parameter to * another module (from the book library), to get some free automation for * implications between these assertions. *) Module Import S <: SEP. 
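(* The [SEP] signature asks, roughly, for an assertion type with implication,
   equivalence, lifting of pure propositions, separating conjunction, and
   existential quantification, plus the algebraic laws proved at the end of
   this module; the functor application [SepCancel.Make] below then supplies
   the [cancel] automation used in the rest of the chapter. *)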
Definition hprop := heap -> Prop. (* A [hprop] is a regular old assertion over heaps. *) (* Implication *) Definition himp (p q : hprop) := forall h, p h -> q h. (* Equivalence *) Definition heq (p q : hprop) := forall h, p h <-> q h. (* Lifting a pure proposition: it must hold, and the heap must be empty. *) Definition lift (P : Prop) : hprop := fun h => P /\ h = $0. (* Separating conjunction, one of the two big ideas of separation logic. * When does [star p q] apply to [h]? When [h] can be partitioned into two * subheaps [h1] and [h2], respectively compatible with [p] and [q]. See book * module [Map] for definitions of [split] and [disjoint]. *) Definition star (p q : hprop) : hprop := fun h => exists h1 h2, split h h1 h2 /\ disjoint h1 h2 /\ p h1 /\ q h2. (* Existential quantification *) Definition exis A (p : A -> hprop) : hprop := fun h => exists x, p x h. (* Convenient notations *) Notation "[| P |]" := (lift P) : sep_scope. Infix "*" := star : sep_scope. Notation "'exists' x .. y , p" := (exis (fun x => .. (exis (fun y => p)) ..)) : sep_scope. Delimit Scope sep_scope with sep. Notation "p === q" := (heq p%sep q%sep) (no associativity, at level 70). Notation "p ===> q" := (himp p%sep q%sep) (no associativity, at level 70). Local Open Scope sep_scope. (* And now we prove some key algebraic properties, whose details aren't so * important. The library automation uses these properties. *) Lemma iff_two : forall A (P Q : A -> Prop), (forall x, P x <-> Q x) -> (forall x, P x -> Q x) /\ (forall x, Q x -> P x). Proof. firstorder. Qed. Local Ltac t := (unfold himp, heq, lift, star, exis; propositional; subst); repeat (match goal with | [ H : forall x, _ <-> _ |- _ ] => apply iff_two in H | [ H : ex _ |- _ ] => destruct H | [ H : split _ _ $0 |- _ ] => apply split_empty_fwd in H end; propositional; subst); eauto 15. Theorem himp_heq : forall p q, p === q <-> (p ===> q /\ q ===> p). Proof. t. Qed. Theorem himp_refl : forall p, p ===> p. Proof. t. Qed. Theorem himp_trans : forall p q r, p ===> q -> q ===> r -> p ===> r. Proof. t. Qed. Theorem lift_left : forall p (Q : Prop) r, (Q -> p ===> r) -> p * [| Q |] ===> r. Proof. t. Qed. Theorem lift_right : forall p q (R : Prop), p ===> q -> R -> p ===> q * [| R |]. Proof. t. Qed. Hint Resolve split_empty_bwd'. Theorem extra_lift : forall (P : Prop) p, P -> p === [| P |] * p. Proof. t. apply split_empty_fwd' in H1; subst; auto. Qed. Theorem star_comm : forall p q, p * q === q * p. Proof. t. Qed. Theorem star_assoc : forall p q r, p * (q * r) === (p * q) * r. Proof. t. Qed. Theorem star_cancel : forall p1 p2 q1 q2, p1 ===> p2 -> q1 ===> q2 -> p1 * q1 ===> p2 * q2. Proof. t. Qed. Theorem exis_gulp : forall A p (q : A -> _), p * exis q === exis (fun x => p * q x). Proof. t. Qed. Theorem exis_left : forall A (p : A -> _) q, (forall x, p x ===> q) -> exis p ===> q. Proof. t. Qed. Theorem exis_right : forall A p (q : A -> _) x, p ===> q x -> p ===> exis q. Proof. t. Qed. End S. Export S. (* Instantiate our big automation engine to these definitions. *) Module Import Se := SepCancel.Make(S). (* ** Some extra predicates outside the set that the engine knows about *) (* Capturing single-mapping heaps *) Definition heap1 (a v : nat) : heap := $0 $+ (a, v). Definition ptsto (a v : nat) : hprop := fun h => h = heap1 a v. (* Helpful notations, some the same as above *) Notation "[| P |]" := (lift P) : sep_scope. Notation emp := (lift True). Infix "*" := star : sep_scope. Notation "'exists' x .. y , p" := (exis (fun x => .. (exis (fun y => p)) ..)) : sep_scope. 
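(* A concrete sanity check on these definitions (informal, not used later): the
   two-cell heap $0 $+ (1, 5) $+ (2, 7) satisfies star (ptsto 1 5) (ptsto 2 7),
   because it splits into the disjoint singleton heaps [heap1 1 5] and
   [heap1 2 7]; by contrast, star (ptsto 1 5) (ptsto 1 7) is unsatisfiable,
   since any split would have to place address 1 in both halves, contradicting
   disjointness. *)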
Delimit Scope sep_scope with sep. Notation "p === q" := (heq p%sep q%sep) (no associativity, at level 70). Notation "p ===> q" := (himp p%sep q%sep) (no associativity, at level 70). Infix "|->" := ptsto (at level 30) : sep_scope. (* For describing a "struct" in memory at consecutive addresses *) Fixpoint multi_ptsto (a : nat) (vs : list nat) : hprop := match vs with | nil => emp | v :: vs' => a |-> v * multi_ptsto (a + 1) vs' end%sep. Infix "|-->" := multi_ptsto (at level 30) : sep_scope. (* We'll use this one to describe the struct returned by [Alloc]. *) Fixpoint zeroes (n : nat) : list nat := match n with | O => nil | S n' => zeroes n' ++ 0 :: nil end. (* For recording merely that a range of cells is mapped into our memory space *) Fixpoint allocated (a n : nat) : hprop := match n with | O => emp | S n' => (exists v, a |-> v) * allocated (a+1) n' end%sep. Infix "|->?" := allocated (at level 30) : sep_scope. (** * Finally, the Hoare logic *) Inductive hoare_triple : forall {result}, hprop -> cmd result -> (result -> hprop) -> Prop := (* First, four basic rules that look exactly the same as before *) | HtReturn : forall P {result : Set} (v : result), hoare_triple P (Return v) (fun r => P * [| r = v |])%sep | HtBind : forall P {result' result} (c1 : cmd result') (c2 : result' -> cmd result) Q R, hoare_triple P c1 Q -> (forall r, hoare_triple (Q r) (c2 r) R) -> hoare_triple P (Bind c1 c2) R | HtLoop : forall {acc : Set} (init : acc) (body : acc -> cmd (loop_outcome acc)) I, (forall acc, hoare_triple (I (Again acc)) (body acc) I) -> hoare_triple (I (Again init)) (Loop init body) (fun r => I (Done r)) | HtFail : forall {result}, hoare_triple (fun _ => False) (Fail (result := result)) (fun _ _ => False) (* Now the new rules for primitive heap operations: *) | HtRead : forall a R, hoare_triple (exists v, a |-> v * R v)%sep (Read a) (fun r => a |-> r * R r)%sep (* To read from an address, it must be mapped into the address space with some * value. Afterward, that address is known to point to the result value [r]. * An additional *frame* predicate [R] is along for the ride. *) | HtWrite : forall a v', hoare_triple (exists v, a |-> v)%sep (Write a v') (fun _ => a |-> v')%sep (* To write to an address, just show that it's mapped into the address space. * Afterward, that address points to the value we've written. Note that this * rule is in the *small-footprint* style, with no frame predicate baked in. *) | HtAlloc : forall numWords, hoare_triple emp%sep (Alloc numWords) (fun r => r |--> zeroes numWords)%sep (* Allocation works in any memory, transitioning to a state where the result * value points to a sequence of zeroes. *) | HtFree : forall a numWords, hoare_triple (a |->? numWords)%sep (Free a numWords) (fun _ => emp)%sep (* Deallocation requires an argument pointing to the appropriate number of * words, taking us to a state where those addresses are unmapped. *) | HtConsequence : forall {result} (c : cmd result) P Q (P' : hprop) (Q' : _ -> hprop), hoare_triple P c Q -> P' ===> P -> (forall r, Q r ===> Q' r) -> hoare_triple P' c Q' (* This is essentially the same rule of consequence from before. *) | HtFrame : forall {result} (c : cmd result) P Q R, hoare_triple P c Q -> hoare_triple (P * R)%sep c (fun r => Q r * R)%sep. (* The *frame rule* is the other big idea of separation. We can extend any * Hoare triple by starring an arbitrary assertion [R] into both precondition * and postcondition. 
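 * For instance, framing [b |-> w] onto rule [HtWrite] (by [HtFrame]) yields a
 * triple whose precondition is (exists v, a |-> v) * b |-> w and whose
 * postcondition is a |-> v' * b |-> w, so knowledge about [b] survives the
 * write without being reproved.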
Note that rule [HtRead] built in a variant of the frame * rule, where the frame predicate [R] may depend on the value [v] being read. * The other operations can use this generic frame rule instead. *) Notation "{{ P }} c {{ r ~> Q }}" := (hoare_triple P%sep c (fun r => Q%sep)) (at level 90, c at next level). Lemma HtStrengthen : forall {result} (c : cmd result) P Q (Q' : _ -> hprop), hoare_triple P c Q -> (forall r, Q r ===> Q' r) -> hoare_triple P c Q'. Proof. simplify. eapply HtConsequence; eauto. reflexivity. Qed. Lemma HtWeaken : forall {result} (c : cmd result) P Q (P' : hprop), hoare_triple P c Q -> P' ===> P -> hoare_triple P' c Q. Proof. simplify. eapply HtConsequence; eauto. reflexivity. Qed. (* Now, we carry out a moderately laborious soundness proof! It's safe to skip * ahead to the text "Examples", but a few representative lemma highlights * include [invert_Read], [preservation], [progress], and the main theorem * [hoare_triple_sound]. *) Lemma invert_Return : forall {result : Set} (r : result) P Q, hoare_triple P (Return r) Q -> forall h, P h -> Q r h. Proof. induct 1; propositional; eauto. exists h, $0; propositional; eauto. unfold lift; propositional. unfold himp in *; eauto. unfold star, himp in *; simp; eauto 7. Qed. Hint Constructors hoare_triple. Lemma invert_Bind : forall {result' result} (c1 : cmd result') (c2 : result' -> cmd result) P Q, hoare_triple P (Bind c1 c2) Q -> exists R, hoare_triple P c1 R /\ forall r, hoare_triple (R r) (c2 r) Q. Proof. induct 1; propositional; eauto. invert IHhoare_triple; propositional. eexists; propositional. eapply HtWeaken. eassumption. auto. eapply HtStrengthen. apply H4. auto. simp. exists (fun r => x r * R)%sep. propositional. eapply HtFrame; eauto. eapply HtFrame; eauto. Qed. Lemma invert_Loop : forall {acc : Set} (init : acc) (body : acc -> cmd (loop_outcome acc)) P Q, hoare_triple P (Loop init body) Q -> exists I, (forall acc, hoare_triple (I (Again acc)) (body acc) I) /\ (forall h, P h -> I (Again init) h) /\ (forall r h, I (Done r) h -> Q r h). Proof. induct 1; propositional; eauto. invert IHhoare_triple; propositional. exists x; propositional; eauto. unfold himp in *; eauto. simp. exists (fun o => x o * R)%sep; propositional; eauto. unfold star in *; simp; eauto 7. unfold star in *; simp; eauto 7. Qed. Lemma invert_Fail : forall result P Q, hoare_triple P (Fail (result := result)) Q -> forall h, P h -> False. Proof. induct 1; propositional; eauto. unfold star in *; simp; eauto. Qed. (* Now that we proved enough basic facts, let's hide the definitions of all * these predicates, so that we reason about them only through automation. *) Opaque heq himp lift star exis ptsto. Lemma unit_not_nat : unit = nat -> False. Proof. simplify. assert (exists x : unit, forall y : unit, x = y). exists tt; simplify. cases y; reflexivity. rewrite H in H0. invert H0. specialize (H1 (S x)). linear_arithmetic. Qed. Lemma invert_Read : forall a P Q, hoare_triple P (Read a) Q -> exists R, (P ===> exists v, a |-> v * R v)%sep /\ forall r, a |-> r * R r ===> Q r. Proof. induct 1; simp; eauto. exists R; simp. cancel; auto. cancel; auto. apply unit_not_nat in x0; simp. apply unit_not_nat in x0; simp. eauto 7 using himp_trans. exists (fun n => x n * R)%sep; simp. rewrite H1. cancel. rewrite <- H2. cancel. Qed. Lemma invert_Write : forall a v' P Q, hoare_triple P (Write a v') Q -> exists R, (P ===> (exists v, a |-> v) * R)%sep /\ a |-> v' * R ===> Q tt. Proof. induct 1; simp; eauto. symmetry in x0. apply unit_not_nat in x0; simp. exists emp; simp. 
cancel; auto. cancel; auto. symmetry in x0. apply unit_not_nat in x0; simp. eauto 7 using himp_trans. exists (x * R)%sep; simp. rewrite H1. cancel. cancel. rewrite <- H2. cancel. Qed. Lemma invert_Alloc : forall numWords P Q, hoare_triple P (Alloc numWords) Q -> forall r, P * r |--> zeroes numWords ===> Q r. Proof. induct 1; simp; eauto. apply unit_not_nat in x0; simp. cancel. apply unit_not_nat in x0; simp. rewrite H0. eauto using himp_trans. rewrite <- IHhoare_triple. cancel. Qed. (* Temporarily transparent again! *) Transparent heq himp lift star exis ptsto. Lemma zeroes_initialize' : forall h a v, h $? a = None -> (fun h' : heap => h' = h $+ (a, v)) ===> (fun h' => h' = h) * a |-> v. Proof. unfold himp, star, split, ptsto, disjoint; simp. exists h, (heap1 a v). propositional. maps_equal. unfold heap1. rewrite lookup_join2. simp. simp. apply lookup_None_dom in H. propositional. cases (h $? k). rewrite lookup_join1; auto. eauto using lookup_Some_dom. rewrite lookup_join2; auto. unfold heap1; simp. eauto using lookup_None_dom. unfold heap1 in *. cases (a ==n a0); simp. Qed. (* Opaque again! *) Opaque heq himp lift star exis ptsto. Lemma multi_ptsto_app : forall ls2 ls1 a, a |--> ls1 * (a + length ls1) |--> ls2 ===> a |--> (ls1 ++ ls2). Proof. induct ls1; simp; cancel; auto. replace (a + 0) with a by linear_arithmetic. cancel. rewrite <- IHls1. cancel. replace (a0 + 1 + length ls1) with (a0 + S (length ls1)) by linear_arithmetic. cancel. Qed. Lemma length_zeroes : forall n, length (zeroes n) = n. Proof. induct n; simplify; auto. rewrite app_length; simplify. linear_arithmetic. Qed. Lemma initialize_fresh : forall a' h a numWords, a' >= a + numWords -> initialize h a numWords $? a' = h $? a'. Proof. induct numWords; simp; auto. Qed. Lemma zeroes_initialize : forall numWords a h, (forall i, i < numWords -> h $? (a + i) = None) -> (fun h' => h' = initialize h a numWords) ===> (fun h' => h' = h) * a |--> zeroes numWords. Proof. induct numWords; simp. cancel; auto. rewrite <- multi_ptsto_app. rewrite zeroes_initialize'. erewrite IHnumWords. simp. rewrite length_zeroes. cancel; auto. auto. rewrite initialize_fresh; auto. Qed. Lemma invert_Free : forall a numWords P Q, hoare_triple P (Free a numWords) Q -> P ===> a |->? numWords * Q tt. Proof. induct 1; simp; eauto. symmetry in x0. apply unit_not_nat in x0; simp. symmetry in x0. apply unit_not_nat in x0; simp. cancel; auto. rewrite H0. rewrite IHhoare_triple. cancel; auto. rewrite IHhoare_triple. cancel; auto. Qed. (* Temporarily transparent again! *) Transparent heq himp lift star exis ptsto. Lemma do_deallocate' : forall a Q h, ((exists v, a |-> v) * Q)%sep h -> Q (h $- a). Proof. unfold ptsto, star, split, heap1; simp. invert H1. replace ($0 $+ (a, x1) $++ x0 $- a) with x0; auto. maps_equal. cases (k ==n a); simp. specialize (H a). simp. cases (x0 $? a); auto. exfalso; apply H; equality. rewrite lookup_join2; auto. apply lookup_None_dom. simp. Qed. Lemma do_deallocate : forall Q numWords a h, (a |->? numWords * Q)%sep h -> Q (deallocate h a numWords). Proof. induct numWords; simp. unfold star, exis, lift in H; simp. apply split_empty_fwd' in H0; simp. apply IHnumWords. clear IHnumWords. apply do_deallocate'. Opaque heq himp lift star exis ptsto. match goal with | [ H : ?P h |- ?Q h ] => assert (P ===> Q) by cancel end. Transparent himp. apply H0; auto. Opaque himp. Qed. Lemma HtReturn' : forall P {result : Set} (v : result) Q, P ===> Q v -> hoare_triple P (Return v) Q. Proof. simp. eapply HtStrengthen. constructor. simp. cancel. Qed. 
(* Temporarily transparent again! *) Transparent heq himp lift star exis ptsto. Lemma preservation : forall {result} (c : cmd result) h c' h', step (h, c) (h', c') -> forall Q, hoare_triple (fun h' => h' = h) c Q -> hoare_triple (fun h'' => h'' = h') c' Q. Proof. induct 1; simplify. apply invert_Bind in H0; simp. eauto. apply invert_Bind in H; simp. specialize (invert_Return H); eauto using HtWeaken. apply invert_Loop in H; simp. econstructor. eapply HtWeaken. eauto. assumption. simp. cases r. apply HtReturn'. unfold himp; simp; eauto. eapply HtStrengthen. eauto. unfold himp; simp; eauto. apply invert_Read in H0; simp. apply HtReturn'. assert ((exists v, a |-> v * x v)%sep h') by auto. unfold exis, star in H1; simp. unfold ptsto in H4; subst. unfold split in H1; subst. unfold heap1 in H. rewrite lookup_join1 in H by (simp; sets). unfold himp; simp. invert H. apply H2. unfold star. exists (heap1 a v), x2; propositional. unfold split; reflexivity. unfold ptsto; reflexivity. apply invert_Write in H0; simp. apply HtReturn'. simp. assert (((exists v : nat, a |-> v) * x)%sep h) by auto. unfold star in H1; simp. invert H4. unfold ptsto in H5; subst. unfold split in H3; subst. unfold heap1 in H. rewrite lookup_join1 in H by (simp; sets). unfold himp; simp. invert H. apply H2. unfold star. exists ($0 $+ (a, v')), x1; propositional. unfold split. unfold heap1. maps_equal. rewrite lookup_join1 by (simp; sets). simp. repeat rewrite lookup_join2 by (simp; sets); reflexivity. unfold disjoint in *; simp. cases (a0 ==n a); simp. apply H1 with (a0 := a). unfold heap1; simp. equality. assumption. unfold ptsto; reflexivity. apply invert_Alloc with (r := a) in H0. apply HtReturn'. unfold himp; simp. eapply himp_trans in H0; try apply zeroes_initialize. auto. assumption. apply invert_Free in H. assert ((a |->? numWords * Q tt)%sep h) by auto. apply HtReturn'. unfold himp; simp. eapply do_deallocate. eauto. Qed. Lemma deallocate_None : forall a' numWords h a, h $? a' = None -> deallocate h a numWords $? a' = None. Proof. induct numWords; simp. rewrite IHnumWords; simp. cases (a ==n a'); simp. Qed. Lemma preservation_finite : forall {result} (c : cmd result) h c' h' bound, step (h, c) (h', c') -> (forall a, a >= bound -> h $? a = None) -> exists bound', forall a, a >= bound' -> h' $? a = None. Proof. induct 1; simplify; eauto. exists bound; simp. cases (a ==n a0); simp. rewrite H0 in H; equality. auto. exists (max bound (a + numWords)); simp. rewrite initialize_fresh; auto. exists bound; simp. eauto using deallocate_None. Qed. Hint Constructors step. Lemma progress : forall {result} (c : cmd result) P Q, hoare_triple P c Q -> forall h h1 h2, split h h1 h2 -> disjoint h1 h2 -> P h1 -> (exists bound, forall a, a >= bound -> h $? a = None) -> (exists r, c = Return r) \/ (exists h' c', step (h, c) (h', c')). Proof. induct 1; simp; repeat match goal with | [ H : forall _ h1 _, _ -> _ -> ?P h1 -> _, H' : ?P _ |- _ ] => eapply H in H'; clear H; try eassumption; simp end; eauto. invert H1. right; exists h, (Return x0). constructor. unfold split, ptsto, heap1 in *; simp. unfold star in H2; simp. unfold split in H; simp. rewrite lookup_join1; simp. rewrite lookup_join1; simp. sets. eapply lookup_Some_dom. rewrite lookup_join1; simp. sets. right; exists (h $+ (a, v')), (Return tt). unfold split, exis, ptsto, heap1 in *; simp. econstructor. rewrite lookup_join1; simp. sets. unfold lift in H1; simp. apply split_empty_fwd' in H; simp. right; exists (initialize h2 x numWords), (Return x). constructor. simp; auto. 
unfold star in H2; simp. apply IHhoare_triple with (h := h) (h1 := x0) (h2 := h2 $++ x1); eauto. unfold split in *; simp. rewrite (@join_comm _ _ h2 x1). apply join_assoc. sets. cases (h2 $? x2). cases (x1 $? x2). specialize (H2 x2). specialize (H1 x2). rewrite lookup_join2 in H1. apply H1; equality. unfold not. simplify. cases (x0 $? x2). exfalso; apply H2; equality. apply lookup_None_dom in Heq1; propositional. apply lookup_None_dom in Heq0; propositional. apply lookup_None_dom in Heq; propositional. unfold split, disjoint in *; simp. cases (h2 $? a). rewrite lookup_join1 in H8. apply H1 with (a := a); auto. rewrite lookup_join1; auto. cases (x0 $? a); try equality. eauto using lookup_Some_dom. eauto using lookup_Some_dom. rewrite lookup_join2 in H8. eapply H2; eassumption. eauto using lookup_None_dom. Qed. Lemma hoare_triple_sound' : forall P {result} (c : cmd result) Q, hoare_triple P c Q -> P $0 -> invariantFor (trsys_of $0 c) (fun p => (exists bound, forall a, a >= bound -> fst p $? a = None) /\ hoare_triple (fun h' => h' = fst p) (snd p) Q). Proof. simplify. apply invariant_induction; simplify. propositional; subst; simplify. exists 0; simp. eapply HtWeaken; eauto. unfold himp; simplify; equality. cases s. cases s'. simp. eauto using preservation_finite. eauto using preservation. Qed. Theorem hoare_triple_sound : forall P {result} (c : cmd result) Q, hoare_triple P c Q -> P $0 -> invariantFor (trsys_of $0 c) (fun p => (exists r, snd p = Return r) \/ (exists p', step p p')). Proof. simplify. eapply invariant_weaken. eapply hoare_triple_sound'; eauto. simp. specialize (progress H3); simplify. specialize (H2 (fst s) (fst s) $0). assert (split (fst s) (fst s) $0) by auto. assert (disjoint (fst s) $0) by auto. assert (exists bound, forall a, a >= bound -> fst s $? a = None) by eauto. cases s; simp; eauto. Qed. (* Fancy theorem to help us rewrite within preconditions and postconditions *) Instance hoare_triple_morphism : forall A, Proper (heq ==> eq ==> (eq ==> heq) ==> iff) (@hoare_triple A). Proof. Transparent himp. repeat (hnf; intros). unfold pointwise_relation in *; intuition subst. eapply HtConsequence; eauto. rewrite H; reflexivity. intros. hnf in H1. specialize (H1 r _ eq_refl). rewrite H1; reflexivity. eapply HtConsequence; eauto. rewrite H; reflexivity. intros. hnf in H1. specialize (H1 r _ eq_refl). rewrite H1; reflexivity. Opaque himp. Qed. (** * Examples *) Opaque heq himp lift star exis ptsto. (* Here comes some automation that we won't explain in detail, instead opting to * use examples. *) Theorem use_lemma : forall result P' (c : cmd result) (Q : result -> hprop) P R, hoare_triple P' c Q -> P ===> P' * R -> hoare_triple P c (fun r => Q r * R)%sep. Proof. simp. eapply HtWeaken. eapply HtFrame. eassumption. eauto. Qed. Theorem HtRead' : forall a v, hoare_triple (a |-> v)%sep (Read a) (fun r => a |-> v * [| r = v |])%sep. Proof. simp. apply HtWeaken with (exists r, a |-> r * [| r = v |])%sep. eapply HtStrengthen. apply HtRead. simp. cancel; auto. subst; cancel. cancel; auto. Qed. Theorem HtRead'' : forall p P R, P ===> (exists v, p |-> v * R v) -> hoare_triple P (Read p) (fun r => p |-> r * R r)%sep. Proof. simp. eapply HtWeaken. apply HtRead. assumption. Qed. Ltac basic := apply HtReturn' || eapply HtWrite || eapply HtAlloc || eapply HtFree. 
Ltac step0 := basic || eapply HtBind || (eapply use_lemma; [ basic | cancel; auto ]) || (eapply use_lemma; [ eapply HtRead' | solve [ cancel; auto ] ]) || (eapply HtRead''; solve [ cancel ]) || (eapply HtStrengthen; [ eapply use_lemma; [ basic | cancel; auto ] | ]) || (eapply HtConsequence; [ apply HtFail | .. ]). Ltac step := step0; simp. Ltac ht := simp; repeat step. Ltac conseq := simplify; eapply HtConsequence. Ltac use_IH H := conseq; [ apply H | .. ]; ht. Ltac loop_inv0 Inv := (eapply HtWeaken; [ apply HtLoop with (I := Inv) | .. ]) || (eapply HtConsequence; [ apply HtLoop with (I := Inv) | .. ]). Ltac loop_inv Inv := loop_inv0 Inv; ht. Ltac use H := (eapply use_lemma; [ eapply H | cancel; auto ]) || (eapply HtStrengthen; [ eapply use_lemma; [ eapply H | cancel; auto ] | ]). Ltac heq := intros; apply himp_heq; split. (* That's the end of the largely unexplained automation. Let's prove some * programs! *) (** ** Swapping with two pointers *) Definition swap p q := tmpp <- Read p; tmpq <- Read q; _ <- Write p tmpq; Write q tmpp. (* Looking at the precondition here, note how we no longer work with explicit * functions over heaps. All that is hidden within the assertion language. * Also note that the definition of [*] gives us nonaliasing of [p] and [q] for * free! *) Theorem swap_ok : forall p q a b, {{p |-> a * q |-> b}} swap p q {{_ ~> p |-> b * q |-> a}}. Proof. unfold swap. (* [simp] is our generic simplifier for this file. *) simp. (* We generally just keep calling [step] to advance forward by one atomic * statement. *) step. step. (* We do often want to use [simp] to clean up the goal after [step] infers an * intermediate assertion. *) simp. step. step. simp. step. step. simp. step. (* The [cancel] procedure repeatedly finds matching subformulas on the two * sides of [===>], removing them and recurring, possibly learning the values * of some unification variables each time. *) cancel. subst. cancel. Qed. Opaque swap. (* This command prevents later proofs from peeking inside the implementation of * [swap]. Instead, we only reason about it through [swap_ok]. *) (* Two swaps in a row provide a kind of rotation across three addresses. *) Definition rotate p q r := _ <- swap p q; swap q r. Theorem rotate_ok : forall p q r a b c, {{p |-> a * q |-> b * r |-> c}} rotate p q r {{_ ~> p |-> b * q |-> c * r |-> a}}. Proof. unfold rotate. simp. step. (* Now we invoke our earlier theorem by name. Note that its precondition only * matches a subset of our current precondition. The rest of state is left * alone, which we can prove "for free" by the frame rule. *) use swap_ok. simp. use swap_ok. simp. cancel. Qed. Opaque rotate. (** ** Initializing a fresh object *) Definition init := p <- Alloc 2; _ <- Write p 7; _ <- Write (p+1) 8; Return p. Theorem init_ok : {{emp}} init {{p ~> p |--> [7; 8]}}. Proof. unfold init. simp. step. step. simp. step. step. simp. step. step. simp. step. cancel. Qed. Opaque init. Theorem the_circle_of_life_ok : {{emp}} p <- init; Free p 2 {{_ ~> emp}}. Proof. step. use init_ok. simp. step. cancel. Qed. Theorem ultra_combo_ok : {{emp}} p <- init; _ <- swap p (p+1); Return p {{p ~> p |--> [8; 7]}}. Proof. step. use init_ok. simp. step. use swap_ok. simp. step. cancel. Qed. (** ** In-place reversal of a singly linked list *) (* Let's give a recursive definition of how a linked list should be laid out in * memory. 
*) Fixpoint linkedList (p : nat) (ls : list nat) := match ls with | nil => [| p = 0 |] (* An empty list is associated with a null pointer and no memory * contents. *) | x :: ls' => [| p <> 0 |] * exists p', p |--> [x; p'] * linkedList p' ls' (* A nonempty list is associated with a nonnull pointer and a two-cell * struct, which points to a further list. *) end%sep. (* The definition of [linkedList] is recursive in the list. Let's also prove * lemmas for simplifying [linkedList] invocations based on values of [p]. *) Theorem linkedList_null : forall ls, linkedList 0 ls === [| ls = nil |]. Proof. (* Tactic [heq] breaks an equivalence into two implications. *) heq; cases ls; cancel. Qed. Theorem linkedList_nonnull : forall p ls, p <> 0 -> linkedList p ls === exists x ls' p', [| ls = x :: ls' |] * p |--> [x; p'] * linkedList p' ls'. Proof. heq; cases ls; cancel; match goal with | [ H : _ = _ :: _ |- _ ] => invert H end; cancel. Qed. Hint Rewrite <- rev_alt. Hint Rewrite rev_involutive. (* Let's hide the definition of [linkedList], so that we *only* reason about it * via the two lemmas we just proved. *) Opaque linkedList. (* In-place linked-list reverse, the "hello world" of separation logic! *) Definition reverse p := pr <- for pr := (p, 0) loop let (p, r) := pr in if p ==n 0 then Return (Done pr) else tmp <- Read (p + 1); _ <- Write (p+1) r; Return (Again (tmp, p)) done; Return (snd pr). (* Helper function to peel away the [Done]/[Again] status of a [loop_outcome] *) Definition valueOf {A} (o : loop_outcome A) := match o with | Done v => v | Again v => v end. Theorem reverse_ok : forall p ls, {{linkedList p ls}} reverse p {{r ~> linkedList r (rev ls)}}. Proof. unfold reverse. simp. step. (* When we reach a loop, we give the invariant with a special tactic. *) loop_inv (fun o => exists ls1 ls2, [| ls = rev_append ls1 ls2 |] * linkedList (fst (valueOf o)) ls2 * linkedList (snd (valueOf o)) ls1 * [| match o with | Done (p, _) => p = 0 | _ => True end |])%sep. cases (a ==n 0); simp. step. cancel. step. (* We use [setoid_rewrite] for rewriting under binders ([exists], in this * case). Note that we also specify hypothesis [n] explicitly, since * [setoid_rewrite] isn't smart enough to infer parameters otherwise. *) setoid_rewrite (linkedList_nonnull _ n). step. simp. step. step. simp. step. setoid_rewrite (linkedList_nonnull _ n). cancel. simp. setoid_rewrite linkedList_null. cancel. equality. simp. step. cancel. simp. setoid_rewrite linkedList_null. cancel. simp. cancel. Qed. Opaque reverse. (* ** Calling [reverse] twice, to illustrate the *frame rule* *) Theorem reverse_two_ok : forall p1 ls1 p2 ls2, {{linkedList p1 ls1 * linkedList p2 ls2}} p1 <- reverse p1; p2 <- reverse p2; Return (p1, p2) {{ps ~> linkedList (fst ps) (rev ls1) * linkedList (snd ps) (rev ls2)}}. Proof. simp. step. use reverse_ok. simp. step. use reverse_ok. simp. step. cancel. Qed. (* Note that the intuitive correctness theorem would be *false* for lists * sharing any cells in common! The inherent disjointness of [*] saves us from * worrying about those cases. *) (* ** Computing the length of a linked list *) (* To state a good loop invariant, it will be helpful to define * *list segments* that end with some pointer beside null. *) Fixpoint linkedListSegment (p : nat) (ls : list nat) (q : nat) := match ls with | nil => [| p = q |] | x :: ls' => [| p <> 0 |] * exists p', p |-> x * (p+1) |-> p' * linkedListSegment p' ls' q end%sep. 
(* Next, two [linkedListSegment] lemmas analogous to those for [linkedList] * above *) Lemma linkedListSegment_empty : forall p ls, linkedList p ls ===> linkedList p ls * linkedListSegment p nil p. Proof. cancel. Qed. Lemma linkedListSegment_append : forall q r x ls p, q <> 0 -> linkedListSegment p ls q * q |-> x * (q+1) |-> r ===> linkedListSegment p (ls ++ x :: nil) r. Proof. induct ls; cancel; auto. subst; cancel. rewrite <- IHls; cancel; auto. Qed. (* One more [linkedList] lemma will be helpful. We'll re-reveal the predicate's * definition to prove the lemma. *) Transparent linkedList. Lemma linkedListSegment_null : forall ls p, linkedListSegment p ls 0 ===> linkedList p ls. Proof. induct ls; cancel; auto. Qed. Opaque linkedList linkedListSegment. (* A few algebraic properties of list operations: *) Hint Rewrite <- app_assoc. Hint Rewrite app_length app_nil_r. (* We tie a few of them together into this lemma. *) Lemma move_along : forall A (ls : list A) x2 x1 x0 x, ls = x2 ++ x1 -> x1 = x0 :: x -> ls = (x2 ++ [x0]) ++ x. Proof. simp. Qed. Hint Resolve move_along. Theorem length_ok : forall p ls, {{linkedList p ls}} q_len <- for q_len := (p, 0) loop let (q, len) := q_len in if q ==n 0 then Return (Done q_len) else tmp <- Read (q + 1); Return (Again (tmp, len+1)) done; Return (snd q_len) {{len ~> linkedList p ls * [| len = length ls |]}}. Proof. simp. step. loop_inv (fun o => exists ls1 ls2, [| ls = ls1 ++ ls2 |] * linkedListSegment p ls1 (fst (valueOf o)) * linkedList (fst (valueOf o)) ls2 * [| snd (valueOf o) = length ls1 |] * [| match o with | Done (q, _) => q = 0 /\ ls2 = nil | _ => True end |])%sep. cases (a ==n 0); simp. step. setoid_rewrite linkedList_null. cancel. simp. step. setoid_rewrite (linkedList_nonnull _ n). step. simp. step. cancel. eauto. simp. setoid_rewrite <- linkedListSegment_append. cancel. auto. simp. simp. rewrite linkedListSegment_empty. cancel. simp. step. cancel. simp. simp. rewrite linkedListSegment_null. rewrite linkedList_null. cancel. simp. Qed.
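The development above builds its assertion language from `lift`, `star` (separating conjunction over disjoint heap splits), and `ptsto`, and then verifies `swap`, `rotate`, `reverse`, and `length` against small-footprint specifications. As an informal cross-check — a rough executable model in Python, not part of the Coq proof, with `lift`/`ptsto`/`star` merely mirroring the Coq names — heaps can be modeled as dicts and `star` as a search over disjoint splits. The final assertions confirm that the precondition of `swap_ok` holds exactly on a two-cell heap and that `*` rules out aliasing of the two pointers, as claimed in the comments above.

```python
from itertools import chain, combinations

# Heaps are finite partial maps from addresses to values, modeled as dicts.
def lift(P):
    # [| P |]: the pure proposition holds and the heap is empty.
    return lambda h: P and h == {}

def ptsto(a, v):
    # a |-> v: exactly the singleton heap {a: v}.
    return lambda h: h == {a: v}

def star(p, q):
    # p * q: h splits into two *disjoint* pieces satisfying p and q respectively.
    def holds(h):
        keys = list(h)
        for picked in chain.from_iterable(combinations(keys, n) for n in range(len(keys) + 1)):
            h1 = {k: h[k] for k in picked}
            h2 = {k: h[k] for k in h if k not in picked}
            if p(h1) and q(h2):
                return True
        return False
    return holds

# Precondition of swap_ok: p |-> a * q |-> b, on concrete addresses 10 and 11.
pre = star(ptsto(10, 7), ptsto(11, 8))
assert pre({10: 7, 11: 8})                              # the intended two-cell heap
assert not pre({10: 7})                                 # a missing cell admits no valid split
assert not star(ptsto(10, 7), ptsto(10, 7))({10: 7})    # * forbids aliasing
assert star(lift(True), ptsto(10, 7))({10: 7})          # emp acts as a unit for *
```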
% This is part of the TFTB Reference Manual.
% Copyright (C) 1996 CNRS (France) and Rice University (US).
% See the file refguide.tex for copying conditions.

\section*{Glossary and summary}

This section contains detailed descriptions of all the Time-Frequency
Toolbox functions. It begins with a glossary and a list of functions
grouped by subject area and continues with the reference entries in
alphabetical order. Information is also available through the online
help facility.

\vspace*{1cm}

\begin{center}
\begin{tabular}{|c|c|}
\hline
AF & Ambiguity function\\
AR & Auto-regressive (filter or model)\\
ASK & Amplitude shift keyed signal\\
BJD & Born-Jordan distribution\\
BPSK & Binary phase shift keyed signal\\
BUD & Butterworth distribution\\
CWD & Choi-Williams distribution\\
FM & Frequency modulation\\
FSK & Frequency shift keyed signal\\
GRD & Generalized rectangular distribution\\
HT & Hough transform\\
MHD & Margenau-Hill distribution \\
MHSD & Margenau-Hill-Spectrogram distribution \\
MMCE & Minimum mean cross-entropy\\
NAF & Narrow-band ambiguity function\\
PMHD & Pseudo Margenau-Hill distribution\\
PWVD & Pseudo Wigner-Ville distribution\\
QPSK & Quaternary phase shift keyed signal\\
RID & Reduced interference distribution\\
STFT & Short-time Fourier transform\\
TFR & Time-frequency representation\\
WAF & Wide-band ambiguity function\\
WVD & Wigner-Ville distribution\\
ZAM & Zhao-Atlas-Marks distribution\\
\hline
\end{tabular}
\end{center}
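The glossary above is mostly a table of acronyms for time-frequency representations. For readers who have not met these objects, the simplest entry (STFT, whose squared magnitude is a basic TFR) can be computed in a few lines; the sketch below is plain NumPy with arbitrary window/hop choices, not a function of the Time-Frequency Toolbox itself.

```python
import numpy as np

def stft(x, win_len=128, hop=32):
    """Naive short-time Fourier transform: Hann-windowed FFTs of a 1-D signal."""
    window = np.hanning(win_len)
    frames = [x[i:i + win_len] * window
              for i in range(0, len(x) - win_len + 1, hop)]
    return np.array([np.fft.rfft(f) for f in frames])

# A linear chirp whose instantaneous frequency sweeps from 0 to 0.25 cycles/sample.
n = np.arange(4096)
chirp = np.cos(2 * np.pi * (0.25 * n / (2 * len(n))) * n)
tfr = np.abs(stft(chirp)) ** 2      # a spectrogram: time frames x frequency bins
print(tfr.shape)                    # (125, 65)
```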
/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 */

#ifndef BOOST_UUID_SEED_RNG_DEPRECATED_HPP
#define BOOST_UUID_SEED_RNG_DEPRECATED_HPP

#include <boost/uuid/detail/seed_rng.hpp>

#if defined(__GNUC__) || defined(_MSC_VER)
#pragma message("This header is implementation detail and provided for backwards compatibility.")
#endif

#endif // BOOST_UUID_SEED_RNG_DEPRECATED_HPP
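This header is a compatibility shim: it forwards to the real implementation header and asks the compiler to emit a message wherever it is still included. The same deprecation pattern in Python, for comparison — the module names `old_location`/`new_location` are hypothetical stand-ins, not Boost or standard-library names — re-exports from the new home and warns on import:

```python
# old_location.py -- hypothetical deprecated module kept only for backwards compatibility.
import warnings

warnings.warn(
    "old_location is an implementation detail kept for backwards compatibility; "
    "import new_location instead.",
    DeprecationWarning,
    stacklevel=2,
)

# Re-export everything from the real implementation, mirroring the #include above.
from new_location import *  # noqa: F401,F403
```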
lemma continuous_on_compose[continuous_intros]: "continuous_on s f \<Longrightarrow> continuous_on (f ` s) g \<Longrightarrow> continuous_on s (g \<circ> f)"
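For context, the standard point-set argument behind this composition rule (an informal reminder, not the Isabelle proof): for any open $V$,
\[
  (g \circ f)^{-1}(V) \cap s \;=\; f^{-1}\bigl(g^{-1}(V) \cap f(s)\bigr) \cap s .
\]
Continuity of $g$ on $f(s)$ gives an open $U$ with $g^{-1}(V) \cap f(s) = U \cap f(s)$, so the set above equals $f^{-1}(U) \cap s$, which is open in $s$ by continuity of $f$ on $s$. Hence $g \circ f$ is continuous on $s$.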
[STATEMENT] lemma bound_on_all_plans_bounds_problem_plan_bound_: fixes P f PROB assumes "(\<forall>PROB' as s. finite PROB \<and> (P PROB') \<and> (s \<in> valid_states PROB') \<and> (as \<in> valid_plans PROB') \<longrightarrow> (\<exists>as'. (exec_plan s as = exec_plan s as') \<and> (subseq as' as) \<and> (length as' < f PROB') ) )" "(P PROB)" "finite PROB" shows "(problem_plan_bound PROB < f PROB)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. problem_plan_bound PROB < f PROB [PROOF STEP] unfolding problem_plan_bound_def MPLS_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (SUP (s, as)\<in>{(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}. Inf (PLS s as)) < f PROB [PROOF STEP] using assms bound_on_all_plans_bounds_problem_plan_bound_thesis' expanded_problem_plan_bound_thm_1 [PROOF STATE] proof (prove) using this: \<forall>PROB' as s. finite PROB \<and> P PROB' \<and> s \<in> valid_states PROB' \<and> as \<in> valid_plans PROB' \<longrightarrow> (\<exists>as'. exec_plan s as = exec_plan s as' \<and> subseq as' as \<and> length as' < f PROB') P PROB finite PROB \<lbrakk>finite ?PROB; \<forall>as s. s \<in> valid_states ?PROB \<and> as \<in> valid_plans ?PROB \<longrightarrow> (\<exists>as'. exec_plan s as = exec_plan s as' \<and> subseq as' as \<and> length as' < ?k)\<rbrakk> \<Longrightarrow> problem_plan_bound ?PROB < ?k problem_plan_bound ?PROB = (SUP (s, as)\<in>{(s, as). s \<in> valid_states ?PROB \<and> as \<in> valid_plans ?PROB}. Inf (PLS s as)) goal (1 subgoal): 1. (SUP (s, as)\<in>{(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}. Inf (PLS s as)) < f PROB [PROOF STEP] by metis
function MALA(likelihood_and_derivative_calculator, protein_at_observations, measurement_variance, number_of_samples, initial_position, step_size, proposal_covariance=I, thinning_rate=1, known_parameter_dict=Dict()) mean_protein = mean(protein_at_observations[:,2]) # initialise the covariance proposal matrix number_of_parameters = length(initial_position) - length(keys(known_parameter_dict)) known_parameter_indices = [Int(known_parameter_dict[i][1]) for i in keys(known_parameter_dict)] known_parameter_values = [known_parameter_dict[i][2] for i in keys(known_parameter_dict)] unknown_parameter_indices = [i for i in 1:length(initial_position) if i ∉ known_parameter_indices] # check if default value is used, and set to q x q identity if proposal_covariance == I identity = true else identity = false proposal_cholesky = cholesky(proposal_covariance + 1e-8*I).L end proposal_covariance_inverse = inv(proposal_covariance) # initialise samples matrix and acceptance ratio counter accepted_moves = 0 mcmc_samples = zeros(number_of_samples,number_of_parameters) mcmc_samples[1,:] = initial_position[unknown_parameter_indices] number_of_iterations = number_of_samples*thinning_rate # set LAP parameters k = 1 c0 = 1.0 c1 = log(10)/log(number_of_samples/5) # initial markov chain current_position = copy(initial_position) current_log_likelihood, current_log_likelihood_gradient = likelihood_and_derivative_calculator(protein_at_observations, current_position, mean_protein, measurement_variance) for iteration_index in 2:number_of_iterations # progress measure if iteration_index%(number_of_iterations//10)==0 println(string("Progress: ",100*(iteration_index/number_of_iterations),'%')) end #if proposal = zeros(length(initial_position)) if identity proposal[unknown_parameter_indices] .= ( current_position[unknown_parameter_indices] .+ step_size.*current_log_likelihood_gradient[unknown_parameter_indices]./2 + sqrt(step_size)*randn(number_of_parameters) ) else proposal[unknown_parameter_indices] .= ( current_position[unknown_parameter_indices] .+ step_size.*proposal_covariance*(current_log_likelihood_gradient[unknown_parameter_indices]./2) .+ sqrt(step_size).*proposal_cholesky*randn(number_of_parameters) ) end # compute transition probabilities for acceptance step # fix known parameters # import pdb; pdb.set_trace() if length(known_parameter_dict) > 0 proposal[known_parameter_indices] .= copy(known_parameter_values) end #if proposal_log_likelihood, proposal_log_likelihood_gradient = likelihood_and_derivative_calculator(protein_at_observations, proposal, mean_protein, measurement_variance) # if any of the parameters were negative we get -inf for the log likelihood if proposal_log_likelihood == -Inf if iteration_index%thinning_rate == 0 mcmc_samples[Int64(iteration_index/thinning_rate),:] .= current_position[unknown_parameter_indices] end # LAP stuff also needed here acceptance_probability = 0 if iteration_index%k == 0 && iteration_index > 1 gamma_1 = 1/(iteration_index^c1) gamma_2 = c0*gamma_1 log_step_size_squared = log(step_size^2) + gamma_2*(acceptance_probability - 0.574) step_size = sqrt(exp(log_step_size_squared)) end continue end forward_helper_variable = ( proposal[unknown_parameter_indices] .- current_position[unknown_parameter_indices] .- step_size.*proposal_covariance*(current_log_likelihood_gradient[unknown_parameter_indices]./2) ) backward_helper_variable = ( current_position[unknown_parameter_indices] .- proposal[unknown_parameter_indices] .- 
step_size.*proposal_covariance*(proposal_log_likelihood_gradient[unknown_parameter_indices]./2) ) transition_kernel_pdf_forward = (-transpose(forward_helper_variable)*proposal_covariance_inverse*forward_helper_variable) /(2*step_size) transition_kernel_pdf_backward = (-transpose(backward_helper_variable)*proposal_covariance_inverse*backward_helper_variable)/(2*step_size) # accept-reject step acceptance_probability = min(1,exp(proposal_log_likelihood - transition_kernel_pdf_forward - current_log_likelihood + transition_kernel_pdf_backward)) # print(acceptance_probability) if rand() < acceptance_probability current_position .= proposal current_log_likelihood = proposal_log_likelihood current_log_likelihood_gradient .= proposal_log_likelihood_gradient accepted_moves += 1 end if iteration_index%thinning_rate == 0 mcmc_samples[Int64(iteration_index/thinning_rate),:] .= current_position[unknown_parameter_indices] end # LAP stuff if iteration_index%k == 0 && iteration_index > 1 gamma_1 = 1/(iteration_index^c1) gamma_2 = c0*gamma_1 log_step_size_squared = log(step_size^2) + gamma_2*(acceptance_probability - 0.574) step_size = sqrt(exp(log_step_size_squared)) end end # for println(string("Acceptance ratio: ",accepted_moves/number_of_iterations)) return mcmc_samples end """ A function which gives a (hopefully) decent MALA output for a given dataset with known or unknown parameters. If a previous output already exists, this will be used to create a proposal covariance matrix, otherwise one will be constructed with a two step warm-up process. """ function run_mala_for_dataset(data_filename, protein_at_observations, measurement_variance, number_of_parameters, known_parameter_dict, step_size = 1, number_of_chains = 8, number_of_samples = 80000) # make sure all data starts from time "zero" for i in 1:size(protein_at_observations,1) protein_at_observations[i,1] -= protein_at_observations[1,1] end mean_protein = mean(protein_at_observations[:,2]) loading_path = joinpath(dirname(pathof(DelayedKalmanFilter)),"../test/data") saving_path = joinpath(dirname(pathof(DelayedKalmanFilter)),"../test/output") # if we already have mcmc samples, we can use them to construct a covariance matrix to directly sample if ispath(joinpath(saving_path,string("final_parallel_mala_output_",data_filename))) println("Posterior samples already exist, sampling directly without warm up...") mala_output = load(joinpath(saving_path,string("final_parallel_mala_output_",data_filename)),"mcmc_samples") previous_number_of_chains = size(mala_output,1) previous_number_of_samples = size(mala_output,2) previous_number_of_parameters = size(mala_output,3) # construct proposal covariance matrix new_number_of_samples = previous_number_of_samples - Int64(floor(previous_number_of_samples/2)) burn_in = Int64(floor(previous_number_of_samples/2))+1 samples_with_burn_in = reshape(mala_output[:,burn_in:end,:], (new_number_of_samples*previous_number_of_chains,previous_number_of_parameters)) proposal_covariance = cov(samples_with_burn_in) # start from mean states = zeros((number_of_chains,7)) states[:,[3,4]] = [log(log(2)/30),log(log(2)/90)] states[:,[1,2,5,6,7]] = mean(samples_with_burn_in,dims=1) # turn into array of arrays initial_states = [states[i,:] for i in 1:size(states,1)] # pool_of_processes = mp_pool.ThreadPool(processes = number_of_chains) # process_results = [ pool_of_processes.apply_async(MALA, # args=(protein_at_observations, # measurement_variance, # number_of_samples, # initial_state, # step_size, # proposal_covariance, # 1, # 
known_parameters)) # for initial_state in initial_states ] # ## Let the pool know that these are all so that the pool will exit afterwards # # this is necessary to prevent memory overflows. # pool_of_processes.close() # # array_of_chains = zeros(number_of_chains,number_of_samples,number_of_parameters) # for chain_index, process_result in enumerate(process_results): # this_chain = process_result.get() # array_of_chains[chain_index,:,:] = this_chain # pool_of_processes.join() array_of_chains = reshape(MALA(log_likelihood_and_derivative_with_prior_and_transformation, protein_at_observations, measurement_variance, number_of_samples, initial_states[1], step_size, proposal_covariance, 1, known_parameter_dict), (1,number_of_samples,number_of_parameters)) save(joinpath(saving_path,string("final_parallel_mala_output_",data_filename)),"mcmc_samples",array_of_chains) else # warm up chain println(string("New data set, initial warm up with ",string(Int64(floor(number_of_samples*0.3)))," samples...")) # Initialise by Latin Hypercube sampling println("Latin Hypercube sampling initial positions...") plan, _ = LHCoptim(20,number_of_parameters,1000) scaled_plan = scaleLHC(plan,[(minimum(protein_at_observations[:,2])/2,2*mean(protein_at_observations[:,2])), (2.5,5.5), (log(0.1),log(100.)), (log(0.1),log(35.)), (5.,35.)]) # sample subset - one for each chain sampling_indices = sample(1:20,number_of_chains;replace=false) scaled_plan = scaled_plan[sampling_indices,:] states = zeros(number_of_chains,7) if number_of_chains > 1 states[:,[3,4]] .= [log(log(2)/30),log(log(2)/90)] else states[:,[3,4]] = [log(log(2)/30),log(log(2)/90)] end states[:,[1,2,5,6,7]] .= scaled_plan # turn into array of arrays initial_states = [states[i,:] for i in 1:size(states,1)] println(string("Warming up with ",string(Int64(0.3*number_of_samples))," samples...")) initial_burnin_number_of_samples = Int64(0.3*number_of_samples) # pool_of_processes = mp_pool.ThreadPool(processes = number_of_chains) # process_results = [ pool_of_processes.apply_async(hes_inference.kalman_mala, # args=(protein_at_observations, # measurement_variance, # initial_burnin_number_of_samples, # initial_state, # step_size, # np.power(np.diag([2*mean_protein,4,9,8,39]),2),# initial variances are width of prior squared # 1, # thinning rate # known_parameters)) # for initial_state in initial_states ] # ## Let the pool know that these are all so that the pool will exit afterwards # # this is necessary to prevent memory overflows. 
# pool_of_processes.close() # # array_of_chains = zeros(number_of_chains,initial_burnin_number_of_samples,number_of_parameters) # for chain_index, process_result in enumerate(process_results): # this_chain = process_result.get() # array_of_chains[chain_index,:,:] = this_chain # pool_of_processes.join() proposal_covariance = diagm([2*mean_protein,4,9,8,39]).^2 array_of_chains = reshape(MALA(log_likelihood_and_derivative_with_prior_and_transformation, protein_at_observations, measurement_variance, initial_burnin_number_of_samples, initial_states[1], step_size, proposal_covariance, 1, known_parameter_dict), (1,initial_burnin_number_of_samples,number_of_parameters)) save(joinpath(saving_path,string("first_parallel_mala_output_",data_filename)),"mcmc_samples",array_of_chains) println(string("Second warm up with ",string(Int64(number_of_samples*0.7))," samples...")) second_burnin_number_of_samples = Int64(0.7*number_of_samples) # construct proposal covariance matrix new_number_of_samples = initial_burnin_number_of_samples - Int64(floor(initial_burnin_number_of_samples/2)) burn_in = Int64(floor(initial_burnin_number_of_samples/2))+1 samples_with_burn_in = reshape(array_of_chains[:,burn_in:end,:], (new_number_of_samples*number_of_chains,number_of_parameters)) proposal_covariance = cov(samples_with_burn_in) # make new initial states println("Latin Hypercube sampling initial positions...") plan, _ = LHCoptim(20,number_of_parameters,1000) scaled_plan = scaleLHC(plan,[(minimum(protein_at_observations[:,2])/2,2*mean(protein_at_observations[:,2])), (2.5,5.5), (log(0.1),log(100.)), (log(0.1),log(35.)), (5.,35.)]) # sample subset - one for each chain sampling_indices = sample(1:20,number_of_chains;replace=false) scaled_plan = scaled_plan[sampling_indices,:] states = zeros(number_of_chains,7) if number_of_chains > 1 states[:,[3,4]] .= [log(log(2)/30),log(log(2)/90)] else states[:,[3,4]] = [log(log(2)/30),log(log(2)/90)] end states[:,[1,2,5,6,7]] .= scaled_plan # turn into array of arrays initial_states = [states[i,:] for i in 1:size(states,1)] # pool_of_processes = mp_pool.ThreadPool(processes = number_of_chains) # process_results = [ pool_of_processes.apply_async(hes_inference.kalman_mala, # args=(protein_at_observations, # measurement_variance, # second_burnin_number_of_samples, # initial_state, # step_size, # proposal_covariance, # 1, # thinning rate # known_parameters)) # for initial_state in initial_states ] # ## Let the pool know that these are all finished so that the pool will exit afterwards # # this is necessary to prevent memory overflows. 
# pool_of_processes.close() # # array_of_chains = zeros(number_of_chains,second_burnin_number_of_samples,number_of_parameters) # for chain_index, process_result in enumerate(process_results): # this_chain = process_result.get() # array_of_chains[chain_index,:,:] = this_chain # pool_of_processes.join() array_of_chains = reshape(MALA(log_likelihood_and_derivative_with_prior_and_transformation, protein_at_observations, measurement_variance, second_burnin_number_of_samples, initial_states[1], step_size, proposal_covariance, 1, known_parameter_dict), (1,second_burnin_number_of_samples,number_of_parameters)) save(joinpath(saving_path,string("second_parallel_mala_output_",data_filename)),"mcmc_samples",array_of_chains) # sample directly println("Now sampling directly...") new_number_of_samples = second_burnin_number_of_samples - Int64(floor(second_burnin_number_of_samples/2)) burn_in = Int64(floor(second_burnin_number_of_samples/2))+1 samples_with_burn_in = reshape(array_of_chains[:,burn_in:end,:], (new_number_of_samples*number_of_chains,number_of_parameters)) proposal_covariance = cov(samples_with_burn_in) # make new initial states println("Latin Hypercube sampling initial positions...") plan, _ = LHCoptim(20,number_of_parameters,1000) scaled_plan = scaleLHC(plan,[(minimum(protein_at_observations[:,2])/2,2*mean(protein_at_observations[:,2])), (2.5,5.5), (log(0.1),log(100.)), (log(0.1),log(35.)), (5.,35.)]) # sample subset - one for each chain sampling_indices = sample(1:20,number_of_chains;replace=false) scaled_plan = scaled_plan[sampling_indices,:] states = zeros(number_of_chains,7) if number_of_chains > 1 states[:,[3,4]] .= [log(log(2)/30),log(log(2)/90)] else states[:,[3,4]] = [log(log(2)/30),log(log(2)/90)] end states[:,[1,2,5,6,7]] .= scaled_plan # turn into array of arrays initial_states = [states[i,:] for i in 1:size(states,1)] # pool_of_processes = mp_pool.ThreadPool(processes = number_of_chains) # process_results = [ pool_of_processes.apply_async(hes_inference.kalman_mala, # args=(protein_at_observations, # measurement_variance, # number_of_samples, # initial_state, # step_size, # proposal_covariance, # 1, # thinning rate # known_parameters)) # for initial_state in initial_states ] # ## Let the pool know that these are all finished so that the pool will exit afterwards # # this is necessary to prevent memory overflows. # pool_of_processes.close() # # array_of_chains = np.zeros((number_of_chains,number_of_samples,number_of_parameters)) # for chain_index, process_result in enumerate(process_results): # this_chain = process_result.get() # array_of_chains[chain_index,:,:] = this_chain # pool_of_processes.join() array_of_chains = reshape(MALA(log_likelihood_and_derivative_with_prior_and_transformation, protein_at_observations, measurement_variance, number_of_samples, initial_states[1], step_size, proposal_covariance, 1, known_parameter_dict), (1,number_of_samples,number_of_parameters)) save(joinpath(saving_path,string("final_parallel_mala_output_",data_filename)),"mcmc_samples",array_of_chains) end #if-else end
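The sampler above is MALA with a covariance-preconditioned Langevin proposal, an explicit forward/backward transition-kernel correction in the acceptance ratio, and step-size adaptation toward the 0.574 acceptance target. Stripped of the Kalman-filter likelihood, the preconditioning, and the adaptation, the core loop reduces to the NumPy sketch below; the standard-normal target is chosen purely for illustration and is not the delayed-protein likelihood used above.

```python
import numpy as np

def mala(log_density_and_grad, x0, step_size, n_samples, seed=0):
    """Minimal Metropolis-adjusted Langevin algorithm (identity preconditioner)."""
    rng = np.random.default_rng(seed)
    x = np.asarray(x0, dtype=float)
    logp, grad = log_density_and_grad(x)
    samples = np.empty((n_samples, x.size))
    accepted = 0
    for i in range(n_samples):
        # Langevin proposal: half a gradient step plus Gaussian noise.
        prop = x + 0.5 * step_size * grad + np.sqrt(step_size) * rng.standard_normal(x.size)
        prop_logp, prop_grad = log_density_and_grad(prop)
        # The proposal is asymmetric, so q(x'|x) and q(x|x') enter the acceptance ratio.
        fwd = -np.sum((prop - x - 0.5 * step_size * grad) ** 2) / (2 * step_size)
        bwd = -np.sum((x - prop - 0.5 * step_size * prop_grad) ** 2) / (2 * step_size)
        log_alpha = prop_logp + bwd - logp - fwd
        if np.log(rng.uniform()) < log_alpha:
            x, logp, grad = prop, prop_logp, prop_grad
            accepted += 1
        samples[i] = x
    print(f"acceptance ratio: {accepted / n_samples:.2f}")
    return samples

# Toy target: standard bivariate normal, log p(x) = -||x||^2 / 2 up to a constant.
target = lambda x: (-0.5 * np.dot(x, x), -x)
draws = mala(target, x0=np.zeros(2), step_size=0.5, n_samples=5000)
print(draws.mean(axis=0), draws.var(axis=0))   # should land near 0 and 1
```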
function [results] = computeQualityMetrics(image)

    % Anisotropy Test
    [gray,rgb] = biqaa.blindimagequality(image,8,6,0,'degree');
    results.biqaa_gray = gray;
    results.biqaa_rgb = rgb;

    % BIQI Test
    results.biqi = biqi.biqi(image);

    % Bliinds2 Test
    results.bliinds = bliinds2.bliinds2_score(image);

    % IQVG Test
    addpath('/Users/dsoellinger/Documents/git/uni/Matlab Toolbox/cjlin1_svnlib/libsvm/matlab');
    results.iqvg = iqvg.IQVG(image);
    rmpath('/Users/dsoellinger/Documents/git/uni/Matlab Toolbox/cjlin1_svnlib/libsvm/matlab');

    % NIQE Test
    results.niqe = niqe.niqe(image);

    % DIVINE Test
    addpath('/Users/dsoellinger/Documents/git/uni/Matlab Toolbox/gregfreeman_libsvm/libsvm/matlab');
    results.divine = divine.divine(image);
    rmpath('/Users/dsoellinger/Documents/git/uni/Matlab Toolbox/gregfreeman_libsvm/libsvm/matlab');

    % BRISQUE Test
    % Requires libsvm 3.22. Make sure that it is installed.
    results.brisque = brisque.brisquescore(image);

end
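The function above simply fans one image out to several third-party no-reference quality estimators and collects the scores in a struct. The Python sketch below shows the same aggregation pattern; the two example metrics (variance-of-Laplacian sharpness and RMS contrast) are generic stand-ins and are not reimplementations of BIQAA, BIQI, BLIINDS2, IQVG, NIQE, DIVINE, or BRISQUE.

```python
import numpy as np
from scipy.ndimage import laplace

def sharpness(gray):
    # Variance of the Laplacian: a common focus/sharpness proxy.
    return float(laplace(gray.astype(float)).var())

def rms_contrast(gray):
    g = gray.astype(float)
    return float(g.std() / (g.mean() + 1e-12))

METRICS = {"sharpness": sharpness, "rms_contrast": rms_contrast}

def compute_quality_metrics(gray_image):
    """Run every registered no-reference metric on one grayscale image."""
    return {name: fn(gray_image) for name, fn in METRICS.items()}

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64))
    print(compute_quality_metrics(img))
```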
{-# LANGUAGE CPP #-} {-# LANGUAGE TypeFamilies, TypeOperators #-} {-# LANGUAGE FlexibleInstances, EmptyCase, LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} -- experiment {-# LANGUAGE MagicHash #-} {-# LANGUAGE TypeInType #-} -- -- Experiment -- {-# LANGUAGE MagicHash #-} {-# OPTIONS_GHC -Wall #-} -- {-# OPTIONS_GHC -fno-warn-unused-imports #-} -- TEMP -- {-# OPTIONS_GHC -fno-warn-unused-binds #-} -- TEMP ---------------------------------------------------------------------- -- | -- Module : ConCat.Rep -- Copyright : (c) 2016 Conal Elliott -- -- Maintainer : [email protected] -- Stability : experimental -- -- Convert to and from standard representations. -- TODO: Can I replace HasRep with Generic or Newtype? ---------------------------------------------------------------------- module ConCat.Rep (HasRep(..), inAbst,inAbst2, inAbstF1, inRepr,inRepr2) where import Data.Monoid -- import Data.Newtypes.PrettyDouble import Control.Applicative (WrappedMonad(..)) import qualified GHC.Generics as G import Data.Complex (Complex(..)) -- import GHC.TypeLits (KnownNat) import Data.Proxy import Data.Functor.Identity (Identity(..)) import Control.Monad.Trans.Reader (ReaderT(..)) import Control.Monad.Trans.Writer (WriterT(..)) import Control.Monad.Trans.State (StateT(..)) -- import Data.Finite (Finite,finite,getFinite) -- import Data.Finite.Internal (Finite(..)) -- import Data.Void (Void) -- TODO: more -- import ConCat.Complex -- import GHC.Types (TYPE) -- import GHC.Exts (Int(..),Int#) -- TODO: Eliminate most of the following when I drop these types. import ConCat.Misc ((:*),(:+),Parity(..),(<~),bottom) -- import TypeUnary.TyNat (Z,S) -- import TypeUnary.Nat (Nat(..),IsNat(..)) -- import TypeUnary.Vec (Vec(..)) -- | Convert to and from standard representations. Used for transforming case -- expression scrutinees and constructor applications. The 'repr' method should -- convert to a standard representation (unit, products, sums), or closer to -- such a representation, via another type with a 'HasRep' instance. The 'abst' -- method should reveal a constructor so that we can perform the -- case-of-known-constructor transformation. -- It is very important to give @INLINE@ pragmas for 'repr' and 'abst' definitions. class HasRep a where type Rep a repr :: a -> Rep a abst :: Rep a -> a -- -- Identity as @'abst' . 'repr'@. -- abstRepr :: HasRep a => a -> a -- abstRepr = abst . repr #define INLINES {-# INLINE repr #-};{-# INLINE abst #-} instance HasRep (Proxy a) where type Rep (Proxy a) = () repr Proxy = () abst () = Proxy INLINES instance HasRep (a,b,c) where type Rep (a,b,c) = ((a,b),c) repr (a,b,c) = ((a,b),c) abst ((a,b),c) = (a,b,c) INLINES instance HasRep (a,b,c,d) where type Rep (a,b,c,d) = ((a,b),(c,d)) repr (a,b,c,d) = ((a,b),(c,d)) abst ((a,b),(c,d)) = (a,b,c,d) INLINES instance HasRep (a,b,c,d,e) where type Rep (a,b,c,d,e) = ((a,b,c,d),e) repr (a,b,c,d,e) = ((a,b,c,d),e) abst ((a,b,c,d),e) = (a,b,c,d,e) INLINES instance HasRep (a,b,c,d,e,f) where type Rep (a,b,c,d,e,f) = ((a,b,c,d),(e,f)) repr (a,b,c,d,e,f) = ((a,b,c,d),(e,f)) abst ((a,b,c,d),(e,f)) = (a,b,c,d,e,f) INLINES instance HasRep (a,b,c,d,e,f,g) where type Rep (a,b,c,d,e,f,g) = ((a,b,c,d),(e,f,g)) repr (a,b,c,d,e,f,g) = ((a,b,c,d),(e,f,g)) abst ((a,b,c,d),(e,f,g)) = (a,b,c,d,e,f,g) INLINES instance HasRep (a,b,c,d,e,f,g,h) where type Rep (a,b,c,d,e,f,g,h) = ((a,b,c,d),(e,f,g,h)) repr (a,b,c,d,e,f,g,h) = ((a,b,c,d),(e,f,g,h)) abst ((a,b,c,d),(e,f,g,h)) = (a,b,c,d,e,f,g,h) INLINES #if 1 -- I'm now synthesizing HasRep instances for newtypes. 
-- Oh! I still need support for explicit uses. #define WrapRep(abstT,reprT,con) \ instance HasRep (abstT) where { type Rep (abstT) = reprT; repr (con a) = a ; abst a = con a } WrapRep(Sum a,a,Sum) -- WrapRep(PrettyDouble,Double,PrettyDouble) WrapRep(Product a,a,Product) WrapRep(All,Bool,All) WrapRep(Any,Bool,Any) WrapRep(Dual a,a,Dual) WrapRep(Endo a,a->a,Endo) WrapRep(WrappedMonad m a,m a,WrapMonad) WrapRep(Identity a,a,Identity) WrapRep(ReaderT e m a, e -> m a, ReaderT) WrapRep(WriterT w m a, m (a,w), WriterT) WrapRep(StateT s m a, s -> m (a,s), StateT) WrapRep(Parity,Bool,Parity) -- instance KnownNat n => HasRep (Finite n) where -- type Rep (Finite n) = Integer -- -- abst = finite -- -- repr = getFinite -- abst n = Finite n -- repr (Finite n) = n -- instance KnownNat n => HasRep (Finite n) where -- type Rep (Finite n) = Int -- abst n = Finite (fromIntegral n) -- repr (Finite n) = fromInteger n -- Since Finite is a newtype, the HasRep instance doesn't come into play. #endif -- TODO: Generate these dictionaries on the fly during compilation, so we won't -- have to list them here. -- Experimental treatment of Maybe instance HasRep (Maybe a) where type Rep (Maybe a) = Bool :* a repr (Just a) = (True,a) repr Nothing = (False, bottom) abst (True,a ) = Just a abst (False,_) = Nothing INLINES -- TODO: LambdaCCC.Prim has an BottomP primitive. If the error ever occurs, -- replace with ErrorP (taking a string argument) and tweak the reification. -- Generalize Maybe to sums: -- I use this version for circuits. Restore it later, after I'm handing :+ in reify-rules. -- instance HasRep (a :+ b) where -- type Rep (a :+ b) = Bool :* (a :* b) -- repr (Left a) = (False,(a,undefined)) -- error "repr on Maybe: undefined value" -- repr (Right b) = (True,(undefined,b)) -- abst (False,(a,_)) = Left a -- abst (True ,(_,b)) = Right b -- -- TODO: Redefine `Maybe` representation as sum: -- -- type instance Rep (Maybe a) = Unit :+ a -- ... instance HasRep (Complex a) where type Rep (Complex a) = a :* a repr (a :+ a') = (a,a') abst (a,a') = (a :+ a') INLINES -- instance HasRep (G.V1 p) where -- type Rep (G.V1 p) = Void -- repr = \ case -- abst = \ case -- INLINES instance HasRep (G.U1 p) where type Rep (G.U1 p) = () repr G.U1 = () abst () = G.U1 INLINES instance HasRep (G.Par1 p) where type Rep (G.Par1 p) = p repr = G.unPar1 abst = G.Par1 INLINES instance HasRep (G.K1 i c p) where type Rep (G.K1 i c p) = c repr = G.unK1 abst = G.K1 INLINES instance HasRep (G.M1 i c f p) where type Rep (G.M1 i c f p) = f p repr = G.unM1 abst = G.M1 INLINES instance HasRep ((f G.:+: g) p) where type Rep ((f G.:+: g) p) = f p :+ g p repr (G.L1 x) = Left x repr (G.R1 x) = Right x abst (Left x) = G.L1 x abst (Right x) = G.R1 x INLINES instance HasRep ((f G.:*: g) p) where type Rep ((f G.:*: g) p) = f p :* g p repr (x G.:*: y) = (x,y) abst (x,y) = (x G.:*: y) INLINES instance HasRep ((g G.:.: f) p) where type Rep ((g G.:.: f) p) = g (f p) repr = G.unComp1 abst = G.Comp1 INLINES -- TODO: Can I *replace* HasRep with Generic? 
{-------------------------------------------------------------------- Utilities --------------------------------------------------------------------} inAbst :: (HasRep p, HasRep q) => (Rep p -> Rep q) -> (p -> q) inAbst = abst <~ repr {-# INLINE inAbst #-} inAbst2 :: (HasRep p, HasRep q, HasRep r) => (Rep p -> Rep q -> Rep r) -> (p -> q -> r) inAbst2 = inAbst <~ repr {-# INLINE inAbst2 #-} inAbstF1 :: (HasRep p, HasRep q, Functor f) => (f (Rep p) -> Rep q) -> (f p -> q) inAbstF1 = abst <~ fmap repr {-# INLINE inAbstF1 #-} inRepr :: (HasRep p, HasRep q) => (p -> q) -> (Rep p -> Rep q) inRepr = repr <~ abst {-# INLINE inRepr #-} inRepr2 :: (HasRep p, HasRep q, HasRep r) => (p -> q -> r) -> (Rep p -> Rep q -> Rep r) inRepr2 = inRepr <~ abst {-# INLINE inRepr2 #-} {-------------------------------------------------------------------- Unlifted types --------------------------------------------------------------------} #if 0 -- Represent unboxed types as boxed counterparts. instance HasRep Int# where type Rep Int# = Int abst (I# n) = n repr n = I# n INLINES #elif 0 -- Represent boxed types as unboxed counterparts. instance HasRep Int where type Rep Int = Int# abst n = I# n repr (I# n) = n INLINES #endif -- data Int = I# Int# -- Defined in ‘GHC.Types’ -- class HasRep (a :: TYPE r) where -- type Rep a :: TYPE s -- repr :: a -> Rep a -- abst :: Rep a -> a
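`HasRep` ties every type to a standard representation together with `repr`/`abst` conversions that should be mutually inverse; the instances above send `Complex a` to a pair and `Maybe a` to a `Bool`-tagged pair. The Python sketch below illustrates only that round-trip law — the names `*_to_rep`/`*_from_rep` are mine, not part of ConCat — and is no substitute for the compiler-plugin use of `abst`/`repr` described in the comments.

```python
# Rep (Complex a) = a :* a: a complex number is represented by its two components.
def complex_to_rep(z):
    return (z.real, z.imag)

def complex_from_rep(rep):
    re, im = rep
    return complex(re, im)

# Rep (Maybe a) = Bool :* a: a tag plus a (possibly meaningless) payload,
# mirroring the instance that pairs Nothing with a bottom/placeholder value.
def maybe_to_rep(m, placeholder=None):
    return (True, m) if m is not None else (False, placeholder)

def maybe_from_rep(rep):
    present, value = rep
    return value if present else None

# abst . repr should be the identity.
assert complex_from_rep(complex_to_rep(3 + 4j)) == 3 + 4j
assert maybe_from_rep(maybe_to_rep(42)) == 42
assert maybe_from_rep(maybe_to_rep(None)) is None
```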
/- Copyright (c) 2019 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: Simon Hudon -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.control.traversable.basic import Mathlib.tactic.simpa import Mathlib.PostPort namespace Mathlib /-- pretty print a `loc` -/ /-- shift `pos` `n` columns to the left -/ namespace tactic /-- parse structure instance of the shape `{ field1 := value1, .. , field2 := value2 }` -/ /-- pretty print structure instance -/ /-- Attribute containing a table that accumulates multiple `squeeze_simp` suggestions -/ /-- dummy declaration used as target of `squeeze_loc` attribute -/ def squeeze_loc_attr_carrier : Unit := Unit.unit /-- Format a list of arguments for use with `simp` and friends. This omits the list entirely if it is empty. -/ /-- Emit a suggestion to the user. If inside a `squeeze_scope` block, the suggestions emitted through `mk_suggestion` will be aggregated so that every tactic that makes a suggestion can consider multiple execution of the same invocation. If `at_pos` is true, make the suggestion at `p` instead of the current position. -/ /-- translate a `pexpr` into a `simp` configuration -/ /-- translate a `pexpr` into a `dsimp` configuration -/ /-- `same_result proof tac` runs tactic `tac` and checks if the proof produced by `tac` is equivalent to `proof`. -/ /-- `filter_simp_set g call_simp user_args simp_args` returns `args'` such that, when calling `call_simp tt /- only -/ args'` on the goal `g` (`g` is a meta var) we end up in the same state as if we had called `call_simp ff (user_args ++ simp_args)` and removing any one element of `args'` changes the resulting proof. -/ /-- make a `simp_arg_type` that references the name given as an argument -/ /-- tactic combinator to create a `simp`-like tactic that minimizes its argument list. * `slow`: adds all rfl-lemmas from the environment to the initial list (this is a slower but more accurate strategy) * `no_dflt`: did the user use the `only` keyword? * `args`: list of `simp` arguments * `tac`: how to invoke the underlying `simp` tactic -/ namespace interactive /-- Turn a `simp_arg_type` into a string. -/ /-- combinator meant to aggregate the suggestions issued by multiple calls of `squeeze_simp` (due, for instance, to `;`). Can be used as: ```lean example {α β} (xs ys : list α) (f : α → β) : (xs ++ ys.tail).map f = xs.map f ∧ (xs.tail.map f).length = xs.length := begin have : xs = ys, admit, squeeze_scope { split; squeeze_simp, -- `squeeze_simp` is run twice, the first one requires -- `list.map_append` and the second one `[list.length_map, list.length_tail]` -- prints only one message and combine the suggestions: -- > Try this: simp only [list.length_map, list.length_tail, list.map_append] squeeze_simp [this] -- `squeeze_simp` is run only once -- prints: -- > Try this: simp only [this] }, end ``` -/ /-- `squeeze_simp`, `squeeze_simpa` and `squeeze_dsimp` perform the same task with the difference that `squeeze_simp` relates to `simp` while `squeeze_simpa` relates to `simpa` and `squeeze_dsimp` relates to `dsimp`. The following applies to `squeeze_simp`, `squeeze_simpa` and `squeeze_dsimp`. `squeeze_simp` behaves like `simp` (including all its arguments) and prints a `simp only` invocation to skip the search through the `simp` lemma list. 
For instance, the following is easily solved with `simp`: ```lean example : 0 + 1 = 1 + 0 := by simp ``` To guide the proof search and speed it up, we may replace `simp` with `squeeze_simp`: ```lean example : 0 + 1 = 1 + 0 := by squeeze_simp -- prints: -- prints: -- Try this: simp only [add_zero, eq_self_iff_true, zero_add] -- Try this: simp only [add_zero, eq_self_iff_true, zero_add] ``` `squeeze_simp` suggests a replacement which we can use instead of `squeeze_simp`. ```lean example : 0 + 1 = 1 + 0 := by simp only [add_zero, eq_self_iff_true, zero_add] ``` `squeeze_simp only` prints nothing as it already skips the `simp` list. This tactic is useful for speeding up the compilation of a complete file. Steps: 1. search and replace ` simp` with ` squeeze_simp` (the space helps avoid the replacement of `simp` in `@[simp]`) throughout the file. 2. Starting at the beginning of the file, go to each printout in turn, copy the suggestion in place of `squeeze_simp`. 3. after all the suggestions were applied, search and replace `squeeze_simp` with `simp` to remove the occurrences of `squeeze_simp` that did not produce a suggestion. Known limitation(s): * in cases where `squeeze_simp` is used after a `;` (e.g. `cases x; squeeze_simp`), `squeeze_simp` will produce as many suggestions as the number of goals it is applied to. It is likely that none of the suggestion is a good replacement but they can all be combined by concatenating their list of lemmas. `squeeze_scope` can be used to combine the suggestions: `by squeeze_scope { cases x; squeeze_simp }` * sometimes, `simp` lemmas are also `_refl_lemma` and they can be used without appearing in the resulting proof. `squeeze_simp` won't know to try that lemma unless it is called as `squeeze_simp?` -/ /-- see `squeeze_simp` -/ /-- `squeeze_dsimp` behaves like `dsimp` (including all its arguments) and prints a `dsimp only` invocation to skip the search through the `simp` lemma list. See the doc string of `squeeze_simp` for examples. -/ end interactive end tactic end Mathlib
{-# OPTIONS --without-K #-} open import lib.Basics open import lib.Relation module lib.types.SetQuotient where module _ {i} {A : Type i} {j} where private data #SetQuotient-aux (R : Rel A j) : Type i where #q[_] : A → #SetQuotient-aux R data #SetQuotient (R : Rel A j) : Type i where #setquot : #SetQuotient-aux R → (Unit → Unit) → #SetQuotient R SetQuotient : (R : Rel A j) → Type i SetQuotient = #SetQuotient module _ {R : Rel A j} where q[_] : (a : A) → SetQuotient R q[ a ] = #setquot #q[ a ] _ postulate -- HIT quot-rel : {a₁ a₂ : A} → R a₁ a₂ → q[ a₁ ] == q[ a₂ ] postulate -- HIT SetQuotient-level : is-set (SetQuotient R) SetQuotient-is-set = SetQuotient-level module SetQuotElim {k} {P : SetQuotient R → Type k} (p : (x : SetQuotient R) → is-set (P x)) (q[_]* : (a : A) → P q[ a ]) (rel* : ∀ {a₁ a₂} (r : R a₁ a₂) → q[ a₁ ]* == q[ a₂ ]* [ P ↓ quot-rel r ]) where f : Π (SetQuotient R) P f = f-aux phantom phantom where f-aux : Phantom p → Phantom {A = ∀ {a₁ a₂} (r : R a₁ a₂) → _} rel* → Π (SetQuotient R) P f-aux phantom phantom (#setquot #q[ a ] _) = q[ a ]* postulate -- HIT quot-rel-β : ∀ {a₁ a₂} (r : R a₁ a₂) → apd f (quot-rel r) == rel* r open SetQuotElim public renaming (f to SetQuot-elim) module SetQuotRec {i} {A : Type i} {j} {R : Rel A j} {k} {B : Type k} (p : is-set B) (q[_]* : A → B) (rel* : ∀ {a₁ a₂} (r : R a₁ a₂) → q[ a₁ ]* == q[ a₂ ]*) where private module M = SetQuotElim (λ x → p) q[_]* (λ {a₁ a₂} r → ↓-cst-in (rel* r)) f : SetQuotient R → B f = M.f open SetQuotRec public renaming (f to SetQuot-rec)
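The module above postulates set quotients as a higher inductive type: `q[_]` injects elements, `quot-rel` identifies related ones, and `SetQuot-rec` states that any map into a set which respects the relation factors through the quotient. A concrete Python illustration of that recursion principle — integers presented as pairs of naturals with `(a , b) ~ (c , d)` iff `a + d == b + c`; the helper names are ad hoc, not an Agda API:

```python
def related(p, q):
    # (a, b) ~ (c, d)  iff  a + d == b + c   (both pairs denote a - b).
    return p[0] + q[1] == p[1] + q[0]

def quot_rec(f):
    """Lift f on pairs to the quotient by evaluating it on a canonical representative.

    This is only sound when f respects ~, which is exactly the rel* hypothesis
    that SetQuot-rec demands.
    """
    def canonical(p):
        a, b = p
        return (a - b, 0) if a >= b else (0, b - a)
    return lambda p: f(canonical(p))

f = lambda p: (p[0] - p[1]) ** 2      # respects ~: depends only on a - b
f_bar = quot_rec(f)

assert related((7, 4), (3, 0))                 # both pairs denote 3
assert f_bar((7, 4)) == f_bar((3, 0)) == 9     # the lifted map is well defined
assert f_bar((1, 5)) == 16                     # the class of -4
```

In the Agda development there are no canonical representatives to fall back on; well-definedness is carried entirely by the `rel*` coherence argument, which is what makes the postulated HIT interface useful.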
Set Implicit Arguments. (** some default polymorphic functions *) Section FUNCTIONS. Variables A B C : Type. Definition id (x : A) := x. Definition const (x : A)(y : B) := x. Definition uncurry (f : A -> B -> C)(p : A * B) := match p with | (x,y) => f x y end. Definition curry (f : (A * B) -> C)(x : A)(y : B) : C := f (x , y). Definition compose (f : B -> C)(g : A -> B)(x : A) : C := f (g x). Definition flip (f : B -> A -> C)(x : A)(y : B) := f y x. End FUNCTIONS. Notation "f ':@:' g" := (compose f g)(at level 40, left associativity). (* useful function on sumbool *) Definition sumbool_to_bool {A B : Prop} (p : {A} + {B}) := if p then true else false.
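(* A small, hedged usage sketch of the combinators above. The concrete numerals and the
   standard-library names [Nat.add] and [Nat.sub] are for illustration only and are not part
   of the development. *)
Example compose_example : (S :@: S) 3 = 5.
Proof. reflexivity. Qed.

Example flip_example : flip Nat.sub 2 5 = 3.
Proof. reflexivity. Qed.

Example uncurry_example : uncurry Nat.add (2, 3) = 5.
Proof. reflexivity. Qed.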
lemma complex_eqI [intro?]: "Re x = Re y \<Longrightarrow> Im x = Im y \<Longrightarrow> x = y"
lemma interior_complement: "interior (- S) = - closure S"
(*<*) (* * Knowledge-based programs. * (C)opyright 2011, Peter Gammie, peteg42 at gmail.com. * License: BSD *) theory Kripke imports Main begin (*>*) section \<open>A modal logic of knowledge\<close> text\<open> \label{sec:kbps-logic-of-knowledge} We begin with the standard syntax and semantics of the propositional logic of knowledge based on \emph{Kripke structures}. More extensive treatments can be found in \citet{Lenzen:1978}, \citet{Chellas:1980}, \citet{Hintikka:1962} and \citet[Chapter~2]{FHMV:1995}. The syntax includes one knowledge modality per agent, and one for \emph{common knowledge} amongst a set of agents. It is parameterised by the type @{typ "'a"} of agents and @{typ "'p"} of propositions. \<close> datatype ('a, 'p) Kform = Kprop "'p" | Knot "('a, 'p) Kform" | Kand "('a, 'p) Kform" "('a, 'p) Kform" | Kknows "'a" "('a, 'p) Kform" ("\<^bold>K\<^sub>_ _") | Kcknows "'a list" "('a, 'p) Kform" ("\<^bold>C\<^bsub>_\<^esub> _") text\<open> A Kripke structure consists of a set of \emph{worlds} of type @{typ "'w"}, one \emph{accessibility relation} between worlds for each agent and a \emph{valuation function} that indicates the truth of a proposition at a world. This is a very general story that we will quickly specialise. \<close> type_synonym 'w Relation = "('w \<times> 'w) set" record ('a, 'p, 'w) KripkeStructure = worlds :: "'w set" relations :: "'a \<Rightarrow> 'w Relation" valuation :: "'w \<Rightarrow> 'p \<Rightarrow> bool" definition kripke :: "('a, 'p, 'w) KripkeStructure \<Rightarrow> bool" where "kripke M \<equiv> \<forall>a. relations M a \<subseteq> worlds M \<times> worlds M" definition mkKripke :: "'w set \<Rightarrow> ('a \<Rightarrow> 'w Relation) \<Rightarrow> ('w \<Rightarrow> 'p \<Rightarrow> bool) \<Rightarrow> ('a, 'p, 'w) KripkeStructure" where "mkKripke ws rels val \<equiv> \<lparr> worlds = ws, relations = \<lambda>a. rels a \<inter> ws \<times> ws, valuation = val \<rparr>" (*<*) lemma kripkeI[intro]: assumes "\<And>a. relations M a \<subseteq> worlds M \<times> worlds M" shows "kripke M" using assms unfolding kripke_def by simp lemma kripke_rels_worlds[dest]: assumes "(w, w') \<in> relations M a" assumes M: "kripke M" shows "w \<in> worlds M \<and> w' \<in> worlds M" using assms unfolding kripke_def by auto lemma kripke_tc_rels_worlds[dest]: assumes R: "(w, w') \<in> (\<Union>a \<in> as. relations M a)\<^sup>+" assumes M: "kripke M" shows "w \<in> worlds M \<and> w' \<in> worlds M" using assms by (induct rule: trancl_induct) auto lemma kripke_rels_trc_worlds: assumes R: "(w, w') \<in> (\<Union>a. relations M a)\<^sup>*" assumes w: "w \<in> worlds M" assumes M: "kripke M" assumes W: "W = worlds M" shows "w' \<in> W" using assms by (induct rule: rtrancl_induct) auto lemma mkKripke_kripke[intro, simp]: "kripke (mkKripke ws rels val)" unfolding kripke_def mkKripke_def by clarsimp lemma mkKripke_simps[simp]: "worlds (mkKripke ws rels val) = ws" "relations (mkKripke ws rels val) = (\<lambda>a. rels a \<inter> ws \<times> ws)" "valuation (mkKripke ws rels val) = val" unfolding mkKripke_def by simp_all (*>*) text \<open> The standard semantics for knowledge is given by taking the accessibility relations to be equivalence relations, yielding the S$5_n$ structures, so-called due to their axiomatisation. \<close> definition S5n :: "('a, 'p, 'w) KripkeStructure \<Rightarrow> bool" where "S5n M \<equiv> \<forall>a. equiv (worlds M) (relations M a)" (*<*) lemma S5nI[intro]: "\<lbrakk> \<And>a. 
equiv (worlds M) (relations M a) \<rbrakk> \<Longrightarrow> S5n M" by (simp add: S5n_def) lemma S5nD[dest]: "S5n M \<Longrightarrow> equiv (worlds M) (relations M a)" by (simp add: S5n_def) lemma S5n_kripke[intro]: "S5n M \<Longrightarrow> kripke M" by (rule kripkeI, erule equivE[OF S5nD], auto simp add: refl_on_def) lemma S5n_rels_closed: "S5n M \<Longrightarrow> relations M a `` (relations M a `` X) \<subseteq> relations M a `` X" apply (drule S5nD[where a=a]) apply (erule equivE) apply (auto dest: refl_onD symD transD) done (*>*) text\<open> Intuitively an agent considers two worlds to be equivalent if it cannot distinguish between them. \<close> subsection\<open>Satisfaction\<close> text\<open> A formula $\phi$ is satisfied at a world $w$ in Kripke structure $M$ in the following way:\<close> fun models :: "('a, 'p, 'w) KripkeStructure \<Rightarrow> 'w \<Rightarrow> ('a, 'p) Kform \<Rightarrow> bool" ("(_, _ \<Turnstile> _)" [80,0,80] 80) where "M, w \<Turnstile> (Kprop p) = valuation M w p" | "M, w \<Turnstile> (Knot \<phi>) = (\<not> M, w \<Turnstile> \<phi>)" | "M, w \<Turnstile> (Kand \<phi> \<psi>) = (M, w \<Turnstile> \<phi> \<and> M, w \<Turnstile> \<psi>)" | "M, w \<Turnstile> (\<^bold>K\<^sub>a \<phi>) = (\<forall>w' \<in> relations M a `` {w}. M, w' \<Turnstile> \<phi>)" | "M, w \<Turnstile> (\<^bold>C\<^bsub>as\<^esub> \<phi>) = (\<forall>w' \<in> (\<Union>a \<in> set as. relations M a)\<^sup>+ `` {w}. M, w' \<Turnstile> \<phi>)" text\<open> The first three clauses are standard. The clause for @{term "Kknows a \<phi>"} expresses the idea that an agent knows @{term "\<phi>"} at world @{term "w"} in structure @{term "M"} iff @{term "\<phi>"} is true at all worlds it considers possible. The clause for @{term "Kcknows as \<phi>"} captures what it means for the set of agents @{term "as"} to commonly know @{term "\<phi>"}; roughly, everyone knows @{term "\<phi>"} and knows that everyone knows it, and so forth. Note that the transitive closure and the reflexive-transitive closure generate the same relation due to the reflexivity of the agents' accessibility relations; we use the former as it has a more pleasant induction principle. \<close> (*<*) lemma S5n_rels_eq: assumes S5n: "S5n M" and ww': "(w, w') \<in> relations M a" shows "relations M a `` {w} = relations M a `` {w'}" using S5nD[OF S5n] ww' by - (rule equiv_class_eq, blast+) text\<open> A key property of the semantics for common knowledge is that it forms an equivalence class itself. \<close> lemma tc_equiv: assumes E: "\<And>i. i \<in> is \<Longrightarrow> equiv A (f i)" and is_nempty: "is \<noteq> {}" shows "equiv A ((\<Union>i\<in>is. f i)\<^sup>+)" proof(rule equivI) from E is_nempty show "refl_on A ((\<Union>i\<in>is. f i)\<^sup>+)" unfolding equiv_def apply - apply (rule refl_onI) apply (rule trancl_Int_subset) apply (auto dest: refl_onD refl_onD1 refl_onD2) done from E show "sym ((\<Union>i\<in>is. f i)\<^sup>+)" apply - apply (rule sym_trancl) unfolding equiv_def sym_def by blast show "trans ((\<Union>i\<in>is. f i)\<^sup>+)" by (rule trans_trancl) qed lemma S5n_tc_rels_eq: assumes S5n: "S5n M" and ww': "(w, w') \<in> (\<Union>a \<in> as. relations M a)\<^sup>+" shows "(\<Union>a \<in> as. relations M a)\<^sup>+ `` {w} = (\<Union>a \<in> as. 
relations M a)\<^sup>+ `` {w'}" apply (cases "as = {}") apply fastforce apply (rule equiv_class_eq[OF _ ww']) apply (erule tc_equiv[OF S5nD[OF S5n]]) done text\<open>We can show that the standard S5 properties hold of this semantics:\<close> lemma S5n_knowledge_generalisation: "\<lbrakk> S5n M; \<forall>w \<in> worlds M. M, w \<Turnstile> \<phi> \<rbrakk> \<Longrightarrow> M, w \<Turnstile> Kknows a \<phi>" unfolding S5n_def equiv_def refl_on_def by auto lemma S5n_knowledge: "\<lbrakk> S5n M; w \<in> worlds M; M, w \<Turnstile> Kknows a \<phi> \<rbrakk> \<Longrightarrow> M, w \<Turnstile> \<phi>" unfolding S5n_def equiv_def refl_on_def by auto lemma S5n_positive_introspection: "\<lbrakk> S5n M; w \<in> worlds M; M, w \<Turnstile> Kknows a \<phi> \<rbrakk> \<Longrightarrow> M, w \<Turnstile> Kknows a (Kknows a \<phi>)" unfolding S5n_def equiv_def by simp (blast dest: transD) lemma S5n_negative_introspection: "\<lbrakk> S5n M; w \<in> worlds M; M, w \<Turnstile> Knot (Kknows a \<phi>) \<rbrakk> \<Longrightarrow> M, w \<Turnstile> Kknows a (Knot (Kknows a \<phi>))" unfolding S5n_def equiv_def by simp (blast dest: symD transD) (*>*) text\<open> The relation between knowledge and common knowledge can be understood as follows, following \citet[\S2.4]{FHMV:1995}. Firstly, that $\phi$ is common knowledge to a set of agents $as$ can be seen as asserting that everyone in $as$ knows $\phi$ and moreover knows that it is common knowledge amongst $as$. \<close> lemma S5n_common_knowledge_fixed_point: assumes S5n: "S5n M" assumes w: "w \<in> worlds M" assumes a: "a \<in> set as" shows "M, w \<Turnstile> Kcknows as \<phi> \<longleftrightarrow> M, w \<Turnstile> Kand (Kknows a \<phi>) (Kknows a (Kcknows as \<phi>))" (*<*) proof assume CK: "M, w \<Turnstile> Kcknows as \<phi>" from S5n a w CK have "M, w \<Turnstile> Kknows a \<phi>" and "M, w \<Turnstile> Kknows a (Kcknows as \<phi>)" by (auto intro: trancl_into_trancl2) then show "M, w \<Turnstile> Kand (Kknows a \<phi>) (Kknows a (Kcknows as \<phi>))" by simp next assume "M, w \<Turnstile> Kand (Kknows a \<phi>) (Kknows a (Kcknows as \<phi>))" hence "M, w \<Turnstile> (Kknows a (Kcknows as \<phi>))" by simp with S5n w show "M, w \<Turnstile> (Kcknows as \<phi>)" by (rule S5n_knowledge) qed (*>*) text\<open> Secondly we can provide an induction schema for the introduction of common knowledge: from everyone in $as$ knows that $\phi$ implies $\phi \land \psi$, and that $\phi$ is satisfied at world $w$, infer that $\psi$ is common knowledge amongst $as$ at $w$. \<close> lemma S5n_common_knowledge_induct: assumes S5n: "S5n M" assumes w: "w \<in> worlds M" assumes E: "\<forall>a \<in> set as. \<forall>w \<in> worlds M. M, w \<Turnstile> \<phi> \<longrightarrow> M, w \<Turnstile> \<^bold>K\<^sub>a (Kand \<phi> \<psi>)" assumes p: "M, w \<Turnstile> \<phi>" shows "M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>" (*<*) proof - { fix w' assume ww': "(w, w') \<in> (\<Union>x\<in>set as. relations M x)\<^sup>+" from ww' S5n E p w have "M, w' \<Turnstile> Kand \<phi> \<psi>" by ( induct rule: trancl_induct , simp_all, blast+) } thus ?thesis by simp qed (* We actually use a simpler variant. 
*) lemma S5n_common_knowledge_fixed_point_simpler: assumes S5n: "S5n M" and w: "w \<in> worlds M" and a: "a \<in> set as" shows "M, w \<Turnstile> Kcknows as \<phi> \<longleftrightarrow> M, w \<Turnstile> Kknows a (Kcknows as \<phi>)" proof assume CK: "M, w \<Turnstile> Kcknows as \<phi>" from S5n a w CK show "M, w \<Turnstile> Kknows a (Kcknows as \<phi>)" by (auto intro: trancl_into_trancl2) next assume "M, w \<Turnstile> Kknows a (Kcknows as \<phi>)" with S5n w show "M, w \<Turnstile> (Kcknows as \<phi>)" by (rule S5n_knowledge) qed (*>*) (* **************************************** *) subsection \<open>Generated models\<close> text\<open> \label{sec:generated_models} The rest of this section introduces the technical machinery we use to relate Kripke structures. Intuitively the truth of a formula at a world depends only on the worlds that are reachable from it in zero or more steps, using any of the accessibility relations at each step. Traditionally this result is called the \emph{generated model property} \citep[\S3.4]{Chellas:1980}. Given the model generated by @{term "w"} in @{term "M"}: \<close> definition gen_model :: "('a, 'p, 'w) KripkeStructure \<Rightarrow> 'w \<Rightarrow> ('a, 'p, 'w) KripkeStructure" where "gen_model M w \<equiv> let ws' = worlds M \<inter> (\<Union>a. relations M a)\<^sup>* `` {w} in \<lparr> worlds = ws', relations = \<lambda>a. relations M a \<inter> (ws' \<times> ws'), valuation = valuation M \<rparr>" (*<*) lemma gen_model_worldsD[dest]: "w' \<in> worlds (gen_model M w) \<Longrightarrow> w' \<in> worlds M" unfolding gen_model_def by simp lemma gen_model_world_refl: "w \<in> worlds M \<Longrightarrow> w \<in> worlds (gen_model M w)" unfolding gen_model_def by simp lemma gen_model_rels_worlds[dest]: assumes "(w', w'') \<in> relations (gen_model M w) a" shows "w' \<in> worlds (gen_model M w) \<and> w'' \<in> worlds (gen_model M w)" using assms unfolding gen_model_def by simp lemma gen_model_rels_tc_worlds[dest]: assumes "(w', w'') \<in> (\<Union>a \<in> as. relations (gen_model M w) a)\<^sup>+" shows "w'' \<in> worlds (gen_model M w)" using assms by (induct rule: trancl_induct) auto lemma gen_model_rels[dest]: assumes "(w', w'') \<in> relations (gen_model M w) a" shows "(w', w'') \<in> relations M a" using assms unfolding gen_model_def by simp lemma gen_model_worlds: "worlds (gen_model M w) = worlds M \<inter> (\<Union>a. relations M a)\<^sup>* `` {w}" unfolding gen_model_def by simp lemma gen_model_tc_rels[dest]: assumes M: "kripke M" and R: "(w', w'') \<in> (\<Union>a \<in> as. relations (gen_model M w) a)\<^sup>+" shows "(w', w'') \<in> (\<Union>a \<in> as. relations M a)\<^sup>+" using R proof(induct rule: trancl_induct) case (base y) with M show ?case by auto next case (step y z) with M have "y \<in> worlds (gen_model M w)" and "z \<in> worlds (gen_model M w)" by auto with M step show ?case by (auto intro: trancl_into_trancl) qed lemma gen_model_rels_rev[dest]: assumes M: "kripke M" and "w' \<in> worlds (gen_model M w)" and "(w', w'') \<in> relations M a" shows "(w', w'') \<in> relations (gen_model M w) a" using assms unfolding gen_model_def by (auto intro: rtrancl_into_rtrancl) lemma gen_model_tc_rels_rev[dest]: assumes M: "kripke M" and R: "(w', w'') \<in> (\<Union>a \<in> as. relations M a)\<^sup>+" and W: "w' \<in> worlds (gen_model M w)" shows "(w', w'') \<in> (\<Union>a \<in> as. 
relations (gen_model M w) a)\<^sup>+" using R W proof(induct rule: trancl_induct) case (base y) with M show ?case by auto next case (step y z) with M have "y \<in> worlds (gen_model M w)" and "z \<in> worlds (gen_model M w)" by auto with M step show ?case by (auto intro: trancl_into_trancl) qed lemma gen_model_kripke: shows "kripke (gen_model M w)" unfolding gen_model_def by auto (*>*) text\<open> where we take the image of @{term "w"} under the reflexive transitive closure of the agents' relations, we can show that the satisfaction of a formula @{term "\<phi>"} at a world @{term "w'"} is preserved, provided @{term "w'"} is relevant to the world @{term "w"} that the sub-model is based upon: \<close> lemma gen_model_semantic_equivalence: assumes M: "kripke M" assumes w': "w' \<in> worlds (gen_model M w)" shows "M, w' \<Turnstile> \<phi> \<longleftrightarrow> (gen_model M w), w' \<Turnstile> \<phi>" (*<*) proof - { fix w w' assume "w' \<in> worlds (gen_model M w)" hence "M, w' \<Turnstile> \<phi> \<longleftrightarrow> (gen_model M w), w' \<Turnstile> \<phi>" proof(induct \<phi> arbitrary: w') case (Kknows a f w') show ?case proof assume lhs: "M, w' \<Turnstile> Kknows a f" with Kknows show "gen_model M w, w' \<Turnstile> Kknows a f" by auto next assume rhs: "gen_model M w, w' \<Turnstile> Kknows a f" with M Kknows show "M, w' \<Turnstile> Kknows a f" by (simp, blast) qed next case (Kcknows as f w') show ?case proof assume lhs: "M, w' \<Turnstile> Kcknows as f" with M Kcknows show "gen_model M w, w' \<Turnstile> Kcknows as f" by (simp, blast) next assume rhs: "gen_model M w, w' \<Turnstile> Kcknows as f" with M Kcknows show "M, w' \<Turnstile> Kcknows as f" by (simp, blast) qed qed (simp_all add: gen_model_def) } with w' show ?thesis by simp qed (*>*) text\<open> This is shown by a straightforward structural induction over the formula @{term "\<phi>"}. \<close> (*<*) lemma gen_model_S5n: assumes S5n: "S5n M" shows "S5n (gen_model M w)" proof(intro S5nI equivI) show "\<And>n. refl_on (worlds (gen_model M w)) (relations (gen_model M w) n)" apply (rule equivE[OF S5nD[OF S5n]]) by - (rule refl_onI, auto simp add: refl_on_def gen_model_def) show "\<And>n. sym (relations (gen_model M w) n)" apply (rule equivE[OF S5nD[OF S5n]]) by (unfold gen_model_def sym_def, auto) show "\<And>n. trans (relations (gen_model M w) n)" apply (rule equivE[OF S5nD[OF S5n]]) by - (rule transI, simp add: gen_model_def, unfold trans_def, blast) qed text\<open> If two models generate the same sub-model for a world, they satisfy the same formulas at that world. \<close> lemma gen_model_eq: assumes M: "kripke M" and M': "kripke M'" and "gen_model M w = gen_model M' w" and "w' \<in> worlds (gen_model M' w)" shows "M, w' \<Turnstile> \<phi> \<longleftrightarrow> M', w' \<Turnstile> \<phi>" using assms gen_model_semantic_equivalence[OF M, where w=w] gen_model_semantic_equivalence[OF M', where w=w] by auto text \<open> Our final lemma in this section is technical: it allows us to move between two Kripke structures that have the same relevant worlds. \<close> lemma gen_model_subset_aux: assumes R: "\<And>a. relations M a \<inter> T \<times> T = relations M' a \<inter> T \<times> T" and T: "(\<Union>a. relations M a)\<^sup>* `` {t} \<subseteq> T" shows "(\<Union>x. relations M x)\<^sup>* `` {t} \<subseteq> (\<Union>x. relations M' x)\<^sup>* `` {t}" proof - { fix x assume "(t, x) \<in> (\<Union>x. relations M x)\<^sup>*" hence "(t, x) \<in> (\<Union>x. 
relations M' x)\<^sup>*" proof(induct rule: rtrancl_induct) case (step y z) with R T have "(y, z) \<in> (\<Union>a. relations M' a)" by auto (blast dest: rtrancl_trans) with step show ?case by (blast intro: rtrancl_trans) qed simp } thus ?thesis by blast qed lemma gen_model_subset: assumes M: "kripke M" and M': "kripke M'" and R: "\<And>a. relations M a \<inter> T \<times> T = relations M' a \<inter> T \<times> T" and tMT: "(\<Union>a. relations M a)\<^sup>* `` {t} \<subseteq> T" and tM'T: "(\<Union>a. relations M' a)\<^sup>* `` {t} \<subseteq> T" and tM: "t \<in> worlds M" and tM': "t \<in> worlds M'" and V: "valuation M = valuation M'" shows "gen_model M t = gen_model M' t" proof - from tMT tM'T gen_model_subset_aux[OF R] gen_model_subset_aux[OF R[symmetric]] have F: "(\<Union>a. relations M a)\<^sup>* `` {t} = (\<Union>a. relations M' a)\<^sup>* `` {t}" by - (rule, simp_all) from M tMT tM have G: "(\<Union>a. relations M a)\<^sup>* `` {t} \<subseteq> worlds M" by (auto dest: kripke_rels_trc_worlds) from M' tM'T tM' have H: "(\<Union>a. relations M' a)\<^sup>* `` {t} \<subseteq> worlds M'" by (auto dest: kripke_rels_trc_worlds) from F G H have WORLDS: "worlds (gen_model M t) = worlds (gen_model M' t)" unfolding gen_model_def by (auto iff: Int_absorb1) have RELATIONS: "\<And>a. relations (gen_model M t) a = relations (gen_model M' t) a" proof (simp add: Int_absorb1 G H gen_model_def) fix a { fix a x y assume XY: "(x, y) \<in> relations M a \<inter> (\<Union>x. relations M x)\<^sup>* `` {t} \<times> (\<Union>x. relations M x)\<^sup>* `` {t}" from XY tMT have "(x, y) \<in> relations M a \<inter> T \<times> T" by blast with R have "(x, y) \<in> relations M' a \<inter> T \<times> T" by blast with F XY tM'T have "(x, y) \<in> relations M' a \<inter> (\<Union>x. relations M' x)\<^sup>* `` {t} \<times> (\<Union>x. relations M' x)\<^sup>* `` {t}" by blast } moreover { fix a x y assume XY: "(x, y) \<in> relations M' a \<inter> (\<Union>x. relations M' x)\<^sup>* `` {t} \<times> (\<Union>x. relations M' x)\<^sup>* `` {t}" from XY tM'T have "(x, y) \<in> relations M' a \<inter> T \<times> T" by blast with R have "(x, y) \<in> relations M a \<inter> T \<times> T" by blast with F XY tMT have "(x, y) \<in> relations M a \<inter> (\<Union>x. relations M x)\<^sup>* `` {t} \<times> (\<Union>x. relations M x)\<^sup>* `` {t}" by blast } ultimately show "Restr (relations M a) ((\<Union>x. relations M x)\<^sup>* `` {t}) = Restr (relations M' a) ((\<Union>x. relations M' x)\<^sup>* `` {t})" apply - apply rule apply rule apply auto[1] apply rule apply (case_tac x) apply (simp (no_asm_use)) apply (metis Image_singleton_iff mem_Sigma_iff) done qed from WORLDS RELATIONS V show ?thesis unfolding gen_model_def by simp qed (*>*) subsection \<open>Simulations\<close> text\<open> \label{sec:kripke-theory-simulations} A \emph{simulation}, or \emph{p-morphism}, is a mapping from the worlds of one Kripke structure to another that preserves the truth of all formulas at related worlds \citep[\S3.4, Ex. 3.60]{Chellas:1980}. Such a function @{term "f"} must satisfy four properties. Firstly, the image of the set of worlds of @{term "M"} under @{term "f"} should equal the set of worlds of @{term "M'"}. \<close> definition sim_range :: "('a, 'p, 'w1) KripkeStructure \<Rightarrow> ('a, 'p, 'w2) KripkeStructure \<Rightarrow> ('w1 \<Rightarrow> 'w2) \<Rightarrow> bool" where "sim_range M M' f \<equiv> worlds M' = f ` worlds M \<and> (\<forall>a. 
relations M' a \<subseteq> worlds M' \<times> worlds M')" text\<open>The value of a proposition should be the same at corresponding worlds:\<close> definition sim_val :: "('a, 'p, 'w1) KripkeStructure \<Rightarrow> ('a, 'p, 'w2) KripkeStructure \<Rightarrow> ('w1 \<Rightarrow> 'w2) \<Rightarrow> bool" where "sim_val M M' f \<equiv> \<forall>u \<in> worlds M. valuation M u = valuation M' (f u)" text\<open> If two worlds are related in @{term "M"}, then the simulation maps them to related worlds in @{term "M'"}; intuitively the simulation relates enough worlds. We term this the \emph{forward} property. \<close> definition sim_f :: "('a, 'p, 'w1) KripkeStructure \<Rightarrow> ('a, 'p, 'w2) KripkeStructure \<Rightarrow> ('w1 \<Rightarrow> 'w2) \<Rightarrow> bool" where "sim_f M M' f \<equiv> \<forall>a u v. (u, v) \<in> relations M a \<longrightarrow> (f u, f v) \<in> relations M' a" text\<open> Conversely, if two worlds @{term "f u"} and @{term "v'"} are related in @{term "M'"}, then there is a pair of related worlds @{term "u"} and @{term "v"} in @{term "M"} where @{term "f v = v'"}. Intuitively the simulation makes enough distinctions. We term this the \emph{reverse} property. \<close> definition sim_r :: "('a, 'p, 'w1) KripkeStructure \<Rightarrow> ('a, 'p, 'w2) KripkeStructure \<Rightarrow> ('w1 \<Rightarrow> 'w2) \<Rightarrow> bool" where "sim_r M M' f \<equiv> \<forall>a. \<forall>u \<in> worlds M. \<forall>v'. (f u, v') \<in> relations M' a \<longrightarrow> (\<exists>v. (u, v) \<in> relations M a \<and> f v = v')" definition "sim M M' f \<equiv> sim_range M M' f \<and> sim_val M M' f \<and> sim_f M M' f \<and> sim_r M M' f" (*<*) lemma sim_rangeI[intro]: "\<lbrakk> worlds M' = f ` worlds M; (\<And>a. relations M' a \<subseteq> worlds M' \<times> worlds M') \<rbrakk> \<Longrightarrow> sim_range M M' f" unfolding sim_range_def by simp lemma sim_valI[intro]: "(\<And>u. u \<in> worlds M \<Longrightarrow> valuation M u = valuation M' (f u)) \<Longrightarrow> sim_val M M' f" unfolding sim_val_def by simp lemma sim_fI[intro]: "(\<And>a u v. (u, v) \<in> relations M a \<Longrightarrow> (f u, f v) \<in> relations M' a) \<Longrightarrow> sim_f M M' f" unfolding sim_f_def by simp lemma sim_fD: "\<lbrakk> (u, v) \<in> relations M a; sim M M' f \<rbrakk> \<Longrightarrow> (f u, f v) \<in> relations M' a" unfolding sim_def sim_f_def by blast lemma sim_rI[intro]: "(\<And>a u v'. \<lbrakk>u \<in> worlds M; (f u, v') \<in> relations M' a\<rbrakk> \<Longrightarrow> (\<exists>v. (u, v) \<in> relations M a \<and> f v = v')) \<Longrightarrow> sim_r M M' f" unfolding sim_r_def by simp lemma sim_rD: "\<lbrakk> (f u, v') \<in> relations M' a; sim M M' f; u \<in> worlds M \<rbrakk> \<Longrightarrow> (\<exists>v. (u, v) \<in> relations M a \<and> f v = v')" unfolding sim_def sim_r_def by blast lemma simI[intro]: "\<lbrakk> sim_range M M' f; sim_val M M' f; sim_f M M' f; sim_r M M' f \<rbrakk> \<Longrightarrow> sim M M' f" unfolding sim_def by simp text\<open>The identity is a simulation:\<close> lemma sim_id: "kripke M \<Longrightarrow> sim M M id" (*<*) unfolding sim_def sim_r_def sim_f_def sim_range_def sim_val_def by auto (*>*) (*>*) text\<open> Due to the common knowledge modality, we need to show the simulation properties lift through the transitive closure. In particular we can show that forward simulation is preserved: \<close> (* sim_f_tc: stated and proved by analogy with sim_f_trc below; it is used later by sim_semantic_equivalence *) lemma sim_f_tc: assumes uv': "(u, v) \<in> (\<Union>a\<in>as. relations M a)\<^sup>+" and s: "sim M M' f" shows "(f u, f v) \<in> (\<Union>a\<in>as. relations M' a)\<^sup>+" using assms by - ( induct rule: trancl_induct[OF uv'] , auto dest: sim_fD intro: trancl_into_trancl ) text\<open>Reverse simulation also:\<close> lemma sim_r_tc: assumes M: "kripke M" assumes s: "sim M M' f" assumes u: "u \<in> worlds M" assumes fuv': "(f u, v') \<in> (\<Union>a\<in>as.
relations M' a)\<^sup>+" obtains v where "f v = v'" and "(u, v) \<in> (\<Union>a\<in>as. relations M a)\<^sup>+" (*<*) proof - assume E: "\<And>v. \<lbrakk>f v = v'; (u, v) \<in> (\<Union>a\<in>as. relations M a)\<^sup>+\<rbrakk> \<Longrightarrow> thesis" from fuv' have as_nempty: "as \<noteq> {}" by auto from fuv' have "\<exists>v. f v = v' \<and> (u, v) \<in> (\<Union>a\<in>as. relations M a)\<^sup>+" proof(induct rule: trancl_induct) case (base v') with u s as_nempty show ?case by (blast dest: sim_rD) next case (step v' w') hence fuv': "(f u, v') \<in> (\<Union>a\<in>as. relations M' a)\<^sup>+" and v'w': "(v', w') \<in> (\<Union>a\<in>as. relations M' a)" by fast+ from step obtain v where vv': "f v = v'" and uv: "(u, v) \<in> (\<Union>a\<in>as. relations M a)\<^sup>+" by blast from s v'w' vv' kripke_tc_rels_worlds[OF uv M] obtain w where ww': "f w = w'" and vw: "(v, w) \<in> (\<Union>a\<in>as. relations M a)" by (blast dest: sim_rD) from uv vw ww' show ?case by (blast intro: trancl_trans) qed with E show thesis by blast qed lemma sim_f_trc: assumes uv': "(u, v) \<in> (\<Union>a. relations M a)\<^sup>*" and s: "sim M M' f" shows "(f u, f v) \<in> (\<Union>a. relations M' a)\<^sup>*" using assms by - ( induct rule: rtrancl_induct[OF uv'] , auto dest: sim_fD intro: rtrancl_into_rtrancl ) lemma sim_r_trc: assumes s: "sim M M' f" and fuv': "(f u, v') \<in> (\<Union>a. relations M' a)\<^sup>*" and M: "kripke M" and u: "u \<in> worlds M" obtains v where "f v = v'" and "(u, v) \<in> (\<Union>a. relations M a)\<^sup>*" proof - assume E: "\<And>v. \<lbrakk>f v = v'; (u, v) \<in> (\<Union>a. relations M a)\<^sup>*\<rbrakk> \<Longrightarrow> thesis" from fuv' have "\<exists>v. f v = v' \<and> (u, v) \<in> (\<Union>a. relations M a)\<^sup>*" proof(induct rule: rtrancl_induct) case base show ?case by blast next case (step v' w') hence fuv': "(f u, v') \<in> (\<Union>a. relations M' a)\<^sup>*" and v'w': "(v', w') \<in> (\<Union>a. relations M' a)" by fast+ from step obtain v where vv': "f v = v'" and uv: "(u, v) \<in> (\<Union>a. relations M a)\<^sup>*" by blast from s v'w' vv' kripke_rels_trc_worlds[OF uv u M] obtain w where ww': "f w = w'" and vw: "(v, w) \<in> (\<Union>a. 
relations M a)" by (blast dest: sim_rD) from uv vw ww' show ?case by (blast intro: rtrancl_trans) qed with E show thesis by blast qed lemma sim_kripke: "\<lbrakk> sim M M' f; kripke M \<rbrakk> \<Longrightarrow> kripke M'" unfolding sim_def sim_range_def by (rule kripkeI, blast) lemma sim_S5n: assumes S5n: "S5n M" and sim: "sim M M' f" shows "S5n M'" proof(intro S5nI equivI) fix a from S5n sim show "refl_on (worlds M') (relations M' a)" using sim_kripke S5n_kripke unfolding S5n_def equiv_def sim_def sim_f_def sim_range_def by - (rule refl_onI, (simp, blast dest: refl_onD)+) next fix a { fix u v assume uv: "(u, v) \<in> relations M' a" from sim uv obtain u' where uw: "u' \<in> worlds M" and fu: "u = f u'" unfolding sim_def sim_range_def by bestsimp from sim uv fu uw obtain v' where u'v': "(u', v') \<in> relations M a" and fv: "v = f v'" unfolding sim_def sim_r_def sim_range_def by best from S5n u'v' have "(v', u') \<in> relations M a" unfolding S5n_def equiv_def by (blast dest: symD) with sim fu fv have "(v, u) \<in> relations M' a" unfolding sim_def sim_f_def by simp } thus "sym (relations M' a)" by (blast intro: symI) next fix a { fix x y z assume xy: "(x, y) \<in> relations M' a" and yz: "(y, z) \<in> relations M' a" from sim xy obtain x' where xw: "x' \<in> worlds M" and fx: "x = f x'" unfolding sim_def sim_range_def by bestsimp from sim xy fx xw obtain y' where x'y': "(x', y') \<in> relations M a" and fy: "y = f y'" unfolding sim_def sim_r_def sim_range_def by best from S5n sim yz fy x'y' obtain z' where y'z': "(y', z') \<in> relations M a" and fz: "z = f z'" unfolding sim_def sim_r_def sim_range_def by best from S5n x'y' y'z' have "(x', z') \<in> relations M a" unfolding S5n_def equiv_def by (blast dest: transD) with sim fx fy fz have "(x, z) \<in> relations M' a" unfolding sim_def sim_f_def by simp } thus "trans (relations M' a)" by (blast intro: transI) qed (*>*) text\<open> Finally we establish the key property of simulations, that they preserve the satisfaction of all formulas in the following way: \<close> lemma sim_semantic_equivalence: assumes M: "kripke M" assumes s: "sim M M' f" assumes u: "u \<in> worlds M" shows "M, u \<Turnstile> \<phi> \<longleftrightarrow> M', f u \<Turnstile> \<phi>" (*<*) using u proof(induct \<phi> arbitrary: u) case (Kknows a \<psi> u) hence u: "u \<in> worlds M" by fast show ?case proof assume lhs: "M, u \<Turnstile> Kknows a \<psi>" { fix v' assume "(f u, v') \<in> relations M' a" with s u obtain v where uv: "(u, v) \<in> relations M a" and vv': "f v = v'" by (blast dest: sim_rD) from lhs uv have "M, v \<Turnstile> \<psi>" by simp with kripke_rels_worlds[OF uv M] vv' Kknows have "M', v' \<Turnstile> \<psi>" by auto } thus "M', f u \<Turnstile> Kknows a \<psi>" by simp next assume rhs: "M', f u \<Turnstile> Kknows a \<psi>" { fix v assume uv: "(u, v) \<in> relations M a" with s have "(f u, f v) \<in> relations M' a" by (blast dest: sim_fD) with rhs have "M', f v \<Turnstile> \<psi>" by simp with kripke_rels_worlds[OF uv M] Kknows s have "M, v \<Turnstile> \<psi>" by auto } thus "M, u \<Turnstile> Kknows a \<psi>" by simp qed next case (Kcknows as \<psi>) hence u: "u \<in> worlds M" by fast show ?case proof assume lhs: "M, u \<Turnstile> Kcknows as \<psi>" { fix v' assume "(f u, v') \<in> (\<Union>x\<in>set as. relations M' x)\<^sup>+" with M s u obtain v where uv: "(u, v) \<in> (\<Union>x\<in>set as. 
relations M x)\<^sup>+" and vv': "f v = v'" by (blast intro: sim_r_tc) from uv lhs have "M, v \<Turnstile> \<psi>" by simp with kripke_tc_rels_worlds[OF uv M] vv' Kcknows have "M', v' \<Turnstile> \<psi>" by auto } thus "M', f u \<Turnstile> Kcknows as \<psi>" by simp next assume rhs: "M', f u \<Turnstile> Kcknows as \<psi>" { fix v assume uv: "(u, v) \<in> (\<Union>x\<in>set as. relations M x)\<^sup>+" with s have "(f u, f v) \<in> (\<Union>x\<in>set as. relations M' x)\<^sup>+" by (blast dest: sim_f_tc) with rhs have "M', f v \<Turnstile> \<psi>" by simp with kripke_tc_rels_worlds[OF uv M] Kcknows s have "M, v \<Turnstile> \<psi>" by auto } thus "M, u \<Turnstile> Kcknows as \<psi>" by simp qed qed (insert s, auto simp add: sim_range_def sim_def sim_val_def) (*>*) text\<open> The proof is by structural induction over the formula @{term "\<phi>"}. The knowledge cases appeal to our two simulation preservation lemmas. \citet{DBLP:journals/toplas/Sangiorgi09} surveys the history of p-morphisms and the related concept of \emph{bisimulation}. This is all we need to know about Kripke structures. \<close> (*<*) end (*>*)
function yearly_tmax_correction(filenameout) % this program is designed to adjust the daily Tmax generated by WG using % the new yearly Tmax series produced by FFT. % load the yearly averaged Tmax after FFT load('Pnew_tmax'); tmax_FFT=Pnew_tmax'; % load WG generated daily Tmax load(filenameout); [n,m]=size(gTmax); gTmax=gTmax'; tmax_WG=reshape(gTmax,[],1); n=length(tmax_FFT); % years m=length(tmax_WG); % days % calculate the yearly mean Tmax generated by WG, namely (Z) j=1; Z=zeros(n,1); for i=1:365:m Z(j,1)=mean(tmax_WG(i:i+365-1,1)); j=j+1; end % calculate the yearly Tmax offset (difference) between the series after FFT and % the series generated by the weather generator tmax_ratio=zeros(n,1); for i=1:n tmax_ratio(i,1)=tmax_FFT(i,1)-Z(i,1); end % extend the yearly Tmax offset to the daily scale; the value within each % year is the same tmax_extent=zeros(m,1); j=1; for i=1:365:m tmax_extent(i:i+365-1,1)=tmax_ratio(j,1); j=j+1; end % adjust the daily Tmax generated by WG using the above offsets tmax_adjust=zeros(size(tmax_WG)); for i=1:m tmax_adjust(i,1)=tmax_WG(i,1)+tmax_extent(i,1); end yearly_corrected_tmax=tmax_adjust; save('yearly_corrected_tmax','yearly_corrected_tmax')
function [Y] = mnbinpdf(X, W, K) %MNBINPDF Maszle's negative binomial probability density function % % Y = MNBINPDF(X, W, K) % % returns the probabilities of the values X using a negative binomial % distribution with mean W and clumping parameter K. % % This is an alternate form of the negative binomial commonly % employed in biology to describe aggregated count data. % W and K are related to the traditional parametrization by the % relationships: % % P = K/(K + W), and K = R, % % with the distinction that K is any positive real number, not just % positive integers. % % X should only be integers and is rounded if not. Calculation is % optimized by taking the exponential of the GAMMALN function. % % See also MNBINRND %---------------------------------------------------------------------- % Copyright (c) 1997. Don R. Maszle. All rights reserved. % % -- Revisions ----- % Author: Don R. Maszle % E-mail: [email protected] % -- SCCS --------- %---------------------------------------------------------------------- %---------------------------------------------------------------------- % We use the definition from Bradley and May, Trans. R.Soc.Trop.Med.Hyg. % v72(3), 1978, p262. % A notational convenience a = W/(W + K); X = round(X); Y = ((1 - a).^K) ... .* (exp( gammaln(X + K) + X.*log(a) - gammaln(K) - gammaln(X + 1)));
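% Usage sketch (for illustration only; the parameter values below are arbitrary):
%
%   X = 0:10;
%   Y = mnbinpdf(X, 5, 0.5);   % probabilities of counts 0..10 with mean W = 5
%                              % and clumping parameter K = 0.5
%
% Summing over all non-negative counts gives approximately 1, e.g.
% sum(mnbinpdf(0:500, 5, 0.5)) should be close to 1.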
Require Import Coq.Strings.String. Require Export SystemFR.ErasedSingleton. Require Export SystemFR.SubtypeList. Require Export SystemFR.EvalListMatch. Require Export SystemFR.ReducibilitySubtype. Require Export SystemFR.ErasedQuant. Opaque reducible_values. Opaque list_match. Lemma reducible_union_left: forall ρ t T1 T2, valid_interpretation ρ -> [ ρ ⊨ t : T1 ] -> [ ρ ⊨ t : T_union T1 T2 ]. Proof. unfold reduces_to; steps. eexists; repeat step || simp_red; eauto using reducible_values_closed. Qed. Lemma reducible_union_right: forall ρ t T1 T2, valid_interpretation ρ -> [ ρ ⊨ t : T2 ] -> [ ρ ⊨ t : T_union T1 T2 ]. Proof. unfold reduces_to; steps. eexists; repeat step || simp_red; eauto using reducible_values_closed. Qed. Opaque List. Lemma tmatch_value: forall ρ v t2 t3 T2 T3, valid_interpretation ρ -> wf t3 2 -> wf T2 0 -> wf T3 2 -> is_erased_term t2 -> is_erased_term t3 -> is_erased_type T2 -> is_erased_type T3 -> pfv t2 term_var = nil -> pfv t3 term_var = nil -> pfv T2 term_var = nil -> pfv T3 term_var = nil -> [ ρ ⊨ v : List ]v -> [ ρ ⊨ t2 : T2 ] -> (forall h t, [ ρ ⊨ h : T_top ] -> [ ρ ⊨ t : List ] -> [ ρ ⊨ open 0 (open 1 t3 h) t : open 0 (open 1 T3 h) t ]) -> [ ρ ⊨ list_match v t2 t3 : List_Match v T2 T3 ]. Proof. intros; evaluate_list_match; steps; eauto with wf. - eapply star_backstep_reducible; eauto; repeat step || apply wf_list_match || apply is_erased_term_list_match || apply pfv_list_match; eauto with wf. unfold List_Match. apply reducible_union_left; auto. apply reducible_type_refine with uu; repeat step || simp_red || apply reducible_value_expr; eauto using equivalent_refl with step_tactic. - eapply reducibility_equivalent2; eauto using equivalent_sym; repeat step || list_utils; t_closer. unfold List_Match. apply reducible_union_right; auto. apply reducible_exists with h; repeat step || open_none; t_closer. + apply reducible_value_expr; repeat step || simp_red_goal. + apply reducible_exists with l; repeat step || open_none; t_closer; eauto using reducible_value_expr. apply reducible_type_refine with uu; repeat step || open_none; t_closer; eauto using reducible_value_expr. apply reducible_value_expr; repeat light || simp_red_goal. apply equivalent_refl; steps; t_closer. Qed. Lemma tmatch: forall ρ t t2 t3 T2 T3, valid_interpretation ρ -> wf t3 2 -> wf T2 0 -> wf T3 2 -> is_erased_term t2 -> is_erased_term t3 -> is_erased_type T2 -> is_erased_type T3 -> pfv t2 term_var = nil -> pfv t3 term_var = nil -> pfv T2 term_var = nil -> pfv T3 term_var = nil -> [ ρ ⊨ t : List ] -> [ ρ ⊨ t2 : T2 ] -> (forall h t, [ ρ ⊨ h : T_top ]v -> [ ρ ⊨ t : List ]v -> [ ρ ⊨ open 0 (open 1 t3 h) t : open 0 (open 1 T3 h) t ]) -> [ ρ ⊨ list_match t t2 t3 : List_Match t T2 T3 ]. Proof. intros. unfold reduces_to in H11; steps. apply reducibility_equivalent2 with (list_match v t2 t3); steps; t_closer. - apply equivalent_sym. equivalent_star; repeat step || apply is_erased_term_list_match || apply wf_list_match || apply pfv_list_match; eauto using evaluate_list_match_scrut; t_closer. - apply subtype_reducible with (List_Match v T2 T3). + apply tmatch_value; steps. unfold reduces_to in H11; steps. unfold reduces_to in H17; steps. eapply reducibility_equivalent2 with (open 0 (open 1 t3 h) v1); repeat step || apply is_erased_type_open || apply equivalent_context || apply wf_open || apply fv_nils_open; t_closer; try solve [ apply equivalent_sym; equivalent_star ]. eapply reducibility_rtl; steps; eauto; t_closer. rewrite (swap_term_holes_open t3); steps; t_closer. 
eapply reducibility_equivalent2 with (open 0 (open 1 (swap_term_holes t3 0 1) v1) v0); repeat step || apply is_erased_type_open || apply is_erased_open || apply equivalent_context || apply wf_swap_term_holes_3 || apply wf_open || apply fv_nils_open; t_closer; try solve [ apply equivalent_sym; equivalent_star ]. rewrite (swap_term_holes_open T3); steps; t_closer. eapply reducibility_rtl; eauto; repeat step || apply is_erased_type_open || apply fv_nils_open; eauto; t_closer. rewrite <- (swap_term_holes_open t3); steps; t_closer. rewrite <- (swap_term_holes_open T3); steps; t_closer. + apply subtype_list_match_scrut; steps. apply equivalent_sym; equivalent_star. Qed. Lemma open_tmatch_helper: forall Θ Γ t t2 t3 T2 T3 x1 x2, ~ x1 ∈ pfv_context Γ term_var -> ~ x2 ∈ pfv_context Γ term_var -> x1 <> x2 -> wf t3 2 -> wf T2 0 -> wf T3 2 -> is_erased_term t2 -> is_erased_term t3 -> is_erased_type T2 -> is_erased_type T3 -> subset (fv t2) (support Γ) -> subset (fv t3) (support Γ) -> subset (fv T2) (support Γ) -> subset (fv T3) (support Γ) -> [ Θ; Γ ⊨ t : List ] -> [ Θ; Γ ⊨ t2 : T2 ] -> [ Θ; (x1, T_top) :: (x2, List) :: Γ ⊨ open 0 (open 1 t3 (fvar x1 term_var)) (fvar x2 term_var) : open 0 (open 1 T3 (fvar x1 term_var)) (fvar x2 term_var) ] -> [ Θ; Γ ⊨ list_match t t2 t3 : List_Match t T2 T3 ]. Proof. unfold open_reducible; repeat step || apply tmatch || t_instantiate_sat3 || rewrite substitute_list_match || rewrite substitute_List_Match; t_closer. unshelve epose proof (H15 ρ ((x1, h) :: (x2, t0) :: lterms) _ _ _); repeat step || apply SatCons || t_substitutions; t_closer. Qed. Lemma open_tmatch: forall Γ t t2 t3 T2 T3 x1 x2, ~ x1 ∈ pfv_context Γ term_var -> ~ x2 ∈ pfv_context Γ term_var -> x1 <> x2 -> wf t 0 -> wf t2 0 -> wf t3 2 -> wf T2 0 -> wf T3 2 -> is_erased_term t -> is_erased_term t2 -> is_erased_term t3 -> is_erased_type T2 -> is_erased_type T3 -> subset (fv t) (support Γ) -> subset (fv t2) (support Γ) -> subset (fv t3) (support Γ) -> subset (fv T2) (support Γ) -> subset (fv T3) (support Γ) -> [ Γ ⊫ t : List ] -> [ Γ ⊫ t2 : T2 ] -> [ (x1, T_top) :: (x2, List) :: Γ ⊫ open 0 (open 1 t3 (fvar x1 term_var)) (fvar x2 term_var) : open 0 (open 1 T3 (fvar x1 term_var)) (fvar x2 term_var) ] -> [ Γ ⊫ list_match t t2 t3 : T_singleton (List_Match t T2 T3) (list_match t t2 t3) ]. Proof. repeat step || apply open_reducible_singleton || apply is_erased_term_list_match || apply wf_list_match; t_closer; eauto using open_tmatch_helper. eapply subset_transitive; eauto using pfv_list_match2; repeat step || sets. Qed.
------------------------------------------------------------------------------ -- The FOTC streams type ------------------------------------------------------------------------------ {-# OPTIONS --exact-split #-} {-# OPTIONS --no-sized-types #-} {-# OPTIONS --no-universe-polymorphism #-} {-# OPTIONS --without-K #-} module FOTC.Data.Stream.Type where open import FOTC.Base open import FOTC.Base.List ------------------------------------------------------------------------------ -- The FOTC streams type (co-inductive predicate for total streams). -- Functional for the Stream predicate. -- StreamF : (D → Set) → D → Set -- StreamF A xs = ∃[ x' ] ∃[ xs' ] xs ≡ x' ∷ xs' ∧ A xs' -- Stream is the greatest fixed-point of StreamF (by Stream-out and -- Stream-coind). postulate Stream : D → Set postulate -- Stream is a post-fixed point of StreamF, i.e. -- -- Stream ≤ StreamF Stream. Stream-out : ∀ {xs} → Stream xs → ∃[ x' ] ∃[ xs' ] xs ≡ x' ∷ xs' ∧ Stream xs' {-# ATP axiom Stream-out #-} -- Stream is the greatest post-fixed point of StreamF, i.e. -- -- ∀ A. A ≤ StreamF A ⇒ A ≤ Stream. -- -- N.B. This is an axiom schema. Because in the automatic proofs we -- *must* use an instance, we do not add this postulate as an ATP -- axiom. postulate Stream-coind : (A : D → Set) → -- A is post-fixed point of StreamF. (∀ {xs} → A xs → ∃[ x' ] ∃[ xs' ] xs ≡ x' ∷ xs' ∧ A xs') → -- Stream is greater than A. ∀ {xs} → A xs → Stream xs
/- Copyright (c) 2019 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin ! This file was ported from Lean 3 source module field_theory.chevalley_warning ! leanprover-community/mathlib commit e001509c11c4d0f549d91d89da95b4a0b43c714f ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.FieldTheory.Finite.Basic /-! # The Chevalley–Warning theorem This file contains a proof of the Chevalley–Warning theorem. Throughout most of this file, `K` denotes a finite field and `q` is notation for the cardinality of `K`. ## Main results 1. Let `f` be a multivariate polynomial in finitely many variables (`X s`, `s : σ`) such that the total degree of `f` is less than `(q-1)` times the cardinality of `σ`. Then the evaluation of `f` on all points of `σ → K` (aka `K^σ`) sums to `0`. (`sum_eval_eq_zero`) 2. The Chevalley–Warning theorem (`char_dvd_card_solutions_of_sum_lt`). Let `f i` be a finite family of multivariate polynomials in finitely many variables (`X s`, `s : σ`) such that the sum of the total degrees of the `f i` is less than the cardinality of `σ`. Then the number of common solutions of the `f i` is divisible by the characteristic of `K`. ## Notation - `K` is a finite field - `q` is notation for the cardinality of `K` - `σ` is the indexing type for the variables of a multivariate polynomial ring over `K` -/ universe u v open BigOperators section FiniteField open MvPolynomial open Function hiding eval open Finset FiniteField variable {K σ ι : Type _} [Fintype K] [Field K] [Fintype σ] [DecidableEq σ] -- mathport name: exprq local notation "q" => Fintype.card K theorem MvPolynomial.sum_eval_eq_zero (f : MvPolynomial σ K) (h : f.totalDegree < (q - 1) * Fintype.card σ) : (∑ x, eval x f) = 0 := by haveI : DecidableEq K := Classical.decEq K calc (∑ x, eval x f) = ∑ x : σ → K, ∑ d in f.support, f.coeff d * ∏ i, x i ^ d i := by simp only [eval_eq'] _ = ∑ d in f.support, ∑ x : σ → K, f.coeff d * ∏ i, x i ^ d i := sum_comm _ = 0 := sum_eq_zero _ intro d hd obtain ⟨i, hi⟩ : ∃ i, d i < q - 1; exact f.exists_degree_lt (q - 1) h hd calc (∑ x : σ → K, f.coeff d * ∏ i, x i ^ d i) = f.coeff d * ∑ x : σ → K, ∏ i, x i ^ d i := mul_sum.symm _ = 0 := (mul_eq_zero.mpr ∘ Or.inr) _ calc (∑ x : σ → K, ∏ i, x i ^ d i) = ∑ (x₀ : { j // j ≠ i } → K) (x : { x : σ → K // x ∘ coe = x₀ }), ∏ j, (x : σ → K) j ^ d j := (Fintype.sum_fiberwise _ _).symm _ = 0 := Fintype.sum_eq_zero _ _ intro x₀ let e : K ≃ { x // x ∘ coe = x₀ } := (Equiv.subtypeEquivCodomain _).symm calc (∑ x : { x : σ → K // x ∘ coe = x₀ }, ∏ j, (x : σ → K) j ^ d j) = ∑ a : K, ∏ j : σ, (e a : σ → K) j ^ d j := (e.sum_comp _).symm _ = ∑ a : K, (∏ j, x₀ j ^ d j) * a ^ d i := (Fintype.sum_congr _ _ _) _ = (∏ j, x₀ j ^ d j) * ∑ a : K, a ^ d i := by rw [mul_sum] _ = 0 := by rw [sum_pow_lt_card_sub_one _ hi, MulZeroClass.mul_zero] intro a let e' : Sum { j // j = i } { j // j ≠ i } ≃ σ := Equiv.sumCompl _ letI : Unique { j // j = i } := { default := ⟨i, rfl⟩ uniq := fun ⟨j, h⟩ => Subtype.val_injective h } calc (∏ j : σ, (e a : σ → K) j ^ d j) = (e a : σ → K) i ^ d i * ∏ j : { j // j ≠ i }, (e a : σ → K) j ^ d j := by rw [← e'.prod_comp, Fintype.prod_sum_type, univ_unique, prod_singleton] rfl _ = a ^ d i * ∏ j : { j // j ≠ i }, (e a : σ → K) j ^ d j := by rw [Equiv.subtypeEquivCodomain_symm_apply_eq] _ = a ^ d i * ∏ j, x₀ j ^ d j := (congr_arg _ (Fintype.prod_congr _ _ _)) -- see below _ = (∏ j, x₀ j ^ d j) * a ^ d i := mul_comm 
_ _ · -- the remaining step of the calculation above rintro ⟨j, hj⟩ show (e a : σ → K) j ^ d j = x₀ ⟨j, hj⟩ ^ d j rw [Equiv.subtypeEquivCodomain_symm_apply_ne] #align mv_polynomial.sum_eval_eq_zero MvPolynomial.sum_eval_eq_zero variable [DecidableEq K] (p : ℕ) [CharP K p] /-- The **Chevalley–Warning theorem**, finitary version. Let `(f i)` be a finite family of multivariate polynomials in finitely many variables (`X s`, `s : σ`) over a finite field of characteristic `p`. Assume that the sum of the total degrees of the `f i` is less than the cardinality of `σ`. Then the number of common solutions of the `f i` is divisible by `p`. -/ theorem char_dvd_card_solutions_of_sum_lt {s : Finset ι} {f : ι → MvPolynomial σ K} (h : (∑ i in s, (f i).totalDegree) < Fintype.card σ) : p ∣ Fintype.card { x : σ → K // ∀ i ∈ s, eval x (f i) = 0 } := by have hq : 0 < q - 1 := by rw [← Fintype.card_units, Fintype.card_pos_iff] exact ⟨1⟩ let S : Finset (σ → K) := { x ∈ univ | ∀ i ∈ s, eval x (f i) = 0 } have hS : ∀ x : σ → K, x ∈ S ↔ ∀ i : ι, i ∈ s → eval x (f i) = 0 := by intro x simp only [S, true_and_iff, sep_def, mem_filter, mem_univ] /- The polynomial `F = ∏ i in s, (1 - (f i)^(q - 1))` has the nice property that it takes the value `1` on elements of `{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}` while it is `0` outside that locus. Hence the sum of its values is equal to the cardinality of `{x : σ → K // ∀ i ∈ s, (f i).eval x = 0}` modulo `p`. -/ let F : MvPolynomial σ K := ∏ i in s, 1 - f i ^ (q - 1) have hF : ∀ x, eval x F = if x ∈ S then 1 else 0 := by intro x calc eval x F = ∏ i in s, eval x (1 - f i ^ (q - 1)) := eval_prod s _ x _ = if x ∈ S then 1 else 0 := _ simp only [(eval x).map_sub, (eval x).map_pow, (eval x).map_one] split_ifs with hx hx · apply Finset.prod_eq_one intro i hi rw [hS] at hx rw [hx i hi, zero_pow hq, sub_zero] · obtain ⟨i, hi, hx⟩ : ∃ i : ι, i ∈ s ∧ eval x (f i) ≠ 0 := by simpa only [hS, not_forall, not_imp] using hx apply Finset.prod_eq_zero hi rw [pow_card_sub_one_eq_one (eval x (f i)) hx, sub_self] -- In particular, we can now show: have key : (∑ x, eval x F) = Fintype.card { x : σ → K // ∀ i ∈ s, eval x (f i) = 0 } rw [Fintype.card_of_subtype S hS, card_eq_sum_ones, Nat.cast_sum, Nat.cast_one, ← Fintype.sum_extend_by_zero S, sum_congr rfl fun x hx => hF x] -- With these preparations under our belt, we will approach the main goal. show p ∣ Fintype.card { x // ∀ i : ι, i ∈ s → eval x (f i) = 0 } rw [← CharP.cast_eq_zero_iff K, ← key] show (∑ x, eval x F) = 0 -- We are now ready to apply the main machine, proven before. apply F.sum_eval_eq_zero -- It remains to verify the crucial assumption of this machine show F.total_degree < (q - 1) * Fintype.card σ calc F.total_degree ≤ ∑ i in s, (1 - f i ^ (q - 1)).totalDegree := total_degree_finset_prod s _ _ ≤ ∑ i in s, (q - 1) * (f i).totalDegree := (sum_le_sum fun i hi => _) -- see ↓ _ = (q - 1) * ∑ i in s, (f i).totalDegree := mul_sum.symm _ < (q - 1) * Fintype.card σ := by rwa [mul_lt_mul_left hq] -- Now we prove the remaining step from the preceding calculation show (1 - f i ^ (q - 1)).totalDegree ≤ (q - 1) * (f i).totalDegree calc (1 - f i ^ (q - 1)).totalDegree ≤ max (1 : MvPolynomial σ K).totalDegree (f i ^ (q - 1)).totalDegree := total_degree_sub _ _ _ ≤ (f i ^ (q - 1)).totalDegree := by simp only [max_eq_right, Nat.zero_le, total_degree_one] _ ≤ (q - 1) * (f i).totalDegree := total_degree_pow _ _ #align char_dvd_card_solutions_of_sum_lt char_dvd_card_solutions_of_sum_lt /-- The **Chevalley–Warning theorem**, fintype version. 
Let `(f i)` be a finite family of multivariate polynomials in finitely many variables (`X s`, `s : σ`) over a finite field of characteristic `p`. Assume that the sum of the total degrees of the `f i` is less than the cardinality of `σ`. Then the number of common solutions of the `f i` is divisible by `p`. -/ theorem char_dvd_card_solutions_of_fintype_sum_lt [Fintype ι] {f : ι → MvPolynomial σ K} (h : (∑ i, (f i).totalDegree) < Fintype.card σ) : p ∣ Fintype.card { x : σ → K // ∀ i, eval x (f i) = 0 } := by simpa using char_dvd_card_solutions_of_sum_lt p h #align char_dvd_card_solutions_of_fintype_sum_lt char_dvd_card_solutions_of_fintype_sum_lt /-- The **Chevalley–Warning theorem**, unary version. Let `f` be a multivariate polynomial in finitely many variables (`X s`, `s : σ`) over a finite field of characteristic `p`. Assume that the total degree of `f` is less than the cardinality of `σ`. Then the number of solutions of `f` is divisible by `p`. See `char_dvd_card_solutions_of_sum_lt` for a version that takes a family of polynomials `f i`. -/ theorem char_dvd_card_solutions {f : MvPolynomial σ K} (h : f.totalDegree < Fintype.card σ) : p ∣ Fintype.card { x : σ → K // eval x f = 0 } := by let F : Unit → MvPolynomial σ K := fun _ => f have : (∑ i : Unit, (F i).totalDegree) < Fintype.card σ := h simpa only [F, Fintype.univ_punit, forall_eq, mem_singleton] using char_dvd_card_solutions_of_sum_lt p this #align char_dvd_card_solutions char_dvd_card_solutions /-- The **Chevalley–Warning theorem**, binary version. Let `f₁`, `f₂` be two multivariate polynomials in finitely many variables (`X s`, `s : σ`) over a finite field of characteristic `p`. Assume that the sum of the total degrees of `f₁` and `f₂` is less than the cardinality of `σ`. Then the number of common solutions of the `f₁` and `f₂` is divisible by `p`. -/ theorem char_dvd_card_solutions_of_add_lt {f₁ f₂ : MvPolynomial σ K} (h : f₁.totalDegree + f₂.totalDegree < Fintype.card σ) : p ∣ Fintype.card { x : σ → K // eval x f₁ = 0 ∧ eval x f₂ = 0 } := by let F : Bool → MvPolynomial σ K := fun b => cond b f₂ f₁ have : (∑ b : Bool, (F b).totalDegree) < Fintype.card σ := (add_comm _ _).trans_lt h simpa only [F, Bool.forall_bool] using char_dvd_card_solutions_of_fintype_sum_lt p this #align char_dvd_card_solutions_of_add_lt char_dvd_card_solutions_of_add_lt end FiniteField
section {* Executable Type Checker *} theory Wasm_Checker imports Wasm_Checker_Types begin fun convert_cond :: "t \<Rightarrow> t \<Rightarrow> sx option \<Rightarrow> bool" where "convert_cond t1 t2 sx = ((t1 \<noteq> t2) \<and> (t_sec t1 = t_sec t2) \<and> (sx = None) = ((is_float_t t1 \<and> is_float_t t2) \<or> (is_int_t t1 \<and> is_int_t t2 \<and> (t_length t1 < t_length t2))))" fun same_lab_h :: "nat list \<Rightarrow> (t list) list \<Rightarrow> t list \<Rightarrow> (t list) option" where "same_lab_h [] _ ts = Some ts" | "same_lab_h (i#is) lab_c ts = (if i \<ge> length lab_c then None else (if lab_c!i = ts then same_lab_h is lab_c (lab_c!i) else None))" fun same_lab :: "nat list \<Rightarrow> (t list) list \<Rightarrow> (t list) option" where "same_lab [] lab_c = None" | "same_lab (i#is) lab_c = (if i \<ge> length lab_c then None else same_lab_h is lab_c (lab_c!i))" lemma same_lab_h_conv_list_all: assumes "same_lab_h ils ls ts' = Some ts" shows "list_all (\<lambda>i. i < length ls \<and> ls!i = ts) ils \<and> ts' = ts" using assms proof(induction ils) case (Cons a ils) thus ?case apply (simp,safe) apply (metis not_less option.distinct(1))+ done qed simp lemma same_lab_conv_list_all: assumes "same_lab ils ls = Some ts" shows "list_all (\<lambda>i. i < length ls \<and> ls!i = ts) ils" using assms proof (induction rule: same_lab.induct) case (2 i "is" lab_c) thus ?case using same_lab_h_conv_list_all by (metis (mono_tags, lifting) list_all_simps(1) not_less option.distinct(1) same_lab.simps(2)) qed simp lemma list_all_conv_same_lab_h: assumes "list_all (\<lambda>i. i < length ls \<and> ls!i = ts) ils" shows "same_lab_h ils ls ts = Some ts" using assms by (induction ils, simp_all) lemma list_all_conv_same_lab: assumes "list_all (\<lambda>i. i < length ls \<and>ls!i = ts) (is@[i])" shows "same_lab (is@[i]) ls = Some ts" using assms proof (induction "(is@[i])") case (Cons a x) thus ?case using list_all_conv_same_lab_h[OF Cons(3)] by (metis option.distinct(1) same_lab.simps(2) same_lab_h.simps(2)) qed auto fun b_e_type_checker :: "t_context \<Rightarrow> b_e list \<Rightarrow> tf \<Rightarrow> bool" and check :: "t_context \<Rightarrow> b_e list \<Rightarrow> checker_type \<Rightarrow> checker_type" and check_single :: "t_context \<Rightarrow> b_e \<Rightarrow> checker_type \<Rightarrow> checker_type" where "b_e_type_checker \<C> es (tn _> tm) = c_types_agree (check \<C> es (Type tn)) tm" | "check \<C> es ts = (case es of [] \<Rightarrow> ts | (e#es) \<Rightarrow> (case ts of Bot \<Rightarrow> Bot | _ \<Rightarrow> check \<C> es (check_single \<C> e ts)))" (* foldl (\<lambda> ts e. 
(case ts of Bot \<Rightarrow> Bot | _ \<Rightarrow> check_single \<C> e ts)) es primrec foldl :: "('b \<Rightarrow> 'a \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a list \<Rightarrow> 'b" where foldl_Nil: "foldl f a [] = a" | foldl_Cons: "foldl f a (x # xs) = foldl f (f a x) xs" *) (* num ops *) | "check_single \<C> (C v) ts = type_update ts [] (Type [typeof v])" | "check_single \<C> (Unop_i t _) ts = (if is_int_t t then type_update ts [TSome t] (Type [t]) else Bot)" | "check_single \<C> (Unop_f t _) ts = (if is_float_t t then type_update ts [TSome t] (Type [t]) else Bot)" | "check_single \<C> (Binop_i t iop) ts = (if is_int_t t \<and> (is_secret_t t \<longrightarrow> safe_binop_i iop) then type_update ts [TSome t, TSome t] (Type [t]) else Bot)" | "check_single \<C> (Binop_f t _) ts = (if is_float_t t then type_update ts [TSome t, TSome t] (Type [t]) else Bot)" | "check_single \<C> (Testop t _) ts = (if is_int_t t then type_update ts [TSome t] (Type [T_i32 (t_sec t)]) else Bot)" | "check_single \<C> (Relop_i t _) ts = (if is_int_t t then type_update ts [TSome t, TSome t] (Type [T_i32 (t_sec t)]) else Bot)" | "check_single \<C> (Relop_f t _) ts = (if is_float_t t then type_update ts [TSome t, TSome t] (Type [T_i32 (t_sec t)]) else Bot)" (* convert *) | "check_single \<C> (Cvtop t1 Convert t2 sx) ts = (if (convert_cond t1 t2 sx) then type_update ts [TSome t2] (Type [t1]) else Bot)" (* reinterpret *) | "check_single \<C> (Cvtop t1 Reinterpret t2 sx) ts = (if ((t1 \<noteq> t2) \<and> (t_sec t1 = t_sec t2) \<and> t_length t1 = t_length t2 \<and> sx = None) then type_update ts [TSome t2] (Type [t1]) else Bot)" (* classify *) | "check_single \<C> (Cvtop t1 Classify t2 sx) ts = (if (is_int_t t2 \<and> is_public_t t2 \<and> classify_t t2 = t1 \<and> sx = None) then type_update ts [TSome t2] (Type [t1]) else Bot)" (* declassify *) | "check_single \<C> (Cvtop t1 Declassify t2 sx) ts = (if ((trust_t \<C>) = Trusted \<and> is_int_t t2 \<and> is_secret_t t2 \<and> declassify_t t2 = t1 \<and> sx = None) then type_update ts [TSome t2] (Type [t1]) else Bot)" (* unreachable, nop, drop, select *) | "check_single \<C> (Unreachable) ts = type_update ts [] (TopType [])" | "check_single \<C> (Nop) ts = ts" | "check_single \<C> (Drop) ts = type_update ts [TAny] (Type [])" | "check_single \<C> (Select sec) ts = type_update_select sec ts" (* block *) | "check_single \<C> (Block (tn _> tm) es) ts = (if (b_e_type_checker (\<C>\<lparr>label := ([tm] @ (label \<C>))\<rparr>) es (tn _> tm)) then type_update ts (to_ct_list tn) (Type tm) else Bot)" (* loop *) | "check_single \<C> (Loop (tn _> tm) es) ts = (if (b_e_type_checker (\<C>\<lparr>label := ([tn] @ (label \<C>))\<rparr>) es (tn _> tm)) then type_update ts (to_ct_list tn) (Type tm) else Bot)" (* if *) | "check_single \<C> (If (tn _> tm) es1 es2) ts = (if (b_e_type_checker (\<C>\<lparr>label := ([tm] @ (label \<C>))\<rparr>) es1 (tn _> tm) \<and> b_e_type_checker (\<C>\<lparr>label := ([tm] @ (label \<C>))\<rparr>) es2 (tn _> tm)) then type_update ts (to_ct_list (tn@[T_i32 Public])) (Type tm) else Bot)" (* br *) | "check_single \<C> (Br i) ts = (if i < length (label \<C>) then type_update ts (to_ct_list ((label \<C>)!i)) (TopType []) else Bot)" (* br_if *) | "check_single \<C> (Br_if i) ts = (if i < length (label \<C>) then type_update ts (to_ct_list ((label \<C>)!i @ [T_i32 Public])) (Type ((label \<C>)!i)) else Bot)" (* br_table *) | "check_single \<C> (Br_table is i) ts = (case (same_lab (is@[i]) (label \<C>)) of None \<Rightarrow> Bot | Some tls 
\<Rightarrow> type_update ts (to_ct_list (tls @ [T_i32 Public])) (TopType []))" (* return *) | "check_single \<C> (Return) ts = (case (return \<C>) of None \<Rightarrow> Bot | Some tls \<Rightarrow> type_update ts (to_ct_list tls) (TopType []))" (* call *) | "check_single \<C> (Call i) ts = (if i < length (func_t \<C>) then (case ((func_t \<C>)!i) of (tr,(tn _> tm)) \<Rightarrow> if (trust_compat (trust_t \<C>) tr) then type_update ts (to_ct_list tn) (Type tm) else Bot) else Bot)" (* call_indirect *) | "check_single \<C> (Call_indirect i) ts = (if (table \<C>) \<noteq> None \<and> i < length (types_t \<C>) then (case ((types_t \<C>)!i) of (tr,(tn _> tm)) \<Rightarrow> if (trust_compat (trust_t \<C>) tr) then type_update ts (to_ct_list (tn@[T_i32 Public])) (Type tm) else Bot) else Bot)" (* get_local *) | "check_single \<C> (Get_local i) ts = (if i < length (local \<C>) then type_update ts [] (Type [(local \<C>)!i]) else Bot)" (* set_local *) | "check_single \<C> (Set_local i) ts = (if i < length (local \<C>) then type_update ts [TSome ((local \<C>)!i)] (Type []) else Bot)" (* tee_local *) | "check_single \<C> (Tee_local i) ts = (if i < length (local \<C>) then type_update ts [TSome ((local \<C>)!i)] (Type [(local \<C>)!i]) else Bot)" (* get_global *) | "check_single \<C> (Get_global i) ts = (if i < length (global \<C>) then type_update ts [] (Type [tg_t ((global \<C>)!i)]) else Bot)" (* set_global *) | "check_single \<C> (Set_global i) ts = (if i < length (global \<C>) \<and> is_mut (global \<C> ! i) then type_update ts [TSome (tg_t ((global \<C>)!i))] (Type []) else Bot)" (* load *) | "check_single \<C> (Load t tp_sx a off) ts = (case (memory \<C>) of Some (m, sec) \<Rightarrow> if t_sec t = sec \<and> load_store_t_bounds a (option_projl tp_sx) t then type_update ts [TSome (T_i32 Public)] (Type [t]) else Bot | None \<Rightarrow> Bot)" (* store *) | "check_single \<C> (Store t tp a off) ts = (case (memory \<C>) of Some (m, sec) \<Rightarrow> if t_sec t = sec \<and> load_store_t_bounds a tp t then type_update ts [TSome (T_i32 Public),TSome t] (Type []) else Bot | None \<Rightarrow> Bot)" (* current_memory *) | "check_single \<C> Current_memory ts = (if (memory \<C>) \<noteq> None then type_update ts [] (Type [T_i32 Public]) else Bot)" (* grow_memory *) | "check_single \<C> Grow_memory ts = (if (memory \<C>) \<noteq> None then type_update ts [TSome (T_i32 Public)] (Type [T_i32 Public]) else Bot)" end
(* Title: HOL/Eisbach/Examples.thy Author: Daniel Matichuk, NICTA/UNSW *) section \<open>Basic Eisbach examples\<close> theory Examples imports MainRLT Eisbach_Tools begin subsection \<open>Basic methods\<close> method my_intros = (rule conjI | rule impI) lemma "P \<and> Q \<longrightarrow> Z \<and> X" apply my_intros+ oops method my_intros' uses intros = (rule conjI | rule impI | rule intros) lemma "P \<and> Q \<longrightarrow> Z \<or> X" apply (my_intros' intros: disjI1)+ oops method my_spec for x :: 'a = (drule spec[where x="x"]) lemma "\<forall>x. P x \<Longrightarrow> P x" apply (my_spec x) apply assumption done subsection \<open>Focusing and matching\<close> method match_test = (match premises in U: "P x \<and> Q x" for P Q x \<Rightarrow> \<open>print_term P, print_term Q, print_term x, print_fact U\<close>) lemma "\<And>x. P x \<and> Q x \<Longrightarrow> A x \<and> B x \<Longrightarrow> R x y \<Longrightarrow> True" apply match_test \<comment> \<open>Valid match, but not quite what we were expecting..\<close> back \<comment> \<open>Can backtrack over matches, exploring all bindings\<close> back back back back back \<comment> \<open>Found the other conjunction\<close> back back back oops text \<open>Use matching to avoid "improper" methods\<close> lemma focus_test: shows "\<And>x. \<forall>x. P x \<Longrightarrow> P x" apply (my_spec "x :: 'a", assumption)? \<comment> \<open>Wrong x\<close> apply (match conclusion in "P x" for x \<Rightarrow> \<open>my_spec x, assumption\<close>) done text \<open>Matches are exclusive. Backtracking will not occur past a match\<close> method match_test' = (match conclusion in "P \<and> Q" for P Q \<Rightarrow> \<open>print_term P, print_term Q, rule conjI[where P="P" and Q="Q"]; assumption\<close> \<bar> "H" for H \<Rightarrow> \<open>print_term H\<close>) text \<open>Solves goal\<close> lemma "P \<Longrightarrow> Q \<Longrightarrow> P \<and> Q" apply match_test' done text \<open>Fall-through case never taken\<close> lemma "P \<and> Q" apply match_test'? oops lemma "P" apply match_test' oops method my_spec_guess = (match conclusion in "P (x :: 'a)" for P x \<Rightarrow> \<open>drule spec[where x=x], print_term P, print_term x\<close>) lemma "\<forall>x. P (x :: nat) \<Longrightarrow> Q (x :: nat)" apply my_spec_guess oops method my_spec_guess2 = (match premises in U[thin]:"\<forall>x. P x \<longrightarrow> Q x" and U':"P x" for P Q x \<Rightarrow> \<open>insert spec[where x=x, OF U], print_term P, print_term Q\<close>) lemma "\<forall>x. P x \<longrightarrow> Q x \<Longrightarrow> Q x \<Longrightarrow> Q x" apply my_spec_guess2? \<comment> \<open>Fails. Note that both "P"s must match\<close> oops lemma "\<forall>x. P x \<longrightarrow> Q x \<Longrightarrow> P x \<Longrightarrow> Q x" apply my_spec_guess2 apply (erule mp) apply assumption done subsection \<open>Higher-order methods\<close> method higher_order_example for x methods meth = (cases x, meth, meth) lemma assumes A: "x = Some a" shows "the x = a" by (higher_order_example x \<open>simp add: A\<close>) subsection \<open>Recursion\<close> method recursion_example for x :: bool = (print_term x, match (x) in "A \<and> B" for A B \<Rightarrow> \<open>print_term A, print_term B, recursion_example A, recursion_example B | -\<close>) lemma "P" apply (recursion_example "(A \<and> D) \<and> (B \<and> C)") oops subsection \<open>Solves combinator\<close> lemma "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" apply (solves \<open>rule conjI\<close>)? 
\<comment> \<open>Doesn't solve the goal!\<close> apply (solves \<open>rule conjI, assumption, assumption\<close>) done subsection \<open>Demo\<close> named_theorems intros and elims and subst method prop_solver declares intros elims subst = (assumption | rule intros | erule elims | subst subst | subst (asm) subst | (erule notE; solves prop_solver))+ lemmas [intros] = conjI impI disjCI iffI notI lemmas [elims] = impCE conjE disjE lemma "((A \<or> B) \<and> (A \<longrightarrow> C) \<and> (B \<longrightarrow> C)) \<longrightarrow> C" apply prop_solver done method guess_all = (match premises in U[thin]:"\<forall>x. P (x :: 'a)" for P \<Rightarrow> \<open>match premises in "?H (y :: 'a)" for y \<Rightarrow> \<open>rule allE[where P = P and x = y, OF U]\<close> | match conclusion in "?H (y :: 'a)" for y \<Rightarrow> \<open>rule allE[where P = P and x = y, OF U]\<close>\<close>) lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> P y \<Longrightarrow> Q y" apply guess_all apply prop_solver done lemma "(\<forall>x. P x \<longrightarrow> Q x) \<Longrightarrow> P z \<Longrightarrow> P y \<Longrightarrow> Q y" apply (solves \<open>guess_all, prop_solver\<close>) \<comment> \<open>Try it without solve\<close> done method guess_ex = (match conclusion in "\<exists>x. P (x :: 'a)" for P \<Rightarrow> \<open>match premises in "?H (x :: 'a)" for x \<Rightarrow> \<open>rule exI[where x=x]\<close>\<close>) lemma "P x \<Longrightarrow> \<exists>x. P x" apply guess_ex apply prop_solver done method fol_solver = ((guess_ex | guess_all | prop_solver); solves fol_solver) declare allI [intros] exE [elims] ex_simps [subst] all_simps [subst] lemma "(\<forall>x. P x) \<and> (\<forall>x. Q x) \<Longrightarrow> (\<forall>x. P x \<and> Q x)" and "\<exists>x. P x \<longrightarrow> (\<forall>x. P x)" and "(\<exists>x. \<forall>y. R x y) \<longrightarrow> (\<forall>y. \<exists>x. R x y)" by fol_solver+ text \<open> Eisbach_Tools provides the catch method, which catches run-time method errors. In this example the OF attribute throws an error when it can't compose H with A, forcing H to be re-bound to different members of imps until it succeeds. \<close> lemma assumes imps: "A \<Longrightarrow> B" "A \<Longrightarrow> C" "B \<Longrightarrow> D" assumes A: "A" shows "B \<and> C" apply (rule conjI) apply ((match imps in H:"_ \<Longrightarrow> _" \<Rightarrow> \<open>catch \<open>rule H[OF A], print_fact H\<close> \<open>print_fact H, fail\<close>\<close>)+) done text \<open> Eisbach_Tools provides the curry and uncurry attributes. This is useful when the number of premises of a thm isn't known statically. The pattern \<^term>\<open>P \<Longrightarrow> Q\<close> matches P against the major premise of a thm, and Q is the rest of the premises with the conclusion. If we first uncurry, then \<^term>\<open>P \<Longrightarrow> Q\<close> will match P with the conjunction of all the premises, and Q with the final conclusion of the rule. \<close> lemma assumes imps: "A \<Longrightarrow> B \<Longrightarrow> C" "D \<Longrightarrow> C" "E \<Longrightarrow> D \<Longrightarrow> A" shows "(A \<longrightarrow> B \<longrightarrow> C) \<and> (D \<longrightarrow> C)" by (match imps[uncurry] in H[curry]:"_ \<Longrightarrow> C" (cut, multi) \<Rightarrow> \<open>match H in "E \<Longrightarrow> _" \<Rightarrow> fail \<bar> _ \<Rightarrow> \<open>simp add: H\<close>\<close>) end
module test_mod implicit none private type, public :: my_t private real :: x, y contains procedure, private :: write_my_t generic :: write(formatted) => write_my_t end type my_t interface my_t module procedure init_my_t end interface contains function init_my_t(x, y) result(my_val) implicit none real, value :: x, y type(my_t) :: my_val my_val%x = x my_val%y = y end function init_my_t subroutine write_my_t(this, unit_nr, iotype, v_list, iostat, iomsg) implicit none class(my_t), intent(in) :: this integer, intent(in) :: unit_nr character(*), intent(in) :: iotype integer, dimension(:), intent(in) :: v_list integer, intent(out) :: iostat character(*), intent(inout) :: iomsg character(len=128) :: fmt_str if (size(v_list) == 2) then write (fmt_str, fmt='("(", """", A, " = "", F", I0, ".", I0, ", ", """, ", A, " = "", F", I0, ".", I0, ")")') & 'x', v_list(1), v_list(2), & 'y', v_list(1), v_list(2) else fmt_str = '("x = ", F0.5, ", y = ", F0.5)' end if write (unit=unit_nr, fmt=fmt_str, iostat=iostat, iomsg=iomsg) this%x, this%y end subroutine write_my_t end module test_mod
(* Title: Containers/Containers_Auxiliary.thy Author: Andreas Lochbihler, KIT *) theory Containers_Auxiliary imports "HOL-Library.Monad_Syntax" begin chapter \<open>An executable linear order on sets\<close> text_raw \<open>\label{chapter:linear:order:set}\<close> section \<open>Auxiliary definitions\<close> lemma insert_bind_set: "insert a A \<bind> f = f a \<union> (A \<bind> f)" by(auto simp add: Set.bind_def) lemma set_bind_iff: "set (List.bind xs f) = Set.bind (set xs) (set \<circ> f)" by(induct xs)(simp_all add: insert_bind_set) lemma set_bind_conv_fold: "set xs \<bind> f = fold ((\<union>) \<circ> f) xs {}" by(induct xs rule: rev_induct)(simp_all add: insert_bind_set) lemma card_gt_1D: assumes "card A > 1" shows "\<exists>x y. x \<in> A \<and> y \<in> A \<and> x \<noteq> y" proof(rule ccontr) from assms have "A \<noteq> {}" by auto then obtain x where "x \<in> A" by auto moreover assume "\<not> ?thesis" ultimately have "A = {x}" by auto with assms show False by simp qed lemma card_eq_1_iff: "card A = 1 \<longleftrightarrow> (\<exists>x. A = {x})" proof assume card: "card A = 1" hence [simp]: "finite A" using card_gt_0_iff[of A] by simp have "A = {THE x. x \<in> A}" proof(intro equalityI subsetI) fix x assume x: "x \<in> A" hence "(THE x. x \<in> A) = x" proof(rule the_equality) fix x' assume x': "x' \<in> A" show "x' = x" proof(rule ccontr) assume neq: "x' \<noteq> x" from x x' have eq: "A = insert x (insert x' (A - {x, x'}))" by auto have "card A = 2 + card (A - {x, x'})" using neq by(subst eq)(simp) with card show False by simp qed qed thus "x \<in> {THE x. x \<in> A}" by simp next fix x assume "x \<in> {THE x. x \<in> A}" hence x: "x = (THE x. x \<in> A)" by simp from card have "A \<noteq> {}" by auto then obtain x' where x': "x' \<in> A" by blast thus "x \<in> A" unfolding x proof(rule theI) fix x assume x: "x \<in> A" show "x = x'" proof(rule ccontr) assume neq: "x \<noteq> x'" from x x' have eq: "A = insert x (insert x' (A - {x, x'}))" by auto have "card A = 2 + card (A - {x, x'})" using neq by(subst eq)(simp) with card show False by simp qed qed qed thus "\<exists>x. A = {x}" .. qed auto lemma card_eq_Suc_0_ex1: "card A = Suc 0 \<longleftrightarrow> (\<exists>!x. x \<in> A)" by(auto simp only: One_nat_def[symmetric] card_eq_1_iff) context linorder begin lemma sorted_last: "\<lbrakk> sorted xs; x \<in> set xs \<rbrakk> \<Longrightarrow> x \<le> last xs" by(cases xs rule: rev_cases)(auto simp add: sorted_append) end lemma empty_filter_conv: "[] = filter P xs \<longleftrightarrow> (\<forall>x\<in>set xs. \<not> P x)" by(auto dest: sym simp add: filter_empty_conv) definition ID :: "'a \<Rightarrow> 'a" where "ID = id" lemma ID_code [code, code_unfold]: "ID = (\<lambda>x. x)" by(simp add: ID_def id_def) lemma ID_Some: "ID (Some x) = Some x" by(simp add: ID_def) lemma ID_None: "ID None = None" by(simp add: ID_def) text \<open>lexicographic order on pairs\<close> context fixes leq_a :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>\<^sub>a" 50) and less_a :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubset>\<^sub>a" 50) and leq_b :: "'b \<Rightarrow> 'b \<Rightarrow> bool" (infix "\<sqsubseteq>\<^sub>b" 50) and less_b :: "'b \<Rightarrow> 'b \<Rightarrow> bool" (infix "\<sqsubset>\<^sub>b" 50) begin definition less_eq_prod :: "('a \<times> 'b) \<Rightarrow> ('a \<times> 'b) \<Rightarrow> bool" (infix "\<sqsubseteq>" 50) where "less_eq_prod = (\<lambda>(x1, x2) (y1, y2). 
x1 \<sqsubset>\<^sub>a y1 \<or> x1 \<sqsubseteq>\<^sub>a y1 \<and> x2 \<sqsubseteq>\<^sub>b y2)" definition less_prod :: "('a \<times> 'b) \<Rightarrow> ('a \<times> 'b) \<Rightarrow> bool" (infix "\<sqsubset>" 50) where "less_prod = (\<lambda>(x1, x2) (y1, y2). x1 \<sqsubset>\<^sub>a y1 \<or> x1 \<sqsubseteq>\<^sub>a y1 \<and> x2 \<sqsubset>\<^sub>b y2)" lemma less_eq_prod_simps [simp]: "(x1, x2) \<sqsubseteq> (y1, y2) \<longleftrightarrow> x1 \<sqsubset>\<^sub>a y1 \<or> x1 \<sqsubseteq>\<^sub>a y1 \<and> x2 \<sqsubseteq>\<^sub>b y2" by(simp add: less_eq_prod_def) lemma less_prod_simps [simp]: "(x1, x2) \<sqsubset> (y1, y2) \<longleftrightarrow> x1 \<sqsubset>\<^sub>a y1 \<or> x1 \<sqsubseteq>\<^sub>a y1 \<and> x2 \<sqsubset>\<^sub>b y2" by(simp add: less_prod_def) end context fixes leq_a :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>\<^sub>a" 50) and less_a :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubset>\<^sub>a" 50) and leq_b :: "'b \<Rightarrow> 'b \<Rightarrow> bool" (infix "\<sqsubseteq>\<^sub>b" 50) and less_b :: "'b \<Rightarrow> 'b \<Rightarrow> bool" (infix "\<sqsubset>\<^sub>b" 50) assumes lin_a: "class.linorder leq_a less_a" and lin_b: "class.linorder leq_b less_b" begin abbreviation (input) less_eq_prod' :: "('a \<times> 'b) \<Rightarrow> ('a \<times> 'b) \<Rightarrow> bool" (infix "\<sqsubseteq>" 50) where "less_eq_prod' \<equiv> less_eq_prod leq_a less_a leq_b" abbreviation (input) less_prod' :: "('a \<times> 'b) \<Rightarrow> ('a \<times> 'b) \<Rightarrow> bool" (infix "\<sqsubset>" 50) where "less_prod' \<equiv> less_prod leq_a less_a less_b" lemma linorder_prod: "class.linorder (\<sqsubseteq>) (\<sqsubset>)" proof - interpret a: linorder "(\<sqsubseteq>\<^sub>a)" "(\<sqsubset>\<^sub>a)" by(fact lin_a) interpret b: linorder "(\<sqsubseteq>\<^sub>b)" "(\<sqsubset>\<^sub>b)" by(fact lin_b) show ?thesis by unfold_locales auto qed end hide_const less_eq_prod' less_prod' end
/- Copyright (c) 2020 Anne Baanen. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Anne Baanen, Devon Tuma ! This file was ported from Lean 3 source module ring_theory.polynomial.scale_roots ! leanprover-community/mathlib commit 40ac1b258344e0c2b4568dc37bfad937ec35a727 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.RingTheory.NonZeroDivisors import Mathlib.Data.Polynomial.AlgebraMap /-! # Scaling the roots of a polynomial This file defines `scaleRoots p s` for a polynomial `p` in one variable and a ring element `s` to be the polynomial with root `r * s` for each root `r` of `p` and proves some basic results about it. -/ variable {A K R S : Type _} [CommRing A] [IsDomain A] [Field K] [CommRing R] [CommRing S] variable {M : Submonoid A} namespace Polynomial open BigOperators Polynomial /-- `scaleRoots p s` is a polynomial with root `r * s` for each root `r` of `p`. -/ noncomputable def scaleRoots (p : R[X]) (s : R) : R[X] := ∑ i in p.support, monomial i (p.coeff i * s ^ (p.natDegree - i)) #align polynomial.scale_roots Polynomial.scaleRoots @[simp] theorem coeff_scaleRoots (p : R[X]) (s : R) (i : ℕ) : (scaleRoots p s).coeff i = coeff p i * s ^ (p.natDegree - i) := by simp (config := { contextual := true }) [scaleRoots, coeff_monomial] #align polynomial.coeff_scale_roots Polynomial.coeff_scaleRoots theorem coeff_scaleRoots_natDegree (p : R[X]) (s : R) : (scaleRoots p s).coeff p.natDegree = p.leadingCoeff := by rw [leadingCoeff, coeff_scaleRoots, tsub_self, pow_zero, mul_one] #align polynomial.coeff_scale_roots_nat_degree Polynomial.coeff_scaleRoots_natDegree @[simp] theorem zero_scaleRoots (s : R) : scaleRoots 0 s = 0 := by ext simp #align polynomial.zero_scale_roots Polynomial.zero_scaleRoots theorem support_scaleRoots_le (p : R[X]) (s : R) : (scaleRoots p s).support ≤ p.support := by intro simpa using left_ne_zero_of_mul #align polynomial.support_scale_roots_le Polynomial.support_scaleRoots_le theorem support_scaleRoots_eq (p : R[X]) {s : R} (hs : s ∈ nonZeroDivisors R) : (scaleRoots p s).support = p.support := le_antisymm (support_scaleRoots_le p s) (by intro i simp only [coeff_scaleRoots, Polynomial.mem_support_iff] intro p_ne_zero ps_zero have := pow_mem hs (p.natDegree - i) _ ps_zero contradiction) #align polynomial.support_scale_roots_eq Polynomial.support_scaleRoots_eq @[simp] theorem degree_scaleRoots (p : R[X]) {s : R} : degree (scaleRoots p s) = degree p := by haveI := Classical.propDecidable by_cases hp : p = 0 · rw [hp, zero_scaleRoots] refine' le_antisymm (Finset.sup_mono (support_scaleRoots_le p s)) (degree_le_degree _) rw [coeff_scaleRoots_natDegree] intro h have := leadingCoeff_eq_zero.mp h contradiction #align polynomial.degree_scale_roots Polynomial.degree_scaleRoots @[simp] theorem natDegree_scaleRoots (p : R[X]) (s : R) : natDegree (scaleRoots p s) = natDegree p := by simp only [natDegree, degree_scaleRoots] #align polynomial.nat_degree_scale_roots Polynomial.natDegree_scaleRoots theorem monic_scaleRoots_iff {p : R[X]} (s : R) : Monic (scaleRoots p s) ↔ Monic p := by simp only [Monic, leadingCoeff, natDegree_scaleRoots, coeff_scaleRoots_natDegree] #align polynomial.monic_scale_roots_iff Polynomial.monic_scaleRoots_iff theorem scaleRoots_eval₂_mul {p : S[X]} (f : S →+* R) (r : R) (s : S) : eval₂ f (f s * r) (scaleRoots p s) = f s ^ p.natDegree * eval₂ f r p := calc _ = (scaleRoots p s).support.sum fun i => f (coeff p i * s ^ (p.natDegree - i)) * (f s 
* r) ^ i := by simp [eval₂_eq_sum, sum_def] _ = p.support.sum fun i => f (coeff p i * s ^ (p.natDegree - i)) * (f s * r) ^ i := (Finset.sum_subset (support_scaleRoots_le p s) fun i _hi hi' => by let this : coeff p i * s ^ (p.natDegree - i) = 0 := by simpa using hi' simp [this]) _ = p.support.sum fun i : ℕ => f (p.coeff i) * f s ^ (p.natDegree - i + i) * r ^ i := (Finset.sum_congr rfl fun i _hi => by simp_rw [f.map_mul, f.map_pow, pow_add, mul_pow, mul_assoc]) _ = p.support.sum fun i : ℕ => f s ^ p.natDegree * (f (p.coeff i) * r ^ i) := (Finset.sum_congr rfl fun i hi => by rw [mul_assoc, mul_left_comm, tsub_add_cancel_of_le] exact le_natDegree_of_ne_zero (Polynomial.mem_support_iff.mp hi)) _ = f s ^ p.natDegree * p.support.sum fun i : ℕ => f (p.coeff i) * r ^ i := Finset.mul_sum.symm _ = f s ^ p.natDegree * eval₂ f r p := by simp [eval₂_eq_sum, sum_def] #align polynomial.scale_roots_eval₂_mul Polynomial.scaleRoots_eval₂_mul theorem scaleRoots_eval₂_eq_zero {p : S[X]} (f : S →+* R) {r : R} {s : S} (hr : eval₂ f r p = 0) : eval₂ f (f s * r) (scaleRoots p s) = 0 := by rw [scaleRoots_eval₂_mul, hr, mul_zero] #align polynomial.scale_roots_eval₂_eq_zero Polynomial.scaleRoots_eval₂_eq_zero theorem scaleRoots_aeval_eq_zero [Algebra S R] {p : S[X]} {r : R} {s : S} (hr : aeval r p = 0) : aeval (algebraMap S R s * r) (scaleRoots p s) = 0 := scaleRoots_eval₂_eq_zero (algebraMap S R) hr #align polynomial.scale_roots_aeval_eq_zero Polynomial.scaleRoots_aeval_eq_zero theorem scaleRoots_eval₂_eq_zero_of_eval₂_div_eq_zero {p : A[X]} {f : A →+* K} (hf : Function.Injective f) {r s : A} (hr : eval₂ f (f r / f s) p = 0) (hs : s ∈ nonZeroDivisors A) : eval₂ f (f r) (scaleRoots p s) = 0 := by convert @scaleRoots_eval₂_eq_zero _ _ _ _ p f _ s hr rw [← mul_div_assoc, mul_comm, mul_div_cancel] exact map_ne_zero_of_mem_nonZeroDivisors _ hf hs #align polynomial.scale_roots_eval₂_eq_zero_of_eval₂_div_eq_zero Polynomial.scaleRoots_eval₂_eq_zero_of_eval₂_div_eq_zero theorem scaleRoots_aeval_eq_zero_of_aeval_div_eq_zero [Algebra A K] (inj : Function.Injective (algebraMap A K)) {p : A[X]} {r s : A} (hr : aeval (algebraMap A K r / algebraMap A K s) p = 0) (hs : s ∈ nonZeroDivisors A) : aeval (algebraMap A K r) (scaleRoots p s) = 0 := scaleRoots_eval₂_eq_zero_of_eval₂_div_eq_zero inj hr hs #align polynomial.scale_roots_aeval_eq_zero_of_aeval_div_eq_zero Polynomial.scaleRoots_aeval_eq_zero_of_aeval_div_eq_zero theorem map_scaleRoots (p : R[X]) (x : R) (f : R →+* S) (h : f p.leadingCoeff ≠ 0) : (p.scaleRoots x).map f = (p.map f).scaleRoots (f x) := by ext simp [Polynomial.natDegree_map_of_leadingCoeff_ne_zero _ h] #align polynomial.map_scale_roots Polynomial.map_scaleRoots end Polynomial
```python import jax.numpy as jnp from sympy import * ``` ```python x,y,z=symbols('x y z') init_printing(use_unicode=True) ``` ```python print("\ncos(x) =",diff(cos(x),x),"\n") diff(exp(x**2),x) ``` We can compute more involved derivatives. Let us take, for example, the seventh derivative of exp(xyz) with respect to its variables: ## $\frac{\partial^7}{\partial x \partial y^2 \partial z^4}\exp(xyz)$ ```python expr= exp(x*y*z) diff(expr,x,y,y,z,z,z,z) ``` For numerical derivatives: ## $f'(x_i)\approx \frac{\Delta f}{\Delta x}= \frac{f(x_{i+1})-f(x_i)}{x_{i+1}-x_i}$ ```python import numpy as np import matplotlib.pyplot as plt def derivative(x,y): xp=np.zeros(len(x)) yp=np.zeros(len(x)) for i in range(len(x)-1): yp[i]=y[i+1]-y[i] xp[i]=x[i+1]-x[i] return (yp/xp) ``` ```python x1=np.linspace(-10,10,100) f1=x1**2*np.sin(x1) der=derivative(x1,f1) ``` /Users/diegobarbosa/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: invalid value encountered in true_divide # Remove the CWD from sys.path while we load stuff. ```python plt.figure(figsize=(15,8)) plt.plot(x1,f1,'r*',label='Function') plt.plot(x1,der,'kd',label='Numerical derivative') plt.legend(fontsize=15) plt.xlabel("x",fontsize=15) plt.ylabel("y",fontsize=15) plt.legend(fontsize=15) plt.grid() ``` ```python from jax import grad,jit,vmap import jax.numpy as jnp x=jnp.linspace(-5,5,100) grad_f=jit(vmap(grad(jnp.tanh))) g2=jit(vmap(grad(grad(jnp.tanh)))) g3=jit(vmap(grad(grad(grad(jnp.tanh))))) plt.figure(figsize=(15,8)) #plt.plot(x,y,'o') plt.plot(x,np.tanh(x)) plt.plot(x,grad_f(x)) plt.plot(x,g2(x)) plt.plot(x,g3(x)) plt.xlabel("Measurements",fontsize=15) plt.ylabel("Observations",fontsize=15) plt.legend(["Data"],fontsize=15) plt.grid() plt.show() ``` ```python ```
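The `derivative` helper above only fills the first `len(x)-1` slots, so its final entry evaluates 0/0 and triggers the `RuntimeWarning` shown in the output. A minimal forward-difference sketch that avoids this, assuming only NumPy (the names and test function below are illustrative):

```python
import numpy as np

def forward_diff(x, y):
    # one-sided differences on the first len(x)-1 points; the last point has
    # no forward neighbour, so it is simply not returned
    dy = np.diff(y) / np.diff(x)
    return x[:-1], dy

x1 = np.linspace(-10, 10, 100)
f1 = x1**2 * np.sin(x1)
xm, der = forward_diff(x1, f1)   # same finite values as above, without the 0/0 slot
```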
Load LFindLoad. Load LFindLoad. From adtind Require Import goal9. From lfind Require Import LFind. Lemma lfind_state (x1:natural) (x2:natural) (x3:natural) (n:natural) (l:lst) (IHl:@eq lst (drop (Succ x1) (drop (Succ x2) (Cons x3 l))) (drop (Succ x2) (drop (Succ x1) (Cons x3 l)))):@eq lst (drop x2 (drop (Succ x1) (Cons n l))) (drop x1 (drop (Succ x2) (Cons n l))). Admitted. From QuickChick Require Import QuickChick. QCInclude "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal9_drop_Cons_assoc_33_drop_Cons/". QCInclude ".". Extract Constant defNumTests => "50". Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Derive Show for lst. Derive Arbitrary for lst. Instance Dec_Eq_lst : Dec_Eq lst. Proof. dec_eq. Qed. Open Scope string_scope. Parameter print : natural -> string -> natural. Extract Constant print => "Extract.print". Definition collect_data (x1:natural) (x2:natural) (x3:natural) (n:natural) (l:lst) := let lfind_var := "x1:" ++ "(" ++ show x1 ++ ")"++ "|" ++"x2:" ++ "(" ++ show x2 ++ ")"++ "|" ++"x3:" ++ "(" ++ show x3 ++ ")"++ "|" ++"n:" ++ "(" ++ show n ++ ")"++ "|" ++"l:" ++ "(" ++ show l ++ ")" in let lfind_v := print x1 lfind_var in lfind_state lfind_v x2 x3 n l. QuickChick collect_data. Success.
%% Copyright (C) 2014-2016, 2022 Colin B. Macdonald %% %% This file is part of OctSymPy. %% %% OctSymPy is free software; you can redistribute it and/or modify %% it under the terms of the GNU General Public License as published %% by the Free Software Foundation; either version 3 of the License, %% or (at your option) any later version. %% %% This software is distributed in the hope that it will be useful, %% but WITHOUT ANY WARRANTY; without even the implied warranty %% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See %% the GNU General Public License for more details. %% %% You should have received a copy of the GNU General Public %% License along with this software; see the file COPYING. %% If not, see <http://www.gnu.org/licenses/>. %% -*- texinfo -*- %% @documentencoding UTF-8 %% @defmethod @@sym asind (@var{x}) %% Symbolic inverse sin function with output in degrees. %% %% Example: %% @example %% @group %% asind (sqrt (sym (2))/2) %% @result{} (sym) 45 %% %% syms x %% y = asind (x) %% @result{} y = (sym) %% 180⋅asin(x) %% ─────────── %% π %% @end group %% @end example %% %% @seealso{@@sym/sind, @@sym/asin} %% @end defmethod function y = asind(x) if (nargin ~= 1) print_usage (); end y = elementwise_op ('lambda a: deg(asin(a))', x); end %!error asind (sym(1), 2) %!assert (isequaln (asind (sym(nan)), sym(nan))) %!test %! f1 = asind (sym(1)/2); %! f2 = asind (1/2); %! assert (double (f1), f2, -eps) %!test %! D = [1 2; 3 4]/4; %! A = sym([1 2; 3 4])/4; %! f1 = asind (A); %! f2 = asind (D); %! assert (double (f1), f2, -eps)
function loadInputFileCallback(grfBlockHandle) % This function runs an input file and loads the contents into the block % masks. %% Run the input file selected in the global reference frame % Find input filename mask = Simulink.Mask.get(grfBlockHandle); InputFile = mask.getParameter('InputFile'); inputFilePath = InputFile.Value; %% Set class parameters in block mask % Get all simulink blocks blocks = find_system(bdroot,'Type','Block'); % Loop through blocks, writing each to the input file for i=1:length(blocks) % Variable names and values of a block names = get_param(blocks{i},'MaskNames'); blockHandle = getSimulinkBlockHandle(blocks{i}); % Check if the block is from the WEC-Sim library if any(contains(names,{'simu','waves'})) % Global reference frame type = 0; elseif any(contains(names,{'body'})) % flexible or rigid body type = 1; elseif any(contains(names,{'pto'})) % pto type = 2; elseif any(contains(names,{'constraint'})) % constraint type = 3; elseif any(contains(names,{'mooring'})) && any(contains(names,{'stiffness'})) % mooring matrix type = 4; elseif any(contains(names,{'mooring'})) && any(contains(names,{'moorDynLines'})) % moorDyn type = 5; elseif any(contains(names,{'cable'})) type = 6; end writeBlocksFromInput(blockHandle,type,inputFilePath); % write blocks again to account for read only params that are now open if type==1 || type==0 writeBlocksFromInput(blockHandle,type,inputFilePath); end end end
lemma (in sigma_algebra) sets_Collect_countable_Ex1': assumes "\<And>i. i \<in> I \<Longrightarrow> {x\<in>\<Omega>. P i x} \<in> M" assumes "countable I" shows "{x\<in>\<Omega>. \<exists>!i\<in>I. P i x} \<in> M"
(* Revealed a missing re-consideration of postponed problems *) Module A. Inductive flat_type := Unit | Prod (A B : flat_type). Inductive exprf (op : flat_type -> flat_type -> Type) {var : Type} : flat_type -> Type := | Op {t1 tR} (opc : op t1 tR) (args : exprf op t1) : exprf op tR. Inductive op : flat_type -> flat_type -> Type := . Arguments Op {_ _ _ _} _ _. Definition bound_op {var} {src2 dst2} (opc2 : op src2 dst2) : forall (args2 : exprf op (var:=var) src2), Op opc2 args2 = Op opc2 args2 := match opc2 return (forall args2, Op opc2 args2 = Op opc2 args2) with end. End A. (* A shorter variant *) Module B. Inductive exprf (op : unit -> Type) : Type := | A : exprf op | Op tR (opc : op tR) (args : exprf op) : exprf op. Inductive op : unit -> Type := . Definition bound_op (dst2 : unit) (opc2 : op dst2) : forall (args2 : exprf op), Op op dst2 opc2 args2 = A op := match opc2 in op h return (forall args2 : exprf ?[U], Op ?[V] ?[I] opc2 args2 = A op) with end. End B.
```python l=1.0 n=100 h=l/(n-1) τ = 1e-4 δ = 0.01 Gr = 100 C = 1 from sympy import simplify, collect from sympy.abc import y, x, z, m, C, h, G, d, t m = x - t*((d**2)*(y - 2*x + z)/(h**2) - G*(d**2)*m*(y-z)/(2*h) - C) ``` ```python collect(m, m) ``` $\displaystyle - t \left(- C - \frac{G d^{2} m \left(y - z\right)}{2 h} + \frac{d^{2} \left(- 2 x + y + z\right)}{h^{2}}\right) + x$
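A caveat on the cell above: `C` and `h` are first bound to numbers and then shadowed by the symbols imported from `sympy.abc`, and `m` itself is rebound to the expression, so `collect(m, m)` ends up grouping by the whole expression rather than by the symbol m. A small sketch, assuming plain SymPy, that keeps the unknown as a symbol so the coefficient of m can actually be collected and the implicit relation (which is linear in m) solved:

```python
from sympy import symbols, collect, solve, Eq

x, y, z, C, h, G, d, t, m = symbols('x y z C h G d t m')

rhs = x - t*(d**2*(y - 2*x + z)/h**2 - G*d**2*m*(y - z)/(2*h) - C)

grouped = collect(rhs.expand(), m)   # terms grouped by the symbol m
m_new = solve(Eq(m, rhs), m)[0]      # closed form; rhs is linear in m
```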
Formal statement is: lemma to_fract_eq_iff [simp]: "to_fract x = to_fract y \<longleftrightarrow> x = y" Informal statement is: The function to_fract is injective.
Require Import VST.floyd.proofauto. Require Import VST.progs.minexample. Instance CompSpecs : compspecs. make_compspecs prog. Defined. Definition Vprog : varspecs. mk_varspecs prog. Defined. Definition minexample_spec : ident * funspec := DECLARE _minexample WITH a: val, b: val, c: val, d: val, sh : share PRE [] PROP () LOCAL () SEP (data_at sh tint Vzero a; data_at sh tint Vzero a -* data_at sh tint Vzero b) POST [ tint ] PROP() LOCAL (temp ret_temp Vzero) SEP (). Definition Gprog : funspecs := ltac:(with_library prog [minexample_spec]). Lemma body_minexample : semax_body Vprog Gprog f_minexample minexample_spec. Proof. start_function. (* pattern-accepting behavior: *) Fail (gather_SEP (data_at sh tint Vzero a) (data_at sh tint Vzero a -* data_at sh tint Vzero b)). (* equivalent numerical behavior: *) gather_SEP 0 1. Abort.
/* Copyright 2012 Rogier van Dalen. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /** \file Define a sequence parser, which parses two inputs after one another */ #ifndef PARSE_LL_SEQUENCE_HPP_INCLUDED #define PARSE_LL_SEQUENCE_HPP_INCLUDED #include "fwd.hpp" #include "core.hpp" #include "error.hpp" // For std::tuple #include <utility> #include <boost/optional.hpp> #include <boost/utility/typed_in_place_factory.hpp> namespace parse_ll { template <bool expect, class Parser1, class Parser2> struct sequence_parser : parser_base <sequence_parser <expect, Parser1, Parser2> > { Parser1 parser_1; Parser2 parser_2; public: sequence_parser (Parser1 const & parser_1, Parser2 const & parser_2) : parser_1 (parser_1), parser_2 (parser_2) {} }; struct sequence_parser_tag; template <bool expect, class Parser1, class Parser2> struct decayed_parser_tag <sequence_parser <expect, Parser1, Parser2>> { typedef sequence_parser_tag type; }; template <bool expect, class Policy, class Parser1, class Parser2, class Input, class Output1 = typename detail::parser_output <Policy, Parser1, Input >::type, class Output2 = typename detail::parser_output <Policy, Parser2, Input >::type> struct sequence_outcome; /** Sequence of two consecutive parses. If the first parser is a sequence parser, the output of the second parser is appended to the output of the first one. This can therefore be seen as a parser that uses recursion to implement variable-length sequences. The expect parser variant of the sequence parser has "expect = 1". It is like a sequence parser but does not retain the left parse, because backtracking is impossible once the left parse has succeeded. The second parser is expected to succeed if the first does, otherwise an exception is thrown. success() can therefore short-circuit. \todo With a proper range library, it should probably be implemented with a variadic sequence of parsers. \note All outcomes are currently kept in memory. However, outcomes may be large-ish, and include redundant input ranges. It may be possible to keep a reduced version of the parses, that is asserted to have been successful and does not keep the input. Laborious. \todo The lazy version of sequence_parser should roughly correspond to the one for repeat_parser. It may even use similar strategies for caching rest(). \todo What should be the model for a > b >> c ? Once a has succeeded, both b and c must succeed. One of three options must be chosen: 1. > and >> are separate, as currently implemented. If an error occurs in c, the position reported will be at b. 2. > infects >>. This is essentially a change from what the writer of the parser intended, but the only difference, really, is where the error is reported. The great advantage would be that the input range for b will not be retained once b has been parsed. 3. >> following > will return a different error type, which contains both input ranges \todo When Rime gets used, it will be possible to automatically turn "expect" on if parser_2.success() is always true. 
\todo It would be good to get rid of Output1 and Output2, which might lead to horribly long compiler errors. However, in the recursive version of this, this makes operation::output very complicated. Postpone until the sequential version. */ template <bool expect, class Policy, class Parser1, class Parser2, class Input, class Output1, class Output2> struct sequence_outcome { typedef typename detail::parser_outcome <Policy, Parser1, Input>::type outcome_1_type; typedef typename detail::parser_outcome <Policy, Parser2, Input>::type outcome_2_type; outcome_1_type outcome_1; // If success (outcome_1): boost::optional <outcome_2_type> outcome_2; // If !success (outcome_1): boost::optional <Input> input; public: sequence_outcome (Policy const & policy, Parser1 const & parser_1, Parser2 const & parser_2, Input const & input) : outcome_1 (parse (policy, parser_1, input)) { if (success (outcome_1)) { // The in-place factory makes sure that operator= is not needed. outcome_2 = boost::in_place <outcome_2_type, outcome_2_type> ( parse (policy, parser_2, skip_over (policy.skip_parser(), rest (outcome_1)))); // For an expect parser, outcome_2 must succeed if outcome_1 does. if (expect && !success (*outcome_2)) // Otherwise, construction fails. throw error() << error_at <Input> (rest (outcome_1)); } else this->input = input; } }; namespace operation { template <> struct parse <sequence_parser_tag> { template <class Policy, bool expect, class Parser1, class Parser2, class Input> sequence_outcome <expect, Policy, Parser1, Parser2, Input> operator() (Policy const & policy, sequence_parser <expect, Parser1, Parser2> const & parser, Input const & input) const { return sequence_outcome <expect, Policy, Parser1, Parser2, Input> ( policy, parser.parser_1, parser.parser_2, input); } }; template <> struct describe <sequence_parser_tag> { template <class Parser> const char * operator() (Parser const &) const { return "sequence"; } }; template <bool except, class Policy, class Parser1, class Parser2, class Input> struct success < sequence_outcome <except, Policy, Parser1, Parser2, Input>> { bool operator() ( sequence_outcome <false, Policy, Parser1, Parser2, Input> const & outcome) const { return ::parse_ll::success (outcome.outcome_1) && ::parse_ll::success (*outcome.outcome_2); } bool operator() (sequence_outcome <true, Policy, Parser1, Parser2, Input > const & outcome) const { // If the first parser succeeds, the next must do too. return ::parse_ll::success (outcome.outcome_1); } }; // something1 + someting2 -> tuple <something1, something2> template <bool except, class Policy, class Parser1, class Parser2, class Input, class Output1, class Output2> struct output <sequence_outcome <except, Policy, Parser1, Parser2, Input, Output1, Output2>> { std::tuple <Output1, Output2> operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return std::tuple <Output1, Output2> ( ::parse_ll::output (outcome.outcome_1), ::parse_ll::output (*outcome.outcome_2)); } }; //*** Special case type 1: void // void + void -> void template <bool except, class Policy, class Parser1, class Parser2, class Input> struct output <sequence_outcome < except, Policy, Parser1, Parser2, Input, void, void>> { // Never executed because it is short-circuited globally. 
void operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const; }; // something + void -> tuple <something> template <bool except, class Policy, class Parser1, class Parser2, class Input, class Output1> struct output <sequence_outcome < except, Policy, Parser1, Parser2, Input, Output1, void>> { std::tuple <Output1> operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return std::tuple <Output1> ( ::parse_ll::output (outcome.outcome_1)); } }; // void + something -> tuple <something> template <bool except, class Policy, class Parser1, class Parser2, class Input, class Output2> struct output <sequence_outcome < except, Policy, Parser1, Parser2, Input,void, Output2>> { std::tuple <Output2> operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return std::tuple <Output2> ( ::parse_ll::output (*outcome.outcome_2)); } }; //*** Special case type 2: Parser1 is a sequence_parser. // tuple <something1...> (from sequence_parser) + something2 // -> tuple <something1..., something2> template <bool except, class Policy, class Parser1, class Parser2, class Input, class ... Output1, class Output2> struct output <sequence_outcome <except, Policy, Parser1, Parser2, Input, std::tuple <Output1...>, Output2>> { std::tuple <Output1 ..., Output2> operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return std::tuple_cat (::parse_ll::output (outcome.outcome_1), std::tuple <Output2> (::parse_ll::output (*outcome.outcome_2))); } }; // tuple <something...> (from sequence_parser) + void // -> tuple <something...> template <bool except, class Policy, bool sub_except, class Parser11, class Parser12, class Parser2, class Input, class ... Output1> struct output <sequence_outcome <except, Policy, sequence_parser <sub_except, Parser11, Parser12>, Parser2, Input, std::tuple <Output1...>, void>> { template <class Parser1> std::tuple <Output1...> operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return ::parse_ll::output (outcome.outcome_1); } }; template <bool except, class Policy, class Parser1, class Parser2, class Input> struct rest <sequence_outcome <except, Policy, Parser1, Parser2, Input>> { Input operator() (sequence_outcome <except, Policy, Parser1, Parser2, Input> const & outcome) const { return ::parse_ll::rest (*outcome.outcome_2); } }; } // namespace operation } // namespace parse_ll #endif // PARSE_LL_SEQUENCE_HPP_INCLUDED
############################################################################# #### ## #W ace.gi ACE Package Alexander Hulpke #W Greg Gamble ## ## `Head' file for the GAP interface to the ACE (Advanced Coset Enumerator), ## by George Havas and Colin Ramsay. The original interface was written by ## Alexander Hulpke and extensively modified by Greg Gamble. ## #Y Copyright (C) 2000 Centre for Discrete Mathematics and Computing #Y Department of Information Technology & Electrical Eng. #Y University of Queensland, Australia. ## ############################################################################# #### ## #V ACEData . . . . . . . record used by various functions of the ACE package ## ## The fields of ACEData are: ## ## "binary" . . the path of the ACE binary ## "tmpdir" . . the path of the temporary directory for ACE i/o files ## "ni" . . . . record for a non-interactive process ## "io" . . . . list of data records for ACEStart IO Streams ## "infile" . . the path of the ACE input file ## "outfile" . . the path of the ACE output file ## "version" . . the version of the current ACE binary ## InstallValue( ACEData, rec( binary := ExternalFilename(DirectoriesPackagePrograms("ace"), "ace"), tmpdir := DirectoryTemporary(), ni := rec(), io := [] # Initially no ACEStart IO Streams ) ); ACEData.infile := Filename(ACEData.tmpdir, "in"); ACEData.outfile := Filename(ACEData.tmpdir, "out"); PrintTo(ACEData.infile, "\n"); # Fire up ACE with a null input (ACEData.infile contains only a "\n") # ... to generate a banner (which has ACE's current version) Exec(Concatenation(ACEData.binary, "<", ACEData.infile, ">", ACEData.outfile)); ACEData.version := StringFile( ACEData.outfile ); ACEData.scratch := PositionSublist(ACEData.version, "ACE") + 4; ACEData.version := ACEData.version{[ACEData.scratch .. Position(ACEData.version, ' ', ACEData.scratch) - 1]}; Unbind(ACEData.scratch); # We don't need ACEData.scratch, anymore. ############################################################################# ## #I InfoClass ## # Set the default level of InfoACE SetInfoLevel(InfoACE, 1); ############################################################################# #### ## #V ACEIgnoreUnknownDefault . . . . . . . . . . . . the default value of the ## . . . . . . . . . . . . . . . . . . . . . . . . `aceignoreunknown' option ## ACEIgnoreUnknownDefault := true; ############################################################################# #### ## Ensure no zombie ACE processes from interactive (ACEStart) sessions are ## . . . . . . . . . . . . . . . . . left lying around when user quits GAP ## InstallAtExit( ACEQuitAll ); #E ace.gi . . . . . . . . . . . . . . . . . . . . . . . . . . . . ends here
``` %pylab inline ``` Welcome to pylab, a matplotlib-based Python environment [backend: module://IPython.zmq.pylab.backend_inline]. For more information, type 'help(pylab)'. ``` from sympy import Symbol, fresnels, fresnelc, oo, I, re, im, series, Rational, sin, cos, exp, plot from sympy.plotting import plot, plot_parametric from matplotlib.pyplot import figsize ``` Plot of the two Fresnel integrals $S(x)$ and $C(x)$ ``` x = Symbol("x") ``` ``` plot(fresnels(x), fresnelc(x), (x, 0, 8)) ``` The Cornu spiral defined as the parametric curve $u(t),v(t) := C(t), S(t)$ ``` figsize(8,8) plot_parametric(fresnelc(x), fresnels(x)) ``` Compute and plot the leading order behaviour around $x=0$ ``` ltc = series(fresnelc(x), x, n=2).removeO() lts = series(fresnels(x), x, n=4).removeO() ``` ``` lts, ltc ``` (pi*x**3/6, x) ``` figsize(4,4) plot(fresnels(x), lts, (x, 0, 1)) plot(fresnelc(x), ltc, (x, 0, 1)) ``` Compute and plot the asymptotic series expansion at $x=\infty$ ``` # Series expansion at infinity is not implemented yet #ats = series(fresnels(x), x, oo) #atc = series(fresnelc(x), x, oo) ``` ``` # However we can use the well known values ats = Rational(1,2) - cos(pi*x**2/2)/(pi*x) atc = Rational(1,2) + sin(pi*x**2/2)/(pi*x) ``` ``` figsize(4,4) plot(fresnels(x), ats, (x, 6, 8)) plot(fresnelc(x), atc, (x, 6, 8)) ``` Another nice example of a parametric plot ``` alpha = Symbol("alpha") r = 3.0 circ = r*exp(1.0j*alpha) ``` ``` figsize(8,8) plot_parametric(re(fresnelc(circ)), im(fresnelc(circ)), (alpha, 0, 2*pi)) ``` ``` ```
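As a quick numerical check of the asymptotic forms above (assuming a SymPy version where `fresnels` supports `evalf`), the error of the leading approximation should fall off roughly like the next term in the expansion, which is of order $1/x^3$:

```python
from sympy import Symbol, fresnels, cos, pi, Rational

x = Symbol("x")
ats = Rational(1, 2) - cos(pi*x**2/2)/(pi*x)   # leading asymptotic form of S(x)

for xv in (2, 4, 8):
    err = (fresnels(x) - ats).evalf(subs={x: xv})
    print(xv, err)
```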
module Web.Internal.WebidlTypes import JS -------------------------------------------------------------------------------- -- Interfaces -------------------------------------------------------------------------------- export data DOMException : Type where [external] export ToFFI DOMException DOMException where toFFI = id export FromFFI DOMException DOMException where fromFFI = Just export SafeCast DOMException where safeCast = unsafeCastOnPrototypeName "DOMException" -------------------------------------------------------------------------------- -- Callbacks -------------------------------------------------------------------------------- export data Function : Type where [external] export ToFFI Function Function where toFFI = id export FromFFI Function Function where fromFFI = Just export data VoidFunction : Type where [external] export ToFFI VoidFunction VoidFunction where toFFI = id export FromFFI VoidFunction VoidFunction where fromFFI = Just
Require Import Fiat.Parsers.Reflective.Semantics. Require Import Fiat.Parsers.Reflective.ParserSyntax. Require Import Fiat.Parsers.Reflective.ParserSemanticsOptimized. Require Import Fiat.Parsers.Reflective.ParserSoundness. Require Import Fiat.Parsers.Reflective.PartialUnfold. Require Import Fiat.Parsers.Reflective.ParserPartialUnfold. Set Implicit Arguments. Module opt. Section polypnormalize. Context (is_valid_nonterminal : list nat -> nat -> bool) (strlen : nat) (char_at_matches_interp : nat -> Reflective.RCharExpr Ascii.ascii -> bool) (split_string_for_production : nat * (nat * nat) -> nat -> nat -> list nat). Let interp {T} := opt.interp_has_parse_term (T := T) is_valid_nonterminal strlen char_at_matches_interp split_string_for_production. Lemma polypnormalize_correct {T} (term : polyhas_parse_term T) : ParserSyntaxEquivalence.has_parse_term_equiv nil (term interp_TypeCode) (term (normalized_of interp_TypeCode)) -> interp (term _) = interp (polypnormalize term _). Proof. apply polypnormalize_correct; assumption. Qed. End polypnormalize. End opt.
After stumbling upon the Twitter account YourAnonNews (associated with the recently acclaimed activist group Anonymous, according to the bio), I found an intriguing tweet regarding a reporter who endured an intimidating confrontation with National Security Agency security after attempting to photograph a new NSA structure cleverly named the Utah Data Center. Its purpose: to intercept, decipher, analyze, and store vast swaths of the world’s communications as they zap down from satellites and zip through the underground and undersea cables of international, foreign, and domestic networks. The heavily fortified $2 billion center should be up and running in September 2013. Flowing through its servers and routers and stored in near-bottomless databases will be all forms of communication, including the complete contents of private emails, cell phone calls, and Google searches, as well as all sorts of personal data trails—parking receipts, travel itineraries, bookstore purchases, and other digital “pocket litter.” It is, in some measure, the realization of the “total information awareness” program created during the first term of the Bush administration—an effort that was killed by Congress in 2003 after it caused an outcry over its potential for invading Americans’ privacy. Forbes (adorable, if I may say so) privacy beat reporter Kashmir Hill had a few hours to spare whilst on a trip in Utah and paid a spontaneous visit to the NSA’s immense data center, which is still under construction. She decided to travel to the site with professor Randy Dryer, who also happens to be an experienced, practicing media lawyer. Hill and Dryer’s time of leisure was met with unexpected trouble: they pissed off security. NSA security was unsettled by Hill’s presence because they were aware she was taking photographs of the scene. In a column she recently wrote for her Forbes blog "Not-So-Private Parts" regarding the incident, she described an hour-long interrogation and encounter with stern, mysterious officers. Hill noted in her editorial that one of the officers recorded their conversation in a notebook. She depicted an entirely surreal environment: a bizarre blinking sign, very small warning signs listing trespassing rules, curious statements made by the officers regarding the building and NSA policy, and a questionable structure resembling a gas station but with no pumps, all next to a highway with a breathtaking mountain view. The NSA officers commanded Hill to delete a few of her photographs and advised that doing so would make it easier to avoid a trespassing charge. Hill was bullied in defense of the agency’s privacy, and that is exactly why anyone should raise concern about this whole event. We, as citizens, do not hold the same opportunity and authority as the NSA officers to defend our own privacy, which is being trespassed upon within the Utah NSA establishment. Although Hill was stripped of her photographic evidence, her journalism skills were not; she used her voice to exercise the right to search for the truth, reflecting upon a frightening thought in her column. Her words are wise, certainly surpassing the impact her photographs would have had. Honestly I was starting to feel pretty nervous at this point but also painfully aware of the irony of the situation. 
They didn’t want me to capture information about a facility that will soon be harvesting and storing massive amounts of information about American citizens, potentially including many photos that have been privately sent. Kashmir Hill should be commended for serving her duty to journalism in such a profound way. She is a real dudette with whistle-blowing qualities. Even though it may not seem that she has accomplished much on the matter, she verbally exposed an unusual occurrence that is important for everyone to be aware of. To what extent do we, as citizens, allow our privacy to be invaded? Why have this story and the facility itself received so little attention, when our collective, personal data is the center of attention for the building’s existence? The Anonymous Twitter account YourAnonNews noted this paradox, that we, as a whole, are not encouraged to learn about it, while simultaneously illuminating the group’s mission: “We like doing things we’re not encouraged to do.” Then the account reminded followers of one important thing: this NSA creation was brought to life by American tax dollars. If there is one thing I can urge you to do, it’s to read both of these articles in the links below and open your eyes.
||| A `Sink` is a monadic streaming function that consumes ||| data but produces no relevant output. module Rhone.JS.Sink import Control.Category import Control.Monad.Dom import Data.Maybe import Data.MSF import JS import Text.Html import Web.Dom import Web.Html %default total ||| Sets the innerHTML property of the referenced node to ||| the input string value. export %inline innerHtml : LiftJSIO m => ElemRef t -> MSF m String () innerHtml = arrM . rawInnerHtmlAt ||| Replaces the target's child nodes with a `Text` node ||| displaying the input `String`. The `String` will be ||| properly escaped before being inserted. export text : LiftJSIO m => ElemRef t -> MSF m String () text ref = escape ^>> innerHtml ref -------------------------------------------------------------------------------- -- Attributes -------------------------------------------------------------------------------- ||| Sets or removes the attribute of the given name ||| at the given target element. export attribute : LiftJSIO m => (name : String) -> MSF m (NP I [ElemRef t, Maybe String]) () attribute name = arrM $ \[ref,m] => liftJSIO $ do el <- castElementByRef {t2 = Element} ref case m of Just s => setAttribute el name s Nothing => removeAttribute el name ||| Sets or unsets the attribute of the given element. export %inline attributeAt : LiftJSIO m => (name : String) -> ElemRef t -> MSF m (Maybe String) () attributeAt = firstArg . attribute ||| Sets the attribute of the given name at the given target element. export attribute_ : LiftJSIO m => (name : String) -> MSF m (NP I [ElemRef t, String]) () attribute_ name = (\[a,b] => [a,Just b]) ^>> attribute name ||| Sets or unsets the attribute of the given element. export %inline attributeAt_ : LiftJSIO m => (name : String) -> ElemRef t -> MSF m String () attributeAt_ = firstArg . attribute_ ||| Sets or unsets the boolean attribute of the given name at ||| the given target element. export boolAttribute : LiftJSIO m => (name : String) -> MSF m (NP I [ElemRef t, Bool]) () boolAttribute name = (\[a,b] => [a,toMaybe b ""]) ^>> attribute name ||| Sets or unsets the `disabled` attribute of the given element. export %inline disabled : LiftJSIO m => MSF m (NP I [ElemRef t, Bool]) () disabled = boolAttribute "disabled" ||| Sets or unsets the `disabled` attribute of the given element. export %inline disabledAt : LiftJSIO m => ElemRef t -> MSF m Bool () disabledAt = firstArg disabled ||| Sets or unsets the `hidden` attribute of the given element. export %inline hidden : LiftJSIO m => MSF m (NP I [ElemRef t, Bool]) () hidden = boolAttribute "hidden" ||| Sets or unsets the `hidden` attribute of the given element. export %inline hiddenAt : LiftJSIO m => ElemRef t -> MSF m Bool () hiddenAt = firstArg hidden ||| Sets the `class` attribute of the given element. export %inline class : LiftJSIO m => MSF m (NP I [ElemRef t, String]) () class = attribute_ "class" ||| Sets the `class` attribute of the given element. export %inline classAt : LiftJSIO m => ElemRef t -> MSF m String () classAt = firstArg class -------------------------------------------------------------------------------- -- Input Validation -------------------------------------------------------------------------------- ||| Interface for DOM elements that can have a custom ||| validity message set. 
public export interface SafeCast t => SetValidity t where setValidityMessage : t -> String -> JSIO () export SetValidity HTMLButtonElement where setValidityMessage = setCustomValidity export SetValidity HTMLFieldSetElement where setValidityMessage = setCustomValidity export SetValidity HTMLInputElement where setValidityMessage = setCustomValidity export SetValidity HTMLObjectElement where setValidityMessage = setCustomValidity export SetValidity HTMLOutputElement where setValidityMessage = setCustomValidity export SetValidity HTMLSelectElement where setValidityMessage = setCustomValidity export SetValidity HTMLTextAreaElement where setValidityMessage = setCustomValidity export setValidityMessageAt : SetValidity t => LiftJSIO m => ElemRef t -> String -> m () setValidityMessageAt ref s = liftJSIO (getElementByRef ref >>= (`setValidityMessage` s)) ||| Sets a custom validity message at the given target element export validityMessageAt : SetValidity t => LiftJSIO m => ElemRef t -> MSF m String () validityMessageAt = arrM . setValidityMessageAt ||| Sets or unsets a custom validity message at the given target element ||| depending on whether the input value is a `Left`. export leftInvalid : SetValidity t => LiftJSIO m => ElemRef t -> MSF m (Either String x) () leftInvalid ref = either id (const "") ^>> validityMessageAt ref -------------------------------------------------------------------------------- -- Value -------------------------------------------------------------------------------- public export interface SafeCast t => SetValue t where setValue' : String -> t -> JSIO () public export SetValue HTMLButtonElement where setValue' = (value =.) public export SetValue HTMLDataElement where setValue' = (value =.) public export SetValue HTMLInputElement where setValue' = (value =.) public export SetValue HTMLOptionElement where setValue' = (value =.) public export SetValue HTMLOutputElement where setValue' = (value =.) public export SetValue HTMLParamElement where setValue' = (value =.) public export SetValue HTMLSelectElement where setValue' = (value =.) public export SetValue HTMLTextAreaElement where setValue' = (value =.) public export SetValue RadioNodeList where setValue' = (value =.) export setValue : LiftJSIO m => SetValue t => ElemRef t -> String -> m () setValue r s = getElementByRef r >>= liftJSIO . setValue' s export value : LiftJSIO m => SetValue t => MSF m (NP I [ElemRef t,String]) () value = arrM $ \[r,s] => setValue r s export %inline valueOf : LiftJSIO m => SetValue t => ElemRef t -> MSF m String () valueOf = firstArg value export setChecked : LiftJSIO m => Bool -> HTMLInputElement -> m () setChecked b el = liftJSIO $ set (checked el) b export checked : LiftJSIO m => MSF m (NP I [ElemRef HTMLInputElement,Bool]) () checked = arrM $ \[r,b] => getElementByRef r >>= setChecked b export %inline isChecked : LiftJSIO m => ElemRef HTMLInputElement -> MSF m Bool () isChecked = firstArg checked namespace LocalStorage export setItem : LiftJSIO m => MSF m (NP I [String,String]) () setItem = arrM $ \[k,v] => liftJSIO (window >>= localStorage >>= (\s => setItem s k v)) export %inline setItemAt : LiftJSIO m => (key : String) -> MSF m String () setItemAt = firstArg setItem
\setchapterimage[6.5cm]{Grid_FullView_Logo} \setchapterpreamble[u]{\margintoc} \chapter{IceCube Realtime Alerts} \labch{realtime} \begin{fquote}[Alan Turing][Computing Machinery and Intelligence][1950]A very large part of space-time must be investigated, if reliable results are to be obtained. Otherwise we may (as most English children do) decide that everybody speaks English, and that it is silly to learn French. \end{fquote} As a complement to the likelihood analysis of archival data introduced in Chapter \ref{ch:llh}, neutrino astronomy can also be conducted through \emph{realtime} analysis, in which automated neutrino alerts are published with low latency. These public neutrino alerts can then be \emph{followed up} by external observatories, in order to search for possible photon counterparts. This latter aspect is explained in more detail in Chapter \ref{ch:ztf_too}, where one such follow-up program with the Zwicky Transient Facility (ZTF) is outlined. In this chapter, the IceCube Realtime Program is introduced. As part of this thesis, the author maintained and further developed the IceCube Realtime System from October 2018 to mid 2020, acting as first responder to the vast majority of neutrino alerts in that period. \section{Realtime Multi-Messenger Astronomy} In recent years, there has been significant interest in the study of transient and variable objects in astronomy (see also Chapter \ref{ch:sources}). Driven primarily by the speed at which objects can evolve and disappear, particularly GRBs and, latterly, kilonovae, it is often essential that astronomy can be done with minimal latency. In this vein, it is now commonplace for detectors to automatically issue so-called alerts for observations that meet given criteria, to enable other instruments to rapidly obtain near-simultaneous observations. Public realtime alerts are automatically issued, e.g.\ by GRB-searching instruments such as \textit{Swift}-BAT and \textit{Fermi}-GBM, while gravitational-wave events are issued by the LIGO-Virgo observatories, and high-energy neutrino alerts are issued by IceCube. These alerts are typically issued via the Gamma-ray Coordinates Network (GCN)\footnote{ \url{https://gcn.gsfc.nasa.gov} } system as machine-readable \emph{GCN Notices}, which can then be used to trigger automated telescope scheduling or notification systems. The GCN Notices are then supplemented by \emph{GCN Circulars}\footnote{ \url{https://gcn.gsfc.nasa.gov/gcn3_archive.html}}, which are short text summaries of observations that can be rapidly released to the wider astronomy community. GCN Circulars are typically sent both by the original observatory and by any observatories which subsequently follow up the initial detection. For observations of higher community interest, for example detections of potential multi-messenger counterparts, similar text summaries are often shared via the \emph{Astronomer's Telegram} (ATEL) network\footnote{\url{https://www.astronomerstelegram.org/}}. These GCN Circulars and ATELs are citeable records of realtime observations, and often form the basis of later peer-reviewed publications. \section{The IceCube Realtime System} The track component of the IceCube Realtime System has been operating since 2016, providing the first source of single high-energy neutrino alerts \sidecite{ic_realtime_17}. The first iteration of the alert system consisted of two streams, namely \emph{High-Energy Starting Events} (HESE) and \emph{Extremely High Energy} (EHE) track events (see also Chapter \ref{ch:icecube}).
Both EHE \sidecite{ic_ehe_16} and HESE \sidecite{ic_hese_14} were established event selections used to identify likely-astrophysical neutrino tracks, which were then adapted to realtime alert selections. This original system of IceCube alerts continued until May 2019, at which point a new \emph{V2 alert system} was implemented \sidecite{ic_realtime_19}. While the original EHE selection was maintained, the HESE alert selection was improved to reduce the cascade contamination and improve the astrophysical purity. In addition, a new alert stream based on the \emph{Gamma-ray Follow-Up} (GFU) event selection was initiated \sidecite{kintscher_thesis}, with a significantly elevated rate relative to EHE and HESE alerts. The publication of these three alert streams was unified into a new \emph{IceCube Astrotrack} GCN Notice stream, which was further subdivided into \emph{Gold} and \emph{Bronze} based on the average purity of alerts\footnote{\emph{Silver} alerts were initially reserved for a planned future high-energy cascade stream \cite{kintscher_thesis}, but this intended naming convention appears to have been forgotten, because these were ultimately named \emph{IceCube Cascade} alerts}. Gold Astrotrack alerts contain an average of 50\% astrophysical neutrinos, with the remainder arising from atmospheric background, while Bronze Astrotrack alerts have an average of 30\% astrophysical neutrinos (see Section \ref{sec:alert_rates} for more details). As explained in Chapter \ref{ch:icecube}, filters to identify relevant events are deployed on computers at the South Pole, and detections are flagged with low latency. After fast ``online'' reconstruction algorithms are applied to events, an automated machine-readable ``notice'' is distributed via the GCN system. In parallel, data from the event is transmitted via satellite to a computing centre in Madison, Wisconsin, where a full likelihood scan is performed on the event. The alerts are vetted by humans to assess the event quality, with visual inspection used to confirm the classified topology and the event reconstruction. The operating state of the detector is additionally checked. Following these steps, a plain-text GCN Circular is distributed to confirm that the alert is of good quality, and to provide the updated localisation arising from the full likelihood scan. Each neutrino alert is assigned a unique name with an IceCube detector prefix, the UTC date on which the alert was issued, and a letter denoting the order of alerts on that day (e.g.\ IC170922A or IC190922B). \section{Alert Reconstruction} \label{sec:alert_reco} The standard reconstruction methods introduced in Chapter \ref{ch:icecube} are generally optimised for large datasets, but for the handful of realtime alerts, more computationally intensive reconstruction methods can be employed. In particular, the success of multi-messenger counterpart searches depends critically on the impact of systematic uncertainties on the localisation, which are not directly accounted for in the likelihood reconstruction methods. To measure this effect, individual IceCube events can be calibrated using \emph{resimulations}. In this approach, many neutrinos are simulated from directions similar to the reconstructed direction of the alert \sidecite{ic_panstarrs_19}. During the simulation stage, different realisations of systematic uncertainties such as ice properties are randomly chosen, providing an ensemble that corresponds to the limits of knowledge of the detector properties.
A series of cuts is then applied to select simulated events which `look similar' to the one observed, accounting for deposited energy, reconstructed direction and stochasticity. Each simulated event is then reconstructed using the same Millipede method as for realtime events (see Chapter \ref{ch:icecube}), and the best-fit position for each simulated event is found. By comparing the difference in log likelihood (LLH) value between the best fit and the simulated truth for each event, a histogram can be constructed that describes the PDF of the $\Delta_{LLH}$ values for events similar to a given alert. By combining this information with the LLH landscape for an individual event, the full localisation uncertainty including systematic uncertainty can be derived for that event. \begin{figure} \centering \includegraphics{realtime/resimulations} \caption{Cumulative Distribution Functions (CDFs) for the resimulations of \emph{IC160427A} and \emph{IC170922A}. The published threshold values are indicated by dashed lines, which for IC160427A differ slightly from the plotted distribution.} \label{fig:resimulations} \end{figure} The outcome of resimulations for two alerts, IC160427A and IC170922A, can be seen as a Cumulative Distribution Function (CDF) in Figure \ref{fig:resimulations}. A total of 250 resimulated events were selected for IC160427A\footnote{Of these 250 events, only 117 were available for inclusion in Figure \ref{fig:resimulations}. The remainder have been tragically lost to the sands of time. This explains the offset in critical values from the CDF for this curve.}, while 159 were selected for IC170922A. Dotted lines indicate 50\% containment, while dashed lines indicate 90\% containment. For IC160427A, 50\% of resimulated events had an LLH offset between best fit and MC truth of 11.1, and 90\% of events had an offset within $\Delta_{LLH} < 32.1$. By drawing contours on the likelihood landscape of an alert at $LLH - LLH_{best} = 11.1$ and $32.1$, we can say that the true neutrino arrival direction will lie inside those contours 50\% and 90\% of the time, respectively. In comparison, for IC170922A, these critical $\Delta_{LLH}$ values are somewhat smaller, at 4.9 and 20.8 respectively. \begin{figure} \centering \includegraphics{realtime/contour_IC190730A} \caption{An example likelihood contour for IC190730A, illustrating both the IC160427A and IC170922A resimulations.} \label{fig:ic190730a_contour} \end{figure} The resimulation process can take many weeks and thousands of CPU hours, and so is generally not performed for each new alert. Instead, the outcome of previous resimulations can be used as a rapid solution. At present, all new alerts are issued using the resimulations of the first public alert (IC160427A; see Section \ref{sec:nu_alerts}). Additional tailored resimulations are employed in a handful of cases, though an effort to perform more comprehensive resimulations is planned. A typical contour, for IC190730A, is shown in Figure \ref{fig:ic190730a_contour}. The contour inferred from the IC160427A resimulation is shown as a dotted line, while the IC170922A interpretation is shown as a solid line. \section{Alert Rates} \label{sec:alert_rates} The alert selections are designed based on Monte Carlo simulations, using the recent IceCube measurement of the astrophysical neutrino flux, with a spectral index of E$^{-2.19}$, as a signal assumption \sidecite{ic_diffuse_17}.
PDFs are then constructed, similar to those in Figure \ref{fig:mc_dec_e} of Chapter \ref{ch:llh}, calculating signal/background ratios as a function of energy proxy and reconstructed declination, $\delta$, with thresholds then selected such that the integrated background contamination is $<$70\% (for Bronze) or $<$50\% (for Gold). Each alert is also assigned an individual \emph{signalness} value using the same method, defined as: \begin{equation} \mathrm{signalness}(E_{reco}|\delta) = \frac{N_{sig}(E \geq E_{reco}|\delta)}{N_{sig}(E \geq E_{reco}|\delta) + N_{bkg}(E \geq E_{reco}|\delta)} \end{equation} \begin{figure} \centering \includegraphics{realtime/alert_cdf} \caption{Cumulative number of IceCube alerts, in 3-month bins, and excluding retractions. The dashed line indicates the transition from V1 to V2 alerts.} \label{fig:alert_cdf} \end{figure} The cumulative alert count for each stream can be seen in Figure \ref{fig:alert_cdf}, binned in 3-month periods since the start of public alerts in April 2016 and excluding alerts subsequently retracted. From the start of the program until the selection upgrade in mid-2019, an average of 0.12 alerts per week were issued under the V1 selection, with the dashed line in Figure \ref{fig:alert_cdf} indicating the transition date. This increased to 0.47 alerts per week under the V2 alert selection, averaged from the changeover date to 2021 June 30. The alert rate thus roughly quadrupled with the transition from V1 to V2, to approximately one alert every two weeks. As can be seen in Figure \ref{fig:alert_cdf}, the rate fluctuates somewhat over time, as expected for a Poisson counting experiment. % \begin{table*} % \centering % \begin{tabular}{||c |c |c c| c ||} % \hline % Alert Selection & Stream Name &Signal Rate& Background Rate& Total Rate \\ % & &[yr$^{-1}$]& [yr$^{-1}$]&[yr$^{-1}$]\\ % \hline % Alerts V1 & HESE & 1.1 & 3.7 & 4.8\\ % & EHE & 2.5 & 1.9 & 4.4\\ % \hline % \textbf{Alerts V1}& \textbf{HESE + EHE} & \textbf{3.6} & \textbf{5.6} & \textbf{9.2}\\ % \hline % Alerts V2 & Gold & 6.6 & 6.1& 12.7\\ % & Bronze & 2.8 & 14.7& 17.5\\ % \hline % \textbf{Alerts V2} & \textbf{Gold + Bronze} & \textbf{9.4} & \textbf{20.8} & \textbf{30.2} \\ % \hline \hline % \end{tabular} % \caption{IceCube realtime alert rates.} % \label{tab:alert_rates} %\end{table*} \section{Noteworthy IceCube Neutrino Alerts} \label{sec:nu_alerts} A summary of all neutrino alerts issued to date is provided in Appendix Table \ref{tab:all_nu_alerts}. Individual neutrino alerts of interest are summarised below. \subsection{IC160427A} \subsubsection*{The ``PanSTARRS Supernova Neutrino''} The first alert issued under this system, HESE alert IC160427A \sidecite{ic160427a}, was found to be in spatial coincidence with an optical transient detected by the Pan-STARRS Observatory in follow-up observations \cite{ic_panstarrs_19}. This transient, PS16cgx, was initially given a tentative classification as a Type Ic supernova, for which various models have predicted neutrino emission (see Chapter \ref{ch:sources}). However, further photometric evolution instead indicated that it was more likely a Type Ia supernova, for which no neutrino emission would be expected. Given that this was the first HESE alert, and the first high-energy neutrino alert for which a possible counterpart was identified, dedicated resimulations of this event were undertaken following the method outlined in Section~\ref{sec:alert_reco}.
This was the first characterisation of the impact of systematic uncertainties in modelling the polar glacial ice on directional reconstruction with IceCube for high-energy alerts, and the data from this resimulation continue to be used in all new IceCube alerts. \subsection{IC170922A} \subsubsection*{The ``TXS 0506+056 Neutrino''} Subsequent neutrino alerts did not yield any probable counterparts, until the detection of EHE alert IC170922A\footnote{alternatively named \emph{`The neutrino that launched one thousand telecons'} (Credit: E. Blaufuss)} \sidecite{ic170922a} in spatial coincidence with the flaring blazar TXS 0506+056 \sidecite{fermi_txs_atel_17}. After accounting for trials from historical neutrino alerts, a chance coincidence was disfavoured at the 3$\sigma$ level \sidecite{ic_txs_mm_18}. The event was also resimulated in the same manner as IC160427A. Remarkably, despite its radically different topology (a through-going muon rather than a starting track), the results were found to be broadly consistent. More details of TXS 0506+056 are given in Section \ref{subsec:txs}. \subsection{IC190331A} \subsubsection*{The ``Multi-PeV neutrino''} A starting track was observed on 2019 March 31 \sidecite{ic190331a}, with a deposited energy of $\sim$5.3 PeV. The initial reconstruction based on SplineMPE was inaccurate (for the reasons outlined in Chapter \ref{ch:icecube}), but the updated Millipede reconstruction yielded a well-localised 90\% uncertainty region of $\sim$1 sq. deg. on the sky. Given the extraordinarily high deposited energy, and the downgoing starting-track topology which disfavours any atmospheric origin (see Chapter \ref{ch:icecube}), IC190331A was one of a handful of track alerts for which an astrophysical origin can be reasonably described as `highly likely'. However, no candidate electromagnetic counterpart was identified in follow-up observations \sidecite{krauss_20}. \begin{figure}[!ht] \includegraphics{realtime/ic190331a_vid} \caption{Event view of IC190331A. The light arrival time goes from red (early) to blue (late). The initial reconstruction is illustrated by the red line.} \label{fig:ic190331a} \end{figure} \subsection{IC190730A} \label{sec:ic190730a} \subsubsection*{The ``ICRC neutrino''} IC190730A was a Gold neutrino alert with a signalness of 67\% \sidecite{ic190730a}, which arrived in the middle of the neutrino astronomy session of the 36th \emph{International Cosmic Ray Conference} (ICRC). Following the automated notice, the full likelihood reconstruction showed that the event was well localised and spatially coincident with PKS 1502+106, an FSRQ (see Chapter \ref{ch:sources}). This particular blazar is extremely bright, being the 15th brightest in the sky in terms of integrated gamma-ray energy flux. Owing to its high redshift of $z=1.84$, it is one of the most luminous known blazars \sidecite{franckowiak_20}. This coincidence was reported in the corresponding GCN Circular, and triggered a broad multi-wavelength follow-up campaign. Though the blazar was not found to be flaring in gamma rays on short timescales \cite{franckowiak_20}, OVRO reported that the radio flux had in recent months been elevated relative to the decade-long observation baseline \sidecite{ovro_19}. Similar behaviour has been claimed for TXS 0506+056 and other neutrino-coincident blazars \cite{ovro_19}.
Comprehensive time-dependent modelling has found that the detection of a neutrino alert from PKS 1502+106 is consistent with the multi-wavelength observations of this object, so a neutrino-blazar association is plausible but likely unrelated to the flaring activity \sidecite{rodrigues_21}. %The chance coincidence for at least one neutrino alert to be coincident with any of the 15 brightest blazars was calculated by the author, and found to be disfavoured at the level of 2.n sigma following the procedure in N. \subsection{IC190922B} \subsubsection*{The ``SN2019pqh neutrino''} On 2019 September 22, IceCube for the first time reported multiple high-energy neutrino alerts on a single day\footnote{By coincidence, this also happened to be the 2nd `birthday' of the IC170922A detection.}. The latter of these was IC190922B, a Gold alert with a reported signalness of 50.5\% \sidecite{ic190922b}. Follow-up observations of this alert were undertaken by the author with the Zwicky Transient Facility (ZTF), as part of a dedicated neutrino follow-up program introduced in Chapter \ref{ch:ztf_too}. One candidate supernova was found, SN2019pqh, in spatial and temporal coincidence with the alert \sidecite{ic190922b_ztf}. Unlike PS16cgx, SN2019pqh was the first young supernova reported in spatial coincidence with a high-energy neutrino that would be compatible with the CSM-interaction model introduced in Chapter \ref{ch:sources}. However, as explained in Section \ref{sec:sn2019pqh}, further spectroscopic observations ultimately disfavoured the association. \subsection{IC191001A} \subsubsection*{The ``Bran Stark neutrino''} A high-energy Gold neutrino event was detected on 2019 October 1st, with an estimated signalness of 59\% \sidecite{ic191001a}. A bright Tidal Disruption Event (TDE), AT2019dsg, was found by the author in spatial coincidence with this alert \sidecite{bran}, as part of follow-up observations using ZTF. This association is explained in much greater detail in Chapter \ref{ch:bran}. As with all ZTF-detected TDEs, AT2019dsg was assigned a nickname inspired by the once-popular HBO series \emph{Game of Thrones}, in this case named after the character \emph{Bran Stark} \sidecite{van_velzen_20}. \subsection{IC200107A} \subsubsection*{The ``Flaring extreme blazar neutrino''} IC200107A was a high-energy neutrino which passed the initial HESE event selection, but did not qualify as a Gold or Bronze Astrotrack event due to a possible cascade classification \sidecite{ic200107a}. However, visual inspection of the event suggested a starting-track topology, which was confirmed by a dedicated neural network topology classifier \sidecite{ic_dnn_19}. A GCN Circular was thus issued to publicise the neutrino arrival direction. Given the non-standard nature of the event selection, however, no signalness estimate was provided. Two gamma-ray-detected blazars were found within the neutrino localisation: 4FGL J0955.1+3551 and 4FGL J0957.8+3423. The latter of these was not significantly detected in gamma rays at the time of the neutrino detection, nor was there any evidence of contemporaneous flaring activity. The other blazar, 4FGL J0955.1+3551 (also known as BZB J0955+3551 and 3HSP J095507.9+355101), belongs to the specific subclass of \emph{extreme blazars}, characterised by synchrotron peaks at very high frequencies; such objects had been proposed as especially promising candidate sources of high-energy neutrinos \sidecite{padovani_16}.
Follow-up observations of the source by \emph{Swift}-XRT revealed a dramatic simultaneous X-ray flare \sidecite{krauss_ic200107a, giommi_ic200107a}, of particular interest because of the importance of X-ray photons in the p$\gamma$ neutrino production models introduced in Chapter \ref{ch:theory}. More comprehensive multi-frequency modelling has confirmed that the detection of a neutrino alert from an extreme blazar is realistic, though the simultaneous X-ray flare may not be directly related to the neutrino production \sidecite{paliya_20, giommi_20, petropoulou_20}. The probability for a chance coincidence of the neutrino alert with an extreme blazar flaring in X-rays was calculated by the author of this thesis to be $3.7 \times 10^{-3}$, but in the absence of any systematic X-ray blazar follow-up program it is unclear how this number could be trial corrected (see Chapter \ref{ch:llh}) \cite{paliya_20}. The statistical evidence for this association is thus insufficient to draw any definitive conclusions about the neutrino origin. \subsection{IC200530A} \subsubsection*{The ``Tywin Lannister neutrino''} IC200530A was a high-energy Gold alert detected on 2020 May 30th, with a signalness estimate of 59\% \sidecite{ic200530a}. The neutrino localisation was also observed as part of the ZTF neutrino follow-up program, and the bright nuclear flare AT2019fdr was identified by the author as a probable optical counterpart \sidecite{ic200530a_ztf}. The object was later classified as a likely TDE \sidecite{frederick_20}, the second found in spatial and temporal coincidence with a high-energy neutrino. More details on this object are given in Section \ref{sec:at2019fdr}. The ZTF-assigned \emph{Game of Thrones} name was \emph{Tywin Lannister}. \subsection{IC201120A} \subsubsection*{The ``Cygnus Cocoon Neutrino''} IC201120A was a Bronze alert detected on 2020 November 20th, with an estimated signalness of 50\% \sidecite{ic201120a}. The neutrino was quickly identified as being spatially coincident with the \emph{Cygnus Cocoon}, a nearby star-forming region within the Milky Way, which has been identified as a probable galactic source of PeV cosmic rays \sidecite{fermi_cc_11}. The source has been detected at 1--100 TeV energies by the HAWC observatory, with sufficient precision to support a hadronic origin \sidecite{hawc_cc_21}. The LHAASO collaboration also recently reported the detection of PeV gamma rays from the Cygnus Cocoon region \sidecite{lhaaso_21}. Moreover, observations with the \emph{CARPET-2} air shower detector \sidecite{carpet2_09} revealed a simultaneous excess of gamma-ray events from this region over a period of 85 calendar days around the neutrino event \sidecite{carpet2_21}. The implied gamma-ray fluence from this flare was 13 $\pm$ 4 GeV cm$^{-2}$. The reported statistical significance was 3.1$\sigma$, though this number was not trial-corrected to account for the many neutrino alerts issued by IceCube. This possible excess of TeV--PeV gamma rays has not yet been corroborated by other detectors, though in principle many should be sensitive. The localisation of IC201120A was also atypically large, spanning 64 sq. deg., reducing the statistical significance of any association with the Cygnus Cocoon region.
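In compact form, the containment recipe of Section \ref{sec:alert_reco} amounts to taking empirical quantiles of the resimulated offsets and using them as contour levels on the per-event likelihood map. Writing the resimulated offsets as $\{\Delta_{LLH,i}\}$ (the threshold symbols $\Delta^{50\%}$ and $\Delta^{90\%}$ are introduced here purely as shorthand and do not appear in the text above): \[ \Delta^{50\%} = \mathrm{quantile}_{0.5}\bigl(\{\Delta_{LLH,i}\}\bigr), \qquad \Delta^{90\%} = \mathrm{quantile}_{0.9}\bigl(\{\Delta_{LLH,i}\}\bigr), \] \[ \mathcal{R}_{90\%} = \bigl\{\, \vec{x} \,:\, LLH(\vec{x}) - LLH_{best} \leq \Delta^{90\%} \,\bigr\}, \] so that, for IC160427A, $\Delta^{50\%} = 11.1$ and $\Delta^{90\%} = 32.1$ reproduce the contour levels quoted in Section \ref{sec:alert_reco}, and $\mathcal{R}_{90\%}$ is the region expected to contain the true arrival direction 90\% of the time.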
Some egg farmers are in business to provide people with healthy breakfasts. But at The Virtual Casino, we're laying way more on the table than just scrambled deliciousness. Enter the Hen House for your chance to catch a golden egg and find yourself enjoying a feast of wealth. You're in charge of the henhouse and there are tons of opportunities to get rich by finding the right eggs. Enjoy up to 25 free games, two big jackpots, and lots of cash hidden in every nest. But keep your eyes open for the rotten eggs. You'll want to avoid those at all costs and head straight for the golden ones instead! Hen House is one of the easiest games to play at The Virtual Casino. The hens are busy keeping their eggs warm in this one, so they won't bother you. Get started by choosing how much you want to bet and how many lines you want to play. Feel free to bet anywhere from $0.01 to $5 per line and from one line through to all 25 lines. Just use the arrows next to the Bet and Lines buttons to adjust your selections. When you're ready for the hens to lay their eggs, just click the Spin button. The hens will immediately start clucking, the reels will spin, and they'll come to a stop one by one. You literally don't have to do anything. If you've matched up the right symbols, the game will automatically pay you out credits based on how much you've bet. Never farmed eggs before? Don't sweat it. It's really easy—and you don't even have to wake up at the crack of dawn to make sure the hens are doing their thing. Just follow these simple game controls to take charge of the coop. Bet: You don't need a big nest egg to play Hen House. Just use the arrows next to the Bet button to lower or raise your bet. You can wager anywhere from $0.01 to $5 per line. Lines: Hen House is a 25-payline game. You can play all 25 lines, just one, or anywhere in between. Use the arrows next to the Lines button to change your lines selection. Spin: Ready to watch the hens lay their eggs? Just click the Spin button to get the hens to start clucking. Autoplay: Rather let the software do all the work? Turn AutoPlay on and you don't have to worry about hitting Spin. Ever. The software will do it automatically for you, right after your winning bets have been paid out. You'll get paid out automatically every time you've matched up the right combination of symbols. Some symbols pay out more than others, like the Eggs for example. Hit three or more of those and you'll get to play a free games feature where up to 25 on-the-hen-house games could be yours. A massive barn full of hens might feel overwhelming at first. It's a bit chaotic, so we totally understand why you might want to decrease your paylines selection. We recommend that you don't. If you're short on funds, decrease your bet amount instead. By decreasing the number of lines you'd like to play, you risk missing out on a winning combination landing on a payline. And that could mean missing a big jackpot. Instead, lower your bet amount. You'll still get to play all 25 paylines, but it won't cost you as much, and you won't miss out on hitting it big. The substitute symbol in Hen House is the Hen. As in many other online slots games, the substitute symbol can replace any other symbol in the game to help you complete a winning payline and come out with a win. However, in Hen House, the Hen symbol can't sub in for the Egg symbol.
Hal Blaine contributed so much to such a large number of rock and pop’s greatest hits, that his music will continue to be heard and appreciated for as long as there are radios. Overall, Juliana Hatfield’s Weird is closer to good than to great. It would appear that Martin Phillipps and company are experiencing a late-career renaissance that bodes well for their future. The show cemented Joe Jackson’s reputation as an inscrutable and enigmatic songwriter, a talented musician and social outsider who speaks for Everyman.
State Before: n : Nat ⊢ 2 > 0 State After: no goals Tactic: simp
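The goal record above is consistent with a Lean lemma of roughly the following shape (a minimal sketch, assuming a Lean 4 environment like the one this tactic-state format is extracted from; the original theorem name and any role of the hypothesis `n` are not shown in the fragment):

example (n : Nat) : 2 > 0 := by
  -- `n` is in context but unused; `simp` closes the numeric goal,
  -- matching the "no goals" state recorded above.
  simp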
(* Title: System_Of_Equations_IArrays.thy Author: Jose Divasón <jose.divasonm at unirioja.es> Author: Jesús Aransay <jesus-maria.aransay at unirioja.es> *) header{*Solving systems of equations using the Gauss Jordan algorithm over nested IArrays*} theory System_Of_Equations_IArrays imports System_Of_Equations Bases_Of_Fundamental_Subspaces_IArrays begin subsection{*Previous definitions and properties*} definition greatest_not_zero :: "'a::{zero} iarray => nat" where "greatest_not_zero A = the (List.find (\<lambda>n. A !! n \<noteq> 0) (rev [0..<IArray.length A]))" lemma vec_to_iarray_exists: shows "(\<exists>b. A $ b \<noteq> 0) = IArray_Addenda.exists (\<lambda>b. (vec_to_iarray A) !! b \<noteq> 0) (IArray[0..<IArray.length (vec_to_iarray A)])" proof (unfold IArray_Addenda.exists.simps length_vec_to_iarray, auto simp del: sub_def) fix b assume Ab: "A $ b \<noteq> 0" show "\<exists>b\<in>{0..<CARD('a)}. vec_to_iarray A !! b \<noteq> 0" by (rule bexI[of _ "to_nat b"], unfold vec_to_iarray_nth', auto simp add: Ab to_nat_less_card[of b]) next fix b assume b: "b < CARD('a)" and Ab_vec: "vec_to_iarray A !! b \<noteq> 0" show "\<exists>b. A $ b \<noteq> 0" by (rule exI[of _ "from_nat b"], metis Ab_vec vec_to_iarray_nth[OF b]) qed corollary vec_to_iarray_exists': shows "(\<exists>b. A $ b \<noteq> 0) = IArray_Addenda.exists (\<lambda>b. (vec_to_iarray A) !! b \<noteq> 0) (IArray (rev [0..<IArray.length (vec_to_iarray A)]))" by (simp add: vec_to_iarray_exists is_none_def find_None_iff) lemma not_is_zero_iarray_eq_iff: "(\<exists>b. A $ b \<noteq> 0) = (\<not> is_zero_iarray (vec_to_iarray A))" by (metis (full_types) is_zero_iarray_eq_iff vec_eq_iff zero_index) lemma vec_to_iarray_greatest_not_zero: assumes ex_b: "(\<exists>b. A $ b \<noteq> 0)" shows "greatest_not_zero (vec_to_iarray A) = to_nat (GREATEST' b. A $ b \<noteq> 0)" proof - let ?P="(\<lambda>n. (vec_to_iarray A) !! n \<noteq> 0)" let ?xs="(rev [0..<IArray.length (vec_to_iarray A)])" have "\<exists>a. (List.find ?P ?xs) = Some a" proof(rule ccontr, simp, unfold find_None_iff) assume "\<not> (\<exists>x. x \<in> set (rev [0..<length (IArray.list_of (vec_to_iarray A))]) \<and> IArray.list_of (vec_to_iarray A) ! x \<noteq> 0)" thus False using ex_b unfolding set_rev by (auto, unfold length_def[symmetric] sub_def[symmetric] length_vec_to_iarray,metis to_nat_less_card vec_to_iarray_nth') qed from this obtain a where a: "(List.find ?P ?xs) = Some a" by blast from this obtain ia where ia_less_length: "ia<length ?xs" and P_xs_ia: "?P (?xs!ia)" and a_eq: "a = ?xs!ia" and all_zero: "(\<forall>j<ia. \<not> ?P (?xs!j))" unfolding find_Some_iff by auto have ia_less_card: "ia < CARD('a)" using ia_less_length by (metis diff_zero length_rev length_upt length_vec_to_iarray) have ia_less_length': "ia < length ([0..<IArray.length (vec_to_iarray A)])" using ia_less_length unfolding length_rev . have a_less_card: "a < CARD('a)" unfolding a_eq unfolding rev_nth[OF ia_less_length'] using nth_upt[of 0 "(length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" "(length [0..<IArray.length (vec_to_iarray A)])" ] by (metis diff_less length_upt length_vec_to_iarray minus_nat.diff_0 plus_nat.add_0 zero_less_Suc zero_less_card_finite) have "(GREATEST' b. A $ b \<noteq> 0) = from_nat a" proof (rule Greatest'_equality) have "A $ from_nat a = (vec_to_iarray A) !! a" by (rule vec_to_iarray_nth[symmetric,OF a_less_card]) also have "... \<noteq> 0" using P_xs_ia unfolding a_eq[symmetric] . finally show "A $ from_nat a \<noteq> 0" . 
next fix y assume Ay: "A $ y \<noteq> 0" show "y \<le> from_nat a" proof (rule ccontr) assume "\<not> y \<le> from_nat a" hence y_greater_a: "y > from_nat a" by simp have y_greater_a': "to_nat y > a" using y_greater_a using to_nat_mono[of "from_nat a" y] using to_nat_from_nat_id by (metis a_less_card) have "a = ?xs ! ia" using a_eq . also have "... = [0..<IArray.length (vec_to_iarray A)] ! (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" by (rule rev_nth[OF ia_less_length']) also have "... = 0 + (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" apply (rule nth_upt) using ia_less_length' by fastforce also have "... = (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" by simp finally have "a = (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" . hence ia_eq: "ia = length [0..<IArray.length (vec_to_iarray A)] - (Suc a)" by (metis Suc_diff_Suc Suc_eq_plus1_left diff_diff_cancel less_imp_le ia_less_length length_rev) def ja\<equiv>"length [0..<IArray.length (vec_to_iarray A)] - to_nat y - 1" have ja_less_length: "ja < length [0..<IArray.length (vec_to_iarray A)]" unfolding ja_def by (metis diff_0_eq_0 diff_Suc_eq_diff_pred diff_Suc_less diff_right_commute ia_eq ia_less_length' neq0_conv) have suc_i_le: "IArray.length (vec_to_iarray A)\<ge>Suc (to_nat y)" unfolding vec_to_iarray_def using to_nat_less_card[of y] by auto have "?xs ! ja = [0..<IArray.length (vec_to_iarray A)] ! (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" unfolding rev_nth[OF ja_less_length] .. also have "... = 0 + (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" apply (rule nth_upt, auto simp del: length_def) unfolding ja_def by (metis diff_Suc_less ia_less_length' length_upt less_nat_zero_code minus_nat.diff_0 neq0_conv) also have "... = (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" by simp also have "... = to_nat y" unfolding ja_def using suc_i_le by force finally have xs_ja_eq_y: "?xs ! ja = to_nat y" . have ja_less_ia: "ja < ia" unfolding ja_def ia_eq by (auto simp del: length_def, metis Suc_leI suc_i_le diff_less_mono2 le_imp_less_Suc less_le_trans y_greater_a') hence eq_0: "vec_to_iarray A !! (?xs ! ja) = 0" using all_zero by simp hence "A $ y = 0" using vec_to_iarray_nth'[of A y] unfolding xs_ja_eq_y by simp thus False using Ay by contradiction qed qed thus ?thesis unfolding greatest_not_zero_def a unfolding to_nat_eq[symmetric] unfolding to_nat_from_nat_id[OF a_less_card] by simp qed subsection{*Consistency and inconsistency*} definition "consistent_iarrays A b = (let GJ=Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . 
\<not> is_zero_iarray x]; P_mult_b = fst(GJ) *iv b in (rank_A \<ge> (if (\<not> is_zero_iarray P_mult_b) then (greatest_not_zero P_mult_b + 1) else 0)))" definition "inconsistent_iarrays A b = (\<not> consistent_iarrays A b)" lemma matrix_to_iarray_consistent[code]: "consistent A b = consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding consistent_eq_rank_ge_code unfolding consistent_iarrays_def Let_def unfolding Gauss_Jordan_PA_eq unfolding rank_Gauss_Jordan_code[symmetric, unfolded Let_def] unfolding snd_Gauss_Jordan_iarrays_PA_eq unfolding rank_iarrays_code[symmetric] unfolding matrix_to_iarray_rank unfolding matrix_to_iarray_fst_Gauss_Jordan_PA[symmetric] unfolding vec_to_iarray_matrix_matrix_mult[symmetric] unfolding not_is_zero_iarray_eq_iff using vec_to_iarray_greatest_not_zero[unfolded not_is_zero_iarray_eq_iff] by force lemma matrix_to_iarray_inconsistent[code]: "inconsistent A b = inconsistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding inconsistent_def inconsistent_iarrays_def unfolding matrix_to_iarray_consistent .. definition "solve_consistent_rref_iarrays A b = IArray.of_fun (\<lambda>j. if (IArray_Addenda.exists (\<lambda>i. A !! i !! j = 1 \<and> j=least_non_zero_position_of_vector (row_iarray i A)) (IArray[0..<nrows_iarray A])) then b !! (least_non_zero_position_of_vector (column_iarray j A)) else 0) (ncols_iarray A)" lemma exists_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" shows "(\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)) = (IArray_Addenda.exists (\<lambda>i. (matrix_to_iarray A) !! i !! (to_nat j) = 1 \<and> (to_nat j)=least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray[0..<nrows_iarray (matrix_to_iarray A)]))" proof (rule) assume "\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)" from this obtain i where Aij: "A $ i $ j = 1" and j_eq: "j = (LEAST n. A $ i $ n \<noteq> 0)" by blast show "IArray_Addenda.exists (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray [0..<nrows_iarray (matrix_to_iarray A)])" unfolding IArray_Addenda.exists.simps find_Some_iff apply (rule bexI[of _ "to_nat i"])+ proof (auto, unfold sub_def[symmetric]) show "to_nat i < nrows_iarray (matrix_to_iarray A)" unfolding matrix_to_iarray_nrows[symmetric] nrows_def using to_nat_less_card by fast have "to_nat j = to_nat (LEAST n. A $ i $ n \<noteq> 0)" unfolding j_eq by simp also have "... = to_nat (LEAST n. A $ i $ n \<noteq> 0 \<and> 0\<le>n)" by (metis least_mod_type) also have "...= least_non_zero_position_of_vector_from_index (vec_to_iarray (row i A)) (to_nat (0::'cols))" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index''[symmetric, of "0::'cols" i A]) show "\<not> vector_all_zero_from_index (to_nat (0::'cols), vec_to_iarray (row i A))" unfolding vector_all_zero_from_index_eq[symmetric, of "0::'cols" "row i A"] unfolding row_def vec_nth_inverse using Aij least_mod_type[of j] by fastforce qed also have "... = least_non_zero_position_of_vector (row_iarray (to_nat i) (matrix_to_iarray A))" unfolding vec_to_iarray_row least_non_zero_position_of_vector_def unfolding to_nat_0 .. finally show "to_nat j = least_non_zero_position_of_vector (row_iarray (to_nat i) (matrix_to_iarray A))" . show "matrix_to_iarray A !! mod_type_class.to_nat i !! mod_type_class.to_nat j = 1" unfolding matrix_to_iarray_nth using Aij . 
qed next assume ex_eq: "IArray_Addenda.exists (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray [0..<nrows_iarray (matrix_to_iarray A)])" have "\<exists>y. List.find (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) [0..<nrows_iarray (matrix_to_iarray A)] = Some y" proof (rule ccontr, simp del: length_def sub_def, unfold find_None_iff) assume" \<not> (\<exists>x. x \<in> set [0..<nrows_iarray (matrix_to_iarray A)] \<and> matrix_to_iarray A !! x !! mod_type_class.to_nat j = 1 \<and> mod_type_class.to_nat j = least_non_zero_position_of_vector (row_iarray x (matrix_to_iarray A)))" thus False using ex_eq unfolding IArray_Addenda.exists.simps by auto qed from this obtain y where y: "List.find (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) [0..<nrows_iarray (matrix_to_iarray A)] = Some y" by blast from this obtain i where i_less_length: "i<length [0..<nrows_iarray (matrix_to_iarray A)]" and Aij_1: "matrix_to_iarray A !! ([0..<nrows_iarray (matrix_to_iarray A)] ! i) !! to_nat j = 1" and j_eq: "to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! i) (matrix_to_iarray A))" and y_eq: "y = [0..<nrows_iarray (matrix_to_iarray A)] ! i" and least: "(\<forall>ja<i. \<not> (matrix_to_iarray A !! ([0..<nrows_iarray (matrix_to_iarray A)] ! ja) !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! ja) (matrix_to_iarray A))))" unfolding find_Some_iff by blast show "\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)" proof (rule exI[of _ "from_nat i"], rule conjI) have i_rw: "[0..<nrows_iarray (matrix_to_iarray A)] ! i = i" using nth_upt[of 0 i "nrows_iarray (matrix_to_iarray A)"] using i_less_length by auto have i_less_card: "i < CARD ('rows)" using i_less_length unfolding nrows_iarray_def matrix_to_iarray_def by auto show A_ij: "A $ from_nat i $ j = 1" using Aij_1 unfolding i_rw using matrix_to_iarray_nth[of A "from_nat i" j] unfolding to_nat_from_nat_id[OF i_less_card] by simp have "to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! i) (matrix_to_iarray A))" using j_eq . also have "... = least_non_zero_position_of_vector_from_index (row_iarray i (matrix_to_iarray A)) 0" unfolding least_non_zero_position_of_vector_def i_rw .. also have "... = least_non_zero_position_of_vector_from_index (vec_to_iarray (row (from_nat i) A)) (to_nat (0::'cols))" unfolding vec_to_iarray_row unfolding to_nat_from_nat_id[OF i_less_card] unfolding to_nat_0 .. also have "... = to_nat (LEAST n. A $ (from_nat i) $ n \<noteq> 0 \<and> 0 \<le> n)" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index'') show "\<not> vector_all_zero_from_index (to_nat (0::'cols), vec_to_iarray (row (from_nat i) A))" unfolding vector_all_zero_from_index_eq[symmetric] using A_ij by (metis iarray_to_vec_vec_to_iarray least_mod_type vec_matrix vec_to_iarray_row' zero_neq_one) qed also have "... = to_nat (LEAST n. A $ (from_nat i) $ n \<noteq> 0)" using least_mod_type by metis finally show "j = (LEAST n. A $ from_nat i $ n \<noteq> 0)" unfolding to_nat_eq . 
qed qed lemma to_nat_the_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" and exists: "(\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0))" shows "to_nat (THE i. A $ i $ j = 1) = least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A))" proof - obtain i where Aij: "A $ i $ j = 1" and j:"j = (LEAST n. A $ i $ n \<noteq> 0)" using exists by blast have "least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)) = least_non_zero_position_of_vector (vec_to_iarray (column j A))" unfolding vec_to_iarray_column .. also have "... = least_non_zero_position_of_vector_from_index (vec_to_iarray (column j A)) (to_nat (0::'rows))" unfolding least_non_zero_position_of_vector_def to_nat_0 .. also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0 \<and> 0 \<le> n)" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index') show "\<not> vector_all_zero_from_index (to_nat (0::'rows), vec_to_iarray (column j A))" unfolding vector_all_zero_from_index_eq[symmetric] column_def using Aij least_mod_type[of i] by fastforce qed also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0)" using least_mod_type by metis finally have least_eq: "least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)) = to_nat (LEAST n. A $ n $ j \<noteq> 0)" . have i_eq_least: "i=(LEAST n. A $ n $ j \<noteq> 0)" proof (rule Least_equality[symmetric]) show "A $ i $ j \<noteq> 0" by (metis Aij zero_neq_one) show "\<And>y. A $ y $ j \<noteq> 0 \<Longrightarrow> i \<le> y" by (metis (mono_tags) Aij is_zero_row_def' j order_refl rref rref_condition4 zero_neq_one) qed have the_eq_least_pos: "(THE i. A $ i $ j = 1) = from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" proof (rule the_equality) show " A $ from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A))) $ j = 1" unfolding least_eq from_nat_to_nat_id i_eq_least[symmetric] using Aij . fix a assume a: "A $ a $ j = 1" show "a = from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" unfolding least_eq from_nat_to_nat_id by (metis Aij a i_eq_least is_zero_row_def' j rref rref_condition4_explicit zero_neq_one) qed have "to_nat (THE i. A $ i $ j = 1) = to_nat (from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))::'rows)" using the_eq_least_pos by auto also have "... = (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" by (rule to_nat_from_nat_id, unfold least_eq, simp add: to_nat_less_card) also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0)" unfolding least_eq from_nat_to_nat_id .. finally have "(THE i. A $ i $ j = 1) = (LEAST n. A $ n $ j \<noteq> 0)" unfolding to_nat_eq . thus ?thesis unfolding least_eq from_nat_to_nat_id unfolding to_nat_eq . 
qed lemma iarray_exhaust2: "(xs = ys) = (IArray.list_of xs = IArray.list_of ys)" by (metis iarray.exhaust list_of.simps) lemma vec_to_iarray_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" shows "vec_to_iarray (solve_consistent_rref A b) = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof(unfold iarray_exhaust2 list_eq_iff_nth_eq length_def[symmetric] sub_def[symmetric], rule conjI) show "IArray.length (vec_to_iarray (solve_consistent_rref A b)) = IArray.length (solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_consistent_rref_def solve_consistent_rref_iarrays_def unfolding ncols_iarray_def matrix_to_iarray_def by (simp add: vec_to_iarray_def) show "\<forall>i<IArray.length (vec_to_iarray (solve_consistent_rref A b)). vec_to_iarray (solve_consistent_rref A b) !! i = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b) !! i" proof (clarify) fix i assume i: "i < IArray.length (vec_to_iarray (solve_consistent_rref A b))" hence i_less_card: "i<CARD('cols)" unfolding vec_to_iarray_def by auto hence i_less_ncols: "i<(ncols_iarray (matrix_to_iarray A))" unfolding ncols_eq_card_columns . show "vec_to_iarray (solve_consistent_rref A b) !! i = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b) !! i" unfolding vec_to_iarray_nth[OF i_less_card] unfolding solve_consistent_rref_def unfolding vec_lambda_beta unfolding solve_consistent_rref_iarrays_def unfolding of_fun_nth[OF i_less_ncols] unfolding exists_solve_consistent_rref[OF rref, of "from_nat i", symmetric, unfolded to_nat_from_nat_id[OF i_less_card]] using to_nat_the_solve_consistent_rref[OF rref, of "from_nat i", symmetric, unfolded to_nat_from_nat_id[OF i_less_card]] using vec_to_iarray_nth' by metis qed qed subsection{*Independence and dependence*} definition "independent_and_consistent_iarrays A b = (let GJ = Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . \<not> is_zero_iarray x]; P_mult_b = fst GJ *iv b; consistent_A = ((if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A); dim_solution_set = ncols_iarray A - rank_A in consistent_A \<and> dim_solution_set = 0)" definition "dependent_and_consistent_iarrays A b = (let GJ = Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . \<not> is_zero_iarray x]; P_mult_b = fst GJ *iv b; consistent_A = ((if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A); dim_solution_set = ncols_iarray A - rank_A in consistent_A \<and> dim_solution_set > 0)" lemma matrix_to_iarray_independent_and_consistent[code]: shows "independent_and_consistent A b = independent_and_consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding independent_and_consistent_def unfolding independent_and_consistent_iarrays_def unfolding dim_solution_set_homogeneous_eq_dim_null_space unfolding matrix_to_iarray_consistent unfolding consistent_iarrays_def unfolding dim_null_space_iarray unfolding rank_iarrays_code unfolding snd_Gauss_Jordan_iarrays_PA_eq[symmetric] unfolding Let_def .. 
lemma matrix_to_iarray_dependent_and_consistent[code]: shows "dependent_and_consistent A b = dependent_and_consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding dependent_and_consistent_def unfolding dependent_and_consistent_iarrays_def unfolding dim_solution_set_homogeneous_eq_dim_null_space unfolding matrix_to_iarray_consistent unfolding consistent_iarrays_def unfolding dim_null_space_iarray unfolding rank_iarrays_code unfolding snd_Gauss_Jordan_iarrays_PA_eq[symmetric] unfolding Let_def .. subsection{*Solve a system of equations over nested IArrays*} definition "solve_system_iarrays A b = (let A' = Gauss_Jordan_iarrays_PA A in (snd A', fst A' *iv b))" lemma matrix_to_iarray_fst_solve_system: "matrix_to_iarray (fst (solve_system A b)) = fst (solve_system_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_system_def solve_system_iarrays_def Let_def fst_conv by (metis matrix_to_iarray_snd_Gauss_Jordan_PA) lemma vec_to_iarray_snd_solve_system: "vec_to_iarray (snd (solve_system A b)) = snd (solve_system_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_system_def solve_system_iarrays_def Let_def snd_conv by (metis matrix_to_iarray_fst_Gauss_Jordan_PA vec_to_iarray_matrix_matrix_mult) definition "solve_iarrays A b = (let GJ_P=Gauss_Jordan_iarrays_PA A; P_mult_b = fst GJ_P *iv b; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ_P) . \<not> is_zero_iarray x]; consistent_Ab = (if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A; GJ_transpose = Gauss_Jordan_iarrays_PA (transpose_iarray A); basis = set (map (\<lambda>i. row_iarray i (fst GJ_transpose)) [rank_A..<ncols_iarray A]) in (if consistent_Ab then Some (solve_consistent_rref_iarrays (snd GJ_P) P_mult_b,basis) else None))" definition "pair_vec_vecset A = (if Option.is_none A then None else Some (vec_to_iarray (fst (the A)), vec_to_iarray` (snd (the A))))" lemma pair_vec_vecset_solve[code_unfold]: shows "pair_vec_vecset (solve A b) = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding pair_vec_vecset_def proof (auto) assume none_solve_Ab: "Option.is_none (solve A b)" show "None = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof - def GJ_P == "Gauss_Jordan_iarrays_PA (matrix_to_iarray A)" def P_mult_b == "fst GJ_P *iv vec_to_iarray b" def rank_A == "length [x\<leftarrow>IArray.list_of (snd GJ_P) . \<not> is_zero_iarray x]" have "\<not> consistent A b" using none_solve_Ab unfolding solve_def unfolding is_none_def by auto hence "\<not> consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" using matrix_to_iarray_consistent by auto hence "\<not> (if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A" unfolding GJ_P_def P_mult_b_def rank_A_def using consistent_iarrays_def unfolding Let_def by fast thus ?thesis unfolding solve_iarrays_def Let_def unfolding GJ_P_def P_mult_b_def rank_A_def by presburger qed next assume not_none: "\<not> Option.is_none (solve A b)" show "Some (vec_to_iarray (fst (the (solve A b))), vec_to_iarray ` snd (the (solve A b))) = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof - def GJ_P == "Gauss_Jordan_iarrays_PA (matrix_to_iarray A)" def P_mult_b == "fst GJ_P *iv vec_to_iarray b" def rank_A == "length [x\<leftarrow>IArray.list_of (snd GJ_P) . \<not> is_zero_iarray x]" def GJ_transpose == "Gauss_Jordan_iarrays_PA (transpose_iarray (matrix_to_iarray A))" def basis == "set (map (\<lambda>i. 
row_iarray i (fst GJ_transpose)) [rank_A..<ncols_iarray (matrix_to_iarray A)])" def P_mult_b == "fst GJ_P *iv vec_to_iarray b" have consistent_Ab: "consistent A b" using not_none unfolding solve_def unfolding is_none_def by metis hence "consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" using matrix_to_iarray_consistent by auto hence "(if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A" unfolding GJ_P_def P_mult_b_def rank_A_def using consistent_iarrays_def unfolding Let_def by fast hence solve_iarrays_rw: "solve_iarrays (matrix_to_iarray A) (vec_to_iarray b) = Some (solve_consistent_rref_iarrays (snd GJ_P) P_mult_b, basis)" unfolding solve_iarrays_def Let_def P_mult_b_def GJ_P_def rank_A_def basis_def GJ_transpose_def by auto have snd_rw: "vec_to_iarray ` basis_null_space A = basis" unfolding basis_def GJ_transpose_def rank_A_def GJ_P_def unfolding vec_to_iarray_basis_null_space unfolding basis_null_space_iarrays_def Let_def unfolding snd_Gauss_Jordan_iarrays_PA_eq unfolding rank_iarrays_code[symmetric] unfolding matrix_to_iarray_transpose[symmetric] unfolding matrix_to_iarray_rank[symmetric] unfolding rank_transpose[symmetric, of A] .. have fst_rw: "vec_to_iarray (solve_consistent_rref (fst (solve_system A b)) (snd (solve_system A b))) = solve_consistent_rref_iarrays (snd GJ_P) P_mult_b" using vec_to_iarray_solve_consistent_rref[OF rref_Gauss_Jordan, of A "fst (Gauss_Jordan_PA A) *v b"] unfolding solve_system_def Let_def fst_conv unfolding Gauss_Jordan_PA_eq snd_conv unfolding GJ_P_def P_mult_b_def unfolding vec_to_iarray_matrix_matrix_mult unfolding matrix_to_iarray_fst_Gauss_Jordan_PA[symmetric] unfolding matrix_to_iarray_snd_Gauss_Jordan_PA[symmetric] unfolding Gauss_Jordan_PA_eq . show ?thesis unfolding solve_iarrays_rw unfolding solve_def if_P[OF consistent_Ab] option.sel fst_conv snd_conv unfolding fst_rw snd_rw .. qed qed end
[STATEMENT] lemma Pause_parametric [transfer_rule]: "(C ===> ((=) ===> rel_gpv A C) ===> rel_gpv A C) Pause Pause" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (C ===> rel_rpv A C ===> rel_gpv A C) Generative_Probabilistic_Value.Pause Generative_Probabilistic_Value.Pause [PROOF STEP] by(simp add: rel_fun_def)
{-# LANGUAGE BangPatterns, ScopedTypeVariables, RecordWildCards, FlexibleContexts, TypeFamilies, DeriveGeneric #-} module Main where import Data.IDX ( decodeIDXFile, idxIntContent, IDXData ) {------------------------} import Numeric.LinearAlgebra as NL ( Vector, fromList ) {------------------------} import Data.Vector.Unboxed as DTU ( toList ) import Prelude hiding (readFile) import Neuro ( Samples, Sample, createNetwork, tanh', saveNetwork, trainNTimes ) main :: IO () main = do n <- createNetwork 784 [64] 10 samples <- importTrain {-----------------------------} let n' = trainNTimes 50 5.0 tanh tanh' n samples saveNetwork "smartNet5.nn" n' importTrain :: IO (Samples Double) importTrain = do Just idxTrain <- decodeIDXFile "train-images.idx3-ubyte" -- image Just idxResult <- decodeIDXFile "train-labels.idx1-ubyte" -- result return $ samples idxTrain idxResult samples :: IDXData -> IDXData -> [Sample Double] samples idxTrain idxResult = Prelude.zip (image idxTrain) (result idxResult) :: [Sample Double] image = matrix2x2 . _0_1to0_255 . DTU.toList . idxIntContent result = unitar . DTU.toList . idxIntContent unitar :: [Int] -> [NL.Vector Double] unitar [] = [] unitar (0:xs) = NL.fromList [1,-1,-1,-1,-1,-1,-1,-1,-1,-1] : unitar xs unitar (1:xs) = NL.fromList [-1,1,-1,-1,-1,-1,-1,-1,-1,-1] : unitar xs unitar (2:xs) = NL.fromList [-1,-1,1,-1,-1,-1,-1,-1,-1,-1] : unitar xs unitar (3:xs) = NL.fromList [-1,-1,-1,1,-1,-1,-1,-1,-1,-1] : unitar xs unitar (4:xs) = NL.fromList [-1,-1,-1,-1,1,-1,-1,-1,-1,-1] : unitar xs unitar (5:xs) = NL.fromList [-1,-1,-1,-1,-1,1,-1,-1,-1,-1] : unitar xs unitar (6:xs) = NL.fromList [-1,-1,-1,-1,-1,-1,1,-1,-1,-1] : unitar xs unitar (7:xs) = NL.fromList [-1,-1,-1,-1,-1,-1,-1,1,-1,-1] : unitar xs unitar (8:xs) = NL.fromList [-1,-1,-1,-1,-1,-1,-1,-1,1,-1] : unitar xs unitar (9:xs) = NL.fromList [-1,-1,-1,-1,-1,-1,-1,-1,-1,1] : unitar xs _0_1to0_255 :: [Int] -> [Double] _0_1to0_255 [] = [] _0_1to0_255 (x:xs) = (fromIntegral x / 255) : _0_1to0_255 xs matrix2x2 :: [Double] -> [NL.Vector Double] matrix2x2 [] = [] matrix2x2 xs = NL.fromList (Prelude.take 784 xs) : matrix2x2 (Prelude.drop 784 xs)
section \<open> Weakest Prespecification \<close> theory utp_wprespec imports "UTP2.utp_rel_laws" "UTP2.utp_wlp" begin named_theorems wp no_notation Equiv_Relations.quotient (infixl "'/'/" 90) definition wprespec :: "('a, 'c) urel \<Rightarrow> ('b, 'c) urel \<Rightarrow> ('a, 'b) urel" (infixl "'/'/" 70) where [rel]: "wprespec Y K = (\<not> ((\<not> Y) ;; K\<^sup>-))" lemma wprespec_alt_def: "(P // Q) = (\<not> Q ;; (\<not> P\<^sup>-))\<^sup>-" by (simp add: wprespec_def) theorem wprespec: "R \<sqsubseteq> P ;; Q \<longleftrightarrow> R // Q \<sqsubseteq> P" by (pred_auto add: wprespec_def) lemma wprespec1: "R \<sqsubseteq> (R // Q) ;; Q" by (simp add: wprespec) lemma wprespec2: "(P ;; Q) // Q \<sqsubseteq> P" using wprespec by blast lemma wprespec3: "R // Q \<sqsubseteq> II \<longleftrightarrow> R \<sqsubseteq> Q" by (metis seqr_left_unit wprespec) lemma wprespec4: "Q // Q \<sqsubseteq> II" by (simp add: wprespec3) lemma wprespec5 [wp]: "P // II = P" by (metis antisym ref_by_pred_is_leq seqr_right_unit wprespec1 wprespec2) lemma wprespec6 [wp]: "(R \<and> S) // Q = ((R // Q) \<and> (S // Q))" by (rel_auto) lemma wprespec6a [wp]: "(\<Squnion> n\<in>A. R(n)) // Q = (\<Squnion> n\<in>A. R(n) // Q)" by (rel_auto) lemma wprespec7 [wp]: "R // (P \<or> Q) = ((R // P) \<and> (R // Q))" by (simp add: seqr_or_distr subst_pred(4) wprespec_def) lemma wprespec7a [wp]: "R // (\<Sqinter> i\<in>A. P(i)) = (\<Squnion> i\<in>A. R // P(i))" by (pred_auto add: wprespec_def) lemma wprespec8 [wp]: "R // (P ;; Q)= R // Q // P" by (metis (no_types, lifting) pred_ba.order.eq_iff seqr_assoc wprespec wprespec wprespec wprespec1 wprespec1) theorem wprespec9: "R // Q = \<Sqinter> {Y. R \<sqsubseteq> Y ;; Q}" (is "?lhs = ?rhs") by (metis (no_types, lifting) mem_Collect_eq ref_lattice.Inf_eqI wprespec wprespec1) theorem wprespec10: "(R // Q) (s\<^sub>0, s) = (\<forall> s'. Q (s, s') \<longrightarrow> R (s\<^sub>0, s'))" by (pred_auto add: wprespec_def) lemma wprespec12: "Q\<^sup>- = ((\<not>II) // (\<not>Q))" by (simp add: wprespec_def) lemma wprespec13: "(\<not> (R // Q)) = (\<not>II) // ((\<not>Q) // (\<not>R))" by (pred_auto add: wprespec_def) lemma wprespec17 [wp]: "R // \<langle>\<sigma>\<rangle>\<^sub>a = R ;; (II // \<langle>\<sigma>\<rangle>\<^sub>a)" apply (pred_auto add: wprespec_def) by metis lemma wprespec17a [wp]: "II // \<langle>\<sigma>\<rangle>\<^sub>a = \<langle>\<sigma>\<rangle>\<^sub>a\<^sup>-" by (pred_auto add: wprespec_def) theorem wlp_as_wprespec: "P wlp b = post ((b\<^sup>>) // P )" apply (simp add: post_def) by (pred_auto add: wprespec_def) end
// All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // 3. Neither the name of the University of Southern Denmark nor the names of // its contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF SOUTHERN DENMARK BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. /** \author Wail Mustafa \file stereoCamera.cpp \brief */ #include <pluginlib/class_list_macros.h> #include <nodelet/nodelet.h> #include <boost/thread.hpp> #include "ros/ros.h" #include <ros/console.h> #include <ros/package.h> #include <image_transport/image_transport.h> #include <image_transport/subscriber_filter.h> #include <message_filters/subscriber.h> #include <cv_bridge/cv_bridge.h> #include "Mathematics/StereoCalibration.h" #include <message_filters/synchronizer.h> #include <message_filters/sync_policies/exact_time.h> #include <message_filters/sync_policies/approximate_time.h> #include <caros/CovisRos.hpp> #include <caros_common_msgs/StereoCalibration.h> #include <caros_common_msgs/CapturePoints2.h> #include <pcl/point_types.h> #include <pcl/point_cloud.h> #include <pcl/io/pcd_io.h> #include <pcl_conversions/pcl_conversions.h> #include <sensor_msgs/PointCloud2.h> //#include "opencv2/calib3d/calib3d.hpp" //#include "opencv2/imgproc/imgproc.hpp" //#include "opencv2/highgui/highgui.hpp" //#include "opencv2/contrib/contrib.hpp" #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET 1 using std::string; using namespace message_filters; typedef boost::shared_ptr<caros_common_msgs::StereoCalibration const> StereoCalibrationConstPtr; namespace nodelet_stereo_camera { class StereoPointCloud : public nodelet::Nodelet { public: StereoPointCloud() { } ~StereoPointCloud() { } private: virtual void onInit(); void connectCb(); // Publications boost::mutex connect_mutex_; boost::mutex process_mutex_; boost::shared_ptr<image_transport::ImageTransport> it_; // image_transport::Publisher pub_left_image_rect; // image_transport::Publisher pub_right_image_rect; // ros::Publisher pub_stereo_calibration_rect; image_transport::SubscriberFilter sub_left_image_rect; image_transport::SubscriberFilter sub_right_image_rect; Calibration::StereoCalibration stereoCalibrationRect; message_filters::Subscriber<caros_common_msgs::StereoCalibration> sub_calib_rect; int subscriber_queue_size_; int 
publisher_queue_size_; void imageCb(const sensor_msgs::ImageConstPtr& left_image, const sensor_msgs::ImageConstPtr& right_image, const caros_common_msgs::StereoCalibrationConstPtr& msg_calib_rect); bool toPointsCB(caros_common_msgs::CapturePoints2::Request &req, caros_common_msgs::CapturePoints2::Response &res); bool toPoints(); bool subscribeToImages(); pcl::PointCloud<pcl::PointXYZRGB> toPCLpointcloud(const cv::Mat& mat, const cv::Mat& rgb); typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, caros_common_msgs::StereoCalibration> ApproximatePolicy; typedef message_filters::Synchronizer<ApproximatePolicy> ApproximateSync; boost::shared_ptr<ApproximateSync> approximate_sync_; typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, sensor_msgs::Image, caros_common_msgs::StereoCalibration> ExactPolicy; typedef message_filters::Synchronizer<ExactPolicy> ExactSync; boost::shared_ptr<ExactSync> exact_sync_; // boost::shared_ptr<PrepAndEarlyVisionCV> preprocessAndFilter; // IplImage * imgColorL; // IplImage * imgColorR; // IplImage * imgGreyFloatDummyL; // IplImage * imgGreyFloatDummyR; // IplImage * imgGreyL; // IplImage * imgGreyR; // Calibration::StereoCalibration* stereoCalibrationRaw; // Calibration::StereoCalibration stereoCalibrationRaw_; cv::Mat imgL; cv::Mat imgR; int numberOfDisparities; int SADWindowSize; boost::array<double, 9> left_camera_R; // leftCamra rotation to track change in calibration bool publishPoints; sensor_msgs::PointCloud2 scene; ros::Time stamp; std::string frame_id; ros::Publisher pointsPub; // for calibration data cv::Rect roi1, roi2; // cv::Mat_<double> M1, D1, M2, D2, R, T, R1, P1, R2, P2; // cv::Mat_<double> M1, M2, T, R1; ros::ServiceServer service; // int count; image_transport::Publisher pub_dum; }; void StereoPointCloud::onInit() { ROS_DEBUG_STREAM(" StereoPointCloud::onInit()"); ros::NodeHandle &nh = getNodeHandle(); // ros::NodeHandle &private_nh = getPrivateNodeHandle(); it_.reset(new image_transport::ImageTransport(nh)); // optional parameters nh.param("max_disparities", numberOfDisparities, 192); // should be dividable with 16 nh.param("block_size", SADWindowSize, 11); // should be odd number between 3 and 11 // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. subscriber_queue_size_ = 5; publisher_queue_size_ = 1; // sync_.reset(new Sync(sub_left_image_raw, sub_right_image_raw, sub_calib_raw, subscriber_queue_size_)); // sync_->registerCallback(boost::bind(&StereoPreprocess::imageCb, this, _1, _2, _3)); // sync_.reset(new Sync(sub_left_image_raw, sub_right_image_raw, subscriber_queue_size_)); //Synchronize inputs. Topic subscriptions happen on demand in the connection // callback. Optionally do approximate synchronization. 
bool approx = false; if (approx) { approximate_sync_.reset( new ApproximateSync(ApproximatePolicy(subscriber_queue_size_), sub_left_image_rect, sub_right_image_rect, sub_calib_rect)); approximate_sync_->registerCallback(boost::bind(&StereoPointCloud::imageCb, this, _1, _2, _3)); } else { exact_sync_.reset( new ExactSync(ExactPolicy(subscriber_queue_size_), sub_left_image_rect, sub_right_image_rect, sub_calib_rect)); exact_sync_->registerCallback(boost::bind(&StereoPointCloud::imageCb, this, _1, _2, _3)); } // Set up dynamic reconfiguration // ReconfigureServer::CallbackType f = boost::bind(&DisparityNodelet::configCb, // this, _1, _2); // reconfigure_server_.reset(new ReconfigureServer(config_mutex_, private_nh)); // reconfigure_server_->setCallback(f); // count=0; ros::SubscriberStatusCallback connect_cb = boost::bind(&StereoPointCloud::connectCb, this); // Make sure we don't enter connectCb() between advertising and assigning to pub_disparity_ boost::lock_guard<boost::mutex> lock(connect_mutex_); // pub_dum=it_->advertise("dum", 5, connect_cb); service = nh.advertiseService("grab_point_cloud", &StereoPointCloud::toPointsCB, this); pointsPub = nh.advertise<sensor_msgs::PointCloud2>("points", 1, connect_cb); // image_transport::TransportHints hints("rect_color", ros::TransportHints(), getPrivateNodeHandle()); // sub_left_image_rect.subscribe(*it_, "left/image_rect_color", 5); // sub_right_image_rect.subscribe(*it_, "right/image_rect_color", 5); // sub_calib_rect.subscribe(nh, "stereo_calib_rect", 5); } void StereoPointCloud::imageCb(const sensor_msgs::ImageConstPtr& left_image, const sensor_msgs::ImageConstPtr& right_image, const caros_common_msgs::StereoCalibrationConstPtr& calib) { ROS_DEBUG_STREAM("StereoPointCloud::imageCb"); cv_bridge::CvImagePtr leftImageCVPtr, rightImageCVPtr; try { leftImageCVPtr = cv_bridge::toCvCopy(left_image, left_image->encoding); rightImageCVPtr = cv_bridge::toCvCopy(right_image, left_image->encoding); } catch (cv_bridge::Exception& e) { ROS_ERROR("Could not convert from encodings. %s", e.what()); } if (calib->left.R != left_camera_R) { left_camera_R = calib->left.R; stereoCalibrationRect = CovisRos::toCovis(*calib); // M1 = stereoCalibrationRect.leftCamera().getIntrinsic().toCvMat(); // M2 = stereoCalibrationRect.rightCamera().getIntrinsic().toCvMat(); // T = stereoCalibrationRect.rightCamera().getTranslation(); } imgL = leftImageCVPtr->image; imgR = rightImageCVPtr->image; stamp = left_image->header.stamp; frame_id = left_image->header.frame_id; toPoints(); if (publishPoints) { // scene.header=left_image->header; pointsPub.publish(scene); } } bool StereoPointCloud::toPointsCB(caros_common_msgs::CapturePoints2::Request &req, caros_common_msgs::CapturePoints2::Response &res) { ROS_INFO("Get point cloud called"); subscribeToImages(); numberOfDisparities = req.max_disparity; SADWindowSize = req.blocksize; bool resLocal = toPoints(); if (resLocal) res.outputScene = scene; return resLocal; } bool StereoPointCloud::toPoints() { // ROS_INFO("StereoPointCloud::toPoints()"); cv::Mat imgL_grey; cv::Mat imgR_grey; cvtColor(imgL, imgL_grey, CV_RGB2GRAY); cvtColor(imgR, imgR_grey, CV_RGB2GRAY); cv::StereoBM bm; bm.state->roi1 = roi1; //TODO: this was never assigned bm.state->roi2 = roi2; //TODO: this was never assigned bm.state->preFilterCap = 31; bm.state->SADWindowSize = SADWindowSize > 0 ? 
SADWindowSize : 9; bm.state->minDisparity = 0; bm.state->numberOfDisparities = numberOfDisparities; bm.state->textureThreshold = 10; bm.state->uniquenessRatio = 10; bm.state->speckleWindowSize = 100; bm.state->speckleRange = 32; bm.state->disp12MaxDiff = -1; cv::Mat disp, disp8; bm(imgL_grey, imgR_grey, disp); //disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.)); //imwrite("temp_disparity.png", disp8); // Get parameters for reconstruction cv::Mat_<double> M1 = stereoCalibrationRect.leftCamera().getIntrinsic().toCvMat(); cv::Mat_<double> M2 = stereoCalibrationRect.rightCamera().getIntrinsic().toCvMat(); float T = stereoCalibrationRect.rightCamera().getTranslation()[0]; float f = M1(0, 0); // Focal length float B = T; // Baseline in the x direction float cx = M1(0, 2); // Center x coordinate float cy = M1(1, 2); // Center y coordinate float cx2 = M2(0, 2); // Center x coordinate of right image float dcx = cx - cx2; // Difference in center x coordinates int temp = disp.at<int16_t>(0, 0); for (int y = 0; y < disp.rows; ++y) { for (int x = 0; x < disp.cols; ++x) { if (temp > disp.at<int16_t>(y, x)) temp = disp.at<int16_t>(y, x); } } cv::Mat_<cv::Vec3f> xyz(disp.rows, disp.cols, cv::Vec3f(0, 0, 0)); // Resulting point cloud, initialized to zero for (int y = 0; y < disp.rows; ++y) { for (int x = 0; x < disp.cols; ++x) { cv::Vec3f& p = xyz(y, x); // Point to write to // Avoid invalid disparities if (disp.at<int16_t>(y, x) == temp) continue; if (disp.at<int16_t>(y, x) == 0) continue; // Maths from here: http://answers.opencv.org/upfiles/13535653931527438.jpg float d = float(disp.at<int16_t>(y, x)) / 16.0f; // Disparity float W = B / (-d + dcx); // Weighting p[0] = (float(x) - cx) * W; p[1] = (float(y) - cy) * W; p[2] = f * W; } } // ros::Time stamp = ros::Time::now(); pcl::PointCloud<pcl::PointXYZRGB> cloud = toPCLpointcloud(xyz, imgL); // pcl::PointCloud2 cloud2; pcl::toROSMsg<pcl::PointXYZRGB>(cloud, scene); scene.header.stamp = stamp; scene.header.frame_id = frame_id; return true; } pcl::PointCloud<pcl::PointXYZRGB> StereoPointCloud::toPCLpointcloud(const cv::Mat& mat, const cv::Mat& rgb) { pcl::PointCloud<pcl::PointXYZRGB> cloud(mat.cols,mat.rows,pcl::PointXYZRGB(std::numeric_limits<double>::quiet_NaN(),std::numeric_limits<double>::quiet_NaN(),std::numeric_limits<double>::quiet_NaN())); cloud.reserve(mat.cols * mat.rows); const double max_z = 2e3; // Disregard points farther away than 2 m //FILE* fp = fopen(filename, "wt"); int counter=-1; for (int y = 0; y < mat.rows; y++) { for (int x = 0; x < mat.cols; x++) { counter++; cv::Vec3f point = mat.at<cv::Vec3f>(y, x); // This omits zero points if (point[0] == 0 && point[1] == 0 && point[2] == 0) continue; // This omits points equal to or larger than max_z if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue; //fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]); // Point to write to pcl::PointXYZRGB p; // Scale position from mm to m p.x = 0.001 * point[0]; p.y = 0.001 * point[1]; p.z = 0.001 * point[2]; // OpenCV reads in images in BGR order, so we must switch to BGR for PCL cv::Vec3b pbgr = rgb.at<cv::Vec3b>(y, x); p.b = pbgr[0]; p.g = pbgr[1]; p.r = pbgr[2]; // cloud.push_back(p); cloud.at(counter)=p; } } return cloud; } void StereoPointCloud::connectCb() { // ROS_INFO_STREAM("StereoPointCloud::connectCb().."); boost::lock_guard<boost::mutex> lock(connect_mutex_); if (pointsPub.getNumSubscribers() == 0) { ROS_INFO_STREAM("publishing point clouds is off"); publishPoints = false; // 
sub_left_image_rect.unsubscribe(); // sub_right_image_rect.unsubscribe(); // sub_calib_rect.unsubscribe(); } else if (!sub_left_image_rect.getSubscriber()) { // ros::NodeHandle &nh = getNodeHandle(); ROS_INFO_STREAM("publishing point clouds is on"); publishPoints = true; subscribeToImages(); // image_transport::TransportHints hints("rect_color", ros::TransportHints(), getPrivateNodeHandle()); // sub_left_image_rect.subscribe(*it_, "left/image_rect_color", 5); // sub_right_image_rect.subscribe(*it_, "right/image_rect_color", 5); // sub_calib_rect.subscribe(nh, "stereo_calib_rect", 5); } } bool StereoPointCloud::subscribeToImages() { ros::NodeHandle &nh = getNodeHandle(); image_transport::TransportHints hints("rect_color", ros::TransportHints(), getPrivateNodeHandle()); sub_left_image_rect.subscribe(*it_, "left/image_rect_color", 5); sub_right_image_rect.subscribe(*it_, "right/image_rect_color", 5); sub_calib_rect.subscribe(nh, "stereo_calib_rect", 5); return true; } PLUGINLIB_DECLARE_CLASS(stereo_camera, StereoPointCloud, nodelet_stereo_camera::StereoPointCloud, nodelet::Nodelet); }
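For reference, the per-pixel reprojection implemented in toPoints() above amounts to (same symbols as the code comments)

\[ d = \frac{\mathrm{disp}(y,x)}{16}, \qquad W = \frac{B}{(c_x - c_x') - d}, \qquad X = (x - c_x)\,W, \quad Y = (y - c_y)\,W, \quad Z = f\,W, \]

where f is the focal length, B the stereo baseline (the x component of the right camera's translation), (c_x, c_y) the left principal point and c_x' the x coordinate of the right principal point; toPCLpointcloud() then scales the coordinates from millimetres to metres (factor 0.001) and discards points farther away than 2 m.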
module STLC where open import Data.Nat open import Data.Empty open import Relation.Binary.PropositionalEquality -- infix 4 _⊢_ -- infix 4 _∋_∶_ -- infixl 5 _,_ infixr 7 _⟶_ infix 5 ƛ_ infixl 7 _∙_ -- infix 9 `_ infixl 10 _∶_ -- infix 5 μ_ -- infix 8 `suc_ -- infix 9 S_ -- infix 9 #_ data Type : Set where _⟶_ : Type → Type → Type ⊤ : Type data Context : Set data Term : Set infix 4 _∋_∶_ data _∋_∶_ : Context → Term → Type → Set infixl 10 _,_∶_ data Context where ∅ : Context _,_∶_ : Context → Term → Type → Context data Term where ‵_ : ∀ {Γ x A} → Γ ∋ x ∶ A → Term ƛ_ : Term → Term _∙_ : Term → Term → Term _∶_ : Term → Type → Term tt : Term data _∋_∶_ where Z : ∀ {Γ x A} --------- → Γ , x ∶ A ∋ x ∶ A S_ : ∀ {Γ x A y B} → Γ ∋ x ∶ A --------- → Γ , y ∶ B ∋ x ∶ A -- lookup : Context → ℕ → Type -- lookup (Γ , A) zero = A -- lookup (Γ , _) (suc n) = lookup Γ n -- lookup ∅ _ = ⊥-elim impossible -- where postulate impossible : ⊥ -- count : ∀ {Γ} → (n : ℕ) → Γ ∋ lookup Γ n -- count {Γ , _} zero = Z -- count {Γ , _} (suc n) = S (count n) -- count {∅} _ = ⊥-elim impossible -- where postulate impossible : ⊥ -- -- #_ : ∀ {Γ} → (n : ℕ) → Γ ⊢ lookup Γ n -- -- # n = ` count n -- a = {! !} infix 4 _⊢_⇐_ infix 4 _⊢_⇒_ data _⊢_⇐_ : Context → Term → Type → Set data _⊢_⇒_ : Context → Term → Type → Set variable Γ : Context x e f : Term A B : Type data _⊢_⇒_ where Var : Γ ∋ x ∶ A ---------------------------- → Γ ⊢ x ⇒ A Anno : Γ ⊢ e ⇐ A ---------------------------- → Γ ⊢ (e ∶ A) ⇒ A ⟶E : Γ ⊢ f ⇒ (A ⟶ B) → Γ ⊢ e ⇐ A ---------------------------- → Γ ⊢ f ∙ e ⇒ B data _⊢_⇐_ where Sub : Γ ∋ e ∶ A → A ≡ B ---------------------------- → Γ ⊢ e ⇐ B ⊤I : ∀ {Γ} ---------------------------- → Γ ⊢ tt ⇐ ⊤ ⟶I : (Γ , x ∶ A) ⊢ e ⇐ B ---------------------------- → Γ ⊢ ƛ e ⇐ A ⟶ B 4-4-Synth : ∅ , x ∶ A ⊢ x ⇒ A 4-4-Synth = Var Z 4-4-Check : ∅ , x ∶ A ⊢ x ⇐ A 4-4-Check = Sub Z refl 4-4-Sym→Elim : ∅ , f ∶ (A ⟶ B) , x ∶ A ⊢ (f ∙ x) ⇒ B 4-4-Sym→Elim = ⟶E (Var (S Z)) (Sub Z refl)
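Read declaratively, the two judgements defined above give the usual bidirectional rules (⇒ synthesises a type, ⇐ checks against one); note that Sub, as written, consults the context lookup Γ ∋ e ∶ A rather than a synthesis premise:

\[
\frac{\Gamma \ni x : A}{\Gamma \vdash x \Rightarrow A}\;\textsc{Var}
\qquad
\frac{\Gamma \vdash e \Leftarrow A}{\Gamma \vdash (e : A) \Rightarrow A}\;\textsc{Anno}
\qquad
\frac{\Gamma \vdash f \Rightarrow A \to B \qquad \Gamma \vdash e \Leftarrow A}{\Gamma \vdash f\,e \Rightarrow B}\;{\to}\mathrm{E}
\]
\[
\frac{\Gamma \ni e : A \qquad A \equiv B}{\Gamma \vdash e \Leftarrow B}\;\textsc{Sub}
\qquad
\frac{\vphantom{X}}{\Gamma \vdash \mathsf{tt} \Leftarrow \top}\;{\top}\mathrm{I}
\qquad
\frac{\Gamma, x : A \vdash e \Leftarrow B}{\Gamma \vdash \lambda e \Leftarrow A \to B}\;{\to}\mathrm{I}
\]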
using Rebugger using Rebugger: StopException using Test, UUIDs, InteractiveUtils, REPL, Pkg, HeaderREPLs using REPL.LineEdit using Revise, Colors if !isdefined(Main, :RebuggerTesting) includet("testmodule.jl") # so the source code here gets loaded end const empty_kwvarargs = Rebugger.kwstasher() uuidextractor(str) = UUID(match(r"getstored\(\"([a-z0-9\-]+)\"\)", str).captures[1]) struct ErrorsOnShow end Base.show(io::IO, ::ErrorsOnShow) = throw(ArgumentError("no show")) @testset "Rebugger" begin id = uuid1() @test uuidextractor("vars = getstored(\"$id\") and more stuff") == id @testset "Debug core" begin @testset "Deepcopy" begin args = (3.2, rand(3,3), Rebugger, [Rebugger], "hello", sum, (2,3)) argc = Rebugger.safe_deepcopy(args...) @test argc == args end @testset "Signatures" begin @test Rebugger.signature_names!(:(f(x::Int, @nospecialize(y::String)))) == (:f, (:x, :y), (), ()) @test Rebugger.signature_names!(:(f(x::Int, $(Expr(:meta, :nospecialize, :(y::String)))))) == (:f, (:x, :y), (), ()) ex = :(f(::Type{T}, ::IndexStyle, x::Int, ::IndexStyle) where T) @test Rebugger.signature_names!(ex) == (:f, (:T, :__IndexStyle_1, :x, :__IndexStyle_2), (), ()) @test ex == :(f(::Type{T}, __IndexStyle_1::IndexStyle, x::Int, __IndexStyle_2::IndexStyle) where T) ex = :(f(Tuseless::Type{T}, ::IndexStyle, x::Int) where T) @test Rebugger.signature_names!(ex) == (:f, (:Tuseless, :__IndexStyle_1, :x), (), (:T,)) @test ex == :(f(Tuseless::Type{T}, __IndexStyle_1::IndexStyle, x::Int) where T) # issue #34 ex = :(_mapreduce_dim(f, op, ::NamedTuple{()}, A::AbstractArray, ::Colon)) @test Rebugger.signature_names!(ex) == (:_mapreduce_dim, (:f, :op, :__NamedTuple_1, :A, :__Colon_1), (), ()) @test ex == :(_mapreduce_dim(f, op, __NamedTuple_1::NamedTuple{()}, A::AbstractArray, __Colon_1::Colon)) end @testset "Caller buffer capture and insertion" begin function run_insertion(str, atstr) RebuggerTesting.cbdata1[] = RebuggerTesting.cbdata2[] = Rebugger.stashed[] = nothing io = IOBuffer() idx = findfirst(atstr, str) print(io, str) seek(io, first(idx)-1) callexpr = Rebugger.prepare_caller_capture!(io) capstring = String(take!(io)) capexpr = Meta.parse(capstring) try Core.eval(RebuggerTesting, capexpr) catch err isa(err, StopException) || rethrow(err) end end str = """ for i = 1:5 cbdata1[] = i foo(12, 13; akw="modified") cbdata2[] = i end """ @test run_insertion(str, "foo") @test RebuggerTesting.cbdata1[] == 1 @test RebuggerTesting.cbdata2[] == nothing @test Rebugger.stashed[] == (RebuggerTesting.foo, (12, 13), Rebugger.kwstasher(;akw="modified")) str = """ for i = 1:5 error("not caught") foo(12, 13; akw="modified") end """ @test_throws ErrorException("not caught") run_insertion(str, "foo") @test_throws Rebugger.StepException("Rebugger can only step into expressions, got 77") run_insertion("x = 77", "77") # Module-scoped calls io = IOBuffer() cmdstr = "Scope.func(x, y, z)" print(io, cmdstr) seek(io, 0) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(Scope.func(x, y, z)) take!(io) # getindex and setindex! expressions cmdstr = "x = a[2,3]" print(io, cmdstr) seek(io, first(findfirst("a", cmdstr))-1) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(getindex(a, 2, 3)) take!(io) cmdstr = "a[2,3] = x" print(io, cmdstr) seek(io, 0) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(setindex!(a, x, 2, 3)) take!(io) # Expressions that go beyond "user intention". 
# More generally we should support marking, but in the case of && and || it's # handled by lowering, so there is nothing to step into anyway. for cmdstr in ("f1(x) && f2(z)", "f1(x) || f2(z)") print(io, cmdstr) seek(io, 0) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(f1(x)) take!(io) end # issue #5 cmdstr = "abs(abs(x))" print(io, cmdstr) seek(io, 4) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(abs(x)) take!(io) # splat expressions cmdstr = "foo(bar(x)..., 1)" print(io, cmdstr) idx = findfirst("bar", cmdstr) seek(io, first(idx)-1) callexpr = Rebugger.prepare_caller_capture!(io) @test callexpr == :(bar(x)) end @testset "Callee variable capture" begin def = quote function complexargs(x::A, y=1, str="1.0"; kw1=Float64, kw2=7, kwargs...) where A<:AbstractArray{T} where T return (x .+ y, parse(kw1, str), kw2) end end f = Core.eval(RebuggerTesting, def) @test f([8,9]) == ([9,10], 1.0, 7) m = collect(methods(f))[end] uuid = Rebugger.method_capture_from_callee(m, def) @test Rebugger.method_capture_from_callee(m, def) == uuid # calling twice returns the previously-defined objects fc = Rebugger.storefunc[uuid] @test_throws StopException fc([8,9], 2, "13"; kw1=Int, kw2=0) @test Rebugger.stored[uuid].varnames == (:x, :y, :str, :kw1, :kw2, :kwargs, :A, :T) @test Rebugger.stored[uuid].varvals == ([8,9], 2, "13", Int, 0, empty_kwvarargs, Vector{Int}, Int) @test_throws StopException fc([8,9]; otherkw=77) @test Rebugger.stored[uuid].varnames == (:x, :y, :str, :kw1, :kw2, :kwargs, :A, :T) @test Rebugger.stored[uuid].varvals == ([8,9], 1, "1.0", Float64, 7, pairs((otherkw=77,)), Vector{Int}, Int) uuid2 = Rebugger.method_capture_from_callee(m, def; overwrite=true) @test uuid2 != uuid # note overwriting methods are not stored in storefunc, but our old `f` will call the new method @test f([8,9], 2, "13"; kw1=Int, kw2=0) == ([10,11], 13, 0) Core.eval(RebuggerTesting, def) @test Rebugger.stored[uuid2].varnames == (:x, :y, :str, :kw1, :kw2, :kwargs, :A, :T) @test Rebugger.stored[uuid2].varvals == ([8,9], 2, "13", Int, 0, empty_kwvarargs, Vector{Int}, Int) def = quote @inline modifies!(x) = (x[1] += 1; x) end f = Core.eval(RebuggerTesting, def) @test f([8,9]) == [9,9] m = collect(methods(f))[end] uuid = Rebugger.method_capture_from_callee(m, def) fc = Rebugger.storefunc[uuid] @test_throws StopException fc([8,9]) @test Rebugger.stored[uuid].varnames == (:x,) @test Rebugger.stored[uuid].varvals == ([8,9],) # Extensions of functions from other modules m = @which RebuggerTesting.foo() uuid = Rebugger.method_capture_from_callee(m) fc = Rebugger.storefunc[uuid] @test_throws StopException fc() @test Rebugger.stored[uuid].varnames == Rebugger.stored[uuid].varvals == () end @testset "Step in" begin function run_stepin(str, atstr) io = IOBuffer() idx = findfirst(atstr, str) @test !isempty(idx) print(io, str) seek(io, first(idx)-1) Rebugger.stepin(io) end str = "RebuggerTesting.snoop0()" uuidref, cmd = run_stepin(str, str) uuid1 = uuidextractor(cmd) @test uuid1 == uuidref @test cmd == """ @eval Main.RebuggerTesting let () = Main.Rebugger.getstored("$uuid1") begin snoop1("Spy") end end""" _, cmd = run_stepin(cmd, "snoop1") uuid2 = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (word,) = Main.Rebugger.getstored("$uuid2") begin snoop2(word, "on") end end""" _, cmd = run_stepin(cmd, "snoop2") uuid3 = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (word1, word2) = Main.Rebugger.getstored("$uuid3") begin snoop3(word1, word2, "arguments") end 
end""" @test Rebugger.getstored(string(uuid1)) == () @test Rebugger.getstored(string(uuid2)) == ("Spy",) @test Rebugger.getstored(string(uuid3)) == ("Spy", "on") str = "RebuggerTesting.kwvarargs(1)" _, cmd = run_stepin(str, str) uuid = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (x, kw1, kwargs) = Main.Rebugger.getstored("$uuid") begin kwvarargs2(x; kw1=kw1, kwargs...) end end""" @test Rebugger.getstored(string(uuid)) == (1, 1, empty_kwvarargs) cmd = run_stepin(cmd, "kwvarargs2") str = "RebuggerTesting.kwvarargs(1; passthrough=false)" _, cmd = run_stepin(str, str) uuid = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (x, kw1, kwargs) = Main.Rebugger.getstored("$uuid") begin kwvarargs2(x; kw1=kw1, kwargs...) end end""" @test Rebugger.getstored(string(uuid)) == (1, 1, pairs((passthrough=false,))) _, cmd = run_stepin(cmd, "kwvarargs2") # Step in to call-overloading methods str = "RebuggerTesting.hv_test(\"hi\")" _, cmd = run_stepin(str, str) uuid = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (hv, str) = Main.Rebugger.getstored("$uuid") begin hv.x end end""" @test Rebugger.getstored(string(uuid)) == (RebuggerTesting.hv_test, "hi") # Step in to methods that do tuple-destructuring of arguments str = "RebuggerTesting.destruct(1, (2,3), 4)" @test eval(Meta.parse(str)) == 2 _, cmd = run_stepin(str, str) uuid = uuidextractor(cmd) @test cmd == """ @eval Main.RebuggerTesting let (x, (a, b), y) = Main.Rebugger.getstored("$uuid") begin a end end""" @test Rebugger.getstored(string(uuid)) == (1, (2,3), 4) # Step in to a broadcast call str = "sum.([[1,2], (3,5)])" uuid, cmd = run_stepin(str, str) s = Rebugger.stored[uuid] @test s.method.name == :broadcast @test cmd == """ @eval Base.Broadcast let (f, As, Tf) = Main.Rebugger.getstored("$uuid") begin materialize(broadcasted(f, As...)) end end""" @test Rebugger.getstored(string(uuid)) == (sum, (Any[[1,2], (3,5)],), typeof(sum)) Core.eval(Main, Meta.parse(cmd)) == [3,8] str = "max.([1,5], [2,-3])" uuid, cmd = run_stepin(str, str) s = Rebugger.stored[uuid] @test s.method.name == :broadcast @test cmd == """ @eval Base.Broadcast let (f, As, Tf) = Main.Rebugger.getstored("$uuid") begin materialize(broadcasted(f, As...)) end end""" @test Rebugger.getstored(string(uuid)) == (max, ([1,5], [2,-3]), typeof(max)) Core.eval(Main, Meta.parse(cmd)) == [2,5] # Step in to a do block str = "RebuggerTesting.calldo()" uuidref, cmd = run_stepin(str, str) uuid1 = uuidextractor(cmd) @test uuid1 == uuidref @test cmd == """ @eval Main.RebuggerTesting let () = Main.Rebugger.getstored("$uuid1") begin apply(2, 3, 4) do x, y, z snoop3(x, y, z) end end end""" uuidref, cmd = run_stepin(cmd, "apply") uuid1 = uuidextractor(cmd) @test uuid1 == uuidref @test cmd == """ @eval Main.RebuggerTesting let (f, args) = Main.Rebugger.getstored("$uuid1") begin kwvarargs(f) f(args...) 
end end""" end @testset "Capture stacktrace" begin uuids = nothing mktemp() do path, iostacktrace redirect_stderr(iostacktrace) do uuids = Rebugger.capture_stacktrace(RebuggerTesting, :(snoop0())) end flush(iostacktrace) str = read(path, String) @test occursin("snoop3", str) end @test Rebugger.stored[uuids[1]].varvals == () @test Rebugger.stored[uuids[2]].varvals == ("Spy",) @test Rebugger.stored[uuids[3]].varvals == ("Spy", "on") @test Rebugger.stored[uuids[4]].varvals == ("Spy", "on", "arguments", "simply", empty_kwvarargs, String) @test_throws ErrorException("oops") RebuggerTesting.snoop0() st = try RebuggerTesting.kwfunctop(3) catch; stacktrace(catch_backtrace()) end usrtrace, defs = Rebugger.pregenerated_stacktrace(st; topname=Symbol("macro expansion")) @test length(unique(usrtrace)) == length(usrtrace) m = @which RebuggerTesting.kwfuncmiddle(1,1) @test m ∈ usrtrace # A case that tests inlining and several other aspects of argument capture ex = :([1, 2, 3] .* [1, 2]) # Capture the actual stack trace, trimming it to avoid # anything involving the `eval` itself trace = try Core.eval(Main, ex) catch stacktrace(catch_backtrace()) end i = 1 while i <= length(trace) t = trace[i] if t.func == Symbol("top-level scope") deleteat!(trace, i:length(trace)) end i += 1 end # Get the capture from Rebugger uuids = mktemp() do path, iostacktrace redirect_stderr(iostacktrace) do Rebugger.capture_stacktrace(Main, ex) end end @test length(uuids) == length(trace) for (uuid, t) in zip(reverse(uuids), trace) @test Rebugger.stored[uuid].method.name == t.func end # Try capturing a method from Core. On binaries this would throw # if we didn't catch it. # Because the first entry is "top-level scope", and that terminates # processing in Rebugger.pregenerated_stacktrace, we have to intervene a bit. 
mod, ex = Main, :(Core.throw(ArgumentError("oops"))) trace = try Core.eval(mod, command) catch err stacktrace(catch_backtrace()) end usrtrace, defs = Rebugger.pregenerated_stacktrace(trace[2:3]) @test usrtrace isa Vector end end @testset "User interface" begin @testset "Printing header" begin h = Rebugger.RebugHeader() h.uuid = uuid = uuid1() meth = @which RebuggerTesting.foo(1,2) h.current_method = meth Rebugger.stored[uuid] = Rebugger.Stored(meth, (:x, :y), (1, ErrorsOnShow())) h.warnmsg = "This is a warning" h.errmsg = "You will not have a second chance" io = IOBuffer() Rebugger.print_header(io, h) str = String(take!(io)) @test startswith(str, """ This is a warning You will not have a second chance foo(x, y) in Main.RebuggerTesting at """) # skip the "upper" part of the file location @test endswith(str, "testmodule.jl:7\n x = 1\n y errors in its show method") end @testset "Demos" begin function prepare_step_command(cmd, atstr) LineEdit.edit_clear(mistate) idx = findfirst(atstr, cmd) @test !isempty(idx) LineEdit.replace_line(mistate, cmd) buf = LineEdit.buffer(mistate) seek(buf, first(idx)-1) return mistate end function do_capture_stacktrace(cmd) l = length(hist.history) LineEdit.replace_line(mistate, cmd) Rebugger.capture_stacktrace(mistate) LineEdit.transition(mistate, julia_prompt) return l+1:length(hist.history) end if isdefined(Base, :active_repl) repl = Base.active_repl mistate = repl.mistate julia_prompt = find_prompt(mistate, "julia") LineEdit.transition(mistate, julia_prompt) hist = julia_prompt.hist header = Rebugger.rebug_prompt_ref[].repl.header histdel = 0 @testset "show demo" begin # this is a demo that appears in the documentation cmd1 = "show([1,2,4])" s = prepare_step_command(cmd1, cmd1) Rebugger.stepin(s) histdel += 1 uuid = header.uuid @test Rebugger.getstored(string(uuid)) == ([1,2,4],) cmd2 = LineEdit.content(s) s = prepare_step_command(cmd2, "show(stdout::IO, x)") Rebugger.stepin(s) histdel += 1 uuid = header.uuid @test Rebugger.getstored(string(uuid))[2] == [1,2,4] cmd3 = LineEdit.content(s) s = prepare_step_command(cmd3, "_show_empty") Rebugger.stepin(s) histdel += 1 @test header.warnmsg == "Execution did not reach point" end @testset "Colors demo" begin # another demo that appears in the documentation desc = "hsl(80%, 20%, 15%)" cmd = "colorant\"hsl(80%, 20%, 15%)\"" local idx mktemp() do path, io redirect_stderr(io) do logs, _ = Test.collect_test_logs() do idx = do_capture_stacktrace(cmd) end end flush(io) seek(io, 0) @test countlines(io) >= 4 end histdel += length(idx) @test length(idx) >= 5 @test hist.history[idx[1]] == cmd @test occursin("error", hist.history[idx[end]]) end @testset "Pkg demo" begin updated = Pkg.UPDATED_REGISTRY_THIS_SESSION[] Pkg.UPDATED_REGISTRY_THIS_SESSION[] = true uuids = Rebugger.capture_stacktrace(Pkg, :(add("NoPkg"))) @test length(uuids) >= 2 Pkg.UPDATED_REGISTRY_THIS_SESSION[] = updated end @testset "Empty stacktraces" begin cmd = "ccall(:jl_throw, Nothing, (Any,), ArgumentError(\"oops\"))" mktemp() do path, io redirect_stderr(io) do LineEdit.replace_line(mistate, cmd) @test Rebugger.capture_stacktrace(mistate) === nothing LineEdit.transition(mistate, julia_prompt) end flush(io) str = read(path, String) @test occursin("failed to capture", str) end end LineEdit.edit_clear(mistate) l = length(hist.history) deleteat!(hist.history, l-histdel+1:l) deleteat!(hist.modes, l-histdel+1:l) hist.cur_idx = length(hist.history)+1 end end end end
(* Title: A Definitional Encoding of TLA in Isabelle/HOL Authors: Gudmund Grov <ggrov at inf.ed.ac.uk> Stephan Merz <Stephan.Merz at loria.fr> Year: 2011 Maintainer: Gudmund Grov <ggrov at inf.ed.ac.uk> *) section \<open>Refining a Buffer Specification\<close> theory Buffer imports State begin text \<open> We specify a simple FIFO buffer and prove that two FIFO buffers in a row implement a FIFO buffer. \<close> subsection "Buffer specification" text \<open> The following definitions all take three parameters: a state function representing the input channel of the FIFO buffer, another representing the internal queue, and a third one representing the output channel. These parameters will be instantiated later in the definition of the double FIFO. \<close> definition BInit :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "BInit ic q oc \<equiv> TEMP $q = #[] \<and> $ic = $oc" \<comment> \<open>initial condition of buffer\<close> definition Enq :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "Enq ic q oc \<equiv> TEMP ic$ \<noteq> $ic \<and> q$ = $q @ [ ic$ ] \<and> oc$ = $oc" \<comment> \<open>enqueue a new value\<close> definition Deq :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "Deq ic q oc \<equiv> TEMP # 0 < length<$q> \<and> oc$ = hd<$q> \<and> q$ = tl<$q> \<and> ic$ = $ic" \<comment> \<open>dequeue value at front\<close> definition Nxt :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "Nxt ic q oc \<equiv> TEMP (Enq ic q oc \<or> Deq ic q oc)" \<comment> \<open>internal specification with buffer visible\<close> definition ISpec :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "ISpec ic q oc \<equiv> TEMP BInit ic q oc \<and> \<box>[Nxt ic q oc]_(ic,q,oc) \<and> WF(Deq ic q oc)_(ic,q,oc)" \<comment> \<open>external specification: buffer hidden\<close> definition Spec :: "'a statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "Spec ic oc == TEMP (\<exists>\<exists> q. ISpec ic q oc)" subsection "Properties of the buffer" text \<open> The buffer never enqueues the same element twice. We therefore have the following invariant: \begin{itemize} \item any two subsequent elements in the queue are different, and the last element in the queue is different from the value of the output channel, \item if the queue is non-empty then the last element in the queue is the value that appears on the input channel, \item if the queue is empty then the values on the output and input channels are equal. \end{itemize} The following auxiliary predicate \<open>noreps\<close> is true if no two subsequent elements in a list are identical. \<close> definition noreps :: "'a list \<Rightarrow> bool" where "noreps xs \<equiv> \<forall>i < length xs - 1. 
xs!i \<noteq> xs!(Suc i)" definition BInv :: "'a statefun \<Rightarrow> 'a list statefun \<Rightarrow> 'a statefun \<Rightarrow> temporal" where "BInv ic q oc \<equiv> TEMP List.last<$oc # $q> = $ic \<and> noreps<$oc # $q>" lemmas buffer_defs = BInit_def Enq_def Deq_def Nxt_def ISpec_def Spec_def BInv_def lemma ISpec_stutinv: "STUTINV (ISpec ic q oc)" unfolding buffer_defs by (simp add: bothstutinvs livestutinv) lemma Spec_stutinv: "STUTINV Spec ic oc" unfolding buffer_defs by (simp add: bothstutinvs livestutinv eexSTUT) text \<open>A lemma about lists that is useful in the following\<close> lemma tl_self_iff_empty[simp]: "(tl xs = xs) = (xs = [])" proof assume 1: "tl xs = xs" show "xs = []" proof (rule ccontr) assume "xs \<noteq> []" with 1 show "False" by (auto simp: neq_Nil_conv) qed qed (auto) lemma tl_self_iff_empty'[simp]: "(xs = tl xs) = (xs = [])" proof assume 1: "xs = tl xs" show "xs = []" proof (rule ccontr) assume "xs \<noteq> []" with 1 show "False" by (auto simp: neq_Nil_conv) qed qed (auto) lemma Deq_visible: assumes v: "\<turnstile> Unchanged v \<longrightarrow> Unchanged q" shows "|~ <Deq ic q oc>_v = Deq ic q oc" proof (auto simp: tla_defs) fix w assume deq: "w \<Turnstile> Deq ic q oc" and unch: "v (w (Suc 0)) = v (w 0)" from unch v[unlifted] have "q (w (Suc 0)) = q (w 0)" by (auto simp: tla_defs) with deq show "False" by (auto simp: Deq_def tla_defs) qed lemma Deq_enabledE: "\<turnstile> Enabled <Deq ic q oc>_(ic,q,oc) \<longrightarrow> $q ~= #[]" by (auto elim!: enabledE simp: Deq_def tla_defs) text \<open> We now prove that \<open>BInv\<close> is an invariant of the Buffer specification. We need several lemmas about \<open>noreps\<close> that are used in the invariant proof. \<close> lemma noreps_empty [simp]: "noreps []" by (auto simp: noreps_def) lemma noreps_singleton: "noreps [x]" \<comment> \<open>special case of following lemma\<close> by (auto simp: noreps_def) lemma noreps_cons [simp]: "noreps (x # xs) = (noreps xs \<and> (xs = [] \<or> x \<noteq> hd xs))" proof (auto simp: noreps_singleton) assume cons: "noreps (x # xs)" show "noreps xs" proof (auto simp: noreps_def) fix i assume i: "i < length xs - Suc 0" and eq: "xs!i = xs!(Suc i)" from i have "Suc i < length (x#xs) - 1" by auto moreover from eq have "(x#xs)!(Suc i) = (x#xs)!(Suc (Suc i))" by auto moreover note cons ultimately show False by (auto simp: noreps_def) qed next assume 1: "noreps (hd xs # xs)" and 2: "xs \<noteq> []" from 2 obtain x xxs where "xs = x # xxs" by (cases xs, auto) with 1 show False by (auto simp: noreps_def) next assume 1: "noreps xs" and 2: "x \<noteq> hd xs" show "noreps (x # xs)" proof (auto simp: noreps_def) fix i assume i: "i < length xs" and eq: "(x # xs)!i = xs!i" from i obtain y ys where xs: "xs = y # ys" by (cases xs, auto) show False proof (cases i) assume "i = 0" with eq 2 xs show False by auto next fix k assume k: "i = Suc k" with i eq xs 1 show False by (auto simp: noreps_def) qed qed qed lemma noreps_append [simp]: "noreps (xs @ ys) = (noreps xs \<and> noreps ys \<and> (xs = [] \<or> ys = [] \<or> List.last xs \<noteq> hd ys))" proof auto assume 1: "noreps (xs @ ys)" show "noreps xs" proof (auto simp: noreps_def) fix i assume i: "i < length xs - Suc 0" and eq: "xs!i = xs!(Suc i)" from i have "i < length (xs @ ys) - Suc 0" by auto moreover from i eq have "(xs @ ys)!i = (xs@ys)!(Suc i)" by (auto simp: nth_append) moreover note 1 ultimately show "False" by (auto simp: noreps_def) qed next assume 1: "noreps (xs @ ys)" show "noreps ys" proof (auto simp: noreps_def) fix i 
assume i: "i < length ys - Suc 0" and eq: "ys!i = ys!(Suc i)" from i have "i + length xs < length (xs @ ys) - Suc 0" by auto moreover from i eq have "(xs @ ys)!(i+length xs) = (xs@ys)!(Suc (i + length xs))" by (auto simp: nth_append) moreover note 1 ultimately show "False" by (auto simp: noreps_def) qed next assume 1: "noreps (xs @ ys)" and 2: "xs \<noteq> []" and 3: "ys \<noteq> []" and 4: "List.last xs = hd ys" from 2 obtain x xxs where xs: "xs = x # xxs" by (cases xs, auto) from 3 obtain y yys where ys: "ys = y # yys" by (cases ys, auto) from xs ys have 5: "length xxs < length (xs @ ys) - 1" by auto from 4 xs ys have "(xs @ ys) ! (length xxs) = (xs @ ys) ! (Suc (length xxs))" by (auto simp: nth_append last_conv_nth) with 5 1 show "False" by (auto simp: noreps_def) next assume 1: "noreps xs" and 2: "noreps ys" and 3: "List.last xs \<noteq> hd ys" show "noreps (xs @ ys)" proof (cases "xs = [] \<or> ys = []") case True with 1 2 show ?thesis by auto next case False then obtain x xxs where xs: "xs = x # xxs" by (cases xs, auto) from False obtain y yys where ys: "ys = y # yys" by (cases ys, auto) show ?thesis proof (auto simp: noreps_def) fix i assume i: "i < length xs + length ys - Suc 0" and eq: "(xs @ ys)!i = (xs @ ys)!(Suc i)" show "False" proof (cases "i < length xxs") case True hence "i < length (x # xxs)" by simp hence xsi: "((x # xxs) @ ys)!i = (x # xxs)!i" unfolding nth_append by simp from True have "(xxs @ ys)!i = xxs!i" by (auto simp: nth_append) with True xsi eq 1 xs show "False" by (auto simp: noreps_def) next assume i2: "\<not>(i < length xxs)" show False proof (cases "i = length xxs") case True with xs have xsi: "(xs @ ys)!i = List.last xs" by (auto simp: nth_append last_conv_nth) from True xs ys have "(xs @ ys)!(Suc i) = y" by (auto simp: nth_append) with 3 ys eq xsi show False by simp next case False with i2 xs have xsi: "\<not>(i < length xs)" by auto hence "(xs @ ys)!i = ys!(i - length xs)" by (simp add: nth_append) moreover from xsi have "Suc i - length xs = Suc (i - length xs)" by auto with xsi have "(xs @ ys)!(Suc i) = ys!(Suc (i - length xs))" by (simp add: nth_append) moreover from i xsi have "i - length xs < length ys - 1" by auto with 2 have "ys!(i - length xs) \<noteq> ys!(Suc (i - length xs))" by (auto simp: noreps_def) moreover note eq ultimately show False by simp qed qed qed qed qed lemma ISpec_BInv_lemma: "\<turnstile> BInit ic q oc \<and> \<box>[Nxt ic q oc]_(ic,q,oc) \<longrightarrow> \<box>(BInv ic q oc)" proof (rule invmono) show "\<turnstile> BInit ic q oc \<longrightarrow> BInv ic q oc" by (auto simp: BInit_def BInv_def) next have enq: "|~ Enq ic q oc \<longrightarrow> BInv ic q oc \<longrightarrow> \<circle>(BInv ic q oc)" by (auto simp: Enq_def BInv_def tla_defs) have deq: "|~ Deq ic q oc \<longrightarrow> BInv ic q oc \<longrightarrow> \<circle>(BInv ic q oc)" by (auto simp: Deq_def BInv_def tla_defs neq_Nil_conv) have unch: "|~ Unchanged (ic,q,oc) \<longrightarrow> BInv ic q oc \<longrightarrow> \<circle>(BInv ic q oc)" by (auto simp: BInv_def tla_defs) show "|~ BInv ic q oc \<and> [Nxt ic q oc]_(ic, q, oc) \<longrightarrow> \<circle>(BInv ic q oc)" by (auto simp: Nxt_def actrans_def elim: enq[unlift_rule] deq[unlift_rule] unch[unlift_rule]) qed theorem ISpec_BInv: "\<turnstile> ISpec ic q oc \<longrightarrow> \<box>(BInv ic q oc)" by (auto simp: ISpec_def intro: ISpec_BInv_lemma[unlift_rule]) subsection "Two FIFO buffers in a row implement a buffer" locale DBuffer = fixes inp :: "'a statefun" \<comment> \<open>input channel for double FIFO\<close> 
and mid :: "'a statefun" \<comment> \<open>channel linking the two buffers\<close> and out :: "'a statefun" \<comment> \<open>output channel for double FIFO\<close> and q1 :: "'a list statefun" \<comment> \<open>inner queue of first FIFO\<close> and q2 :: "'a list statefun" \<comment> \<open>inner queue of second FIFO\<close> and vars defines "vars \<equiv> LIFT (inp,mid,out,q1,q2)" assumes DB_base: "basevars vars" begin text \<open> We need to specify the behavior of two FIFO buffers in a row. Intuitively, that specification is just the conjunction of two buffer specifications, where the first buffer has input channel \<open>inp\<close> and output channel \<open>mid\<close> whereas the second one receives from \<open>mid\<close> and outputs on \<open>out\<close>. However, this conjunction allows a simultaneous enqueue action of the first buffer and dequeue of the second one. It would not implement the previous buffer specification, which excludes such simultaneous enqueueing and dequeueing (it is written in ``interleaving style''). We could relax the specification of the FIFO buffer above, which is esthetically pleasant, but non-interleaving specifications are usually hard to get right and to understand. We therefore impose an interleaving constraint on the specification of the double buffer, which requires that enqueueing and dequeueing do not happen simultaneously. \<close> definition DBSpec where "DBSpec \<equiv> TEMP ISpec inp q1 mid \<and> ISpec mid q2 out \<and> \<box>[\<not>(Enq inp q1 mid \<and> Deq mid q2 out)]_vars" text \<open> The proof rules of TLA are geared towards specifications of the form \<open>Init \<and> \<box>[Next]_vars \<and> L\<close>, and we prove that \<open>DBSpec\<close> corresponds to a specification in this form, which we now define. \<close> definition FullInit where "FullInit \<equiv> TEMP (BInit inp q1 mid \<and> BInit mid q2 out)" definition FullNxt where "FullNxt \<equiv> TEMP (Enq inp q1 mid \<and> Unchanged (q2,out) \<or> Deq inp q1 mid \<and> Enq mid q2 out \<or> Deq mid q2 out \<and> Unchanged (inp,q1))" definition FullSpec where "FullSpec \<equiv> TEMP FullInit \<and> \<box>[FullNxt]_vars \<and> WF(Deq inp q1 mid)_vars \<and> WF(Deq mid q2 out)_vars" text \<open> The concatenation of the two queues will serve as the refinement mapping. \<close> definition qc :: "'a list statefun" where "qc \<equiv> LIFT (q2 @ q1)" lemmas db_defs = buffer_defs DBSpec_def FullInit_def FullNxt_def FullSpec_def qc_def vars_def lemma DBSpec_stutinv: "STUTINV DBSpec" unfolding db_defs by (simp add: bothstutinvs livestutinv) lemma FullSpec_stutinv: "STUTINV FullSpec" unfolding db_defs by (simp add: bothstutinvs livestutinv) text \<open> We prove that \<open>DBSpec\<close> implies \<open>FullSpec\<close>. (The converse implication also holds but is not needed for our implementation proof.) \<close> text \<open> The following lemma is somewhat more bureaucratic than we'd like it to be. It shows that the conjunction of the next-state relations, together with the invariant for the first queue, implies the full next-state relation of the combined queues. 
\<close> lemma DBNxt_then_FullNxt: "\<turnstile> \<box>BInv inp q1 mid \<and> \<box>[Nxt inp q1 mid]_(inp,q1,mid) \<and> \<box>[Nxt mid q2 out]_(mid,q2,out) \<and> \<box>[\<not>(Enq inp q1 mid \<and> Deq mid q2 out)]_vars \<longrightarrow> \<box>[FullNxt]_vars" (is "\<turnstile> \<box>?inv \<and> ?nxts \<longrightarrow> \<box>[FullNxt]_vars") proof - have "\<turnstile> \<box>[Nxt inp q1 mid]_(inp,q1,mid) \<and> \<box>[Nxt mid q2 out]_(mid,q2,out) \<longrightarrow> \<box>[ [Nxt inp q1 mid]_(inp,q1,mid) \<and> [Nxt mid q2 out]_(mid,q2,out)]_((inp,q1,mid),(mid,q2,out))" (is "\<turnstile> ?tmp \<longrightarrow> \<box>[?b1b2]_?vs") by (auto simp: M12[int_rewrite]) moreover have "\<turnstile> \<box>[?b1b2]_?vs \<longrightarrow> \<box>[?b1b2]_vars" by (rule R1, auto simp: vars_def tla_defs) ultimately have 1: "\<turnstile> \<box>[Nxt inp q1 mid]_(inp,q1,mid) \<and> \<box>[Nxt mid q2 out]_(mid,q2,out) \<longrightarrow> \<box>[ [Nxt inp q1 mid]_(inp,q1,mid) \<and> [Nxt mid q2 out]_(mid,q2,out) ]_vars" by force have 2: "\<turnstile> \<box>[?b1b2]_vars \<and> \<box>[\<not>(Enq inp q1 mid \<and> Deq mid q2 out)]_vars \<longrightarrow> \<box>[?b1b2 \<and> \<not>(Enq inp q1 mid \<and> Deq mid q2 out)]_vars" (is "\<turnstile> ?tmp2 \<longrightarrow> \<box>[?mid]_vars") by (simp add: M8[int_rewrite]) have "\<turnstile> ?inv \<longrightarrow> #True" by auto moreover have "|~ ?inv \<and> \<circle>?inv \<and> [?mid]_vars \<longrightarrow> [FullNxt]_vars" proof - have "|~ ?inv \<and> ?mid \<longrightarrow> [FullNxt]_vars" proof - have A: "|~ Nxt inp q1 mid \<longrightarrow> [Nxt mid q2 out]_(mid,q2,out) \<longrightarrow> \<not>(Enq inp q1 mid \<and> Deq mid q2 out) \<longrightarrow> ?inv \<longrightarrow> FullNxt" proof - have enq: "|~ Enq inp q1 mid \<and> [Nxt mid q2 out]_(mid,q2,out) \<and> \<not>(Deq mid q2 out) \<longrightarrow> Unchanged (q2,out)" by (auto simp: db_defs tla_defs) have deq1: "|~ Deq inp q1 mid \<longrightarrow> ?inv \<longrightarrow> mid$ \<noteq> $mid" by (auto simp: Deq_def BInv_def) have deq2: "|~ Deq mid q2 out \<longrightarrow> mid$ = $mid" by (auto simp: Deq_def) have deq: "|~ Deq inp q1 mid \<and> [Nxt mid q2 out]_(mid,q2,out) \<and> ?inv \<longrightarrow> Enq mid q2 out" by (force simp: Nxt_def tla_defs dest: deq1[unlift_rule] deq2[unlift_rule]) with enq show ?thesis by (force simp: Nxt_def FullNxt_def) qed have B: "|~ Nxt mid q2 out \<longrightarrow> Unchanged (inp,q1,mid) \<longrightarrow> FullNxt" by (auto simp: db_defs tla_defs) have C: "\<turnstile> Unchanged (inp,q1,mid) \<longrightarrow> Unchanged (mid,q2,out) \<longrightarrow> Unchanged vars" by (auto simp: vars_def tla_defs) show ?thesis by (force simp: actrans_def dest: A[unlift_rule] B[unlift_rule] C[unlift_rule]) qed thus ?thesis by (auto simp: tla_defs) qed ultimately have "\<turnstile> \<box>?inv \<and> \<box>[?mid]_vars \<longrightarrow> \<box>#True \<and> \<box>[FullNxt]_vars" by (rule TLA2) with 1 2 show ?thesis by force qed text \<open> It is now easy to show that \<open>DBSpec\<close> refines \<open>FullSpec\<close>. 
\<close> theorem DBSpec_impl_FullSpec: "\<turnstile> DBSpec \<longrightarrow> FullSpec" proof - have 1: "\<turnstile> DBSpec \<longrightarrow> FullInit" by (auto simp: DBSpec_def FullInit_def ISpec_def) have 2: "\<turnstile> DBSpec \<longrightarrow> \<box>[FullNxt]_vars" proof - have "\<turnstile> DBSpec \<longrightarrow> \<box>(BInv inp q1 mid)" by (auto simp: DBSpec_def intro: ISpec_BInv[unlift_rule]) moreover have "\<turnstile> DBSpec \<and> \<box>(BInv inp q1 mid) \<longrightarrow> \<box>[FullNxt]_vars" by (auto simp: DBSpec_def ISpec_def intro: DBNxt_then_FullNxt[unlift_rule]) ultimately show ?thesis by force qed have 3: "\<turnstile> DBSpec \<longrightarrow> WF(Deq inp q1 mid)_vars" proof - have 31: "\<turnstile> Unchanged vars \<longrightarrow> Unchanged q1" by (auto simp: vars_def tla_defs) have 32: "\<turnstile> Unchanged (inp,q1,mid) \<longrightarrow> Unchanged q1" by (auto simp: tla_defs) have deq: "|~ \<langle>Deq inp q1 mid\<rangle>_vars = \<langle>Deq inp q1 mid\<rangle>_(inp,q1,mid)" by (simp add: Deq_visible[OF 31, int_rewrite] Deq_visible[OF 32, int_rewrite]) show ?thesis by (auto simp: DBSpec_def ISpec_def WeakF_def deq[int_rewrite] deq[THEN AA26,int_rewrite]) qed have 4: "\<turnstile> DBSpec \<longrightarrow> WF(Deq mid q2 out)_vars" proof - have 41: "\<turnstile> Unchanged vars \<longrightarrow> Unchanged q2" by (auto simp: vars_def tla_defs) have 42: "\<turnstile> Unchanged (mid,q2,out) \<longrightarrow> Unchanged q2" by (auto simp: tla_defs) have deq: "|~ \<langle>Deq mid q2 out\<rangle>_vars = \<langle>Deq mid q2 out\<rangle>_(mid,q2,out)" by (simp add: Deq_visible[OF 41, int_rewrite] Deq_visible[OF 42, int_rewrite]) show ?thesis by (auto simp: DBSpec_def ISpec_def WeakF_def deq[int_rewrite] deq[THEN AA26,int_rewrite]) qed show ?thesis by (auto simp: FullSpec_def elim: 1[unlift_rule] 2[unlift_rule] 3[unlift_rule] 4[unlift_rule]) qed text \<open> We now prove that two FIFO buffers in a row (as specified by formula \<open>FullSpec\<close>) implement a FIFO buffer whose internal queue is the concatenation of the two buffers. We start by proving step simulation. \<close> lemma FullInit: "\<turnstile> FullInit \<longrightarrow> BInit inp qc out" by (auto simp: db_defs tla_defs) lemma Full_step_simulation: "|~ [FullNxt]_vars \<longrightarrow> [Nxt inp qc out]_(inp,qc,out)" by (auto simp: db_defs tla_defs) text \<open> The liveness condition requires that the combined buffer eventually performs a \<open>Deq\<close> action on the output channel if it contains some element. The idea is to use the fairness hypothesis for the first buffer to prove that in that case, eventually the queue of the second buffer will be non-empty, and that it must therefore eventually dequeue some element. The first step is to establish the enabledness conditions for the two \<open>Deq\<close> actions of the implementation. 
\<close> lemma Deq1_enabled: "\<turnstile> Enabled \<langle>Deq inp q1 mid\<rangle>_vars = ($q1 \<noteq> #[])" proof - have 1: "|~ \<langle>Deq inp q1 mid\<rangle>_vars = Deq inp q1 mid" by (rule Deq_visible, auto simp: vars_def tla_defs) have "\<turnstile> Enabled (Deq inp q1 mid) = ($q1 \<noteq> #[])" by (force simp: Deq_def tla_defs vars_def intro: base_enabled[OF DB_base] elim!: enabledE) thus ?thesis by (simp add: 1[int_rewrite]) qed lemma Deq2_enabled: "\<turnstile> Enabled \<langle>Deq mid q2 out\<rangle>_vars = ($q2 \<noteq> #[])" proof - have 1: "|~ \<langle>Deq mid q2 out\<rangle>_vars = Deq mid q2 out" by (rule Deq_visible, auto simp: vars_def tla_defs) have "\<turnstile> Enabled (Deq mid q2 out) = ($q2 \<noteq> #[])" by (force simp: Deq_def tla_defs vars_def intro: base_enabled[OF DB_base] elim!: enabledE) thus ?thesis by (simp add: 1[int_rewrite]) qed text \<open> We now use rule \<open>WF2\<close> to prove that the combined buffer (behaving according to specification \<open>FullSpec\<close>) implements the fairness condition of the single buffer under the refinement mapping. \<close> lemma Full_fairness: "\<turnstile> \<box>[FullNxt]_vars \<and> WF(Deq mid q2 out)_vars \<and> \<box>WF(Deq inp q1 mid)_vars \<longrightarrow> WF(Deq inp qc out)_(inp,qc,out)" proof (rule WF2) \<comment> \<open>the helpful action is the @{text Deq} action of the second queue\<close> show "|~ \<langle>FullNxt \<and> Deq mid q2 out\<rangle>_vars \<longrightarrow> \<langle>Deq inp qc out\<rangle>_(inp,qc,out)" by (auto simp: db_defs tla_defs) next \<comment> \<open>the helpful condition is the second queue being non-empty\<close> show "|~ ($q2 \<noteq> #[]) \<and> \<circle>($q2 \<noteq> #[]) \<and> \<langle>FullNxt \<and> Deq mid q2 out\<rangle>_vars \<longrightarrow> Deq mid q2 out" by (auto simp: tla_defs) next show "\<turnstile> $q2 \<noteq> #[] \<and> Enabled \<langle>Deq inp qc out\<rangle>_(inp, qc, out) \<longrightarrow> Enabled \<langle>Deq mid q2 out\<rangle>_vars" unfolding Deq2_enabled[int_rewrite] by auto next txt \<open> The difficult part of the proof is to show that the helpful condition will eventually always be true provided that the combined dequeue action is eventually always enabled and that the helpful action is never executed. We prove that (1) the helpful condition persists and (2) that it must eventually become true. 
\<close> have "\<turnstile> \<box>\<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<longrightarrow> \<box>($q2 \<noteq> #[] \<longrightarrow> \<box>($q2 \<noteq> #[]))" proof (rule STL4) have "|~ $q2 \<noteq> #[] \<and> [FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<longrightarrow> \<circle>($q2 \<noteq> #[])" by (auto simp: db_defs tla_defs) from this[THEN INV1] show "\<turnstile> \<box>[FullNxt \<and> \<not> Deq mid q2 out]_vars \<longrightarrow> ($q2 \<noteq> #[] \<longrightarrow> \<box>($q2 \<noteq> #[]))" by auto qed hence 1: "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<longrightarrow> \<diamond>($q2 \<noteq> #[]) \<longrightarrow> \<diamond>\<box>($q2 \<noteq> #[])" by (force intro: E31[unlift_rule]) have 2: "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<and> WF(Deq inp q1 mid)_vars \<longrightarrow> (Enabled \<langle>Deq inp qc out\<rangle>_(inp, qc, out) \<leadsto> $q2 \<noteq> #[])" proof - have qc: "\<turnstile> ($qc \<noteq> #[]) = ($q1 \<noteq> #[] \<or> $q2 \<noteq> #[])" by (auto simp: qc_def tla_defs) have "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<and> WF(Deq inp q1 mid)_vars \<longrightarrow> ($q1 \<noteq> #[] \<leadsto> $q2 \<noteq> #[])" proof (rule WF1) show "|~ $q1 \<noteq> #[] \<and> [FullNxt \<and> \<not> Deq mid q2 out]_vars \<longrightarrow> \<circle>($q1 \<noteq> #[]) \<or> \<circle>($q2 \<noteq> #[])" by (auto simp: db_defs tla_defs) next show "|~ $q1 \<noteq> #[] \<and> \<langle>(FullNxt \<and> \<not> Deq mid q2 out) \<and> Deq inp q1 mid\<rangle>_vars \<longrightarrow> \<circle>($q2 \<noteq> #[])" by (auto simp: db_defs tla_defs) next show "\<turnstile> $q1 \<noteq> #[] \<longrightarrow> Enabled \<langle>Deq inp q1 mid\<rangle>_vars" by (simp add: Deq1_enabled[int_rewrite]) next show "|~ $q1 \<noteq> #[] \<and> Unchanged vars \<longrightarrow> \<circle>($q1 \<noteq> #[])" by (auto simp: vars_def tla_defs) qed hence "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<and> WF(Deq inp q1 mid)_vars \<longrightarrow> ($qc \<noteq> #[] \<leadsto> $q2 \<noteq> #[])" by (auto simp: qc[int_rewrite] LT17[int_rewrite] LT1[int_rewrite]) moreover have "\<turnstile> Enabled \<langle>Deq inp qc out\<rangle>_(inp, qc, out) \<leadsto> $qc \<noteq> #[]" by (rule Deq_enabledE[THEN LT3]) ultimately show ?thesis by (force elim: LT13[unlift_rule]) qed with LT6 have "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<and> WF(Deq inp q1 mid)_vars \<and> \<diamond>Enabled \<langle>Deq inp qc out\<rangle>_(inp, qc, out) \<longrightarrow> \<diamond>($q2 \<noteq> #[])" by force with 1 E16 show "\<turnstile> \<box>[FullNxt \<and> \<not>(Deq mid q2 out)]_vars \<and> WF(Deq mid q2 out)_vars \<and> \<box>WF(Deq inp q1 mid)_vars \<and> \<diamond>\<box> Enabled \<langle>Deq inp qc out\<rangle>_(inp, qc, out) \<longrightarrow> \<diamond>\<box>($q2 \<noteq> #[])" by force qed text \<open> Putting everything together, we obtain that \<open>FullSpec\<close> refines the Buffer specification under the refinement mapping. \<close> theorem FullSpec_impl_ISpec: "\<turnstile> FullSpec \<longrightarrow> ISpec inp qc out" unfolding FullSpec_def ISpec_def using FullInit Full_step_simulation[THEN M11] Full_fairness by force theorem FullSpec_impl_Spec: "\<turnstile> FullSpec \<longrightarrow> Spec inp out" unfolding Spec_def using FullSpec_impl_ISpec by (force intro: eexI[unlift_rule]) text \<open> By transitivity, two buffers in a row also implement a single buffer. 
\<close> theorem DBSpec_impl_Spec: "\<turnstile> DBSpec \<longrightarrow> Spec inp out" by (rule lift_imp_trans[OF DBSpec_impl_FullSpec FullSpec_impl_Spec]) end \<comment> \<open>locale DBuffer\<close> end
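In summary, the theorems of the DBuffer locale establish the refinement chain

\[ \mathit{DBSpec} \;\Longrightarrow\; \mathit{FullSpec} \;\Longrightarrow\; \mathit{ISpec}\;\mathit{inp}\;\mathit{qc}\;\mathit{out} \;\Longrightarrow\; \mathit{Spec}\;\mathit{inp}\;\mathit{out}, \qquad \mathit{qc} = q_2 \mathbin{@} q_1, \]

where the middle implication uses qc, the concatenation of the two internal queues, as the refinement mapping, and the final one existentially hides the internal queue.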
import combinatorics.simple_graph.coloring --OUTPUT 1 theorem two_colorable_iff_bipartite (G : Type*) [graph G] : two_colorable G ↔ bipartite G := begin split, { assume h1 : two_colorable G, have h2 : ∃ A B : set G, (∀ a : G, a ∈ A ∨ a ∈ B) ∧ (∀ a b : G, a ∈ A ∧ b ∈ B → (a,b) ∈ E G), from sorry, show bipartite G, from sorry, }, { assume h1 : bipartite G, have h2 : ∃ A B : set G, (∀ a : G, a ∈ A ∨ a ∈ B) ∧ (∀ a b : G, a ∈ A ∧ b ∈ B → (a,b) ∈ E G), from sorry, show two_colorable G, from sorry, } end --OUTPUT 2 theorem bipartite_graph_two_colorable (G : Type*) [graph G] : (∃ A B : set G, is_bipartite G A B) ↔ (∃ f : G → bool, is_valid_coloring G f) := begin have h1 : (∃ A B : set G, is_bipartite G A B) → (∃ f : G → bool, is_valid_coloring G f), from sorry, have h2 : (∃ f : G → bool, is_valid_coloring G f) → (∃ A B : set G, is_bipartite G A B), from sorry, show (∃ A B : set G, is_bipartite G A B) ↔ (∃ f : G → bool, is_valid_coloring G f), from sorry, end --OUTPUT 3 theorem bipartite_graph_two_colorable (G : Type*) [graph G] : (∃ A B : set G, is_bipartite G A B) ↔ (∃ f : G → ℕ, is_two_coloring G f) := begin split, { assume h1 : ∃ A B : set G, is_bipartite G A B, cases h1 with A h1, cases h1 with B h1, cases h1 with h1 h2, have h3 : ∀ a : G, a ∈ A ∨ a ∈ B, from sorry, have h4 : ∀ a b : G, a ∈ A ∧ b ∈ B → edge G a b, from sorry, have h5 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h6 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h7 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G b a, from sorry, have h8 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G b a, from sorry, have h9 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h10 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h11 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h12 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h13 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h14 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h15 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h16 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h17 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h18 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h19 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h20 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h21 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h22 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h23 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h24 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h25 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h26 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h27 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h28 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h29 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h30 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h31 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h32 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h33 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h34 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h35 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h36 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h37 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h38 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h39 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ 
edge G a b, from sorry, have h40 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h41 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h42 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h43 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h44 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h45 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h46 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h47 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h48 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h49 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h50 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h51 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h52 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h53 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h54 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge G a b, from sorry, have h55 : ∀ a b : G, a ∈ A ∧ b ∈ B → ¬ edge G a b, from sorry, have h56 : ∀ a b : G, a ∈ B ∧ b ∈ A → ¬ edge G a b, from sorry, have h57 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge G a b, from sorry, have h58 : ∀ a b : G, a end --Needs more than 2000 tokens! /- FEW SHOT PROMPTS TO CODEX(START) /--`theorem` Power Set is Closed under Intersection Let $S$ be a set. Let $\powerset S$ be the power set of $S$. Then: :$\forall A, B \in \powerset S: A \cap B \in \powerset S$ `proof` Let $A, B \in \powerset S$. Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$. From Intersection is Subset we have that $A \cap B \subseteq A$. It follows from Subset Relation is Transitive that $A \cap B \subseteq S$. Thus $A \cap B \in \powerset S$ and closure is proved. {{qed}} -/ theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S := begin assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S), have h1 : (A ⊆ S) ∧ (B ⊆ S), from sorry, have h2 : (A ∩ B) ⊆ A, from sorry, have h3 : (A ∩ B) ⊆ S, from sorry, show (A ∩ B) ∈ 𝒫 S, from sorry, end /--`theorem` Square of Sum :$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$ `proof` Follows from the distribution of multiplication over addition: {{begin-eqn}} {{eqn | l = \left({x + y}\right)^2 | r = \left({x + y}\right) \cdot \left({x + y}\right) }} {{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right) | c = Real Multiplication Distributes over Addition }} {{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y | c = Real Multiplication Distributes over Addition }} {{eqn | r = x^2 + 2xy + y^2 | c = }} {{end-eqn}} {{qed}} -/ theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) := begin calc (x + y)^2 = (x+y)*(x+y) : by sorry ... = x*(x+y) + y*(x+y) : by sorry ... = x*x + x*y + y*x + y*y : by sorry ... = x^2 + 2*x*y + y^2 : by sorry, end /--`theorem` Identity of Group is Unique Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$. `proof` From Group has Latin Square Property, there exists a unique $x \in G$ such that: :$a x = b$ and there exists a unique $y \in G$ such that: :$y a = b$ Setting $b = a$, this becomes: There exists a unique $x \in G$ such that: :$a x = a$ and there exists a unique $y \in G$ such that: :$y a = a$ These $x$ and $y$ are both $e$, by definition of identity element. {{qed}} -/ theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a := begin have h1 : ∀ a b : G, ∃! 
x : G, a * x = b, from sorry, have h2 : ∀ a b : G, ∃! y : G, y * a = b, from sorry, have h3 : ∀ a : G, ∃! x : G, a * x = a, from sorry, have h4 : ∀ a : G, ∃! y : G, y * a = a, from sorry, have h5 : ∀ a : G, classical.some (h3 a) = (1 : G), from sorry, have h6 : ∀ a : G, classical.some (h4 a) = (1 : G), from sorry, show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by { use (1 : G), have h7 : ∀ e : G, (∀ a : G, e * a = a ∧ a * e = a) → e = 1, from by { assume (e : G) (h7 : ∀ a : G, e * a = a ∧ a * e = a), have h8 : ∀ a : G, e = classical.some (h3 a), from sorry, have h9 : ∀ a : G, e = classical.some (h4 a), from sorry, show e = (1 : G), from sorry, }, sorry, } end /--`theorem` Bipartite Graph is two colorable Let $G$ be a graph. Then $G$ is 2-colorable if and only if $G$ is bipartite. `proof` Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite. Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ every edge has one endpoint in $V_{1}$ and the other in $V_{2}$. Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable. QED -/ theorem FEW SHOT PROMPTS TO CODEX(END)-/
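A minimal Lean sketch of the informal bipartite argument above, added for illustration only: it avoids mathlib's `simple_graph` API and instead fixes a hypothetical edge relation `edge`, spelling out bipartiteness as a covering, disjoint pair of vertex sets with every edge crossing between them; both directions of the proof are left as `sorry`, in the same style as the attempts above.

-- Illustrative sketch; `edge` and the statement below are hypothetical, not the mathlib API.
theorem two_colorable_iff_bipartite_sketch {V : Type*} (edge : V → V → Prop) :
  (∃ f : V → bool, ∀ u v : V, edge u v → f u ≠ f v) ↔
  (∃ A B : set V,
    (∀ v : V, v ∈ A ∨ v ∈ B) ∧
    (∀ v : V, ¬(v ∈ A ∧ v ∈ B)) ∧
    (∀ u v : V, edge u v → (u ∈ A ∧ v ∈ B) ∨ (u ∈ B ∧ v ∈ A))) :=
begin
  split,
  { -- a valid 2-colouring yields the colour classes A = {v | f v = tt} and B = {v | f v = ff}
    assume h,
    sorry },
  { -- a bipartition yields the colouring that sends A to tt and B to ff
    assume h,
    sorry }
end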
> module CoinductiveCalculus.CoinductiveCalculus > import Syntax.PreorderReasoning > %default total > %access public export > %auto_implicits off > %hide Stream > %hide head > %hide tail > %hide (+) > %hide plusZeroRightNeutral * Streams For a type |T|, streams of values of type |T| can be described by values of type |Stream T| with > Stream : Type -> Type The idea is that any |Stream T| has a head and a tail > head : {T : Type} -> Stream T -> T > tail : {T : Type} -> Stream T -> Stream T and that piecing together a |T| with a |Stream T| yields a |Stream T|: > cons : {T : Type} -> T -> Stream T -> Stream T The stream operations |head|, |tail| and |cons| are required to fulfil three properties: > headConsProp : {T : Type} -> (t : T) -> (s : Stream T) -> head (cons t s) = t > tailConsProp : {T : Type} -> (t : T) -> (s : Stream T) -> tail (cons t s) = s > consHeadTailProp : {T : Type} -> (s : Stream T) -> cons (head s) (tail s) = s * Differentiation and integration We want to look at differentiation and integration of functions of real variables from the point of view of streams. More specifically, we want to understand which properties differentiation and integration have to fulfill when we consider these operations as operations on streams of type |Stream R = R -> R|. Here > R : Type is meant to represent real numbers and > F : Type > F = R -> R are differentiable and integrable functions on |R|. For the time being, we represent differentiation and integration through functions > D : F -> F > S : R -> R -> F -> R with |D f| and |S a b f| representing the derivative of |f| and the integral of |f| on |[a,b]|. * Head, tail and cons for functions/streams How can we implement |head| for |Stream R = F|? We have > headF : F -> R The natural way to compute a real number from a |f : F| is to simply evaluate |f|. We require our "real numbers" to contain a zero element > zero : R and we define > headF f = f zero What about the tail of a function |f|? > tailF : F -> F The type of |tailF| is |F -> F|, just like the type of |D|. Thus, we take > tailF = D We are left with the implementation of |consF|: > consF : R -> F -> F Of course, we want |headF|, |tailF| and |consF| to fulfill the properties of stream operations. In particular, we want < consF (headF f) (tailF f) = f For |f = consF a g| and our definitions of |headF| and |tailF|, this is equivalent to < consF (headF f) (tailF f) = f < < ={ def. of f }= < < consF (headF (consF a g)) (tailF (consF a g)) = consF a g < < ={ def. of consF, tailF }= < < consF ((consF a g) zero) (D (consF a g)) = consF a g Sufficient conditions for the last equality to hold are < (consF a g) zero = a and < D (consF a g) = g The first condition could be fulfilled by defining |consF a g| to be |const a|. But the second condition requires |consF a g| has to be an anti-derivative of |g|. This suggests the definition > (+) : R -> R -> R > consF a f = \ x => a + S zero x f where we can think of |(+)| as representing the standard addition on real numbers. * Properties of |R|, |D| and |S| Which properties do |R|, |D| and |S| have to satisfy for |headF|, |tailF| and |consF| to be stream operations? 
Our definition already requires |R| to be equipped with a zero element and with a |(+)|: < R : Type < zero : R < (+) : R -> R -> R If we also require integration to fulfill > intProp1 : (a : R) -> (f : F) -> S a a f = zero and |zero| to be a > plusZeroRightNeutral : (left : R) -> left + zero = left We can derive a formal proof of |headF (consF a f) = a|: > headConsPropF : (a : R) -> (f : F) -> headF (consF a f) = a > headConsPropF a f = ( headF (consF a f) ) > ={ Refl }= > ( headF (\ x => a + S zero x f) ) > ={ Refl }= > ( a + S zero zero f ) > ={ cong (intProp1 zero f) }= > ( a + zero ) > ={ plusZeroRightNeutral a }= > ( a ) > QED Satisfying |tailF (consF a f) = f| requires some more assumptions. Consistently with the interpretation of |D f| representing the derivative of |f|, we need > derConstZero : (a : R) -> D (const a) = const zero and > derLinear1 : (f, g : F) -> D (\ x => f x + g x) = \ x => (D f) x + (D g) x Moreover |S| has to be an "anti-derivative" > intAntiDer : (f : F) -> D (\ x => S zero x f) = f We also need |zero| to be left-neutral > plusZeroLeftNeutral : (right : R) -> zero + right = right and equality on |F| to be extensional: > extEqF : (f, g : F) -> ((x : R) -> f x = g x) -> f = g With these assumptions, we can derive > lemma : (f : F) -> (x : R) -> zero + f x = f x > lemma f x = ( zero + f x ) > ={ plusZeroLeftNeutral (f x) }= > ( f x ) > QED and, finally > tailConsPropF : (a : R) -> (f : F) -> tailF (consF a f) = f > tailConsPropF a f = ( tailF (consF a f) ) > ={ Refl }= > ( tailF (\ x => a + S zero x f) ) > ={ Refl }= > ( D (\ x => a + S zero x f) ) > ={ Refl }= > ( D (\ x => (const a) x + (\ y => S zero y f) x) ) > ={ derLinear1 (const a) (\ y => S zero y f) }= > ( \ x => (D (const a)) x + (D (\ y => S zero y f)) x ) > ={ cong {f = \ h => \ x => h x + (D (\ y => S zero y f)) x } (derConstZero a) }= > ( \ x => (const zero) x + (D (\ y => S zero y f)) x ) > ={ cong {f = \ h => \ x => (const zero) x + h x} (intAntiDer f) }= > ( \ x => (const zero) x + f x ) > ={ Refl }= > ( \ x => zero + f x ) > ={ extEqF (\ x => zero + f x) (\ x => f x) (lemma f) }= > ( \ x => f x ) > ={ Refl }= > ( f ) > QED We are left with the last property: |consF (headF f) (tailF f) = f|. 
To prove this property, we need three more assumptions > (-) : R -> R -> R > plusAnyMinus : (x, y : R) -> x + (y - x) = y > derAntiInt : (f : F) -> (a : R) -> (b : R) -> S a b (D f) = f b - f a With these, one has > lemma1 : (c : R) -> (f : F) -> (x : R) -> (c + (f x - c) = f x) > lemma1 c f x = plusAnyMinus c (f x) > lemma2 : (f : F) -> (c : R) -> (a : R) -> (b : R) -> c + S a b (D f) = c + (f b - f a) > lemma2 f c a b = ( c + S a b (D f) ) > ={ cong (derAntiInt f a b) }= > ( c + (f b - f a) ) > QED > consHeadTailPropF : (f : F) -> consF (headF f) (tailF f) = f > consHeadTailPropF f = ( consF (headF f) (tailF f) ) > ={ Refl }= > ( consF (f zero) (D f) ) > ={ Refl }= > ( \ x => f zero + S zero x (D f) ) > ={ extEqF (\ x => f zero + S zero x (D f)) > (\ x => f zero + (f x - f zero)) > (lemma2 f (f zero) zero) }= > ( \ x => f zero + (f x - f zero) ) > ={ extEqF (\ x => f zero + (f x - f zero)) > (\ x => f x) > (lemma1 (f zero) f) }= > ( \ x => f x ) > ={ Refl }= > ( f ) > QED * Properties of |R|, |D| and |S| (wrap up) ( 1) R : Type ( 2) zero : R ( 3) (+) : R -> R -> R ( 4) (-) : R -> R -> R ( 5) plusAnyMinus : (x, y : R) -> x + (y - x) = y ( 6) plusZeroLeftNeutral : (right : R) -> zero + right = right ( 7) plusZeroRightNeutral : ( left : R) -> left + zero = left ( 8) derConstZero : (a : R) -> D (const a) = const zero ( 9) derLinear1 : (f, g : F) -> D (\ x => f x + g x) = \ x => (D f) x + (D g) x (10) intProp1 : (a : R) -> (f : F) -> S a a f = zero (11) intAntiDer : (f : F) -> D (\ x => S zero x f) = f (12) derAntiInt : (f : F) -> (a : R) -> (b : R) -> S a b (D f) = f b - f a (13) extEqF : (f, g : F) -> ((x : R) -> f x = g x) -> f = g Properties (1-7) follow from standard axioms on real numbers. (8-9) follow from the standard definition of derivative. (10-12) follows from ? > {- > ---}
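One way to read the remaining items: with |S a b f| interpreted as the definite integral ∫_a^b f(x) dx and |D f| as the derivative f', property (10) says that the integral over a degenerate interval vanishes, and properties (11) and (12) are the two directions of the fundamental theorem of calculus:

< S a a f               = ∫_a^a f(x) dx        = zero        -- (10)
< D (\ x => S zero x f) = d/dx ∫_0^x f(t) dt   = f           -- (11)
< S a b (D f)           = ∫_a^b f'(x) dx       = f b - f a   -- (12)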
section propositional variables P Q R : Prop ------------------------------------------------ -- Proposições de dupla negaço: ------------------------------------------------ theorem doubleneg_intro : P → ¬¬P := begin intros h h', contradiction, end theorem doubleneg_elim : ¬¬P → P := begin intro h, by_contradiction h, contradiction, end theorem doubleneg_law : ¬¬P ↔ P := begin split, exact doubleneg_elim P, exact doubleneg_intro P, end ------------------------------------------------ -- Comutatividade dos ∨,∧: ------------------------------------------------ theorem disj_comm : (P ∨ Q) → (Q ∨ P) := begin intro h, cases h, right, apply h, left, apply h, end theorem conj_comm : (P ∧ Q) → (Q ∧ P) := begin intro h, cases h with hp hq, split, apply hq, apply hp, end ------------------------------------------------ -- Proposições de interdefinabilidade dos →,∨: ------------------------------------------------ theorem impl_as_disj_converse : (¬P ∨ Q) → (P → Q) := begin intros h h', cases h, contradiction, apply h, end theorem disj_as_impl : (P ∨ Q) → (¬P → Q) := begin intros h h', cases h, contradiction, apply h, end ------------------------------------------------ -- Proposições de contraposição: ------------------------------------------------ theorem impl_as_contrapositive : (P → Q) → (¬Q → ¬P) := begin intros h h' h'', have hq : Q := h h'', contradiction, end theorem impl_as_contrapositive_converse : (¬Q → ¬P) → (P → Q) := begin intros h p, by_contradiction hboom, apply h, exact hboom, exact p, end theorem contrapositive_law : (P → Q) ↔ (¬Q → ¬P) := begin split, exact impl_as_contrapositive P Q, exact impl_as_contrapositive_converse P Q, end ------------------------------------------------ -- A irrefutabilidade do LEM: ------------------------------------------------ theorem lem_irrefutable : ¬¬(P∨¬P) := begin intro h, apply h, right, intro p, apply h, left, exact p, end ------------------------------------------------ -- A lei de Peirce ------------------------------------------------ theorem peirce_law_weak : ((P → Q) → P) → ¬¬P := begin intros h np, have pq : P → Q, intro p, contradiction, have p : P := h pq, contradiction, end ------------------------------------------------ -- Proposições de interdefinabilidade dos ∨,∧: ------------------------------------------------ theorem disj_as_negconj : P∨Q → ¬(¬P∧¬Q) := begin intros h h'', cases h'' with hnp hnq, cases h, contradiction, contradiction, end theorem conj_as_negdisj : P∧Q → ¬(¬P∨¬Q) := begin intros h h'', cases h with hp hq, cases h'', contradiction, contradiction, end ------------------------------------------------ -- As leis de De Morgan para ∨,∧: ------------------------------------------------ theorem demorgan_disj : ¬(P∨Q) → (¬P ∧ ¬Q) := begin intro h, split, intro p, have poq : P∨Q, left, exact p, exact h poq, intro q, have poq : P∨Q, right, exact q, exact h poq, end theorem demorgan_disj_converse : (¬P ∧ ¬Q) → ¬(P∨Q) := begin intros h h', cases h with np nq, cases h', contradiction, contradiction, end theorem demorgan_conj : ¬(P∧Q) → (¬Q ∨ ¬P) := begin intro h, by_cases lem : Q, right, intro p, apply h, split, exact p, exact lem, left, exact lem, end theorem demorgan_conj_converse : (¬Q ∨ ¬P) → ¬(P∧Q) := begin intros h h', cases h' with p q, cases h, contradiction, contradiction, end theorem demorgan_conj_law : ¬(P∧Q) ↔ (¬Q ∨ ¬P) := begin split, exact demorgan_conj P Q, exact demorgan_conj_converse P Q, end theorem demorgan_disj_law : ¬(P∨Q) ↔ (¬P ∧ ¬Q) := begin split, intro h, split, intro p, have poq : P∨Q, left, exact p, 
exact h poq, intro q, have poq : P∨Q, right, exact q, exact h poq, intros h h', cases h with np nq, cases h', contradiction, contradiction, end ------------------------------------------------ -- Proposições de distributividade dos ∨,∧: ------------------------------------------------ theorem distr_conj_disj : P∧(Q∨R) → (P∧Q)∨(P∧R) := begin intro h, cases h with x y, cases y, left, split, exact x, exact y, right, split, exact x, exact y, end theorem distr_conj_disj_converse : (P∧Q)∨(P∧R) → P∧(Q∨R) := begin intro h, -- Suponha -L. split, cases h, cases h with p q, exact p, cases h with p q, exact p, cases h, cases h with p q, left, exact q, cases h with p r, right, exact r, end theorem distr_disj_conj : P∨(Q∧R) → (P∨Q)∧(P∨R) := begin intro h, cases h, split, left, exact h, left, exact h, cases h with q r, split, right, exact q, right, exact r, end theorem distr_disj_conj_converse : (P∨Q)∧(P∨R) → P∨(Q∧R) := begin intro h, cases h with poq por, cases poq, left, exact poq, cases por, left, exact por, right, split, exact poq, exact por, end ------------------------------------------------ -- Currificação ------------------------------------------------ theorem curry_prop : ((P∧Q)→R) → (P→(Q→R)) := begin intros h p q, have pq : P ∧ Q, split, exact p, exact q, exact h pq, end theorem uncurry_prop : (P→(Q→R)) → ((P∧Q)→R) := begin intros h h', cases h' with p q, exact h p q, end ------------------------------------------------ -- Reflexividade da →: ------------------------------------------------ theorem impl_refl : P → P := begin intro h, exact h, end ------------------------------------------------ -- Weakening and contraction: ------------------------------------------------ theorem weaken_disj_right : P → (P∨Q) := begin intro h, left, exact h, end theorem weaken_disj_left : Q → (P∨Q) := begin intro h, right, exact h, end theorem weaken_conj_right : (P∧Q) → P := begin intro h, cases h with p q, exact p, end theorem weaken_conj_left : (P∧Q) → Q := begin intro h, cases h with p q, exact q, end theorem conj_idempot : (P∧P) ↔ P := begin split, intro h, cases h with x y, exact x, intro h, split, exact h, exact h, end theorem disj_idempot : (P∨P) ↔ P := begin split, intro h, cases h, exact h, exact h, intro h, left, exact h, end end propositional ---------------------------------------------------------------- section predicate variable U : Type variables P Q : U -> Prop ------------------------------------------------ -- As leis de De Morgan para ∃,∀: ------------------------------------------------ theorem demorgan_exists : ¬(∃x, P x) → (∀x, ¬P x) := begin intros h x p, apply h, existsi x, exact p, end theorem demorgan_exists_converse : (∀x, ¬P x) → ¬(∃x, P x) := begin intros h h', cases h' with a ha, have hna : ¬P a := h a, contradiction, end theorem demorgan_forall : ¬(∀x, P x) → (∃x, ¬P x) := begin rw contrapositive_law, intros e p, apply p, intro x, by_cases lem : P x, exact lem, exfalso, apply e, existsi x, exact lem, end theorem demorgan_forall_converse : (∃x, ¬P x) → ¬(∀x, P x) := begin intros h h', cases h with x npx, have px : P x := h' x, contradiction, end theorem demorgan_forall_law : ¬(∀x, P x) ↔ (∃x, ¬P x) := begin split, exact demorgan_forall U P, exact demorgan_forall_converse U P, end theorem demorgan_exists_law : ¬(∃x, P x) ↔ (∀x, ¬P x) := begin split, exact demorgan_exists U P, exact demorgan_exists_converse U P, end ------------------------------------------------ -- Proposições de interdefinabilidade dos ∃,∀: ------------------------------------------------ theorem 
exists_as_neg_forall : (∃x, P x) → ¬(∀x, ¬P x) := begin intros h h', cases h with x px, have npx : ¬P x := h' x, contradiction, end theorem forall_as_neg_exists : (∀x, P x) → ¬(∃x, ¬P x) := begin intros h h', cases h' with x npx, have px : P x := h x, contradiction, end theorem forall_as_neg_exists_converse : ¬(∃x, ¬P x) → (∀x, P x) := begin intros h x, by_contradiction hboom, have e : ∃ (x : U), ¬P x, existsi x, exact hboom, exact h e, end theorem exists_as_neg_forall_converse : ¬(∀x, ¬P x) → (∃x, P x) := begin rw contrapositive_law, intros e p, apply p, intros x px, apply e, existsi x, exact px, end theorem forall_as_neg_exists_law : (∀x, P x) ↔ ¬(∃x, ¬P x) := begin split, exact forall_as_neg_exists U P, exact forall_as_neg_exists_converse U P, end theorem exists_as_neg_forall_law : (∃x, P x) ↔ ¬(∀x, ¬P x) := begin split, exact exists_as_neg_forall U P, exact exists_as_neg_forall_converse U P, end ------------------------------------------------ -- Proposições de distributividade de quantificadores: ------------------------------------------------ theorem exists_conj_as_conj_exists : (∃x, P x ∧ Q x) → (∃x, P x) ∧ (∃x, Q x) := begin intro h, cases h with x y, cases y with px qx, split, existsi x, exact px, existsi x, exact qx, end theorem exists_disj_as_disj_exists : (∃x, P x ∨ Q x) → (∃x, P x) ∨ (∃x, Q x) := begin intro h, cases h with x y, cases y, left, existsi x, exact y, right, existsi x, exact y, end theorem exists_disj_as_disj_exists_converse : (∃x, P x) ∨ (∃x, Q x) → (∃x, P x ∨ Q x) := begin intro h, cases h, cases h with x y, existsi x, left, exact y, cases h with x y, existsi x, right, exact y, end theorem forall_conj_as_conj_forall : (∀x, P x ∧ Q x) → (∀x, P x) ∧ (∀x, Q x) := begin intro h, split, intro x, have pxqx : P x ∧ Q x := h x, cases pxqx with px qx, exact px, intro x, have pxqx : P x ∧ Q x := h x, cases pxqx with px qx, exact qx, end theorem forall_conj_as_conj_forall_converse : (∀x, P x) ∧ (∀x, Q x) → (∀x, P x ∧ Q x) := begin intros h x, cases h, split, have px : P x := h_left x, exact px, have qx : Q x := h_right x, exact qx, end theorem forall_disj_as_disj_forall_converse : (∀x, P x) ∨ (∀x, Q x) → (∀x, P x ∨ Q x) := begin intros h x, cases h, have px : P x := h x, left, exact px, have qx : Q x := h x, right, exact qx, end /- NOT THEOREMS -------------------------------- theorem forall_disj_as_disj_forall : (∀x, P x ∨ Q x) → (∀x, P x) ∨ (∀x, Q x) := begin end theorem exists_conj_as_conj_exists_converse : (∃x, P x) ∧ (∃x, Q x) → (∃x, P x ∧ Q x) := begin end ---------------------------------------------- -/ end predicate
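For intuition on why the two commented statements above are indeed not theorems, here is a counterexample sketch added for illustration (proofs left as sorry, in the style of the exercises): over U := bool with P x := (x = tt) and Q x := (x = ff), every x satisfies P x ∨ Q x although neither ∀ x, P x nor ∀ x, Q x holds, and both ∃ x, P x and ∃ x, Q x hold although no x satisfies P x ∧ Q x.

-- counterexample to forall_disj_as_disj_forall, instantiated at U := bool
example : ¬ ((∀ x : bool, x = tt ∨ x = ff) → (∀ x : bool, x = tt) ∨ (∀ x : bool, x = ff)) :=
sorry

-- counterexample to exists_conj_as_conj_exists_converse, instantiated at U := bool
example : ¬ (((∃ x : bool, x = tt) ∧ (∃ x : bool, x = ff)) → (∃ x : bool, x = tt ∧ x = ff)) :=
sorry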
function this = coregister(this) % Affine coregistraton of given images to a stationary image (geometry only) % % Y = MrSeries() % Y.coregister(inputs) % % This is a method of class MrSeries. % % IN % parameters.coregister -> % .nameStationaryImage (e.g. 'mean') % .nameTransformedImage (e.g. 'anatomy') % .nameEquallyTransformedImages (e.g.'tissueProbabilityMap*') % OUT % % EXAMPLE % coregister % % See also MrSeries % Author: Saskia Klein & Lars Kasper % Created: 2014-07-28 % Copyright (C) 2014 Institute for Biomedical Engineering % University of Zurich and ETH Zurich % % This file is part of the TAPAS UniQC Toolbox, which is released % under the terms of the GNU General Public Licence (GPL), version 3. % You can redistribute it and/or modify it under the terms of the GPL % (either version 3 or, at your option, any later version). % For further details, see the file COPYING or % <http://www.gnu.org/licenses/>. nameStationaryImage = this.parameters.coregister.nameStationaryImage; nameTransformedImage = this.parameters.coregister.nameTransformedImage; nameEquallyTransformedImages = ... this.parameters.coregister.nameEquallyTransformedImages; stationaryImage = this.find('MrImage', 'name', ['^' nameStationaryImage '*']); stationaryImage = stationaryImage{1}; transformedImage = this.find('MrImage', 'name', ['^' nameTransformedImage '*']); transformedImage = transformedImage{1}; equallyTransformedImages = this.find('MrImage', 'name',... ['^' nameEquallyTransformedImages '*']); % TODO: if transformed image has more than one volume, make sure to apply % to all volumes! % set paths of all images correctly this.init_processing_step('coregister', transformedImage, ... equallyTransformedImages); [~, affineCoregistrationGeometry] = transformedImage.coregister_to(... stationaryImage, 'applyTransformation', 'geometry'); this.parameters.coregister.affineCoregistrationGeometry = ... affineCoregistrationGeometry; % now apply geometry Update to all other listed images that shall be % transformed % inverse transformation used, since coregister_to gives mapping from % stationary to transformed image nImages = numel(equallyTransformedImages); for iImage = 1:nImages equallyTransformedImages{iImage}.affineTransformation.apply_inverse_transformation(... affineCoregistrationGeometry); equallyTransformedImages{iImage}.save; end this.finish_processing_step('coregister', transformedImage, ... equallyTransformedImages);
import Pluto: Pluto, without_pluto_file_extension, generate_html, @asynclog using Base64 using SHA using FromFile @from "./MoreAnalysis.jl" import bound_variable_connections_graph @from "./Export.jl" import try_get_exact_pluto_version, try_fromcache, try_tocache @from "./Types.jl" import NotebookSession, RunningNotebook, FinishedNotebook @from "./Configuration.jl" import PlutoDeploySettings @from "./FileHelpers.jl" import find_notebook_files_recursive myhash = base64encode ∘ sha256 function path_hash(path) myhash(read(path)) end showall(xs) = Text(join(string.(xs), "\n")) ### # Shutdown function process( s::NotebookSession{String,Nothing,<:Any}; server_session::Pluto.ServerSession, settings::PlutoDeploySettings, output_dir::AbstractString, start_dir::AbstractString, progress, )::NotebookSession if s.run isa RunningNotebook Pluto.SessionActions.shutdown(server_session, s.run.notebook) end try remove_static_export(s.path; settings, output_dir) catch e @warn "Failed to remove static export files" s.path exception = (e, catch_backtrace()) end @info "### ✓ $(progress) Shutdown complete" s.path NotebookSession(; path=s.path, current_hash=nothing, desired_hash=nothing, run=nothing) end ### # Launch function process( s::NotebookSession{Nothing,String,<:Any}; server_session::Pluto.ServerSession, settings::PlutoDeploySettings, output_dir::AbstractString, start_dir::AbstractString, progress, )::NotebookSession path = s.path abs_path = joinpath(start_dir, path) @info "###### ◐ $(progress) Launching..." s.path jl_contents = read(abs_path, String) new_hash = myhash(jl_contents) if new_hash != s.desired_hash @warn "Notebook file does not have desired hash. This probably means that the file changed too quickly. Continuing and hoping for the best!" s.path new_hash s.desired_hash end keep_running = settings.SliderServer.enabled skip_cache = keep_running || path ∈ settings.Export.ignore_cache cached_state = skip_cache ? nothing : try_fromcache(settings.Export.cache_dir, new_hash) run = if cached_state !== nothing @info "Loaded from cache, skipping notebook run" s.path new_hash original_state = cached_state FinishedNotebook(; path, original_state) else try # open and run the notebook notebook = Pluto.SessionActions.open(server_session, abs_path; run_async=false) # get the state object original_state = Pluto.notebook_to_js(notebook) # shut down the notebook if !keep_running @info "Shutting down notebook process" s.path Pluto.SessionActions.shutdown(server_session, notebook) end try_tocache(settings.Export.cache_dir, new_hash, original_state) if keep_running bond_connections = bound_variable_connections_graph(notebook) @info "Bond connections" s.path showall(collect(bond_connections)) RunningNotebook(; path, notebook, original_state, bond_connections) else FinishedNotebook(; path, original_state) end catch e (e isa InterruptException) || rethrow(e) @error "$(progress) Failed to run notebook!" path exception = (e, catch_backtrace()) # continue nothing end end generate_static_export( path, run.original_state, jl_contents; settings, start_dir, output_dir, ) @info "### ✓ $(progress) Ready" s.path new_hash NotebookSession(; path=s.path, current_hash=new_hash, desired_hash=s.desired_hash, run=run, ) end ### # Update if needed function process( s::NotebookSession{String,String,<:Any}; server_session::Pluto.ServerSession, settings::PlutoDeploySettings, output_dir::AbstractString, start_dir::AbstractString, progress, )::NotebookSession if s.current_hash != s.desired_hash @info "Updating notebook... 
will shut down and relaunch" s.path # Simple way to update: shut down notebook and start new one if s.run isa RunningNotebook Pluto.SessionActions.shutdown(server_session, s.run.notebook) end @info "Shutdown complete" s.path result = process( NotebookSession(; path=s.path, current_hash=nothing, desired_hash=s.desired_hash, run=nothing, ); server_session, settings, output_dir, start_dir, progress, ) result else s end end ### # Leave it shut down process(s::NotebookSession{Nothing,Nothing,<:Any}; kwargs...)::NotebookSession = s should_shutdown(::NotebookSession{String,Nothing,<:Any}) = true should_shutdown(::NotebookSession) = false should_update(s::NotebookSession{String,String,<:Any}) = s.current_hash != s.desired_hash should_update(::NotebookSession) = false should_launch(::NotebookSession{Nothing,String,<:Any}) = true should_launch(::NotebookSession) = false will_process(s) = should_update(s) || should_launch(s) || should_shutdown(s) """ Core Action: Generate static export for a Pluto Notebook # Arguments: 1. slider_server_url: URL of the slider server. This will be the URL of your server, if you deploy 2. offer_binder: Flag to enable the Binder button 3. binder_url: URL of the binder link that will be invoked. Use a compatible pluto-enabled binder 4. baked_state: Whether to export pluto state within the html or in a separate file. 5. pluto_cdn_root: URL where pluto will go to find the static frontend assets """ function generate_static_export( path, original_state, jl_contents; settings, output_dir, start_dir, ) pluto_version = try_get_exact_pluto_version() export_jl_path = let relative_to_notebooks_dir = path joinpath(output_dir, relative_to_notebooks_dir) end export_html_path = let relative_to_notebooks_dir = without_pluto_file_extension(path) * ".html" joinpath(output_dir, relative_to_notebooks_dir) end export_statefile_path = let relative_to_notebooks_dir = without_pluto_file_extension(path) * ".plutostate" joinpath(output_dir, relative_to_notebooks_dir) end mkpath(dirname(export_jl_path)) mkpath(dirname(export_html_path)) mkpath(dirname(export_statefile_path)) slider_server_running_somewhere = settings.Export.slider_server_url !== nothing || (settings.SliderServer.serve_static_export_folder && settings.SliderServer.enabled) notebookfile_js = if settings.Export.offer_binder || slider_server_running_somewhere if settings.Export.baked_notebookfile "\"data:text/julia;charset=utf-8;base64,$(base64encode(jl_contents))\"" else repr(basename(export_jl_path)) end else "undefined" end slider_server_url_js = if slider_server_running_somewhere abs_path = joinpath(start_dir, path) url_of_root = relpath(start_dir, dirname(abs_path)) # e.g. "." or "../../.." 
repr(something(settings.Export.slider_server_url, url_of_root)) else "undefined" end binder_url_js = if settings.Export.offer_binder repr(something(settings.Export.binder_url, Pluto.default_binder_url)) else "undefined" end statefile_js = if !settings.Export.baked_state open(export_statefile_path, "w") do io Pluto.pack(io, original_state) end repr(basename(export_statefile_path)) else statefile64 = base64encode() do io Pluto.pack(io, original_state) end "\"data:;base64,$(statefile64)\"" end html_contents = generate_html(; pluto_cdn_root=settings.Export.pluto_cdn_root, version=pluto_version, notebookfile_js, statefile_js, slider_server_url_js, binder_url_js, disable_ui=settings.Export.disable_ui, ) write(export_html_path, html_contents) if (settings.Export.offer_binder || settings.Export.slider_server_url !== nothing) && !settings.Export.baked_notebookfile write(export_jl_path, jl_contents) end @debug "Written to $(export_html_path)" end tryrm(x) = isfile(x) && rm(x) function remove_static_export(path; settings, output_dir) export_jl_path = let relative_to_notebooks_dir = path joinpath(output_dir, relative_to_notebooks_dir) end export_html_path = let relative_to_notebooks_dir = without_pluto_file_extension(path) * ".html" joinpath(output_dir, relative_to_notebooks_dir) end export_statefile_path = let relative_to_notebooks_dir = without_pluto_file_extension(path) * ".plutostate" joinpath(output_dir, relative_to_notebooks_dir) end if !settings.Export.baked_state tryrm(export_statefile_path) end tryrm(export_html_path) if (settings.Export.offer_binder || settings.Export.slider_server_url !== nothing) && !settings.Export.baked_notebookfile tryrm(export_jl_path) end end
lemma (in finite_measure) measure_real_sum_image_fn:
  assumes "e \<in> sets M"
  assumes "\<And> x. x \<in> s \<Longrightarrow> e \<inter> f x \<in> sets M"
  assumes "finite s"
  assumes disjoint: "\<And> x y. \<lbrakk>x \<in> s ; y \<in> s ; x \<noteq> y\<rbrakk> \<Longrightarrow> f x \<inter> f y = {}"
  assumes upper: "space M \<subseteq> (\<Union>i \<in> s. f i)"
  shows "measure M e = (\<Sum> x \<in> s. measure M (e \<inter> f x))"
/*! @file Forward declares `boost::hana::Map`. @copyright Louis Dionne 2015 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_HANA_FWD_MAP_HPP #define BOOST_HANA_FWD_MAP_HPP #include <boost/hana/core/operators.hpp> #include <boost/hana/fwd/core/make.hpp> namespace boost { namespace hana { //! @ingroup group-datatypes //! A basic associative array requiring unique and `Comparable` keys. //! //! The order of the elements of the map is unspecified. Also, all the //! keys must be comparable with each other and that comparison must //! yield a compile-time `Logical`. //! //! //! Modeled concepts //! ---------------- //! 1. `Comparable` (operators provided)\n //! Two maps are equal iff all their keys are equal and are associated //! to equal values. //! @snippet example/map.cpp comparable //! //! 2. `Searchable`\n //! A map can be searched by its keys with a predicate yielding a //! compile-time `Logical`. //! @snippet example/map.cpp searchable //! //! //! Provided conversions //! -------------------- //! 1. From any `Record`\n //! Converting a `Record` `R` to a `Map` is equivalent to converting its //! `members<R>()` to a `Map`, except the values are replaced by the actual //! members of the object instead of accessors. //! //! 2. From any `Foldable`\n //! Converts a `Foldable` of `Product`s to a `Map`. //! Note that the foldable structure must not contain duplicate keys. //! @todo //! We should allow duplicate keys, with a documented policy (e.g. we //! keep the last one). //! //! 3. To any `Sequence`\n //! A `Map` can be converted to a Sequence of Products. struct Map { }; template <typename Storage, typename = operators::adl> struct _map { Storage storage; struct hana { using datatype = Map; }; }; #ifdef BOOST_HANA_DOXYGEN_INVOKED //! Create a `Map` with the given key/value associations. //! @relates Map //! //! Given zero `Product`s or more representing key/value associations, //! `make<Map>` returns a Map associating these keys to these values. //! All the keys must be unique. //! //! //! Example //! ------- //! @snippet example/map.cpp make<Map> template <> constexpr auto make<Map> = [](auto&& ...pairs) { return unspecified-type{forwarded(pairs)...}; }; #endif //! Alias to `make<Map>`; provided for convenience. //! @relates Map //! //! //! Example //! ------- //! @snippet example/map.cpp make_map constexpr auto make_map = make<Map>; //! Returns a Sequence of the keys of the map, in unspecified order. //! @relates Map #ifdef BOOST_HANA_DOXYGEN_INVOKED constexpr auto keys = [](auto&& map) -> decltype(auto) { return unspecified-type; }; #else struct _keys { template <typename Map> constexpr decltype(auto) operator()(Map&& map) const; }; constexpr _keys keys{}; #endif //! Returns a Sequence of the values of the map, in unspecified order. //! @relates Map #ifdef BOOST_HANA_DOXYGEN_INVOKED constexpr auto values = [](auto&& map) -> decltype(auto) { return unspecified-type; }; #else struct _values { template <typename Map> constexpr decltype(auto) operator()(Map&& map) const; }; constexpr _values values{}; #endif }} // end namespace boost::hana #endif // !BOOST_HANA_FWD_MAP_HPP
function inside = p05_inside ( m, n, point )

%*****************************************************************************80
%
%% P05_INSIDE reports if a point is inside the region in problem 05.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 12 April 2009
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer M, the spatial dimension.
%
% Input, integer N, the number of points.
%
% Input, real POINT(M,N), the coordinates of the points.
%
% Output, logical INSIDE(N), is TRUE if the point is in the region.
%
  center1 = [ 0.0, 0.0 ];
  center2 = [ -0.4, 0.0 ];
  r1 = 1.00;
  r2 = 0.55;

  inside(1:n) = ...
    center1(2) <= point(2,1:n) ...
    & ...
    ( point(1,1:n) - center1(1) ).^2 ...
    + ( point(2,1:n) - center1(2) ).^2 <= r1 * r1 ...
    & ...
    r2 * r2 <= ( point(1,1:n) - center2(1) ).^2 ...
    + ( point(2,1:n) - center2(2) ).^2;

  return
end
(* Title: HOL/MicroJava/J/WellType.thy Author: David von Oheimb Copyright 1999 Technische Universitaet Muenchen *) section {* Well-typedness Constraints *} theory WellType imports Term WellForm begin text {* the formulation of well-typedness of method calls given below (as well as the Java Specification 1.0) is a little too restrictive: Is does not allow methods of class Object to be called upon references of interface type. \begin{description} \item[simplifications:]\ \\ \begin{itemize} \item the type rules include all static checks on expressions and statements, e.g.\ definedness of names (of parameters, locals, fields, methods) \end{itemize} \end{description} *} text "local variables, including method parameters and This:" type_synonym lenv = "vname \<rightharpoonup> ty" type_synonym 'c env = "'c prog \<times> lenv" abbreviation (input) prg :: "'c env => 'c prog" where "prg == fst" abbreviation (input) localT :: "'c env => (vname \<rightharpoonup> ty)" where "localT == snd" consts more_spec :: "'c prog => (ty \<times> 'x) \<times> ty list => (ty \<times> 'x) \<times> ty list => bool" appl_methds :: "'c prog => cname => sig => ((ty \<times> ty) \<times> ty list) set" max_spec :: "'c prog => cname => sig => ((ty \<times> ty) \<times> ty list) set" defs more_spec_def: "more_spec G == \<lambda>((d,h),pTs). \<lambda>((d',h'),pTs'). G\<turnstile>d\<preceq>d' \<and> list_all2 (\<lambda>T T'. G\<turnstile>T\<preceq>T') pTs pTs'" -- "applicable methods, cf. 15.11.2.1" appl_methds_def: "appl_methds G C == \<lambda>(mn, pTs). {((Class md,rT),pTs') |md rT mb pTs'. method (G,C) (mn, pTs') = Some (md,rT,mb) \<and> list_all2 (\<lambda>T T'. G\<turnstile>T\<preceq>T') pTs pTs'}" -- "maximally specific methods, cf. 15.11.2.2" max_spec_def: "max_spec G C sig == {m. m \<in>appl_methds G C sig \<and> (\<forall>m'\<in>appl_methds G C sig. more_spec G m' m --> m' = m)}" lemma max_spec2appl_meths: "x \<in> max_spec G C sig ==> x \<in> appl_methds G C sig" apply (unfold max_spec_def) apply (fast) done lemma appl_methsD: "((md,rT),pTs')\<in>appl_methds G C (mn, pTs) ==> \<exists>D b. md = Class D \<and> method (G,C) (mn, pTs') = Some (D,rT,b) \<and> list_all2 (\<lambda>T T'. G\<turnstile>T\<preceq>T') pTs pTs'" apply (unfold appl_methds_def) apply (fast) done lemmas max_spec2mheads = insertI1 [THEN [2] equalityD2 [THEN subsetD], THEN max_spec2appl_meths, THEN appl_methsD] primrec typeof :: "(loc => ty option) => val => ty option" where "typeof dt Unit = Some (PrimT Void)" | "typeof dt Null = Some NT" | "typeof dt (Bool b) = Some (PrimT Boolean)" | "typeof dt (Intg i) = Some (PrimT Integer)" | "typeof dt (Addr a) = dt a" lemma is_type_typeof [rule_format (no_asm), simp]: "(\<forall>a. v \<noteq> Addr a) --> (\<exists>T. typeof t v = Some T \<and> is_type G T)" apply (rule val.induct) apply auto done lemma typeof_empty_is_type [rule_format (no_asm)]: "typeof (\<lambda>a. None) v = Some T \<longrightarrow> is_type G T" apply (rule val.induct) apply auto done lemma typeof_default_val: "\<exists>T. (typeof dt (default_val ty) = Some T) \<and> G\<turnstile> T \<preceq> ty" apply (case_tac ty) apply (rename_tac prim_ty, case_tac prim_ty) apply auto done type_synonym java_mb = "vname list \<times> (vname \<times> ty) list \<times> stmt \<times> expr" -- "method body with parameter names, local variables, block, result expression." 
-- "local variables might include This, which is hidden anyway" inductive ty_expr :: "'c env => expr => ty => bool" ("_ \<turnstile> _ :: _" [51, 51, 51] 50) and ty_exprs :: "'c env => expr list => ty list => bool" ("_ \<turnstile> _ [::] _" [51, 51, 51] 50) and wt_stmt :: "'c env => stmt => bool" ("_ \<turnstile> _ \<surd>" [51, 51] 50) where NewC: "[| is_class (prg E) C |] ==> E\<turnstile>NewC C::Class C" -- "cf. 15.8" -- "cf. 15.15" | Cast: "[| E\<turnstile>e::C; is_class (prg E) D; prg E\<turnstile>C\<preceq>? Class D |] ==> E\<turnstile>Cast D e:: Class D" -- "cf. 15.7.1" | Lit: "[| typeof (\<lambda>v. None) x = Some T |] ==> E\<turnstile>Lit x::T" -- "cf. 15.13.1" | LAcc: "[| localT E v = Some T; is_type (prg E) T |] ==> E\<turnstile>LAcc v::T" | BinOp:"[| E\<turnstile>e1::T; E\<turnstile>e2::T; if bop = Eq then T' = PrimT Boolean else T' = T \<and> T = PrimT Integer|] ==> E\<turnstile>BinOp bop e1 e2::T'" -- "cf. 15.25, 15.25.1" | LAss: "[| v ~= This; E\<turnstile>LAcc v::T; E\<turnstile>e::T'; prg E\<turnstile>T'\<preceq>T |] ==> E\<turnstile>v::=e::T'" -- "cf. 15.10.1" | FAcc: "[| E\<turnstile>a::Class C; field (prg E,C) fn = Some (fd,fT) |] ==> E\<turnstile>{fd}a..fn::fT" -- "cf. 15.25, 15.25.1" | FAss: "[| E\<turnstile>{fd}a..fn::T; E\<turnstile>v ::T'; prg E\<turnstile>T'\<preceq>T |] ==> E\<turnstile>{fd}a..fn:=v::T'" -- "cf. 15.11.1, 15.11.2, 15.11.3" | Call: "[| E\<turnstile>a::Class C; E\<turnstile>ps[::]pTs; max_spec (prg E) C (mn, pTs) = {((md,rT),pTs')} |] ==> E\<turnstile>{C}a..mn({pTs'}ps)::rT" -- "well-typed expression lists" -- "cf. 15.11.???" | Nil: "E\<turnstile>[][::][]" -- "cf. 15.11.???" | Cons:"[| E\<turnstile>e::T; E\<turnstile>es[::]Ts |] ==> E\<turnstile>e#es[::]T#Ts" -- "well-typed statements" | Skip:"E\<turnstile>Skip\<surd>" | Expr:"[| E\<turnstile>e::T |] ==> E\<turnstile>Expr e\<surd>" | Comp:"[| E\<turnstile>s1\<surd>; E\<turnstile>s2\<surd> |] ==> E\<turnstile>s1;; s2\<surd>" -- "cf. 14.8" | Cond:"[| E\<turnstile>e::PrimT Boolean; E\<turnstile>s1\<surd>; E\<turnstile>s2\<surd> |] ==> E\<turnstile>If(e) s1 Else s2\<surd>" -- "cf. 14.10" | Loop:"[| E\<turnstile>e::PrimT Boolean; E\<turnstile>s\<surd> |] ==> E\<turnstile>While(e) s\<surd>" definition wf_java_mdecl :: "'c prog => cname => java_mb mdecl => bool" where "wf_java_mdecl G C == \<lambda>((mn,pTs),rT,(pns,lvars,blk,res)). length pTs = length pns \<and> distinct pns \<and> unique lvars \<and> This \<notin> set pns \<and> This \<notin> set (map fst lvars) \<and> (\<forall>pn\<in>set pns. map_of lvars pn = None) \<and> (\<forall>(vn,T)\<in>set lvars. is_type G T) & (let E = (G,map_of lvars(pns[\<mapsto>]pTs)(This\<mapsto>Class C)) in E\<turnstile>blk\<surd> \<and> (\<exists>T. 
E\<turnstile>res::T \<and> G\<turnstile>T\<preceq>rT))" abbreviation "wf_java_prog == wf_prog wf_java_mdecl" lemma wf_java_prog_wf_java_mdecl: "\<lbrakk> wf_java_prog G; (C, D, fds, mths) \<in> set G; jmdcl \<in> set mths \<rbrakk> \<Longrightarrow> wf_java_mdecl G C jmdcl" apply (simp only: wf_prog_def) apply (erule conjE)+ apply (drule bspec, assumption) apply (simp add: wf_cdecl_mdecl_def split_beta) done lemma wt_is_type: "(E\<turnstile>e::T \<longrightarrow> ws_prog (prg E) \<longrightarrow> is_type (prg E) T) \<and> (E\<turnstile>es[::]Ts \<longrightarrow> ws_prog (prg E) \<longrightarrow> Ball (set Ts) (is_type (prg E))) \<and> (E\<turnstile>c \<surd> \<longrightarrow> True)" apply (rule ty_expr_ty_exprs_wt_stmt.induct) apply auto apply ( erule typeof_empty_is_type) apply ( simp split add: split_if_asm) apply ( drule field_fields) apply ( drule (1) fields_is_type) apply ( simp (no_asm_simp)) apply (assumption) apply (auto dest!: max_spec2mheads method_wf_mhead is_type_rTI simp add: wf_mdecl_def) done lemmas ty_expr_is_type = wt_is_type [THEN conjunct1,THEN mp, rule_format] lemma expr_class_is_class: " \<lbrakk>ws_prog (prg E); E \<turnstile> e :: Class C\<rbrakk> \<Longrightarrow> is_class (prg E) C" by (frule ty_expr_is_type, assumption, simp) end
Require Import Coq.Relations.Relations. Require Export CatSem.PCF_o_c.RPCF_rep. Set Implicit Arguments. Unset Strict Implicit. Unset Automatic Introduction. Notation "^ f" := (lift (M:= opt_monad _) f) (at level 5). Ltac fin := simpl in *; intros; autorewrite with fin; auto with fin. Hint Unfold lift : fin. Hint Extern 1 (_ = _) => f_equal : fin. Notation "V *" := (opt (T:=unit) _ V) (at level 5). Definition TT := ITYPE unit. Lemma l_eq (V W : TT) (f g : forall t, V t -> W t) r: (forall t v, f t v = g t v) -> (forall t (v : opt r V t), lift (M:=opt_monad r) f t v = ^g t v). Proof. intros. destruct v; unfold lift; simpl; auto. rewrite H. auto. Qed. Section ULC_syntax. Inductive ULC (V : TT) : TT := | Var : forall t, V t -> ULC V t | Abs : forall t : unit, ULC (opt t V) t -> ULC V t | App : forall t, ULC V t -> ULC V t -> ULC V t. Fixpoint rename V W (f : V ---> W) t (y : ULC V t) : ULC W t := match y with | Var _ v => Var (f _ v) | Abs _ v => Abs (rename ^f v) | App _ s t => App (rename f s) (rename f t) end. Definition inj u V := rename (V:=V) (W:= opt u V) (@some unit u V). Definition _shift (u : unit) (V W : TT) (f : V ---> ULC W) : V* ---> ULC (W*) := fun t v => match v in (opt _ _ y) return (ULC (W *) y) with | some t0 p => inj u (f t0 p) | none => Var (none u W) end. Fixpoint _subst V W (f : V ---> ULC W) t (y : ULC V t) : ULC W t := match y with | Var _ v => f _ v | Abs _ v => Abs (_subst (_shift f) v) | App _ s t => App (_subst f s) (_subst f t) end. Definition substar (u : unit) (V : TT) (M : ULC V u) : ULC (opt u V) ---> ULC V := _subst (fun t (y : opt u V t) => match y with | none => M | some _ v => Var v end). Notation "M [*:= N ]" := (substar N M) (at level 50). (** Notations for functions *) Notation "y //- f" := (rename f y)(at level 42, left associativity). Notation "y >- f" := (_shift f y) (at level 40). Notation "y >>= f" := (_subst f y) (at level 42, left associativity). Lemma rename_eq : forall (V : TT) (t : unit) (v : ULC V t) (W : TT) (f g : V ---> W), (forall t y, f t y = g t y) -> v //- f = v //- g. Proof. intros V t v. induction v; intros; simpl. rewrite H; auto. apply f_equal. apply IHv. simpl in *. intros. destruct t. destruct t0. assert (H':= l_eq (r:=tt) H (t:=tt) y). simpl in *. rewrite <- H'. auto. rewrite (IHv1 _ _ _ H). rewrite (IHv2 _ _ _ H). auto. Qed. Hint Resolve rename_eq l_eq : fin. Hint Rewrite rename_eq l_eq : fin. Lemma rename_comp V t (y : ULC V t) W (f : V ---> W) Z (g : W ---> Z): y //- (fun s y => g s (f s y)) = y //- f //- g. Proof. intros V t y. induction y; simpl; intros; fin. apply f_equal. rewrite <- IHy. apply rename_eq. intros r y0. destruct y0; fin. Qed. Lemma rename_id_eq V t (y : ULC V t) (f : V ---> V) (H : forall t v, f t v = v) : y //- f = y. Proof. intros V t y. induction y; simpl; intros; fin. apply f_equal. apply IHy. intros r v; destruct v; fin. unfold lift. fin. Qed. Lemma rename_id V t (y : ULC V t) : y //- id _ = y . Proof. intros V t y. apply rename_id_eq. fin. Qed. Lemma _shift_eq u V W (f g : V ---> ULC W) (H : forall t v, f t v = g t v) t (y : opt u V t) : y >- f = y >- g. Proof. intros u V W f g H t v. destruct v; fin. Qed. Hint Resolve rename_id _shift_eq : fin. Hint Rewrite rename_id _shift_eq : fin. Lemma shift_var u (V : TT) t (y : opt u V t) : y >- @Var _ = Var y. Proof. intros u V t y; induction y; fin. Qed. Hint Resolve shift_var : fin. Hint Rewrite shift_var : fin. Lemma var_lift_shift (u : unit) V W (f : V ---> W) t (y : opt u V t) : Var (^f _ y) = y >- (f ;; @Var _ ). Proof. 
intros u V W f t y; induction y; fin. Qed. Hint Resolve var_lift_shift : fin. Lemma shift_lift u V W Z (f : V ---> W) (g : W ---> ULC Z) t (y : opt u V t) : (^f _ y) >- g = y >- (f ;; g). Proof. intros u V W Z f g t y. induction y; fin. Qed. Hint Resolve shift_lift : fin. Hint Rewrite shift_lift : fin. Lemma subst_eq V t (y : ULC V t) W (f g : V ---> ULC W) (H : forall t y, f t y = g t y): y >>= f = y >>= g. Proof. intros V t y. induction y; fin. Qed. Hint Resolve subst_eq : fin. Hint Rewrite subst_eq : fin. Obligation Tactic := unfold Proper; red; fin. Program Instance subst_oid V W : Proper (equiv ==> equiv (Setoid:=mor_oid (ULC V) (ULC W))) (@_subst V W). Lemma subst_var (V : TT) : forall t (v : ULC V t), v >>= (@Var V) = v . Proof. intros V t y. induction y; fin. apply f_equal. rewrite <- IHy at 2. apply subst_eq. fin. Qed. Lemma subst_eq_rename V t (v : ULC V t) W (f : V ---> W) : v //- f = v >>= (f ;; Var (V:=W)). Proof. intros V t y. induction y; fin. apply f_equal. rewrite IHy. apply subst_eq. intros tr z; destruct z; fin. Qed. Lemma rename_shift a V W Z (f : V ---> ULC W) (g : W ---> Z) t (y : opt a V t) : (y >- f) //- ^g = y >- (f ;; rename g). Proof. intros a V W Z f g t y. induction y; fin. unfold inj. rewrite <- rename_comp. rewrite <- rename_comp. fin. Qed. Hint Rewrite rename_shift : fin. Hint Resolve rename_shift : fin. Hint Unfold inj : fin. Lemma rename_subst V t (v : ULC V t) W Z (f : V ---> ULC W) (g : W ---> Z) : (v >>= f) //- g = v >>= (f ;; rename g). Proof. intros V t y. induction y; fin. rewrite IHy. apply f_equal. apply subst_eq. intros tr z; destruct z; simpl; auto. unfold inj. rewrite <- rename_comp. rewrite <- rename_comp. apply rename_eq. fin. Qed. Lemma subst_rename V t (v : ULC V t) W Z (f : V ---> W) (g : W ---> ULC Z) : v //- f >>= g = v >>= (f ;; g). Proof. intros V t y. induction y; fin. apply f_equal. rewrite IHy. apply subst_eq. intros tr z; destruct z; fin. Qed. Lemma rename_substar V u t (v : ULC (opt u V) t) W (f : V ---> W) N: v [*:= N] //- f = (v //- ^f) [*:= N //- f ]. Proof. intros. unfold substar. rewrite rename_subst. rewrite subst_rename. apply subst_eq. intros t0 z ; destruct z ; fin. Qed. Hint Rewrite subst_rename rename_subst : fin. Hint Rewrite rename_shift : fin. Hint Resolve rename_shift : fin. Lemma subst_shift_shift (u:unit) V (t : unit)(v : opt u V t) W Z (f: V ---> ULC W) (g: W ---> ULC Z): (v >- f) >>= (_shift g) = v >- (f ;; _subst g). Proof. intros u V t v. induction v; simpl; intros; try apply subst_term_inj; auto. unfold inj. rewrite subst_rename. fin. Qed. Hint Resolve subst_shift_shift : fin. Hint Rewrite subst_shift_shift : fin. Lemma subst_subst V t (v : ULC V t) W Z (f : V ---> ULC W) (g : W ---> ULC Z) : v >>= f >>= g = v >>= (f;; _subst g). Proof. intros V t y. induction y; fin. apply f_equal. rewrite IHy. apply subst_eq. intros tr z; destruct z; fin. unfold inj. fin. Qed. Lemma lift_rename V t (s : ULC V t) W (f : V ---> W) : s >>= (fun t z => Var (f t z)) = s //- f. Proof. intros V t y. induction y; fin. apply f_equal. rewrite <- IHy. apply subst_eq. intros tr z; destruct z; fin. Qed. Lemma subst_var_eta (V:TT) t (v:ULC V t): v >>= (fun t z => @Var V t z) = v. Proof. induction v; intros; simpl; auto. rewrite <- IHv at 2. apply f_equal. apply subst_eq. intros; apply shift_var. rewrite IHv1. rewrite IHv2; auto. Qed. Lemma subst_substar (V W : TT) t (M: ULC (opt t V) t) (N:ULC V t) (f:forall t, V t -> ULC W t): M [*:=N] >>= f = (M >>= _shift f) [*:= (N >>= f)]. Proof. intros V W t M N f. unfold substar. simpl. 
repeat rewrite subst_subst. apply subst_eq. intros t0 y. simpl. destruct y. unfold _shift. unfold inj. simpl. rewrite subst_rename. simpl. rewrite (subst_var_eta (f t0 v)). auto. simpl. apply subst_eq; auto. Qed. Hint Resolve subst_var subst_subst lift_rename : fin. Hint Rewrite subst_subst lift_rename: fin. Section Relations_on_ULC. Reserved Notation "x :> y" (at level 70). Variable rel : forall (V:TT) t, relation (ULC V t). Inductive ULCpropag (V: TT) : forall t, relation (ULC V t) := | relorig : forall t (v v': ULC V t), rel v v' -> v :> v' | relApp1: forall t (M M' : ULC V t) N, M :> M' -> App M N :> App M' N | relApp2: forall t (M:ULC V t) N N', N :> N' -> App M N :> App M N' | relAbs: forall t (M M':ULC (opt t V) t), M :> M' -> Abs M :> Abs M' where "y :> z" := (@ULCpropag _ _ y z). Notation "y :>> z" := (clos_refl_trans_1n _ (@ULCpropag _ _ ) y z) (at level 50). Variable V : TT. Variable t : unit. (** these are some trivial lemmata which will be used later *) Lemma cp_App1 (M M': ULC V t) N : M :>> M' -> App M N :>> App M' N. Proof. intros. generalize N. induction H. constructor. intros. constructor 2 with (App y N0); auto. constructor 2. auto. Qed. Lemma cp_App2 (M : ULC V t) N N': N :>> N' -> App M N :>> App M N'. Proof. intros. generalize M. induction H. constructor. intros. constructor 2 with (App M0 y); auto. constructor 3. auto. Qed. Lemma cp_Abs (M M': ULC (opt t V) t ): M :>> M' -> Abs M :>> Abs M'. Proof. intros. induction H. constructor. constructor 2 with (Abs y); auto. constructor 4. auto. Qed. End Relations_on_ULC. (** Beta reduction *) Reserved Notation "a >> b" (at level 70). Inductive beta (V : TT): forall t, relation (ULC V t) := | app_abs : forall t (M: ULC (opt t V) t) N, beta (App (Abs M) N) (M [*:= N]). Definition beta_star := ULCpropag beta. Definition beta_rel := fun (V : TT) t => clos_refl_trans_1n _ (@beta_star V t). Obligation Tactic := idtac. Program Instance ULCBETA_struct (V : TT) : ipo_obj_struct (ULC V) := { IRel t := @beta_rel V t }. Next Obligation. Proof. constructor. constructor. assert (H':= @clos_rt_is_preorder _ (@beta_star V t)). unfold beta_rel in *. unfold Transitive. intros. destruct H' as [H1 H2]. unfold transitive in H2. simpl in *. apply trans_rt1n. apply H2 with y; apply rt1n_trans; auto. Qed. Definition ULCBETA (V: TT) : IPO unit := Build_ipo_obj (ULCBETA_struct V ). Program Instance Var_s (V : TT) : ipo_mor_struct (a:=SM_ipo _ V) (b:=ULCBETA V) (Var (V:=V)). Next Obligation. Proof. intros V t. unfold Proper; red. simpl. intros y z H. induction H. reflexivity. Qed. Definition VAR V := Build_ipo_mor (Var_s V). Program Instance subst_s (V W : TT) (f : SM_ipo _ V ---> ULCBETA W) : ipo_mor_struct (a:=ULCBETA V) (b:=ULCBETA W) (_subst f). Next Obligation. Proof. intros V W f t. unfold Proper; red. intros y z H. generalize dependent W. induction H; simpl; intros. constructor. transitivity (_subst f y); try apply IHclos_refl_trans_1n. clear dependent z. generalize dependent W. induction H; simpl; intros. Focus 2. apply cp_App1. apply IHULCpropag. Focus 2. apply cp_App2. apply IHULCpropag. Focus 2. apply cp_Abs. simpl in *. apply (IHULCpropag _ (SM_ind (V:=opt _ V) (W:=ULCBETA (opt t W)) (fun t y => _shift f y))). generalize dependent W. induction H; simpl; intros. apply clos_refl_trans_1n_contains. apply relorig. assert (H:=app_abs (_subst (_shift f) M) (_subst f N)). rewrite subst_substar. auto. Qed. Definition SUBST V W f := Build_ipo_mor (subst_s V W f). Obligation Tactic := fin. 
Program Instance ULCBETAM_s : RMonad_struct (SM_ipo unit) ULCBETA := { rweta := VAR; rkleisli a b f := SUBST f }. Next Obligation. Proof. unfold Proper; red; fin. Qed. Definition ULCBETAM : RMonad (SM_ipo unit) := Build_RMonad ULCBETAM_s. Lemma rename_lift V t (v : ULC V t) W (f : V ---> W) : v //- f = rlift ULCBETAM f _ v. Proof. unfold lift; fin. Qed. Hint Rewrite lift_rename : fin. Lemma shift_shift r s V W (f : SM_ipo _ V ---> ULCBETAM W) (y : (opt r V) s) : sshift_ (P:=ULCBETAM) (W:=W) f y = y >- f . Proof. intros r s V W f y. destruct y as [t y | ]; simpl; unfold inj; fin. Qed. Hint Resolve shift_shift rename_lift : fin. Hint Rewrite shift_shift rename_lift : fin. Definition PCF_ULC_type_mor : TY -> unit := fun _ => tt. Check App. Program Instance ulc_app_s r s : RModule_Hom_struct (M:= ULCBETAM [PCF_ULC_type_mor (r ~> s)] x (ULCBETAM [PCF_ULC_type_mor r])) (N:=ULCBETAM [PCF_ULC_type_mor s]) (fun V t => App (fst t) (snd t)). Definition ulc_app r s := Build_Module_Hom (ulc_app_s r s). Program Instance ulc_abs_s r s : Module_Hom_struct (S:= d ULCM // PCF_ULC_type_mor r [PCF_ULC_type_mor s] ) (T:= ULCM [PCF_ULC_type_mor (r ~> s)]) (fun z t => abs t). Definition ulc_abs r s := Build_Module_Hom (ulc_abs_s r s). Definition ULC_fix (V : TT) t : ULC V t := abs ( app (V:=opt t V) ( abs (V:=opt t V)( app (V:=opt t (opt t V)) (var (V:= opt t (opt t V)) (some t (A:= opt t V) ( none t (V) ))) (app (var (none t (V*))) (var (none t V*))) ) ) ( abs ( app (V:=opt t (opt t V)) (var (V:= opt t (opt t V)) (some t (A:= opt t V) ( none t (V) ))) (app (var (none t (V*))) (var (none t V*))) ) ) ). Program Instance ulc_rec_s t : Module_Hom_struct (S := ULCM [PCF_ULC_type_mor (t ~> t)]) (T:=ULCM [PCF_ULC_type_mor t]) (fun V Z => app (ULC_fix V tt) Z). Definition ulc_rec t := Build_Module_Hom (ulc_rec_s t). Program Instance ulc_ttt_s : Module_Hom_struct (S:= Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor Bool]) (fun V _ => abs (V:=V) (abs (V:= opt tt V) (var (some tt (A:=opt tt V) (none tt V))))). Definition ulc_ttt := Build_Module_Hom ulc_ttt_s. Program Instance ulc_fff_s : Module_Hom_struct (S:= Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor Bool]) (fun V _ => abs (V:=V) (abs (V:= opt tt V) (var (none tt (V*))))). Definition ulc_fff := Build_Module_Hom ulc_fff_s. Fixpoint ULC_Nat (n : nat) (V : TT) := match n with | 0 => abs (abs (var (none tt (opt tt V)))) | S n' => abs (V:=V) (abs (V:=opt tt V) ( app ( app (ULC_Nat n' _) (var (none tt (opt tt V)))) (var (some tt (A:=opt tt V) (none tt V))))) end. Obligation Tactic := idtac. Program Instance ulc_nats_s m : Module_Hom_struct (S:= Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor Nat]) (fun V _ => ULC_Nat m V). Next Obligation. Proof. simpl. intro m. induction m; simpl. intros. auto. intros V W f r. apply f_equal. apply f_equal. fin. Qed. Definition ulc_nats m := Build_Module_Hom (ulc_nats_s m). (** plus = \n.\m.\f.\x. 
n(f) (m(f)x) *) Definition ULC_plus (V : TT) := abs (V:=V) ( abs (V:=opt tt V) ( abs (V:= opt tt (opt tt V)) ( abs (V:= opt tt (opt tt (opt tt V))) ( app (V:=opt tt (opt tt (opt tt (opt tt V)))) (app (V:=opt tt (opt tt (opt tt (opt tt V)))) (var (V:=opt tt (opt tt (opt tt (opt tt V)))) (some tt (some tt (some tt (none tt V))))) (var (V:=opt tt (opt tt (opt tt (opt tt V)))) (some tt (A:=(opt tt (opt tt (opt tt V)))) (none tt (opt tt (opt tt V)) ))) ) (app (V:=opt tt (opt tt (opt tt (opt tt V)))) (app (V:=opt tt (opt tt (opt tt (opt tt V)))) (var (V:=opt tt (opt tt (opt tt (opt tt V)))) (some tt (some tt (none tt (opt tt V) )))) (var (V:=opt tt (opt tt (opt tt (opt tt V)))) (some tt (A:= opt tt (opt tt (opt tt V))) (none tt (opt tt (opt tt V)))))) (var (V:=opt tt (opt tt (opt tt (opt tt V)))) (none tt (opt tt (opt tt (opt tt V) )) ))) ) ) ) ). Obligation Tactic := fin. (** succ = 1 + _ *) Program Instance ulc_succ_s : Module_Hom_struct (S:= Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor (Nat ~> Nat)]) (fun V _ => app (ULC_plus V) (ulc_nats (S 0) _ (tt) )). Definition ulc_succ := Build_Module_Hom ulc_succ_s. (** if then else = \a.\b.\c. a(b)(c) *) Definition ULC_cond (V : TT) := abs (V:=V) ( abs (V:=opt tt V) ( abs (V:=opt tt (opt tt V)) ( app (V:= opt tt (opt tt (opt tt V))) (app (V:= opt tt (opt tt (opt tt V))) (var (V:= opt tt (opt tt (opt tt V))) (some tt (some tt (none tt V)))) (var (some tt (none tt (opt tt V)))) ) (var (none tt (opt tt (opt tt V))) ) ) ) ). Program Instance ulc_condn_s : Module_Hom_struct (S := Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor (Bool ~> Nat ~> Nat ~> Nat)]) (fun V _ => ULC_cond V). Definition ulc_condn := Build_Module_Hom ulc_condn_s. Program Instance ulc_condb_s : Module_Hom_struct (S := Term (C:=MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor (Bool ~> Bool ~> Bool ~> Bool)]) (fun V _ => ULC_cond V). Definition ulc_condb := Build_Module_Hom ulc_condb_s. Definition ULC_omega (V : TT) := abs (V:= V) ( app (var (none tt V)) (var (none tt V))). Program Instance ulc_bottom_s t : Module_Hom_struct (S:= Term (C:= MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor t]) (fun V _ => ULC_omega V). Definition ulc_bottom t := Build_Module_Hom (ulc_bottom_s t). (* zero? = Ln.((n)(true)false)true *) Definition ULC_zero (V : TT) := abs (V:=V) ( app ( (app (var (none tt V)) (app (ulc_ttt _ tt) (ulc_fff _ tt))) ) ( ulc_ttt _ tt ) ). Program Instance ulc_zero_s : Module_Hom_struct (S:= Term (C := MOD ULCM TYPE)) (T:= ULCM [PCF_ULC_type_mor (Nat ~> Bool)]) (fun V _ => ULC_zero V). Definition ulc_zero := Build_Module_Hom ulc_zero_s. Program Instance PCF_ULC_rep_s : PCF_rep_s (U:=unit) (PCF_ULC_type_mor) ULCM := { app r s := ulc_app r s ; abs r s := ulc_abs r s ; rec t := ulc_rec t ; tttt := ulc_ttt ; ffff := ulc_fff ; nats m := ulc_nats m ; Succ := ulc_succ ; CondB := ulc_condb ; CondN := ulc_condn ; bottom t := ulc_bottom t ; Zero := ulc_zero }. Definition PCF_ULC_rep := Build_PCF_rep PCF_ULC_rep_s. Definition PCF_ULC_compilation := InitMor PCF_ULC_rep. End ULC_syntax.
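Written in ordinary lambda-calculus notation (a paraphrase of the Coq definitions and their comments above; the code spells these terms out with explicit opt/some/none indices), the closed terms used in the representation are the standard untyped encodings:
\[
\mathrm{ULC\_fix} \;=\; \lambda f.\,(\lambda x.\,f\,(x\,x))\,(\lambda x.\,f\,(x\,x)),
\qquad
\mathrm{ULC\_plus} \;=\; \lambda n.\,\lambda m.\,\lambda f.\,\lambda x.\;n\,f\,(m\,f\,x),
\]
\[
\mathrm{ULC\_cond} \;=\; \lambda a.\,\lambda b.\,\lambda c.\;a\,b\,c,
\qquad
\mathrm{ULC\_omega} \;=\; \lambda x.\,x\,x,
\qquad
\mathrm{ulc\_ttt} \;=\; \lambda x.\,\lambda y.\,x,
\qquad
\mathrm{ulc\_fff} \;=\; \lambda x.\,\lambda y.\,y.
\]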
{-# OPTIONS --instance-search-depth=10 #-} module _ where module _ {A : Set} (B₁ : A → Set) (B₂ : A → Set) (f : A → A) where type = ∀ {x : A} → B₁ x → B₂ (f x) record Class : Set where field method : type method : {{_ : Class}} → type method {{I}} = Class.method I id : ∀ {A : Set} → A → A id x = x Ext : {A : Set} (B : A → Set) → A → Set Ext B x = B x → B x postulate Foo : Set instance _ : {A : Set} {B : A → Set} {{_ : Class (Ext B) (Ext B) id}} → Class (Ext B) (Ext (λ _ → Foo)) id module _ {A : Set} (B : A → Set) {{_ : Class (Ext B) (Ext B) id}} {y} (R : B y → Set) where postulate _ : let Point = λ (f : B y → B y) → ∀ x → R (f x) in Class Point Point (method (Ext _) (Ext _) id)
(* Title: JinjaThreads/BV/BVSpec.thy Author: Cornelia Pusch, Gerwin Klein, Andreas Lochbihler Based on the theory Jinja/BV/BVSpec *) section \<open>The Bytecode Verifier \label{sec:BVSpec}\<close> theory BVSpec imports Effect begin text \<open> This theory contains a specification of the BV. The specification describes correct typings of method bodies; it corresponds to type \emph{checking}. \<close> \<comment> \<open>The method type only contains declared classes:\<close> definition check_types :: "'m prog \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> ty\<^sub>i' err list \<Rightarrow> bool" where "check_types P mxs mxl \<tau>s \<equiv> set \<tau>s \<subseteq> states P mxs mxl" \<comment> \<open>An instruction is welltyped if it is applicable and its effect\<close> \<comment> \<open>is compatible with the type at all successor instructions:\<close> definition wt_instr :: "['m prog,ty,nat,pc,ex_table,'addr instr,pc,ty\<^sub>m] \<Rightarrow> bool" ("_,_,_,_,_ \<turnstile> _,_ :: _" [60,0,0,0,0,0,0,61] 60) where "P,T,mxs,mpc,xt \<turnstile> i,pc :: \<tau>s \<equiv> app i P mxs T pc mpc xt (\<tau>s!pc) \<and> (\<forall>(pc',\<tau>') \<in> set (eff i P pc xt (\<tau>s!pc)). P \<turnstile> \<tau>' \<le>' \<tau>s!pc')" \<comment> \<open>The type at @{text "pc=0"} conforms to the method calling convention:\<close> definition wt_start :: "['m prog,cname,ty list,nat,ty\<^sub>m] \<Rightarrow> bool" where "wt_start P C Ts mxl\<^sub>0 \<tau>s \<equiv> P \<turnstile> Some ([],OK (Class C)#map OK Ts@replicate mxl\<^sub>0 Err) \<le>' \<tau>s!0" \<comment> \<open>A method is welltyped if the body is not empty,\<close> \<comment> \<open>if the method type covers all instructions and mentions\<close> \<comment> \<open>declared classes only, if the method calling convention is respected, and\<close> \<comment> \<open>if all instructions are welltyped.\<close> definition wt_method :: "['m prog,cname,ty list,ty,nat,nat,'addr instr list, ex_table,ty\<^sub>m] \<Rightarrow> bool" where "wt_method P C Ts T\<^sub>r mxs mxl\<^sub>0 is xt \<tau>s \<equiv> 0 < size is \<and> size \<tau>s = size is \<and> check_types P mxs (1+size Ts+mxl\<^sub>0) (map OK \<tau>s) \<and> wt_start P C Ts mxl\<^sub>0 \<tau>s \<and> (\<forall>pc < size is. P,T\<^sub>r,mxs,size is,xt \<turnstile> is!pc,pc :: \<tau>s)" \<comment> \<open>A program is welltyped if it is wellformed and all methods are welltyped\<close> definition wf_jvm_prog_phi :: "ty\<^sub>P \<Rightarrow> 'addr jvm_prog \<Rightarrow> bool" ("wf'_jvm'_prog\<^bsub>_\<^esub>") where "wf_jvm_prog\<^bsub>\<Phi>\<^esub> \<equiv> wf_prog (\<lambda>P C (M,Ts,T\<^sub>r,(mxs,mxl\<^sub>0,is,xt)). wt_method P C Ts T\<^sub>r mxs mxl\<^sub>0 is xt (\<Phi> C M))" definition wf_jvm_prog :: "'addr jvm_prog \<Rightarrow> bool" where "wf_jvm_prog P \<equiv> \<exists>\<Phi>. wf_jvm_prog\<^bsub>\<Phi>\<^esub> P" lemma wt_jvm_progD: "wf_jvm_prog\<^bsub>\<Phi>\<^esub> P \<Longrightarrow> \<exists>wt. 
wf_prog wt P" (*<*) by (unfold wf_jvm_prog_phi_def, blast) (*>*) lemma wt_jvm_prog_impl_wt_instr: "\<lbrakk> wf_jvm_prog\<^bsub>\<Phi>\<^esub> P; P \<turnstile> C sees M:Ts \<rightarrow> T = \<lfloor>(mxs,mxl\<^sub>0,ins,xt)\<rfloor> in C; pc < size ins \<rbrakk> \<Longrightarrow> P,T,mxs,size ins,xt \<turnstile> ins!pc,pc :: \<Phi> C M" (*<*) apply (unfold wf_jvm_prog_phi_def) apply (drule (1) sees_wf_mdecl) apply (simp add: wf_mdecl_def wt_method_def) done (*>*) lemma wt_jvm_prog_impl_wt_start: "\<lbrakk> wf_jvm_prog\<^bsub>\<Phi>\<^esub> P; P \<turnstile> C sees M:Ts \<rightarrow> T = \<lfloor>(mxs,mxl\<^sub>0,ins,xt)\<rfloor> in C \<rbrakk> \<Longrightarrow> 0 < size ins \<and> wt_start P C Ts mxl\<^sub>0 (\<Phi> C M)" (*<*) apply (unfold wf_jvm_prog_phi_def) apply (drule (1) sees_wf_mdecl) apply (simp add: wf_mdecl_def wt_method_def) done (*>*) end
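Read in conventional notation, the central per-instruction condition of the verifier above says that an instruction is applicable to the state type inferred for its position and that every successor state type it produces is compatible with the type recorded at that successor:
\[
P,T,\mathit{mxs},\mathit{mpc},\mathit{xt} \vdash i,\mathit{pc} :: \tau s
\;\equiv\;
\mathrm{app}\;i\;P\;\mathit{mxs}\;T\;\mathit{pc}\;\mathit{mpc}\;\mathit{xt}\;(\tau s\,!\,\mathit{pc})
\;\wedge\;
\forall (\mathit{pc}',\tau') \in \mathrm{set}\,(\mathrm{eff}\;i\;P\;\mathit{pc}\;\mathit{xt}\;(\tau s\,!\,\mathit{pc})).\;
P \vdash \tau' \le' \tau s\,!\,\mathit{pc}'.
\]
wt_method then requires this at every pc of a nonempty instruction list, together with check_types and the calling-convention condition wt_start.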
State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) Tactic: inhabit ι State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) Tactic: set g' : (i : ι) → t i → β := fun i => g i ∘ (↑) State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: case ht' α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ⊢ ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) 
inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) Tactic: have ht' : ∀ (i j) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i ⟨x, hxi⟩ = g' j ⟨x, hxj⟩ State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) Tactic: set f : (⋃ i, t i) → β := iUnionLift t g' ht' _ Subset.rfl State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) Tactic: have hfm : Measurable f := measurable_iUnionLift _ _ t_meas (fun i => (hg i).comp measurable_subtype_coe) State Before: α : 
Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: no goals Tactic: classical refine ⟨fun x => if hx : x ∈ ⋃ i, t i then f ⟨x, hx⟩ else g default x, hfm.dite ((hg default).comp measurable_subtype_coe) (.iUnion t_meas), fun i x hx => ?_⟩ simp only [dif_pos (mem_iUnion.2 ⟨i, hx⟩)] exact iUnionLift_of_mem ⟨x, mem_iUnion.2 ⟨i, hx⟩⟩ hx State Before: case ht' α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ⊢ ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } State After: case ht' α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i j : ι x : α hxi : x ∈ t i hxj : x ∈ t j ⊢ g' i { val := x, property := hxi } = g' j { val := x, property := hxj } Tactic: intro i j x hxi hxj State Before: case ht' α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i j : ι x : α hxi : x ∈ t i hxj : x ∈ t j ⊢ g' i { val := x, property := hxi } = g' j { val := x, property := hxj } State After: case ht'.inl α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i : ι x : α hxi hxj : x ∈ t i ⊢ g' i { val := x, property := hxi } = g' i { val := x, property := hxj } case ht'.inr α : Type u_2 β : Type 
u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i j : ι x : α hxi : x ∈ t i hxj : x ∈ t j hij : i ≠ j ⊢ g' i { val := x, property := hxi } = g' j { val := x, property := hxj } Tactic: rcases eq_or_ne i j with rfl | hij State Before: case ht'.inl α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i : ι x : α hxi hxj : x ∈ t i ⊢ g' i { val := x, property := hxi } = g' i { val := x, property := hxj } State After: no goals Tactic: rfl State Before: case ht'.inr α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val i j : ι x : α hxi : x ∈ t i hxj : x ∈ t j hij : i ≠ j ⊢ g' i { val := x, property := hxi } = g' j { val := x, property := hxj } State After: no goals Tactic: exact ht hij ⟨hxi, hxj⟩ State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f ⊢ ∃ f, Measurable f ∧ ∀ (n : ι), EqOn f (g n) (t n) State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f i : ι x : α hx : x ∈ t i ⊢ (fun x => if hx : x ∈ ⋃ (i : ι), t i then f { val := x, property := hx } else 
g default x) x = g i x Tactic: refine ⟨fun x => if hx : x ∈ ⋃ i, t i then f ⟨x, hx⟩ else g default x, hfm.dite ((hg default).comp measurable_subtype_coe) (.iUnion t_meas), fun i x hx => ?_⟩ State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f i : ι x : α hx : x ∈ t i ⊢ (fun x => if hx : x ∈ ⋃ (i : ι), t i then f { val := x, property := hx } else g default x) x = g i x State After: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f i : ι x : α hx : x ∈ t i ⊢ iUnionLift t (fun i => g i ∘ Subtype.val) ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) { val := x, property := (_ : x ∈ ⋃ (i : ι), t i) } = g i x Tactic: simp only [dif_pos (mem_iUnion.2 ⟨i, hx⟩)] State Before: α : Type u_2 β : Type u_3 γ : Type ?u.93731 δ : Type ?u.93734 δ' : Type ?u.93737 ι✝ : Sort uι s t✝ u : Set α m : MeasurableSpace α mβ : MeasurableSpace β mγ : MeasurableSpace γ ι : Type u_1 inst✝¹ : Countable ι inst✝ : Nonempty ι t : ι → Set α t_meas : ∀ (n : ι), MeasurableSet (t n) g : ι → α → β hg : ∀ (n : ι), Measurable (g n) ht : Pairwise fun i j => EqOn (g i) (g j) (t i ∩ t j) inhabited_h : Inhabited ι g' : (i : ι) → ↑(t i) → β := fun i => g i ∘ Subtype.val ht' : ∀ (i j : ι) (x : α) (hxi : x ∈ t i) (hxj : x ∈ t j), g' i { val := x, property := hxi } = g' j { val := x, property := hxj } f : ↑(⋃ (i : ι), t i) → β := iUnionLift t g' ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) hfm : Measurable f i : ι x : α hx : x ∈ t i ⊢ iUnionLift t (fun i => g i ∘ Subtype.val) ht' (⋃ (i : ι), t i) (_ : (⋃ (i : ι), t i) ⊆ ⋃ (i : ι), t i) { val := x, property := (_ : x ∈ ⋃ (i : ι), t i) } = g i x State After: no goals Tactic: exact iUnionLift_of_mem ⟨x, mem_iUnion.2 ⟨i, hx⟩⟩ hx
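Collected into a single statement, the tactic trace above proves the following measurable gluing lemma (hypotheses and goal exactly as displayed in the states):
\[
\begin{aligned}
&\iota \text{ countable and nonempty},\quad
t_n \subseteq \alpha \text{ measurable},\quad
g_n : \alpha \to \beta \text{ measurable},\quad
g_i = g_j \text{ on } t_i \cap t_j \ (i \ne j)\\
&\Longrightarrow\quad
\exists\, f : \alpha \to \beta \text{ measurable such that } f = g_n \text{ on } t_n \text{ for every } n.
\end{aligned}
\]
The construction visible in the trace glues the restrictions g n ∘ Subtype.val over ⋃ i, t i with iUnionLift, checks measurability with measurable_iUnionLift, and extends by g default outside the union through a measurable dependent if-then-else (Measurable.dite), using iUnionLift_of_mem to identify the result with g n on each t n.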
(** This file continues the development of algebra [Operation]. It gives a way to construct operations using (conventional) curried functions, and shows that such curried operations are equivalent to the uncurried operations [Operation]. *) Require Export HoTT.Algebra.Universal.Algebra. Require Import HoTT.Types HoTT.Spaces.Finite HoTT.Spaces.Nat. Local Open Scope Algebra_scope. Local Open Scope nat_scope. (** Functions [head_dom'] and [head_dom] are used to get the first element of a nonempty operation domain [a : forall i, A (ss i)]. *) Monomorphic Definition head_dom' {σ} (A : Carriers σ) (n : nat) : forall (N : n > 0) (ss : FinSeq n (Sort σ)) (a : forall i, A (ss i)), A (fshead' n N ss) := match n with | 0 => fun N ss _ => Empty_rec (not_lt_n_n _ N) | n'.+1 => fun N ss a => a fin_zero end. Monomorphic Definition head_dom {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n.+1 (Sort σ)) (a : forall i, A (ss i)) : A (fshead ss) := head_dom' A n.+1 _ ss a. (** Functions [tail_dom'] and [tail_dom] are used to obtain the tail of an operation domain [a : forall i, A (ss i)]. *) Monomorphic Definition tail_dom' {σ} (A : Carriers σ) (n : nat) : forall (ss : FinSeq n (Sort σ)) (a : forall i, A (ss i)) (i : Fin (pred n)), A (fstail' n ss i) := match n with | 0 => fun ss _ i => Empty_rec i | n'.+1 => fun ss a i => a (fsucc i) end. Monomorphic Definition tail_dom {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n.+1 (Sort σ)) (a : forall i, A (ss i)) : forall i, A (fstail ss i) := tail_dom' A n.+1 ss a. (** Functions [cons_dom'] and [cons_dom] to add an element to the front of a given domain [a : forall i, A (ss i)]. *) Monomorphic Definition cons_dom' {σ} (A : Carriers σ) {n : nat} : forall (i : Fin n) (ss : FinSeq n (Sort σ)) (N : n > 0), A (fshead' n N ss) -> (forall i, A (fstail' n ss i)) -> A (ss i) := fin_ind (fun n i => forall (ss : Fin n -> Sort σ) (N : n > 0), A (fshead' n N ss) -> (forall i, A (fstail' n ss i)) -> A (ss i)) (fun n' _ z x _ => x) (fun n' i' _ => fun _ _ _ xs => xs i'). Definition cons_dom {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n.+1 (Sort σ)) (x : A (fshead ss)) (xs : forall i, A (fstail ss i)) : forall i : Fin n.+1, A (ss i) := fun i => cons_dom' A i ss _ x xs. (** The empty domain: *) Definition nil_dom {σ} (A : Carriers σ) (ss : FinSeq 0 (Sort σ)) : forall i : Fin 0, A (ss i) := Empty_ind (A o ss). (** A specialization of [Operation] to finite [Fin n] arity. *) Definition FiniteOperation {σ : Signature} (A : Carriers σ) {n : nat} (ss : FinSeq n (Sort σ)) (t : Sort σ) : Type := Operation A {| Arity := Fin n; sorts_dom := ss; sort_cod := t |}. (** A type of curried operations << CurriedOperation A [s1, ..., sn] t := A s1 -> ... -> A sn -> A t. >> *) Fixpoint CurriedOperation {σ} (A : Carriers σ) {n : nat} : (FinSeq n (Sort σ)) -> Sort σ -> Type := match n with | 0 => fun ss t => A t | n'.+1 => fun ss t => A (fshead ss) -> CurriedOperation A (fstail ss) t end. (** Function [operation_uncurry] is used to uncurry an operation << operation_uncurry A [s1, ..., sn] t (op : CurriedOperation A [s1, ..., sn] t) : FiniteOperation A [s1, ..., sn] t := fun (x1 : A s1, ..., xn : A xn) => op x1 ... xn >> See [equiv_operation_curry] below. *) Fixpoint operation_uncurry {σ} (A : Carriers σ) {n : nat} : forall (ss : FinSeq n (Sort σ)) (t : Sort σ), CurriedOperation A ss t -> FiniteOperation A ss t := match n with | 0 => fun ss t op _ => op | n'.+1 => fun ss t op a => operation_uncurry A (fstail ss) t (op (a fin_zero)) (a o fsucc) end. 
Local Example computation_example_operation_uncurry : forall (σ : Signature) (A : Carriers σ) (n : nat) (s1 s2 t : Sort σ) (ss := (fscons s1 (fscons s2 fsnil))) (op : CurriedOperation A ss t) (a : forall i, A (ss i)), operation_uncurry A ss t op = fun a => op (a fin_zero) (a (fsucc fin_zero)). Proof. reflexivity. Qed. (** Function [operation_curry] is used to curry an operation << operation_curry A [s1, ..., sn] t (op : FiniteOperation A [s1, ..., sn] t) : CurriedOperation A [s1, ..., sn] t := fun (x1 : A s1) ... (xn : A xn) => op (x1, ..., xn) >> See [equiv_operation_curry] below. *) Fixpoint operation_curry {σ} (A : Carriers σ) {n : nat} : forall (ss : FinSeq n (Sort σ)) (t : Sort σ), FiniteOperation A ss t -> CurriedOperation A ss t := match n with | 0 => fun ss t op => op (Empty_ind _) | n'.+1 => fun ss t op x => operation_curry A (fstail ss) t (op o cons_dom A ss x) end. Local Example computation_example_operation_curry : forall (σ : Signature) (A : Carriers σ) (n : nat) (s1 s2 t : Sort σ) (ss := (fscons s1 (fscons s2 fsnil))) (op : FiniteOperation A ss t) (x1 : A s1) (x2 : A s2), operation_curry A ss t op = fun x1 x2 => op (cons_dom A ss x1 (cons_dom A _ x2 (nil_dom A _))). Proof. reflexivity. Qed. Lemma expand_cons_dom' {σ} (A : Carriers σ) (n : nat) : forall (i : Fin n) (ss : FinSeq n (Sort σ)) (N : n > 0) (a : forall i, A (ss i)), cons_dom' A i ss N (head_dom' A n N ss a) (tail_dom' A n ss a) = a i. Proof. intro i. induction i using fin_ind; intros ss N a. - unfold cons_dom'. rewrite compute_fin_ind_fin_zero. reflexivity. - unfold cons_dom'. by rewrite compute_fin_ind_fsucc. Qed. Lemma expand_cons_dom `{Funext} {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n.+1 (Sort σ)) (a : forall i, A (ss i)) : cons_dom A ss (head_dom A ss a) (tail_dom A ss a) = a. Proof. funext i. apply expand_cons_dom'. Defined. Lemma path_operation_curry_to_cunurry `{Funext} {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n (Sort σ)) (t : Sort σ) : operation_uncurry A ss t o operation_curry A ss t == idmap. Proof. intro a. induction n as [| n IHn]. - funext d. refine (ap a _). apply path_contr. - funext a'. refine (ap (fun x => x _) (IHn _ _) @ _). refine (ap a _). apply expand_cons_dom. Qed. Lemma path_operation_uncurry_to_curry `{Funext} {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n (Sort σ)) (t : Sort σ) : operation_curry A ss t o operation_uncurry A ss t == idmap. Proof. intro a. induction n; [reflexivity|]. funext x. refine (_ @ IHn (fstail ss) (a x)). refine (ap (operation_curry A (fstail ss) t) _). funext a'. simpl. unfold cons_dom, cons_dom'. rewrite compute_fin_ind_fin_zero. refine (ap (operation_uncurry A (fstail ss) t (a x)) _). funext i'. now rewrite compute_fin_ind_fsucc. Qed. Global Instance isequiv_operation_curry `{Funext} {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n (Sort σ)) (t : Sort σ) : IsEquiv (operation_curry A ss t). Proof. srapply isequiv_adjointify. - apply operation_uncurry. - apply path_operation_uncurry_to_curry. - apply path_operation_curry_to_cunurry. Defined. Definition equiv_operation_curry `{Funext} {σ} (A : Carriers σ) {n : nat} (ss : FinSeq n (Sort σ)) (t : Sort σ) : FiniteOperation A ss t <~> CurriedOperation A ss t := Build_Equiv _ _ (operation_curry A ss t) _.
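Summarizing the file above (Funext is assumed, as in the final instance): an operation of finite arity that takes its arguments as a family over Fin n is equivalent to its curried form,
\[
\mathrm{FiniteOperation}\;A\;[s_1,\dots,s_n]\;t
\;\simeq\;
A\,s_1 \to \cdots \to A\,s_n \to A\,t
\;=\;
\mathrm{CurriedOperation}\;A\;[s_1,\dots,s_n]\;t,
\]
with the equivalence equiv_operation_curry given by operation_curry and inverse operation_uncurry.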
[STATEMENT] lemma fps_Gcd: assumes "A - {0} \<noteq> {}" shows "Gcd A = fps_X ^ (INF f\<in>A-{0}. subdegree f)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Gcd A = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] proof (rule sym, rule GcdI) [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>a. a \<in> A \<Longrightarrow> fps_X ^ Inf (subdegree ` (A - {0})) dvd a 2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 3. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] fix f [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>a. a \<in> A \<Longrightarrow> fps_X ^ Inf (subdegree ` (A - {0})) dvd a 2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 3. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] assume "f \<in> A" [PROOF STATE] proof (state) this: f \<in> A goal (3 subgoals): 1. \<And>a. a \<in> A \<Longrightarrow> fps_X ^ Inf (subdegree ` (A - {0})) dvd a 2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 3. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] thus "fps_X ^ (INF f\<in>A - {0}. subdegree f) dvd f" [PROOF STATE] proof (prove) using this: f \<in> A goal (1 subgoal): 1. fps_X ^ Inf (subdegree ` (A - {0})) dvd f [PROOF STEP] by (cases "f = 0") (auto simp: fps_dvd_iff intro!: cINF_lower) [PROOF STATE] proof (state) this: fps_X ^ Inf (subdegree ` (A - {0})) dvd f goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] next [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] fix d [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] assume d: "\<And>f. f \<in> A \<Longrightarrow> d dvd f" [PROOF STATE] proof (state) this: ?f \<in> A \<Longrightarrow> d dvd ?f goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] from assms [PROOF STATE] proof (chain) picking this: A - {0} \<noteq> {} [PROOF STEP] obtain f where "f \<in> A - {0}" [PROOF STATE] proof (prove) using this: A - {0} \<noteq> {} goal (1 subgoal): 1. (\<And>f. f \<in> A - {0} \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: f \<in> A - {0} goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. 
normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] with d[of f] [PROOF STATE] proof (chain) picking this: f \<in> A \<Longrightarrow> d dvd f f \<in> A - {0} [PROOF STEP] have [simp]: "d \<noteq> 0" [PROOF STATE] proof (prove) using this: f \<in> A \<Longrightarrow> d dvd f f \<in> A - {0} goal (1 subgoal): 1. d \<noteq> 0 [PROOF STEP] by auto [PROOF STATE] proof (state) this: d \<noteq> 0 goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] from d assms [PROOF STATE] proof (chain) picking this: ?f \<in> A \<Longrightarrow> d dvd ?f A - {0} \<noteq> {} [PROOF STEP] have "subdegree d \<le> (INF f\<in>A-{0}. subdegree f)" [PROOF STATE] proof (prove) using this: ?f \<in> A \<Longrightarrow> d dvd ?f A - {0} \<noteq> {} goal (1 subgoal): 1. subdegree d \<le> Inf (subdegree ` (A - {0})) [PROOF STEP] by (intro cINF_greatest) (simp_all add: fps_dvd_iff[symmetric]) [PROOF STATE] proof (state) this: subdegree d \<le> Inf (subdegree ` (A - {0})) goal (2 subgoals): 1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> c dvd a) \<Longrightarrow> c dvd fps_X ^ Inf (subdegree ` (A - {0})) 2. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] with d assms [PROOF STATE] proof (chain) picking this: ?f \<in> A \<Longrightarrow> d dvd ?f A - {0} \<noteq> {} subdegree d \<le> Inf (subdegree ` (A - {0})) [PROOF STEP] show "d dvd fps_X ^ (INF f\<in>A-{0}. subdegree f)" [PROOF STATE] proof (prove) using this: ?f \<in> A \<Longrightarrow> d dvd ?f A - {0} \<noteq> {} subdegree d \<le> Inf (subdegree ` (A - {0})) goal (1 subgoal): 1. d dvd fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] by (simp add: fps_dvd_iff) [PROOF STATE] proof (state) this: d dvd fps_X ^ Inf (subdegree ` (A - {0})) goal (1 subgoal): 1. normalize (fps_X ^ Inf (subdegree ` (A - {0}))) = fps_X ^ Inf (subdegree ` (A - {0})) [PROOF STEP] qed simp_all
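In ordinary notation, the Isar proof above establishes, for a set A of formal power series containing at least one nonzero element,
\[
\mathrm{Gcd}\,A \;=\; X^{\;\inf_{f \in A \setminus \{0\}} \operatorname{subdegree} f},
\]
where X denotes fps_X; each divisibility claim in the proof is reduced, via fps_dvd_iff, to a comparison of subdegrees.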
[STATEMENT] lemma comm_semiring_0_cancel_transfer[transfer_rule]: assumes[transfer_rule]: "bi_unique A" "right_total A" shows "( (A ===> A ===> A) ===> (A ===> A ===> A) ===> A ===> (A ===> A ===> A) ===> (=) ) (comm_semiring_0_cancel_ow (Collect (Domainp A))) class.comm_semiring_0_cancel" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ((A ===> A ===> A) ===> (A ===> A ===> A) ===> A ===> (A ===> A ===> A) ===> (=)) (comm_semiring_0_cancel_ow (Collect (Domainp A))) class.comm_semiring_0_cancel [PROOF STEP] unfolding comm_semiring_0_cancel_ow_def class.comm_semiring_0_cancel_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. ((A ===> A ===> A) ===> (A ===> A ===> A) ===> A ===> (A ===> A ===> A) ===> (=)) (\<lambda>plus minus zero times. comm_semiring_ow (Collect (Domainp A)) plus times \<and> cancel_comm_monoid_add_ow (Collect (Domainp A)) plus minus zero) (\<lambda>plus minus zero times. class.cancel_comm_monoid_add plus minus zero \<and> class.comm_semiring plus times) [PROOF STEP] apply transfer_prover_start [PROOF STATE] proof (prove) goal (4 subgoals): 1. Transfer.Rel ((A ===> A ===> A) ===> (A ===> A ===> A) ===> ?Rh10) ?ab10 class.comm_semiring 2. Transfer.Rel ((A ===> A ===> A) ===> (A ===> A ===> A) ===> A ===> ?Rg10) ?aa10 class.cancel_comm_monoid_add 3. Transfer.Rel (?Rg10 ===> ?Rh10 ===> (=)) ?a10 (\<and>) 4. (\<lambda>plus minus zero times. comm_semiring_ow (Collect (Domainp A)) plus times \<and> cancel_comm_monoid_add_ow (Collect (Domainp A)) plus minus zero) = (\<lambda>plus minus zero times. ?a10 (?aa10 plus minus zero) (?ab10 plus times)) [PROOF STEP] apply transfer_step+ [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>plus minus zero times. comm_semiring_ow (Collect (Domainp A)) plus times \<and> cancel_comm_monoid_add_ow (Collect (Domainp A)) plus minus zero) = (\<lambda>plus minus zero times. cancel_comm_monoid_add_ow (Collect (Domainp A)) plus minus zero \<and> comm_semiring_ow (Collect (Domainp A)) plus times) [PROOF STEP] by auto
! The goal of this test case is to check that all basic reduction operators ! are printed correctly by the PIPS prettyprinter. All the loops in the program ! should be parallelized with an omp reduction pragma. program parallel REAL x, y INTEGER j, k LOGICAL m, o do i = 1, n s = s + 1 end do do i = 1, n s = s - 1 end do do i = 1, n s = s * 2 end do do i = 1, n m = m.AND.o end do do i = 1, n m = m.OR.o end do do i = 1, n m = m.EQV.o end do do i = 1, n m = m.NEQV.o end do do i = 1, n y = MAX(y,x) end do do i = 1, n y = MIN(y,x) end do ! do i = 1, n ! k = IAND(k,j) ! end do ! do i = 1, n ! k = IOR(k,j) ! end do ! do i = 1, n ! k = IEOR(k,j) ! end do end
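For reference, a minimal hand-written sketch of the kind of output the test expects on three representative loops (sum, max and logical .AND. reductions). This is not PIPS output; the subroutine name and the exact directive placement are assumptions, and only standard OpenMP reduction clauses are shown.

! A minimal sketch of the expected OpenMP reduction pragmas; the exact
! text emitted by the PIPS prettyprinter may differ.
      subroutine reductions_sketch(n, s, y, x, m, o)
      integer n, i
      real s, y, x
      logical m, o
!$omp parallel do reduction(+:s)
      do i = 1, n
         s = s + 1
      end do
!$omp parallel do reduction(max:y)
      do i = 1, n
         y = MAX(y,x)
      end do
!$omp parallel do reduction(.and.:m)
      do i = 1, n
         m = m.AND.o
      end do
      end subroutine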