[STATEMENT]
lemma gencode_singleton:
"gencode [t] = map_gterm (\<lambda>f. [Some f]) t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gencode [t] = map_gterm (\<lambda>f. [Some f]) t
[PROOF STEP]
using glabel_map_gterm_conv[unfolded comp_def, of "\<lambda>t. [t]" t]
[PROOF STATE]
proof (prove)
using this:
glabel (\<lambda>x. [gfun_at t x]) (gdomain t) = map_gterm (\<lambda>x. [Some x]) t
goal (1 subgoal):
1. gencode [t] = map_gterm (\<lambda>f. [Some f]) t
[PROOF STEP]
by (simp add: gunions_def gencode_def)
(* Load the lfind lemma-finding and QuickChick infrastructure together with the goal under test. *)
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
(* Derive the instances QuickChick needs to print, generate, and decide equality on naturals. *)
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
(* Synthesized conjecture, left Admitted; QuickChick then searches it for counterexamples. *)
Lemma conj17synthconj2 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural) (lv3 : natural), (@eq natural (plus lv0 (plus (mult lv1 lv2) lv1)) (plus lv3 (mult lv2 lv1))).
Admitted.
QuickChick conj17synthconj2.
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__43.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_on_inv__43 imports n_germanSymIndex_base
begin
section{*All lemmas on causal relation between inv__43 and some rule r*}
lemma n_RecvReqSVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqS N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvReqEVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Ident ''ExGntd'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Ident ''ExGntd'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__43:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__43 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__43:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntSVsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvGntEVsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__0Vsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__1Vsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__43:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__43 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__80.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__80 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__80 and some rule r*}
lemma n_PI_Remote_GetVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__80:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''NakcMsg'') ''Cmd'')) (Const NAKC_Nakc)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__80:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Nak_HomeVsinv__80:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__80:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__80:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__80:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__80:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__80:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__80:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''Cmd'')) (Const UNI_GetX))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''Cmd'')) (Const UNI_GetX))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_Nak_HomeVsinv__80:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_GetX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_GetX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__80:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__80:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__80:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Nak_ClearVsinv__80:
assumes a1: "(r=n_NI_Nak_Clear )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__80 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2, auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__80:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__0Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__80:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__1Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__0Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__80:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__80:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__80:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Put_HomeVsinv__80:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__80:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__80:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__80:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__80:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutXAcksDoneVsinv__80:
assumes a1: "r=n_NI_Local_PutXAcksDone " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__80:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutVsinv__80:
assumes a1: "r=n_NI_Local_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_GetVsinv__80:
assumes a1: "r=n_PI_Local_Get_Get " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_HomeVsinv__80:
assumes a1: "r=n_NI_Nak_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__80:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__80:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__80:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__80 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Ipc_AC
imports Finalise_AC "Lib.MonadicRewrite"
begin
context begin interpretation Arch . (*FIXME: arch_split*)
section\<open>Notifications\<close>
subsection\<open>@{term "pas_refined"}\<close>
crunch thread_bound_ntfns[wp]: do_machine_op "\<lambda>s. P (thread_bound_ntfns s)"
crunches deleted_irq_handler, send_signal
for state_vrefs[wp]: "\<lambda>s. P (state_vrefs (s :: det_ext state))"
(wp: crunch_wps hoare_unless_wp select_wp dxo_wp_weak simp: crunch_simps)
lemma cancel_ipc_receive_blocked_caps_of_state:
"\<lbrace>\<lambda>s. P (caps_of_state (s :: det_ext state)) \<and> st_tcb_at receive_blocked t s\<rbrace> cancel_ipc t \<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
apply (clarsimp simp: cancel_ipc_def)
apply (rule hoare_seq_ext[OF _ gts_sp])
apply (rule hoare_pre)
apply (wp gts_wp | wpc | simp)+
apply (rule hoare_pre_cont)+
apply (clarsimp simp: st_tcb_def2 receive_blocked_def)
apply (clarsimp split: thread_state.splits)
done
lemma send_signal_caps_of_state[wp]:
"\<lbrace>\<lambda>s :: det_ext state. P (caps_of_state s) \<rbrace> send_signal ntfnptr badge \<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
apply (clarsimp simp: send_signal_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp dxo_wp_weak cancel_ipc_receive_blocked_caps_of_state gts_wp static_imp_wp | wpc | simp add: update_waiting_ntfn_def)+
apply (clarsimp simp: fun_upd_def[symmetric] st_tcb_def2)
done
crunches deleted_irq_handler, send_signal
for arch_state[wp]: "\<lambda>s. P (arch_state (s :: det_ext state))"
(wp: crunch_wps hoare_unless_wp select_wp dxo_wp_weak simp: crunch_simps)
crunch mdb[wp]: blocked_cancel_ipc, update_waiting_ntfn "\<lambda>s. P (cdt (s :: det_ext state))"
  (wp: crunch_wps hoare_unless_wp select_wp dxo_wp_weak simp: crunch_simps)
lemma cancel_ipc_receive_blocked_mdb:
"\<lbrace>\<lambda>s. P (cdt (s :: det_ext state)) \<and> st_tcb_at receive_blocked t s\<rbrace> cancel_ipc t \<lbrace>\<lambda>rv s. P (cdt s)\<rbrace>"
apply (clarsimp simp: cancel_ipc_def)
apply (rule hoare_seq_ext[OF _ gts_sp])
apply (rule hoare_pre)
apply (wp gts_wp | wpc | simp)+
apply (rule hoare_pre_cont)+
apply (clarsimp simp: st_tcb_def2 receive_blocked_def)
apply (clarsimp split: thread_state.splits)
done
lemma send_signal_mdb[wp]:
"\<lbrace>\<lambda>s. P (cdt (s :: det_ext state))\<rbrace> send_signal ntfnptr badge \<lbrace>\<lambda>rv s. P (cdt s)\<rbrace>"
apply (clarsimp simp: send_signal_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp dxo_wp_weak gts_wp cancel_ipc_receive_blocked_mdb | wpc | simp)+
apply (clarsimp simp: st_tcb_def2)
done
crunches possible_switch_to
for tcb_domain_map_wellformed[wp]: "tcb_domain_map_wellformed aag"
and pas_refined[wp]: "pas_refined aag"
lemma update_waiting_ntfn_pas_refined:
"\<lbrace>pas_refined aag and ko_at (Notification ntfn) ntfnptr and K (ntfn_obj ntfn = WaitingNtfn queue)\<rbrace>
update_waiting_ntfn ntfnptr queue badge val
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: update_waiting_ntfn_def)
apply (wp set_thread_state_pas_refined set_simple_ko_pas_refined | simp)+
done
lemma cancel_ipc_receive_blocked_pas_refined:
"\<lbrace>pas_refined aag and st_tcb_at receive_blocked t\<rbrace> cancel_ipc t \<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (clarsimp simp: cancel_ipc_def)
apply (rule hoare_seq_ext[OF _ gts_sp])
apply (rule hoare_pre)
apply (wp gts_wp | wpc | simp)+
apply (clarsimp simp: st_tcb_def2 receive_blocked_def)
done
lemma send_signal_pas_refined:
"\<lbrace>\<lambda>s. pas_refined aag s\<rbrace> send_signal ntfnptr badge \<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: send_signal_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp set_simple_ko_pas_refined update_waiting_ntfn_pas_refined gts_wp set_thread_state_pas_refined
cancel_ipc_receive_blocked_pas_refined
| wpc
| simp)+
apply clarsimp
apply (fastforce simp: st_tcb_def2)
done
lemma receive_signal_pas_refined:
"\<lbrace>pas_refined aag and K (\<forall>ntfnptr \<in> obj_refs cap. (pasObjectAbs aag thread, Receive, pasObjectAbs aag ntfnptr) \<in> pasPolicy aag)\<rbrace>
receive_signal thread cap is_blocking
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: receive_signal_def)
apply (cases cap, simp_all)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
by (wp set_simple_ko_pas_refined set_thread_state_pas_refined
| wpc | simp add: do_nbrecv_failed_transfer_def)+
subsection\<open>integrity\<close>
subsubsection\<open>autarchy\<close>
text\<open>
For the case when the currently-running thread owns the receiver
(i.e. the receiver arrived last at the notification rendezvous, or the
sender owns the receiver).
\<close>
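(* Illustrative sketch only, not a lemma of this theory: the autarchy case is the
   one in which the acting subject owns the thread being modified, so integrity is
   preserved directly by lemmas of the schematic shape

     \<lbrace>integrity aag X st and K (is_subject aag thread)\<rbrace> f thread \<lbrace>\<lambda>_. integrity aag X st\<rbrace>

   cf. as_user_integrity_autarch and set_thread_state_integrity_autarch as used in
   receive_signal_integrity_autarch below. *)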
lemma st_tcb_at_tcb_states_of_state:
"(st_tcb_at stf p s) = (\<exists>st. tcb_states_of_state s p = Some st \<and> stf st)"
unfolding tcb_states_of_state_def st_tcb_def2 by auto
lemma st_tcb_at_tcb_states_of_state_eq:
"(st_tcb_at ((=) st) p s) = (tcb_states_of_state s p = Some st)"
unfolding tcb_states_of_state_def st_tcb_def2 by auto
lemma kheap_auth_ipc_buffer_same:
"kheap st thread = kheap s thread \<Longrightarrow> auth_ipc_buffers st thread = auth_ipc_buffers s thread"
unfolding auth_ipc_buffers_def get_tcb_def by simp
lemma tcb_ipc_buffer_not_device:
"\<lbrakk>kheap s thread = Some (TCB tcb);valid_objs s\<rbrakk>
\<Longrightarrow> \<not> cap_is_device (tcb_ipcframe tcb)"
apply (erule(1) valid_objsE)
apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def
split: cap.split_asm arch_cap.split_asm)
done
lemma tro_auth_ipc_buffer_idem:
"\<lbrakk> \<forall>x. integrity_obj aag activate subjects (pasObjectAbs aag x) (kheap st x) (kheap s x);
pasObjectAbs aag thread \<notin> subjects; valid_objs s \<rbrakk>
\<Longrightarrow> auth_ipc_buffers st thread = auth_ipc_buffers s thread"
apply (drule spec [where x = thread])
by (erule integrity_objE;
simp add: auth_ipc_buffers_def get_tcb_def;
fastforce cong: cap.case_cong arch_cap.case_cong if_cong
simp: case_bool_if
dest!: tcb_ipc_buffer_not_device split:arch_cap.splits cap.splits
split: if_splits)
lemma dmo_storeWord_respects_ipc:
"\<lbrace>integrity aag X st and st_tcb_at ((=) Structures_A.Running) thread and
K ((\<not> is_subject aag thread \<longrightarrow> st_tcb_at (receive_blocked_on ep) thread st \<and> auth_ipc_buffers st thread = ptr_range buf msg_align_bits) \<and>
ipc_buffer_has_auth aag thread (Some buf) \<and> p < 2 ^ (msg_align_bits - 2)) \<rbrace>
do_machine_op (storeWord (buf + of_nat p * of_nat word_size) v)
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (elim conjE)
apply (cases "is_subject aag thread")
apply (rule hoare_pre)
apply (rule dmo_storeWord_respects_Write)
apply clarsimp
apply (drule (1) ipc_buffer_has_auth_wordE)
apply simp
apply (simp add: msg_align_bits)
apply (erule mul_word_size_lt_msg_align_bits_ofnat)
apply simp
\<comment> \<open>non auth case\<close>
apply (rule hoare_pre)
apply (simp add: storeWord_def)
apply (wp dmo_wp)
apply clarsimp
apply (simp add: integrity_def split del: if_split)
apply (clarsimp split del: if_split)
apply (case_tac "x \<in> ptr_range (buf + of_nat p * of_nat word_size) 2")
apply (clarsimp simp add: st_tcb_at_tcb_states_of_state split del: if_split)
apply (rule trm_ipc [where p' = thread])
apply simp
apply assumption
apply (clarsimp simp: ipc_buffer_has_auth_def)
apply (erule (1) set_mp [OF ptr_range_subset, rotated -1])
apply simp
apply (simp add: msg_align_bits)
apply (erule mul_word_size_lt_msg_align_bits_ofnat)
apply simp
\<comment> \<open>otherwise\<close>
apply (auto simp: is_aligned_mask [symmetric] intro!: trm_lrefl ptr_range_memI ptr_range_add_memI)
done
lemma store_word_offs_respects:
"\<lbrace>integrity aag X st and st_tcb_at ((=) Structures_A.Running) thread and
K ((\<not> is_subject aag thread \<longrightarrow> st_tcb_at (receive_blocked_on ep) thread st \<and> auth_ipc_buffers st thread = ptr_range buf msg_align_bits) \<and>
ipc_buffer_has_auth aag thread (Some buf) \<and> p < 2 ^ (msg_align_bits - 2)) \<rbrace>
store_word_offs buf p v
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: store_word_offs_def)
apply (rule hoare_pre)
apply (wp dmo_storeWord_respects_ipc [where thread = thread])
apply fastforce
done
lemma ipc_buffer_has_auth_None [simp]:
"ipc_buffer_has_auth aag receiver None"
unfolding ipc_buffer_has_auth_def by simp
(* FIXME: MOVE *)
lemma cap_auth_caps_of_state:
"\<lbrakk> caps_of_state s p = Some cap; pas_refined aag s\<rbrakk>
\<Longrightarrow> aag_cap_auth aag (pasObjectAbs aag (fst p)) cap"
unfolding aag_cap_auth_def
apply (intro conjI)
apply clarsimp
apply (drule (2) sta_caps)
apply (drule auth_graph_map_memI [where x = "pasObjectAbs aag (fst p)", OF _ sym refl])
apply (rule refl)
apply (fastforce simp: pas_refined_def)
apply clarsimp
apply (drule (2) sta_untyped [THEN pas_refined_mem] )
apply simp
apply (drule (1) clas_caps_of_state)
apply simp
apply (drule (1) cli_caps_of_state)
apply simp
done
lemma lookup_ipc_buffer_has_auth [wp]:
"\<lbrace>pas_refined aag and valid_objs\<rbrace>
lookup_ipc_buffer True receiver
\<lbrace>\<lambda>rv s. ipc_buffer_has_auth aag receiver rv\<rbrace>"
apply (rule hoare_pre)
apply (simp add: lookup_ipc_buffer_def)
apply (wp get_cap_wp thread_get_wp'
| wpc)+
apply (clarsimp simp: cte_wp_at_caps_of_state ipc_buffer_has_auth_def get_tcb_ko_at [symmetric])
apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"])
apply (simp add: dom_tcb_cap_cases)
apply (frule (1) caps_of_state_valid_cap)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_andI1)
apply (drule (1) valid_tcb_objs)
apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def
split: if_splits)
apply (rule order_trans [OF _ pbfs_atleast_pageBits])
apply (simp add: msg_align_bits pageBits_def)
apply simp
apply (drule (1) cap_auth_caps_of_state)
apply (clarsimp simp: aag_cap_auth_def cap_auth_conferred_def vspace_cap_rights_to_auth_def
vm_read_write_def is_page_cap_def split: if_split_asm)
apply (drule bspec)
apply (erule (3) ipcframe_subset_page)
apply simp
done
lemma set_notification_respects:
"\<lbrace>integrity aag X st and K (aag_has_auth_to aag auth epptr \<and> auth \<in> {Receive, Notify, Reset})\<rbrace>
set_notification epptr ntfn'
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: set_simple_ko_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp: obj_at_def partial_inv_def a_type_def)
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def tro_ntfn)
done
lemma receive_signal_integrity_autarch:
"\<lbrace>integrity aag X st and pas_refined aag and valid_objs
and K ((\<forall>ntfnptr \<in> obj_refs cap. aag_has_auth_to aag Receive ntfnptr)
\<and> is_subject aag thread)\<rbrace>
receive_signal thread cap is_blocking
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: receive_signal_def)
apply (cases cap, simp_all)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp set_notification_respects[where auth=Receive] set_thread_state_integrity_autarch as_user_integrity_autarch
| wpc
| simp add: do_nbrecv_failed_transfer_def)+
done
subsubsection\<open>Non-autarchy: the sender is running\<close>
lemma length_msg_registers:
"length msg_registers = 4"
unfolding msg_registers_def
by (simp add: msgRegisters_def upto_enum_def fromEnum_def enum_register)
lemma send_upd_ctxintegrity:
"\<lbrakk> direct_send {pasSubject aag} aag ep tcb
\<or> indirect_send {pasSubject aag} aag (the (tcb_bound_notification tcb)) ep tcb;
integrity aag X st s; st_tcb_at ((=) Structures_A.thread_state.Running) thread s;
get_tcb thread st = Some tcb; get_tcb thread s = Some tcb'\<rbrakk>
\<Longrightarrow> integrity aag X st (s\<lparr>kheap :=
kheap s(thread \<mapsto> TCB (tcb'\<lparr>tcb_arch :=
arch_tcb_context_set c' (tcb_arch tcb')
\<rparr>))
\<rparr>)"
apply (clarsimp simp: integrity_def tcb_states_of_state_preserved st_tcb_def2)
apply (drule get_tcb_SomeD)+
apply (drule spec[where x=thread], simp)
apply (cases "is_subject aag thread")
apply (rule tro_lrefl, solves\<open>simp\<close>)
(* slow 5s *)
by (erule integrity_objE;
      (* eliminate all TCB-unrelated cases and simplify the others *)
clarsimp;
(* deal with the orefl case *)
( solves\<open>simp add:direct_send_def indirect_send_def\<close>
        (* deal with the other cases by applying one of the reply, call or send rules and then
           the generic rule *)
| rule tro_trans_spec,
(rule tro_tcb_reply'[OF refl refl refl] tro_tcb_call[OF refl refl refl]
tro_tcb_send[OF refl refl refl];
blast),
rule tro_trans_spec,
(rule tro_tcb_generic'[OF refl refl refl]; simp),
rule tro_orefl, simp, rule tcb.equality; solves\<open>simp add:arch_tcb_context_set_def\<close>))
lemma set_mrs_respects_in_signalling':
"\<lbrace>integrity aag X st and st_tcb_at ((=) Structures_A.Running) thread and
K ((\<not> is_subject aag thread \<longrightarrow> st_tcb_at (receive_blocked_on ep) thread st
\<and> case_option True (\<lambda>buf'. auth_ipc_buffers st thread = ptr_range buf' msg_align_bits) buf)
\<and> aag_has_auth_to aag Notify ep \<and> ipc_buffer_has_auth aag thread buf) \<rbrace>
set_mrs thread buf msgs
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: set_mrs_def split_def set_object_def get_object_def)
apply (wp gets_the_wp get_wp put_wp
| wpc
| simp split del: if_split
add: zipWithM_x_mapM_x split_def store_word_offs_def fun_upd_def[symmetric])+
apply (rule hoare_post_imp [where Q = "\<lambda>rv. st_tcb_at ((=) Structures_A.Running) thread and integrity aag X st"])
apply simp
apply (wp mapM_x_wp' dmo_storeWord_respects_ipc [where thread = thread and ep = ep])
apply (fastforce simp add: set_zip nth_append simp: msg_align_bits msg_max_length_def
split: if_split_asm)
apply wp+
apply (rule impI)
apply (subgoal_tac "\<forall>c'. integrity aag X st
(s\<lparr>kheap := kheap s(thread \<mapsto>
TCB ((the (get_tcb thread s))\<lparr>tcb_arch := arch_tcb_set_registers c' (tcb_arch (the (get_tcb thread s))) \<rparr>))\<rparr>)")
apply (clarsimp simp: fun_upd_def st_tcb_at_nostate_upd [unfolded fun_upd_def])
apply (rule allI)
apply clarsimp
apply (cases "is_subject aag thread")
apply (erule (1) integrity_update_autarch)
apply (clarsimp simp: st_tcb_def2 arch_tcb_set_registers_def)
apply (rule send_upd_ctxintegrity[OF disjI1], auto simp: st_tcb_def2 direct_send_def)
done
lemma as_user_set_register_respects:
"\<lbrace>integrity aag X st and st_tcb_at ((=) Structures_A.Running) thread and
K ((\<not> is_subject aag thread \<longrightarrow> st_tcb_at (receive_blocked_on ep) thread st) \<and> (aag_has_auth_to aag SyncSend ep \<or> aag_has_auth_to aag Notify ep)) \<rbrace>
as_user thread (set_register r v)
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: as_user_def split_def set_object_def get_object_def)
apply wp
apply (clarsimp simp: in_monad setRegister_def)
apply (cases "is_subject aag thread")
apply (erule (1) integrity_update_autarch [unfolded fun_upd_def])
apply (clarsimp simp: st_tcb_def2)
apply (rule send_upd_ctxintegrity [OF disjI1, unfolded fun_upd_def])
apply (auto simp: direct_send_def st_tcb_def2)
done
lemma lookup_ipc_buffer_ptr_range:
"\<lbrace>valid_objs and integrity aag X st\<rbrace>
lookup_ipc_buffer True thread
\<lbrace>\<lambda>rv s. \<not> is_subject aag thread \<longrightarrow> (case rv of None \<Rightarrow> True | Some buf' \<Rightarrow> auth_ipc_buffers st thread = ptr_range buf' msg_align_bits) \<rbrace>"
unfolding lookup_ipc_buffer_def
apply (rule hoare_pre)
apply (wp get_cap_wp thread_get_wp' | wpc)+
apply (clarsimp simp: cte_wp_at_caps_of_state ipc_buffer_has_auth_def get_tcb_ko_at [symmetric])
apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"])
apply (simp add: dom_tcb_cap_cases)
apply (clarsimp simp: auth_ipc_buffers_def get_tcb_ko_at [symmetric] integrity_def)
apply (drule spec [where x = thread])+
apply (drule get_tcb_SomeD)+
apply (erule(1) valid_objsE)
apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def case_bool_if
split: if_split_asm)
apply (erule integrity_objE, simp_all add: get_tcb_def vm_read_write_def)
apply auto
done
lemma set_thread_state_respects_in_signalling:
"\<lbrace>integrity aag X st
and (\<lambda>s. \<not> is_subject aag thread \<longrightarrow> st_tcb_at (receive_blocked_on ntfnptr) thread s)
and K (aag_has_auth_to aag Notify ntfnptr)\<rbrace>
set_thread_state thread Structures_A.thread_state.Running
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply wp
apply (clarsimp)
apply (cases "is_subject aag thread")
apply (erule(1) integrity_update_autarch [unfolded fun_upd_def])
apply (erule integrity_trans)
apply (drule get_tcb_SomeD)
apply (clarsimp simp: integrity_def st_tcb_def2)
apply (clarsimp dest!: get_tcb_SomeD)
apply (rule tro_tcb_send [OF refl refl])
apply (rule tcb.equality;simp; rule arch_tcb_context_set_eq[symmetric])
apply (auto simp: indirect_send_def direct_send_def)
done
lemma set_notification_obj_at:
"\<lbrace>obj_at P ptr and K (ptr \<noteq> ntfnptr)\<rbrace>
set_notification ntfnptr queue
\<lbrace>\<lambda>rv. obj_at P ptr\<rbrace>"
apply (simp add: set_simple_ko_def set_object_def)
apply (wp get_object_wp)
apply (auto simp: obj_at_def)
done
lemma set_ntfn_valid_objs_at:
"\<lbrace>valid_objs and (\<lambda>s. ntfn_at p s \<longrightarrow> valid_ntfn ntfn s)\<rbrace> set_notification p ntfn \<lbrace>\<lambda>rv. valid_objs\<rbrace>"
unfolding set_simple_ko_def
apply (rule hoare_pre)
apply (wp set_object_valid_objs get_object_wp)
apply (clarsimp simp: valid_obj_def obj_at_def is_ntfn partial_inv_def
split: Structures_A.kernel_object.splits)
done
lemma drop_Suc0_iff:
"xs \<noteq> [] \<Longrightarrow> (drop (Suc 0) xs = ys) = (\<exists>x. xs = x # ys)"
by (auto simp: neq_Nil_conv)
lemma receive_blocked_on_def3:
"receive_blocked_on ref ts =
((\<exists>pl. ts = Structures_A.BlockedOnReceive ref pl)
\<or> ts = Structures_A.BlockedOnNotification ref)"
by (cases ts, auto)
lemma integrity_receive_blocked_chain:
"\<lbrakk> st_tcb_at (receive_blocked_on ep) p s; integrity aag X st s; \<not> is_subject aag p \<rbrakk> \<Longrightarrow> st_tcb_at (receive_blocked_on ep) p st"
apply (clarsimp simp: integrity_def st_tcb_at_tcb_states_of_state)
apply (drule (1) tsos_tro [where p = p] )
apply (fastforce simp: tcb_states_of_state_def)
apply simp
apply simp
done
crunch integrity[wp]: possible_switch_to "integrity aag X st"
(ignore: tcb_sched_action)
abbreviation
"integrity_once_ts_upd t ts aag X st s
== integrity aag X st (s \<lparr> kheap := (kheap s) ( t := Some (TCB ((the (get_tcb t s)) \<lparr>tcb_state := ts\<rparr>)))\<rparr>)"
lemma set_scheduler_action_integrity_once_ts_upd:
"\<lbrace>integrity_once_ts_upd t ts aag X st\<rbrace>
set_scheduler_action sa \<lbrace>\<lambda>_. integrity_once_ts_upd t ts aag X st\<rbrace>"
apply (simp add: set_scheduler_action_def, wp)
apply clarsimp
apply (erule rsubst[where P="\<lambda>x. x"])
apply (rule trans, rule_tac f="\<lambda>x. sa" in eintegrity_sa_update[symmetric])
apply (rule arg_cong[where f="integrity aag X st"])
apply (simp add: get_tcb_def)
done
crunch integrity_once_ts_upd: set_thread_state_ext "integrity_once_ts_upd t ts aag X st"
lemma set_thread_state_integrity_once_ts_upd:
"\<lbrace>integrity_once_ts_upd t ts aag X st\<rbrace>
set_thread_state t ts' \<lbrace>\<lambda>_. integrity_once_ts_upd t ts aag X st\<rbrace>"
apply (simp add: set_thread_state_def)
apply (wpsimp wp: set_object_wp set_thread_state_ext_integrity_once_ts_upd)
apply (clarsimp simp: fun_upd_def dest!: get_tcb_SomeD)
apply (simp add: get_tcb_def cong: if_cong)
done
lemma get_tcb_recv_blocked_implies_receive:
"\<lbrakk>pas_refined aag s; get_tcb t s = Some tcb; ep_recv_blocked ep (tcb_state tcb) \<rbrakk>
\<Longrightarrow> (pasObjectAbs aag t, Receive, pasObjectAbs aag ep) \<in> pasPolicy aag"
apply (erule pas_refined_mem[rotated])
apply (rule sta_ts)
apply (simp add: thread_states_def tcb_states_of_state_def)
apply (case_tac "tcb_state tcb", simp_all)
done
lemma cancel_ipc_receive_blocked_respects:
"\<lbrace>integrity aag X st and pas_refined aag and st_tcb_at (receive_blocked) t and
(sym_refs o state_refs_of) and
bound_tcb_at (\<lambda>ntfn. ntfn = Some ntfnptr) t and
K ((pasObjectAbs aag t, Receive, pasObjectAbs aag ntfnptr) \<in> pasPolicy aag \<and>
(pasSubject aag, Notify, pasObjectAbs aag ntfnptr) \<in> pasPolicy aag)\<rbrace>
cancel_ipc t \<lbrace>\<lambda>_. integrity_once_ts_upd t Running aag X st\<rbrace>"
apply (clarsimp simp: cancel_ipc_def bind_assoc)
apply (rule hoare_seq_ext[OF _ gts_sp])
apply (rule hoare_name_pre_state)
apply (subgoal_tac "case state of BlockedOnReceive x y \<Rightarrow> True | _ \<Rightarrow> False")
apply (simp add: blocked_cancel_ipc_def bind_assoc set_simple_ko_def set_object_def
get_ep_queue_def get_blocking_object_def
split: thread_state.splits)
apply (rule hoare_pre)
apply (wp set_thread_state_integrity_once_ts_upd get_object_wp get_simple_ko_wp
| wpc)+
apply (clarsimp simp: st_tcb_at_def2 obj_at_def)
apply (rename_tac ep payload s tcb ntfn)
apply (drule_tac t="tcb_state tcb" in sym)
apply (subgoal_tac "st_tcb_at ((=) (tcb_state tcb)) t s")
apply (drule(1) sym_refs_st_tcb_atD)
apply (clarsimp simp: obj_at_def ep_q_refs_of_def fun_upd_def get_tcb_def
split: endpoint.splits cong: if_cong)
apply (intro impI conjI, simp_all)[1]
apply (erule integrity_trans)
apply (simp add: integrity_def)
apply (intro impI conjI allI)
apply clarsimp
apply (rule tro_ep_unblock; simp?)
apply (erule get_tcb_recv_blocked_implies_receive, erule get_tcb_rev; solves\<open>simp\<close>)
apply (rule_tac ep=ep in tro_tcb_send[OF refl refl];
fastforce intro!: tcb.equality arch_tcb_context_set_eq[symmetric]
simp: indirect_send_def pred_tcb_at_def obj_at_def)
apply (fastforce simp: indirect_send_def pred_tcb_at_def obj_at_def)
apply (fastforce simp: pred_tcb_at_def obj_at_def receive_blocked_def)
done
lemma set_thread_state_integrity':
"\<lbrace>integrity_once_ts_upd t ts aag X st\<rbrace> set_thread_state t ts \<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (simp add: set_thread_state_def)
by (wpsimp wp: set_object_wp)
lemma as_user_set_register_respects_indirect:
"\<lbrace>integrity aag X st and st_tcb_at ((=) Structures_A.Running) thread and
K ((\<not> is_subject aag thread \<longrightarrow> st_tcb_at receive_blocked thread st
\<and> bound_tcb_at ((=) (Some ntfnptr)) thread st)
\<and> (aag_has_auth_to aag Notify ntfnptr)) \<rbrace>
as_user thread (set_register r v)
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: as_user_def split_def set_object_def get_object_def)
apply wp
apply (clarsimp simp: in_monad setRegister_def)
apply (cases "is_subject aag thread")
apply (erule (1) integrity_update_autarch [unfolded fun_upd_def])
apply (clarsimp simp: st_tcb_def2 receive_blocked_def)
apply (simp split: thread_state.split_asm)
apply (rule send_upd_ctxintegrity [OF disjI2, unfolded fun_upd_def],
auto simp: st_tcb_def2 indirect_send_def pred_tcb_def2 dest: sym)
done
lemma integrity_receive_blocked_chain':
"\<lbrakk> st_tcb_at receive_blocked p s; integrity aag X st s; \<not> is_subject aag p \<rbrakk> \<Longrightarrow> st_tcb_at receive_blocked p st"
apply (clarsimp simp: integrity_def st_tcb_at_tcb_states_of_state receive_blocked_def)
apply (simp split: thread_state.split_asm)
apply (rename_tac word pl)
apply (drule_tac ep=word in tsos_tro [where p = p], simp+ )
done
lemma tba_Some:
"thread_bound_ntfns s t = Some a \<Longrightarrow> bound_tcb_at ((=) (Some a)) t s"
by (clarsimp simp: thread_bound_ntfns_def pred_tcb_at_def obj_at_def get_tcb_def
split: option.splits kernel_object.splits)
lemma tsos_tro':
"\<lbrakk>\<forall>x. integrity_obj aag activate subjects (pasObjectAbs aag x) (kheap s x) (kheap s' x);
thread_bound_ntfns s' p = Some a; pasObjectAbs aag p \<notin> subjects \<rbrakk>
\<Longrightarrow> thread_bound_ntfns s p = Some a"
apply (drule_tac x=p in spec)
apply (erule integrity_objE;
simp?;
fastforce simp: thread_bound_ntfns_def get_tcb_def
tcb_bound_notification_reset_integrity_def)
done
lemma integrity_receive_blocked_chain_bound:
"\<lbrakk>bound_tcb_at ((=) (Some ntfnptr)) p s; integrity aag X st s; \<not> is_subject aag p\<rbrakk>
\<Longrightarrow> bound_tcb_at ((=) (Some ntfnptr)) p st"
apply (clarsimp simp: integrity_def)
apply (drule bound_tcb_at_thread_bound_ntfns)
apply (drule tsos_tro' [where p = p], simp+ )
apply (clarsimp simp:tba_Some)
done
lemma send_signal_respects:
"\<lbrace>integrity aag X st and pas_refined aag
and valid_objs
and sym_refs \<circ> state_refs_of
and K (aag_has_auth_to aag Notify ntfnptr)\<rbrace>
send_signal ntfnptr badge
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: send_signal_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_name_pre_state)
apply (case_tac "ntfn_obj ntfn = IdleNtfn \<and> ntfn_bound_tcb ntfn \<noteq> None")
\<comment> \<open>ntfn-binding case\<close>
apply (rule hoare_pre)
apply (wp set_notification_respects[where auth=Notify]
as_user_set_register_respects_indirect[where ntfnptr=ntfnptr]
set_thread_state_integrity' sts_st_tcb_at' static_imp_wp
cancel_ipc_receive_blocked_respects[where ntfnptr=ntfnptr]
gts_wp
| wpc | simp)+
apply (clarsimp, rule conjI, clarsimp simp: st_tcb_def2)
apply (clarsimp simp: receive_blocked_def)
apply (simp split: thread_state.split_asm)
apply (clarsimp simp: obj_at_def)
apply (drule (3) ntfn_bound_tcb_at[where ntfnptr=ntfnptr and P="\<lambda>ntfn. ntfn = Some ntfnptr"], simp+)[1]
apply (rule conjI)
apply (drule_tac x=ntfnptr and t=y in bound_tcb_at_implies_receive)
apply (clarsimp simp: pred_tcb_at_def obj_at_def, simp)
apply clarsimp
apply (rule conjI)
apply (rule_tac s=sa in integrity_receive_blocked_chain')
apply (clarsimp simp add: pred_tcb_at_def obj_at_def receive_blocked_def)
apply (fastforce split: thread_state.split)
apply simp+
apply (rule_tac s=sa in integrity_receive_blocked_chain_bound)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
apply simp+
apply (rule hoare_pre)
apply clarsimp
apply (wpc, clarsimp)
apply (wp set_notification_respects[where auth=Notify]
sts_st_tcb_at' as_user_set_register_respects
set_thread_state_pas_refined set_simple_ko_pas_refined
set_thread_state_respects_in_signalling [where ntfnptr = ntfnptr]
set_ntfn_valid_objs_at hoare_vcg_disj_lift static_imp_wp
| wpc
| simp add: update_waiting_ntfn_def)+
apply clarsimp
apply (subgoal_tac "st_tcb_at (receive_blocked_on ntfnptr) (hd x) sa")
prefer 2
apply (rule ntfn_queued_st_tcb_at', assumption)
apply (fastforce simp: obj_at_def valid_obj_def valid_ntfn_def)
apply assumption+
apply simp
apply simp
apply (intro impI conjI)
\<comment> \<open>st_tcb_at receive_blocked st\<close>
apply (erule (2) integrity_receive_blocked_chain)
apply clarsimp
done
section\<open>Sync IPC\<close>
text\<open>
When transferring caps, i.e. when the grant argument is true on the
IPC operations, the currently-running thread owns the receiver. Either
it is the receiver (and ?thesis by well-formedness) or it is the
sender, and that can send arbitrary caps, hence ?thesis by sbta_ipc
etc.
\<close>
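(* Rough sketch of the case split described above (illustration only, not a lemma
   of this theory): for a transferred cap the obligation is, roughly, an authority
   edge in pasPolicy aag from the receiver's label to the labels of the cap's
   references.
   - If the subject owns the receiver, the edge follows from well-formedness of
     the policy (the subject's own authority over what it receives).
   - If the subject owns the sender, the edge is supplied by the sbta_ipc-style
     state-to-policy rules, since a sender may transfer any cap whose authority
     it already holds.
   See transfer_caps_loop_pas_refined and send_ipc_pas_refined below for the
   actual statements proved here. *)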
subsection\<open>auxiliary\<close>
lemma cap_master_cap_masked_as_full:
"cap_master_cap (masked_as_full a a) = cap_master_cap a "
apply(clarsimp simp: cap_master_cap_def split: cap.splits simp: masked_as_full_def)
done
lemma cap_badge_masked_as_full:
"(cap_badge (masked_as_full a a), cap_badge a) \<in> capBadge_ordering False"
apply(case_tac a, simp_all add: masked_as_full_def)
done
lemma masked_as_full_double:
"masked_as_full (masked_as_full ab ab) cap' = masked_as_full ab ab"
apply(case_tac ab, simp_all add: masked_as_full_def)
done
lemma transfer_caps_loop_pres_dest_aux:
assumes x: "\<And>cap src dest.
\<lbrace>\<lambda>s. P s \<and> dest \<in> slots' \<and> src \<in> snd ` caps'
\<and> (valid_objs s \<and> real_cte_at dest s \<and> s \<turnstile> cap \<and> tcb_cap_valid cap dest s
\<and> real_cte_at src s
\<and> cte_wp_at (is_derived (cdt s) src cap) src s \<and> cap \<noteq> cap.NullCap) \<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>rv. P\<rbrace>"
assumes eb: "\<And>b n'. n' \<le> N \<Longrightarrow> \<lbrace>P\<rbrace> set_extra_badge buffer b n' \<lbrace>\<lambda>_. P\<rbrace>"
shows "n + length caps \<le> N \<Longrightarrow>
\<lbrace>\<lambda>s. P s \<and> set slots \<subseteq> slots' \<and> set caps \<subseteq> caps' \<and>
(valid_objs s \<and> valid_mdb s \<and> distinct slots \<and>
(\<forall>x \<in> set slots. real_cte_at x s) \<and>
(\<forall>x \<in> set caps. s \<turnstile> fst x \<and>
cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp \<noteq> fst x \<longrightarrow> cp = masked_as_full (fst x) (fst x)) (snd x) s
\<and> real_cte_at (snd x) s))\<rbrace>
transfer_caps_loop ep buffer n caps slots mi
\<lbrace>\<lambda>rv. P\<rbrace>" (is "?L \<Longrightarrow> ?P n caps slots mi")
proof (induct caps arbitrary: slots n mi)
case Nil
thus ?case by (simp, wp, simp)
next
case (Cons m ms)
hence nN: "n \<le> N" by simp
from Cons have "\<And>slots mi. ?P (n + 1) ms slots mi" by clarsimp
thus ?case
apply (cases m)
apply (clarsimp simp add: Let_def split_def whenE_def
cong: if_cong list.case_cong split del: if_split)
apply (rule hoare_pre)
apply (wp eb [OF nN] hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift
| assumption | simp split del: if_split)+
apply (rule cap_insert_assume_null)
apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at)+
              (* cannot blindly use derive_cap_is_derived_foo here; we need to first hoist
                 out of the postcondition the conjunct that the return value is derived,
                 solve that using derive_cap_is_derived, and then solve the rest
                 using derive_cap_is_derived_foo *)
apply (rule_tac Q'="\<lambda>r s. S r s \<and> Q r s" for S Q in hoare_post_imp_R)
apply (rule hoare_vcg_conj_lift_R)
apply (rule derive_cap_is_derived)
prefer 2
apply clarsimp
apply assumption
apply(wp derive_cap_is_derived_foo)+
apply (simp only: tl_drop_1[symmetric])
apply (clarsimp simp: cte_wp_at_caps_of_state
ex_cte_cap_to_cnode_always_appropriate_strg
real_cte_tcb_valid caps_of_state_valid
split del: if_split)
apply (clarsimp simp: remove_rights_def caps_of_state_valid
neq_Nil_conv cte_wp_at_caps_of_state
imp_conjR[symmetric] cap_master_cap_masked_as_full
cap_badge_masked_as_full
split del: if_splits)
apply(intro conjI)
apply clarsimp
apply (case_tac "cap = a",clarsimp simp: remove_rights_def)
apply (clarsimp simp:masked_as_full_def is_cap_simps)
apply (clarsimp simp: cap_master_cap_simps remove_rights_def split:if_splits)
apply clarsimp
apply (intro conjI)
apply (clarsimp split:if_splits elim!: image_eqI[rotated])
apply (clarsimp split:if_splits simp: remove_rights_def)
apply (rule ballI)
apply (drule(1) bspec)
apply clarsimp
apply (intro conjI)
apply clarsimp
apply clarsimp
apply (case_tac "capa = ab",clarsimp simp: masked_as_full_def is_cap_simps split: if_splits)
apply (clarsimp simp: masked_as_full_double)
done
qed
(* FIXME: move *)
lemma transfer_caps_loop_pres_dest:
assumes x: "\<And>cap src dest.
\<lbrace>\<lambda>s. P s \<and> dest \<in> set slots \<and> src \<in> snd ` set caps
\<and> (valid_objs s \<and> real_cte_at dest s \<and> s \<turnstile> cap \<and> tcb_cap_valid cap dest s
\<and> real_cte_at src s
\<and> cte_wp_at (is_derived (cdt s) src cap) src s \<and> cap \<noteq> cap.NullCap) \<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>rv. P\<rbrace>"
assumes eb: "\<And>b n'. n' \<le> n + length caps \<Longrightarrow> \<lbrace>P\<rbrace> set_extra_badge buffer b n' \<lbrace>\<lambda>_. P\<rbrace>"
shows "\<lbrace>\<lambda>s. P s \<and> (valid_objs s \<and> valid_mdb s \<and> distinct slots \<and>
(\<forall>x \<in> set slots. real_cte_at x s) \<and>
(\<forall>x \<in> set caps. s \<turnstile> fst x \<and> cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp \<noteq> fst x \<longrightarrow> cp = masked_as_full (fst x) (fst x)) (snd x) s
\<and> real_cte_at (snd x) s))\<rbrace>
transfer_caps_loop ep buffer n caps slots mi
\<lbrace>\<lambda>rv. P\<rbrace>"
apply (rule hoare_pre)
apply (rule transfer_caps_loop_pres_dest_aux [OF x eb])
apply assumption
apply simp
apply simp
done
subsection\<open>pas_refined\<close>
lemma lookup_slot_for_thread_authorised:
"\<lbrace>pas_refined aag and K (is_subject aag thread)\<rbrace>
lookup_slot_for_thread thread cref
\<lbrace>\<lambda>rv s. is_subject aag (fst (fst rv))\<rbrace>,-"
unfolding lookup_slot_for_thread_def
apply wp
apply (clarsimp simp: owns_thread_owns_cspace)
done
lemma cnode_cap_all_auth_owns:
"(\<exists>s. is_cnode_cap cap \<and> (\<forall>x\<in>obj_refs cap.
\<forall>auth\<in>cap_auth_conferred cap. aag_has_auth_to aag auth x)
\<and> pas_refined aag s)
\<longrightarrow> (\<forall>x\<in>obj_refs cap. is_subject aag x)"
apply (clarsimp simp: is_cap_simps)
apply (clarsimp simp: cap_auth_conferred_def pas_refined_all_auth_is_owns)
done
lemma get_receive_slots_authorised:
"\<lbrace>pas_refined aag and K (\<forall>rbuf. recv_buf = Some rbuf \<longrightarrow> is_subject aag receiver)\<rbrace>
get_receive_slots receiver recv_buf
\<lbrace>\<lambda>rv s. \<forall>slot \<in> set rv. is_subject aag (fst slot)\<rbrace>"
apply (rule hoare_gen_asm)
apply (cases recv_buf)
apply (simp, wp, simp)
apply clarsimp
apply (wp get_cap_auth_wp[where aag=aag] lookup_slot_for_thread_authorised
| rule hoare_drop_imps
         | simp add: lookup_cap_def split_def)+
apply (strengthen cnode_cap_all_auth_owns, simp add: aag_cap_auth_def)
apply (wp hoare_vcg_all_lift_R hoare_drop_imps)+
apply clarsimp
apply (fastforce simp: is_cap_simps)
done
crunch pas_refined[wp]: set_extra_badge "pas_refined aag"
lemma remove_rights_clas [simp]:
"cap_links_asid_slot aag p (remove_rights R cap) = cap_links_asid_slot aag p cap"
unfolding cap_links_asid_slot_def remove_rights_def cap_rights_update_def acap_rights_update_def
by (clarsimp split: cap.splits arch_cap.splits bool.splits)
lemma remove_rights_cap_auth_conferred_subset:
"x \<in> cap_auth_conferred (remove_rights R cap) \<Longrightarrow> x \<in> cap_auth_conferred cap"
unfolding remove_rights_def cap_rights_update_def
apply (clarsimp split: if_split_asm cap.splits arch_cap.splits bool.splits
simp: cap_auth_conferred_def vspace_cap_rights_to_auth_def acap_rights_update_def
validate_vm_rights_def vm_read_only_def vm_kernel_only_def)
apply (erule set_mp [OF cap_rights_to_auth_mono, rotated], clarsimp)+
apply (auto simp: is_page_cap_def cap_rights_to_auth_def reply_cap_rights_to_auth_def split:if_splits)
done
lemma remove_rights_cli [simp]:
"cap_links_irq aag l (remove_rights R cap) = cap_links_irq aag l cap"
unfolding remove_rights_def cap_rights_update_def
by (clarsimp split: cap.splits arch_cap.splits bool.splits simp: cap_links_irq_def)
lemma remove_rights_untyped_range [simp]:
"untyped_range (remove_rights R c) = untyped_range c"
unfolding remove_rights_def cap_rights_update_def
  by (clarsimp split: cap.splits arch_cap.splits bool.splits)
lemma obj_refs_remove_rights [simp]:
"obj_refs (remove_rights rs cap) = obj_refs cap"
unfolding remove_rights_def
by (cases cap, simp_all add: cap_rights_update_def acap_rights_update_def split: arch_cap.splits bool.splits)
lemma remove_rights_cur_auth:
"pas_cap_cur_auth aag cap \<Longrightarrow> pas_cap_cur_auth aag (remove_rights R cap)"
unfolding aag_cap_auth_def
by (clarsimp dest!: remove_rights_cap_auth_conferred_subset)
(* FIXME MOVE *)
lemmas hoare_gen_asmE2 = hoare_gen_asmE[where P'=\<top>,simplified pred_and_true_var]
lemma derive_cap_is_transferable:
"\<lbrace>K(is_transferable_cap cap) \<rbrace> derive_cap slot cap \<lbrace>\<lambda>r s. is_transferable_cap r\<rbrace>, -"
apply (rule hoare_gen_asmE2)
by (erule is_transferable_capE; wpsimp simp: derive_cap_def)
lemma auth_derived_refl[simp]:
" auth_derived cap cap"
by (simp add:auth_derived_def)
lemma derive_cap_auth_derived:
"\<lbrace>\<top>\<rbrace> derive_cap slot cap \<lbrace>\<lambda>rv _. rv \<noteq> NullCap \<longrightarrow> auth_derived rv cap \<rbrace>,-"
apply (cases cap ; (wpsimp simp:derive_cap_def)?)
apply (case_tac x12 ;
simp add:derive_cap_def arch_derive_cap_def;
wpc?;
wp?;
simp add:auth_derived_def cap_auth_conferred_def)
done
(* FIXME MOVE *)
lemma auth_derived_pas_cur_auth:
"auth_derived cap cap' \<Longrightarrow> pas_cap_cur_auth aag cap' \<Longrightarrow> pas_cap_cur_auth aag cap"
by (force simp:aag_cap_auth_def auth_derived_def cap_links_asid_slot_def cap_links_irq_def)
lemma derive_cap_is_derived_foo':
"\<lbrace>\<lambda>s. \<forall>cap'. (cte_wp_at (\<lambda>capa.
cap_master_cap capa = cap_master_cap cap \<and>
(cap_badge capa, cap_badge cap) \<in> capBadge_ordering False \<and>
cap_asid capa = cap_asid cap \<and> vs_cap_ref capa = vs_cap_ref cap)
slot s \<and> valid_objs s \<and> cap' \<noteq> NullCap
\<longrightarrow> cte_at slot s )
\<and> (s \<turnstile> cap \<longrightarrow> s \<turnstile> cap')
\<and> (cap' \<noteq> NullCap \<longrightarrow> auth_derived cap' cap \<and> cap \<noteq> NullCap \<and> \<not> is_zombie cap \<and> cap \<noteq> IRQControlCap)
\<longrightarrow> Q cap' s \<rbrace>
derive_cap slot cap \<lbrace>Q\<rbrace>,-"
apply (clarsimp simp add: validE_R_def validE_def valid_def
split: sum.splits)
apply (frule in_inv_by_hoareD[OF derive_cap_inv], clarsimp)
apply (erule allE)
apply (erule impEM)
apply (frule use_validE_R[OF _ cap_derive_not_null_helper, OF _ _ imp_refl])
apply (rule derive_cap_inv[THEN valid_validE_R])
apply (intro conjI)
apply (clarsimp simp:cte_wp_at_caps_of_state)+
apply (erule(1) use_validE_R[OF _ derive_cap_valid_cap])
apply simp
apply (erule use_validE_R[OF _ derive_cap_auth_derived],simp)
apply simp
done
(* FIXME: cleanup *)
lemma transfer_caps_loop_presM_extended:
fixes P vo em ex buffer slots caps n mi
assumes x: "\<And>cap src dest.
\<lbrace>\<lambda>s::('state_ext :: state_ext) state .
P s \<and> (vo \<longrightarrow> valid_objs s \<and> valid_mdb s \<and> real_cte_at dest s \<and>
s \<turnstile> cap \<and> Psrc src \<and> Pdest dest \<and> Pcap cap \<and>
tcb_cap_valid cap dest s
\<and> real_cte_at src s
\<and> cte_wp_at (is_derived (cdt s) src cap) src s \<and>
cap \<noteq> cap.NullCap)
\<and> (em \<longrightarrow> cte_wp_at ((=) cap.NullCap) dest s)
\<and> (ex \<longrightarrow> ex_cte_cap_wp_to (appropriate_cte_cap cap) dest s)\<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>rv. P\<rbrace>"
assumes eb: "\<And>b n. \<lbrace>P\<rbrace> set_extra_badge buffer b n \<lbrace>\<lambda>_. P\<rbrace>"
assumes pcap_auth_derived :
"\<And>cap cap'. \<lbrakk>auth_derived cap cap'; Pcap cap'\<rbrakk> \<Longrightarrow> Pcap cap"
shows "\<lbrace>\<lambda>s. P s \<and>
(vo \<longrightarrow> valid_objs s \<and> valid_mdb s \<and> distinct slots \<and>
(\<forall>x \<in> set slots. cte_wp_at (\<lambda>cap. cap = cap.NullCap) x s \<and>
real_cte_at x s \<and> Pdest x) \<and>
(\<forall>x \<in> set caps. valid_cap (fst x) s \<and> Psrc (snd x) \<and> Pcap (fst x) \<and>
cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp \<noteq> fst x
\<longrightarrow> cp = masked_as_full (fst x) (fst x)) (snd x) s
\<and> real_cte_at (snd x) s))
\<and> (ex \<longrightarrow> (\<forall>x \<in> set slots. ex_cte_cap_wp_to is_cnode_cap x s))\<rbrace>
transfer_caps_loop ep buffer n caps slots mi
\<lbrace>\<lambda>rv. P\<rbrace>"
apply (induct caps arbitrary: slots n mi)
apply (simp, wp, simp)
apply (clarsimp simp add: Let_def split_def whenE_def
cong: if_cong list.case_cong split del: if_split)
apply (rule hoare_pre)
apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift static_imp_wp
| assumption | simp split del: if_split)+
apply (rule cap_insert_assume_null)
apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at static_imp_wp)+
apply (rule hoare_vcg_conj_liftE_R)
apply (rule derive_cap_is_derived_foo')
apply (rule_tac Q' ="\<lambda>cap' s. (vo \<longrightarrow> cap'\<noteq> cap.NullCap \<longrightarrow>
cte_wp_at (is_derived (cdt s) (aa, b) cap') (aa, b) s)
\<and> (cap'\<noteq> cap.NullCap \<longrightarrow> QM s cap')" for QM
in hoare_post_imp_R)
prefer 2
apply clarsimp
apply assumption
apply (rule hoare_vcg_conj_liftE_R)
apply (rule hoare_vcg_const_imp_lift_R)
apply (rule derive_cap_is_derived)
apply (wp derive_cap_is_derived_foo')+
apply (clarsimp simp: cte_wp_at_caps_of_state
ex_cte_cap_to_cnode_always_appropriate_strg
real_cte_tcb_valid caps_of_state_valid
split del: if_split)
apply (clarsimp simp: remove_rights_def caps_of_state_valid
neq_Nil_conv cte_wp_at_caps_of_state
imp_conjR[symmetric] conj_comms
split del: if_split)
apply (rule conjI)
apply clarsimp
apply (case_tac "cap = a",clarsimp)
apply (clarsimp simp:masked_as_full_def is_cap_simps)
apply (fastforce simp: cap_master_cap_simps split: if_splits)
apply (clarsimp split del: if_split)
apply (intro conjI)
apply (fastforce split: if_split elim!: pcap_auth_derived)
apply (fastforce)
apply (clarsimp)
apply (rule ballI)
apply (drule(1) bspec)
apply clarsimp
apply (intro conjI)
apply (case_tac "capa = ac",clarsimp+)
apply (case_tac "capa = ac")
by (clarsimp simp: masked_as_full_def is_cap_simps split: if_splits)+
lemma transfer_caps_loop_pas_refined:
"\<lbrace>pas_refined aag and valid_objs and valid_mdb
and (\<lambda>s. (\<forall>x \<in> set caps. valid_cap (fst x) s \<and>
cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp = fst x) (snd x) s
\<and> real_cte_at (snd x) s) \<and>
(\<forall>x\<in>set slots. real_cte_at x s \<and> cte_wp_at (\<lambda>cap. cap = NullCap) x s))
and K ((\<forall>slot \<in> set slots. is_subject aag (fst slot)) \<and>
(\<forall>x \<in> set caps. is_subject aag (fst (snd x)) \<and>
pas_cap_cur_auth aag (fst x)) \<and> distinct slots)\<rbrace>
transfer_caps_loop ep buffer n caps slots mi
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (rule hoare_pre)
apply (rule transfer_caps_loop_presM_extended
[where vo=True and em=True and ex=False and
Psrc="\<lambda>slot . is_subject aag (fst slot)" and
Pcap="pas_cap_cur_auth aag" and
Pdest="\<lambda>slot . is_subject aag (fst slot)"])
apply (wp cap_insert_pas_refined)
apply (fastforce simp: cte_wp_at_caps_of_state elim!: is_derived_is_transferable)
apply (rule set_extra_badge_pas_refined)
apply (erule(1) auth_derived_pas_cur_auth)
apply (fastforce elim: cte_wp_at_weakenE)
done
lemma transfer_caps_pas_refined:
"\<lbrace>pas_refined aag and valid_objs and valid_mdb
     and (\<lambda>s. (\<forall>x \<in> set caps. valid_cap (fst x) s \<and>
cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp = fst x) (snd x) s
\<and> real_cte_at (snd x) s))
and K (is_subject aag receiver \<and> (\<forall>x \<in> set caps. is_subject aag (fst (snd x))) \<and> (\<forall>x \<in> set caps. pas_cap_cur_auth aag (fst x))) \<rbrace>
transfer_caps info caps endpoint receiver recv_buf
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
unfolding transfer_caps_def
apply (rule hoare_pre)
by (wp transfer_caps_loop_pas_refined get_receive_slots_authorised get_recv_slot_inv
hoare_vcg_const_imp_lift hoare_vcg_all_lift grs_distinct
| wpc | simp del: get_receive_slots.simps add: ball_conj_distrib)+
crunch pas_refined[wp]: copy_mrs "pas_refined aag"
(wp: crunch_wps)
lemma lookup_cap_and_slot_authorised:
"\<lbrace>pas_refined aag and K (is_subject aag thread)\<rbrace>
lookup_cap_and_slot thread xs
\<lbrace>\<lambda>rv s. is_subject aag (fst (snd rv))\<rbrace>, -"
unfolding lookup_cap_and_slot_def
apply (rule hoare_pre)
apply (wp lookup_slot_for_thread_authorised
| simp add: split_def)+
done
lemma lookup_extra_caps_authorised:
"\<lbrace>pas_refined aag and K (is_subject aag thread)\<rbrace>
lookup_extra_caps thread buffer mi
\<lbrace>\<lambda>rv s. \<forall>cap \<in> set rv. is_subject aag (fst (snd cap))\<rbrace>, -"
apply (simp add: lookup_extra_caps_def)
apply (wp mapME_set lookup_cap_and_slot_authorised
| simp)+
done
lemma lookup_cap_and_slot_cur_auth:
"\<lbrace>pas_refined aag and K (is_subject aag thread)\<rbrace>
lookup_cap_and_slot thread xs
\<lbrace>\<lambda>rv s. pas_cap_cur_auth aag (fst rv)\<rbrace>, -"
unfolding lookup_cap_and_slot_def
apply (rule hoare_pre)
apply (wp get_cap_auth_wp [where aag = aag] lookup_slot_for_thread_authorised
| simp add: split_def)+
done
lemma lookup_extra_caps_auth:
"\<lbrace>pas_refined aag and K (is_subject aag thread)\<rbrace>
lookup_extra_caps thread buffer mi
\<lbrace>\<lambda>rv s. \<forall>cap \<in> set rv. pas_cap_cur_auth aag (fst cap)\<rbrace>, -"
apply (simp add: lookup_extra_caps_def)
apply (wp mapME_set lookup_cap_and_slot_cur_auth
| simp)+
done
lemma transfer_caps_empty_inv:
"\<lbrace>P\<rbrace> transfer_caps mi [] endpoint receiver rbuf \<lbrace>\<lambda>_. P\<rbrace>"
unfolding transfer_caps_def
by (wp | wpc | simp) +
lemma lcs_valid':
"\<lbrace>valid_objs\<rbrace> lookup_cap_and_slot thread xs \<lbrace>\<lambda>x s. s \<turnstile> fst x\<rbrace>, -"
unfolding lookup_cap_and_slot_def
apply (rule hoare_pre)
apply wp
apply (simp add: split_def)
apply (wp lookup_slot_for_thread_inv | simp)+
done
lemma lec_valid_cap':
"\<lbrace>valid_objs\<rbrace> lookup_extra_caps thread xa mi \<lbrace>\<lambda>rv s. (\<forall>x\<in>set rv. s \<turnstile> fst x)\<rbrace>, -"
unfolding lookup_extra_caps_def
by (wpsimp wp: mapME_set lcs_valid')
(* FIXME: MOVE *)
lemma hoare_conjDR1:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> R rv s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
by (simp add:validE_def validE_R_def valid_def) blast
lemma hoare_conjDR2:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> R rv s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,-"
by (simp add:validE_def validE_R_def valid_def) blast
lemma do_normal_transfer_pas_refined:
"\<lbrace>pas_refined aag
and valid_objs and valid_mdb
and K (grant \<longrightarrow> is_subject aag sender)
and K (grant \<longrightarrow> is_subject aag receiver)\<rbrace>
do_normal_transfer sender sbuf endpoint badge grant receiver rbuf
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
proof(cases grant)
case True thus ?thesis
apply -
apply (rule hoare_gen_asm)
apply (simp add: do_normal_transfer_def)
by (simp
| wp copy_mrs_pas_refined transfer_caps_pas_refined lec_valid_cap'
copy_mrs_cte_wp_at
hoare_vcg_ball_lift
lookup_extra_caps_srcs[simplified ball_conj_distrib,THEN hoare_conjDR1]
lookup_extra_caps_srcs[simplified ball_conj_distrib,THEN hoare_conjDR2]
lookup_extra_caps_authorised lookup_extra_caps_auth lec_valid_cap'
| wpc | simp add:ball_conj_distrib)+
next
case False thus ?thesis
apply (simp add: do_normal_transfer_def)
by (simp
| wp copy_mrs_pas_refined transfer_caps_empty_inv
copy_mrs_cte_wp_at
hoare_vcg_const_imp_lift hoare_vcg_all_lift
| wpc)+
qed
crunch pas_refined[wp]: do_fault_transfer "pas_refined aag"
lemma do_ipc_transfer_pas_refined:
"\<lbrace>pas_refined aag
and valid_objs and valid_mdb
and K (grant \<longrightarrow> is_subject aag sender)
and K (grant \<longrightarrow> is_subject aag receiver)\<rbrace>
do_ipc_transfer sender ep badge grant receiver
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: do_ipc_transfer_def)
apply (wp do_normal_transfer_pas_refined
hoare_vcg_conj_lift hoare_vcg_all_lift
| rule hoare_drop_imps
| wpc)+
by simp
(* FIXME MOVE*)
lemma cap_insert_pas_refined_transferable:
"\<lbrace>pas_refined aag and valid_mdb
and K (is_transferable_cap new_cap
\<and> aag_cap_auth aag (pasObjectAbs aag (fst dest_slot)) new_cap
\<and> (pasObjectAbs aag (fst src_slot), DeleteDerived, pasObjectAbs aag (fst dest_slot))
\<in> pasPolicy aag) \<rbrace>
cap_insert new_cap src_slot dest_slot
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: cap_insert_def)
apply (rule hoare_pre)
apply (wp set_cap_pas_refined set_cdt_pas_refined update_cdt_pas_refined hoare_vcg_imp_lift
hoare_weak_lift_imp hoare_vcg_all_lift set_cap_caps_of_state2
set_untyped_cap_as_full_cdt_is_original_cap get_cap_wp
tcb_domain_map_wellformed_lift hoare_vcg_disj_lift
set_untyped_cap_as_full_is_transferable'
| simp split del: if_split del: split_paired_All fun_upd_apply
| strengthen update_one_strg)+
by (fastforce split: if_split_asm
simp: cte_wp_at_caps_of_state pas_refined_refl valid_mdb_def2
mdb_cte_at_def Option.is_none_def
simp del: split_paired_All
dest: aag_cdt_link_Control aag_cdt_link_DeleteDerived cap_auth_caps_of_state
intro: aag_wellformed_delete_derived_trans[OF _ _ pas_refined_wellformed])
lemma setup_caller_cap_pas_refined:
"\<lbrace>pas_refined aag
and valid_mdb
and K((grant \<longrightarrow> is_subject aag sender \<and> is_subject aag receiver) \<and>
(pasObjectAbs aag receiver, Reply, pasObjectAbs aag sender) \<in> pasPolicy aag )\<rbrace>
setup_caller_cap sender receiver grant
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: setup_caller_cap_def)
apply (rule conjI)
(* if grant *)
apply clarsimp
apply (rule hoare_pre)
apply (wpsimp wp: cap_insert_pas_refined set_thread_state_pas_refined)
apply (fastforce simp: aag_cap_auth_def clas_no_asid cli_no_irqs pas_refined_refl)
(* if not grant *)
apply clarsimp
apply (rule hoare_pre)
apply (wp cap_insert_pas_refined_transferable set_thread_state_pas_refined)
by (fastforce simp: aag_cap_auth_def cap_links_irq_def cap_links_asid_slot_def
cap_auth_conferred_def reply_cap_rights_to_auth_def
intro: aag_wellformed_delete_derived[OF _ pas_refined_wellformed])
(* FIXME: MOVE *)
lemma sym_ref_endpoint_recvD:
assumes sym: "sym_refs (state_refs_of s)"
and ep: "ko_at (Endpoint (RecvEP l)) epptr s"
and inl: "t \<in> set l"
shows "\<exists> pl. st_tcb_at ((=) (BlockedOnReceive epptr pl)) t s"
proof -
have "(t, EPRecv) \<in> state_refs_of s epptr"
using ep inl by (force simp: state_refs_of_def dest:ko_atD)
hence "(epptr, TCBBlockedRecv) \<in> state_refs_of s t"
using sym by (force dest:sym_refsD[rotated])
thus ?thesis
by (force simp: st_tcb_at_tcb_states_of_state_eq tcb_states_of_state_def get_tcb_def
state_refs_of_def tcb_st_refs_of_def tcb_bound_refs_def ep_q_refs_of_def
ntfn_q_refs_of_def ntfn_bound_refs_def
split: ntfn.splits endpoint.splits thread_state.splits option.splits
kernel_object.splits)
qed
lemma pas_refined_ep_recv:
assumes policy: "pas_refined aag s"
and invs: "invs s"
and ep: "ko_at (Endpoint (RecvEP l)) epptr s"
and inl: "t \<in> set l"
shows "(pasObjectAbs aag t, Receive, pasObjectAbs aag epptr) \<in> pasPolicy aag"
apply (insert sym_ref_endpoint_recvD[OF invs_sym_refs[OF invs] ep inl])
apply clarsimp
apply (clarsimp simp:st_tcb_at_tcb_states_of_state_eq)
apply (rule pas_refined_mem[OF _ policy])
apply (rule sta_ts[of epptr Receive])
apply (simp add:thread_states_def)
done
lemma send_ipc_valid_ep_helper:
"\<lbrakk>invs s ; ko_at (Endpoint (RecvEP (h # t))) epptr s \<rbrakk> \<Longrightarrow>
valid_ep (case t of [] \<Rightarrow> IdleEP | h' # t' \<Rightarrow> RecvEP t) s"
apply (drule invs_valid_objs)
apply (drule ko_atD)
apply (erule(1) valid_objsE)
by (cases t; simp add: valid_obj_def valid_ep_def)
lemmas head_in_set = list.set_intros(1)[of h t for h t]
lemma send_ipc_pas_refined:
"\<lbrace>pas_refined aag
and invs
and K (is_subject aag thread
\<and> aag_has_auth_to aag SyncSend epptr
\<and> (can_grant_reply \<longrightarrow> aag_has_auth_to aag Call epptr)
\<and> (can_grant \<longrightarrow> aag_has_auth_to aag Grant epptr \<and> aag_has_auth_to aag Call epptr))\<rbrace>
send_ipc block call badge can_grant can_grant_reply thread epptr
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: send_ipc_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wpc | wp set_thread_state_pas_refined)+
apply (simp add: hoare_if_r_and split del:if_split)
apply (wp setup_caller_cap_pas_refined set_thread_state_pas_refined)+
apply (simp split del:if_split)
apply (rule_tac Q="\<lambda>rv. valid_mdb
and pas_refined aag and K(
(can_grant \<or> can_grant_reply \<longrightarrow> (reply_can_grant \<longrightarrow> is_subject aag x21) \<and>
(pasObjectAbs aag x21, Reply, pasSubject aag) \<in> pasPolicy aag))"
in hoare_strengthen_post[rotated])
apply simp
apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined static_imp_wp gts_wp
| wpc
| simp add: hoare_if_r_and)+
apply (wp hoare_vcg_all_lift hoare_imp_lift_something | simp add:st_tcb_at_tcb_states_of_state_eq)+
subgoal for ep s (* post-wp proof *)
apply (intro conjI impI; clarsimp;
frule(2) pas_refined_ep_recv[OF _ _ _ head_in_set])
subgoal (* can_grant *)
apply (frule(2) aag_wellformed_grant_Control_to_recv[OF _ _ pas_refined_wellformed])
apply (frule(2) aag_wellformed_reply[OF _ _ pas_refined_wellformed])
apply (force elim: send_ipc_valid_ep_helper simp:aag_has_auth_to_Control_eq_owns)
done
subgoal (* can_grant_reply and not can_grant*)
apply (frule(2) aag_wellformed_reply[OF _ _ pas_refined_wellformed])
apply (frule(1) tcb_states_of_state_to_auth)
(* Make the blockedOnReceive state point to epptr *)
apply (frule(1) sym_ref_endpoint_recvD[OF invs_sym_refs _ head_in_set],
clarsimp simp:st_tcb_at_tcb_states_of_state_eq)
apply (fastforce elim: send_ipc_valid_ep_helper
simp: aag_has_auth_to_Control_eq_owns
dest: aag_wellformed_grant_Control_to_recv_by_reply
[OF _ _ _ pas_refined_wellformed])
done
subgoal (* not can_grant_reply and not can_grant*)
by (force elim: send_ipc_valid_ep_helper)
done
done
lemma set_simple_ko_get_tcb:
"\<lbrace>\<lambda>s. P (get_tcb p s)\<rbrace> set_simple_ko f ep epptr \<lbrace>\<lambda>_ s. P (get_tcb p s) \<rbrace>"
unfolding set_simple_ko_def set_object_def
apply (wp get_object_wp)
apply (auto simp: partial_inv_def a_type_def get_tcb_def obj_at_def the_equality
split: Structures_A.kernel_object.splits option.splits)
done
lemma get_tcb_is_Some_iff_typ_at:
"(\<exists>y. get_tcb p s = Some y) = typ_at ATCB p s"
by (simp add: tcb_at_typ [symmetric] tcb_at_def)
lemma case_list_cons_cong:
"(case xxs of [] \<Rightarrow> f | x # xs \<Rightarrow> g xxs)
= (case xxs of [] \<Rightarrow> f | x # xs \<Rightarrow> g (x # xs))"
by (simp split: list.split)
lemma complete_signal_integrity:
"\<lbrace>integrity aag X st and pas_refined aag and valid_objs
and bound_tcb_at ((=) (Some ntfnptr)) thread
and K (is_subject aag thread)\<rbrace>
complete_signal ntfnptr thread
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: complete_signal_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply ((wp set_notification_respects[where auth=Receive] set_thread_state_integrity_autarch as_user_integrity_autarch
| wpc
| simp)+)[1]
apply clarsimp
apply (drule_tac t="pasSubject aag" in sym)
apply (fastforce intro!: bound_tcb_at_implies_receive)
done
match_abbreviation (input) receive_ipc_base2
in concl receive_ipc_def
select "case_endpoint X Y Z A" (for X Y Z A)
abbreviation (input) receive_ipc_base
where
"receive_ipc_base aag thread ep epptr rights is_blocking
\<equiv> receive_ipc_base2 thread is_blocking epptr rights ep"
lemma sym_ref_endpoint_sendD:
assumes sym: "sym_refs (state_refs_of s)"
and ep: "ko_at (Endpoint (SendEP l)) epptr s"
and inl: "t \<in> set l"
shows "\<exists> pl. st_tcb_at ((=) (BlockedOnSend epptr pl)) t s"
proof -
have "(t, EPSend) \<in> state_refs_of s epptr"
using ep inl by (force simp: state_refs_of_def dest:ko_atD)
hence "(epptr, TCBBlockedSend) \<in> state_refs_of s t"
using sym by (force dest:sym_refsD[rotated])
thus ?thesis
by (force simp: st_tcb_at_tcb_states_of_state_eq tcb_states_of_state_def get_tcb_def
state_refs_of_def tcb_st_refs_of_def tcb_bound_refs_def ep_q_refs_of_def
ntfn_q_refs_of_def ntfn_bound_refs_def
split: ntfn.splits endpoint.splits thread_state.splits option.splits
kernel_object.splits)
qed
lemma receive_ipc_valid_ep_helper:
"\<lbrakk> invs s; ko_at (Endpoint (SendEP list)) epptr s \<rbrakk> \<Longrightarrow>
valid_ep (case tl list of [] \<Rightarrow> IdleEP | a # t \<Rightarrow> SendEP (tl list)) s"
apply (drule_tac invs_valid_objs)
apply (drule ko_atD)
apply (erule(1) valid_objsE)
apply (cases list, solves \<open>simp add: valid_obj_def valid_ep_def\<close>)
by (cases "tl list"; clarsimp simp:valid_obj_def valid_ep_def)
lemma receive_ipc_sender_helper:
"\<lbrakk>pas_refined aag s ; kheap s thread = Some (TCB tcb) ; tcb_state tcb = BlockedOnSend ep pl\<rbrakk>
\<Longrightarrow> (pasObjectAbs aag thread, SyncSend, pasObjectAbs aag ep) \<in> pasPolicy aag"
apply (erule pas_refined_mem[rotated])
apply (rule sta_ts)
apply (simp add: thread_states_def tcb_states_of_state_def get_tcb_def)
done
lemma receive_ipc_sender_can_grant_helper:
"\<lbrakk>invs s; pas_refined aag s ; kheap s thread = Some (TCB tcb) ; tcb_state tcb = BlockedOnSend ep pl;
sender_can_grant pl ; aag_has_auth_to aag Receive ep\<rbrakk>
\<Longrightarrow> is_subject aag thread"
apply (frule pas_refined_mem[rotated,where x = "thread" and auth=Grant])
apply (rule sta_ts)
apply (simp add:thread_states_def tcb_states_of_state_def get_tcb_def)
apply (frule(2) aag_wellformed_grant_Control_to_send[OF _ _ pas_refined_wellformed])
apply (simp add:aag_has_auth_to_Control_eq_owns)
done
lemma receive_ipc_base_pas_refined:
"\<lbrace>pas_refined aag and invs
and ko_at (Endpoint ep) epptr
and K (is_subject aag thread
\<and> (pasSubject aag, Receive, pasObjectAbs aag epptr) \<in> pasPolicy aag \<and>
(\<forall> auth \<in> cap_rights_to_auth rights True . aag_has_auth_to aag auth epptr))\<rbrace>
receive_ipc_base aag thread ep epptr rights is_blocking
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
(* FIXME: proof structure *)
apply (rule hoare_gen_asm)
apply (clarsimp simp: thread_get_def cong: endpoint.case_cong)
apply (rule hoare_pre)
apply (wp set_thread_state_pas_refined get_simple_ko_wp setup_caller_cap_pas_refined
| wpc | simp add: thread_get_def do_nbrecv_failed_transfer_def split del: if_split)+
apply (rename_tac list sss data)
apply (rule_tac Q="\<lambda>rv s. pas_refined aag s \<and> valid_mdb s \<and>
(sender_can_grant data \<longrightarrow> is_subject aag (hd list)) \<and>
(sender_can_grant_reply data \<longrightarrow>
(AllowGrant \<in> rights \<longrightarrow> is_subject aag (hd list)) \<and>
(pasSubject aag, Reply, pasObjectAbs aag (hd list)) \<in> pasPolicy aag)"
in hoare_strengthen_post[rotated])
apply (fastforce simp: cap_auth_conferred_def pas_refined_all_auth_is_owns
pas_refined_refl)
apply (wp static_imp_wp do_ipc_transfer_pas_refined set_simple_ko_pas_refined
set_thread_state_pas_refined get_simple_ko_wp hoare_vcg_all_lift
hoare_vcg_imp_lift [OF set_simple_ko_get_tcb, unfolded disj_not1]
| wpc
| simp add: thread_get_def get_thread_state_def do_nbrecv_failed_transfer_def)+
apply (clarsimp simp: tcb_at_def [symmetric] tcb_at_st_tcb_at)
subgoal premises prems for s
proof -
have "\<And>P Q R. \<lbrakk> R ; P \<Longrightarrow> R \<Longrightarrow> Q \<rbrakk> \<Longrightarrow> (P \<longrightarrow> Q) \<and> (\<not>P \<longrightarrow> R)" by blast
thus ?thesis
apply this
using prems
apply clarsimp
subgoal premises prems for list ep' pl
proof -
have "sender_can_grant pl \<Longrightarrow> is_subject aag (hd list)"
using prems
apply (clarsimp elim!: tcb_atE simp:tcb_at_st_tcb_at[symmetric] get_tcb_def)
apply (frule(1) sym_ref_endpoint_sendD[OF invs_sym_refs,where t= "hd list"],force)
apply (clarsimp simp:st_tcb_at_def elim!:obj_atE)
by (rule receive_ipc_sender_can_grant_helper)
moreover have
"sender_can_grant_reply pl \<Longrightarrow>
(pasSubject aag, Reply, pasObjectAbs aag (hd list)) \<in> pasPolicy aag"
using prems
apply (clarsimp elim!: tcb_atE simp:tcb_at_st_tcb_at[symmetric] get_tcb_def)
apply (frule(1) sym_ref_endpoint_sendD[OF invs_sym_refs,where t= "hd list"],force)
apply (clarsimp simp:st_tcb_at_def elim!:obj_atE)
apply (frule pas_refined_mem[rotated,where x = "(hd list)" and auth=Call])
apply (rule sta_ts)
apply (simp add:thread_states_def tcb_states_of_state_def get_tcb_def)
by (erule(2) aag_wellformed_reply[OF _ _ pas_refined_wellformed])
ultimately show ?thesis
using prems by (force dest:receive_ipc_valid_ep_helper)
qed
apply clarsimp
using prems
apply (intro conjI;clarsimp simp add:cap_rights_to_auth_def)
apply (clarsimp elim!: tcb_atE simp:tcb_at_st_tcb_at[symmetric] get_tcb_def
cap_rights_to_auth_def)
apply (frule_tac t="hd x" in sym_ref_endpoint_sendD[OF invs_sym_refs],assumption,force)
apply (clarsimp simp:st_tcb_at_def elim!:obj_atE)
apply (frule_tac x="hd x" in pas_refined_mem[rotated,where auth=Call])
apply (rule sta_ts)
apply (simp add:thread_states_def tcb_states_of_state_def get_tcb_def)
apply (frule aag_wellformed_grant_Control_to_send_by_reply[OF _ _ _ pas_refined_wellformed])
by (force simp:aag_has_auth_to_Control_eq_owns)+
qed
done
lemma complete_signal_pas_refined:
"\<lbrace>pas_refined aag and bound_tcb_at ((=) (Some ntfnptr)) thread\<rbrace>
complete_signal ntfnptr thread
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: complete_signal_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp set_simple_ko_pas_refined set_thread_state_pas_refined
| wpc)+
apply clarsimp
done
lemma receive_ipc_pas_refined:
"\<lbrace>pas_refined aag
and invs
and K (is_subject aag thread
\<and> pas_cap_cur_auth aag ep_cap \<and> AllowRead \<in> cap_rights ep_cap)\<rbrace>
receive_ipc thread ep_cap is_blocking
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: receive_ipc_def thread_get_def split: cap.split)
apply clarsimp
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_seq_ext[OF _ gbn_sp])
apply (case_tac ntfnptr, simp_all)
(* old receive_ipc stuff *)
apply (rule hoare_pre)
apply (wp receive_ipc_base_pas_refined)[1]
apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)
(* ntfn-binding case *)
apply clarsimp
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (case_tac "isActive ntfn", simp_all)
apply (wp complete_signal_pas_refined, clarsimp)
(* regular case again *)
apply (rule hoare_pre, wp receive_ipc_base_pas_refined)
apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)
done
subsection \<open>@{term "integrity"}\<close>
subsubsection\<open>autarchy\<close>
text\<open>
For the case when the currently-running thread owns the receiver
(i.e. the receiver arrived last at the IPC rendezvous, or the sender owns the receiver).
\<close>
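(* Reader's note (a sketch of the common pattern, not a new proof obligation):
   every lemma in this subsection has the shape

     \<lbrace>integrity aag X st and ... and K (is_subject aag receiver \<and> ...)\<rbrace>
       set_extra_badge / copy_mrs / transfer_caps / do_fault_transfer / ...
     \<lbrace>\<lambda>_. integrity aag X st\<rbrace>

   i.e. when the subject owns the thread being written to, each step of the
   message transfer preserves integrity outright; do_ipc_transfer_integrity_autarch
   below is obtained by composing these rules with the usual wp/wpc machinery. *)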
lemma set_extra_badge_integrity_autarch:
"\<lbrace>(integrity aag X st and
K (is_subject aag thread \<and>
ipc_buffer_has_auth aag thread (Some buf) \<and>
buffer_cptr_index + n < 2 ^ (msg_align_bits - 2)))\<rbrace>
set_extra_badge buf badge n
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
unfolding set_extra_badge_def
by (wp store_word_offs_integrity_autarch)
lemma transfer_caps_integrity_autarch:
"\<lbrace>pas_refined aag
and integrity aag X st
and valid_objs and valid_mdb
and (\<lambda> s. (\<forall>x\<in>set caps.
s \<turnstile> fst x) \<and>
(\<forall>x\<in>set caps.
cte_wp_at
(\<lambda>cp. fst x \<noteq> NullCap \<longrightarrow>
cp = fst x)
(snd x) s \<and>
real_cte_at (snd x) s))
and K (is_subject aag receiver \<and> ipc_buffer_has_auth aag receiver receive_buffer \<and>
(\<forall>x\<in>set caps. is_subject aag (fst (snd x))) \<and> length caps < 6)\<rbrace>
transfer_caps mi caps endpoint receiver receive_buffer
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: transfer_caps_def)
apply (wpc | wp)+
apply (rule_tac P = "\<forall>x \<in> set dest_slots. is_subject aag (fst x)" in hoare_gen_asm)
apply (wp transfer_caps_loop_pres_dest cap_insert_integrity_autarch set_extra_badge_integrity_autarch [where aag = aag and thread = receiver]
get_receive_slots_authorised hoare_vcg_all_lift hoare_vcg_imp_lift
| simp add: msg_align_bits buffer_cptr_index_def msg_max_length_def cte_wp_at_caps_of_state
| blast)+
done
(* FIXME: duplicate somehow *)
lemma load_word_offs_inv[wp]:
"\<lbrace>P\<rbrace> load_word_offs buf off \<lbrace>\<lambda>rv. P\<rbrace>"
apply (simp add: load_word_offs_def do_machine_op_def split_def)
apply wp
apply clarsimp
apply (drule in_inv_by_hoareD[OF loadWord_inv])
apply simp
done
lemma copy_mrs_integrity_autarch:
"\<lbrace>pas_refined aag and integrity aag X st and K (is_subject aag receiver \<and> ipc_buffer_has_auth aag receiver rbuf \<and> unat n < 2 ^ (msg_align_bits - 2))\<rbrace>
copy_mrs sender sbuf receiver rbuf n
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: copy_mrs_def cong: if_cong)
apply (wp mapM_wp' as_user_integrity_autarch
store_word_offs_integrity_autarch [where aag = aag and thread = receiver]
| wpc
| simp
| fastforce simp: length_msg_registers msg_align_bits split: if_split_asm)+
done
(* FIXME: Why was the [wp] attribute clobbered by interpretation of the Arch locale? *)
declare as_user_thread_bound_ntfn[wp]
lemma get_mi_valid':
"\<lbrace>\<top>\<rbrace> get_message_info a \<lbrace>\<lambda>rv s. valid_message_info rv\<rbrace>"
apply (simp add: get_message_info_def)
apply (wp, rule hoare_post_imp, rule data_to_message_info_valid)
apply wp+
done
lemma lookup_extra_caps_length:
"\<lbrace>K (valid_message_info mi)\<rbrace> lookup_extra_caps thread buf mi \<lbrace>\<lambda>rv s. length rv < 6\<rbrace>, -"
unfolding lookup_extra_caps_def
apply (cases buf, simp_all)
apply (wp mapME_length | simp add: comp_def valid_message_info_def msg_max_extra_caps_def word_le_nat_alt)+
done
lemma get_mi_length:
"\<lbrace>\<top>\<rbrace> get_message_info sender \<lbrace>\<lambda>rv s. unat (mi_length rv) < 2 ^ (msg_align_bits - 2)\<rbrace>"
apply (rule hoare_post_imp [OF _ get_mi_valid'])
apply (clarsimp simp: valid_message_info_def msg_align_bits msg_max_length_def word_le_nat_alt)
done
lemma do_normal_transfer_send_integrity_autarch:
notes lec_valid_cap[wp del]
shows
"\<lbrace>pas_refined aag
and integrity aag X st
and valid_objs and valid_mdb
and K (is_subject aag receiver \<and>
ipc_buffer_has_auth aag receiver rbuf \<and>
(grant \<longrightarrow> is_subject aag sender))\<rbrace>
do_normal_transfer sender sbuf endpoint badge grant receiver rbuf
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: do_normal_transfer_def)
apply (wp as_user_integrity_autarch set_message_info_integrity_autarch transfer_caps_integrity_autarch
copy_mrs_integrity_autarch
copy_mrs_tcb copy_mrs_cte_wp_at lookup_extra_caps_authorised
lookup_extra_caps_length get_mi_length get_mi_valid'
hoare_vcg_conj_lift hoare_vcg_ball_lift lec_valid_cap' static_imp_wp
| wpc
| simp)+
done
crunch integrity_autarch: setup_caller_cap "integrity aag X st"
lemma do_fault_transfer_integrity_autarch:
"\<lbrace>integrity aag X st and K (is_subject aag receiver \<and> ipc_buffer_has_auth aag receiver recv_buf) \<rbrace>
do_fault_transfer badge sender receiver recv_buf
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: do_fault_transfer_def split_def)
apply (wp as_user_integrity_autarch set_message_info_integrity_autarch set_mrs_integrity_autarch
thread_get_wp'
| wpc | simp)+
done
lemma do_ipc_transfer_integrity_autarch:
"\<lbrace>pas_refined aag
and integrity aag X st
and valid_objs and valid_mdb
and K (is_subject aag receiver \<and> (grant \<longrightarrow> is_subject aag sender))\<rbrace>
do_ipc_transfer sender ep badge grant receiver
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: do_ipc_transfer_def)
apply (wp do_normal_transfer_send_integrity_autarch do_fault_transfer_integrity_autarch
thread_get_wp' lookup_ipc_buffer_has_auth hoare_vcg_all_lift
| wpc | simp | wp (once) hoare_drop_imps)+
done
lemma set_thread_state_running_respects:
"\<lbrace>integrity aag X st
and (\<lambda>s. \<exists>ep. aag_has_auth_to aag Receive ep
\<and> st_tcb_at (send_blocked_on ep) sender s)\<rbrace>
set_thread_state sender Structures_A.Running
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (simp add: set_thread_state_def)
apply (wpsimp wp: set_object_wp)
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def obj_at_def st_tcb_at_def)
apply (clarsimp dest!: get_tcb_SomeD)
apply (rule_tac new_st=Running in tro_tcb_receive)
apply (auto simp: tcb_bound_notification_reset_integrity_def)
done
(* FIXME move *)
lemma set_simple_ko_obj_at:
"\<lbrace>obj_at P ptr and K (ptr \<noteq> epptr)\<rbrace>
set_simple_ko f epptr ep
\<lbrace>\<lambda>rv. obj_at P ptr\<rbrace>"
apply (simp add: set_simple_ko_def set_object_def)
apply (wp get_object_wp)
apply (auto simp: obj_at_def)
done
(* ep is free here *)
lemma sts_receive_Inactive_respects:
"\<lbrace>integrity aag X st and st_tcb_at (send_blocked_on ep) thread
and (\<lambda>s. \<forall>tcb. get_tcb thread s = Some tcb \<longrightarrow> call_blocked ep (tcb_state tcb))
and K (aag_has_auth_to aag Receive ep)\<rbrace>
set_thread_state thread Structures_A.thread_state.Inactive
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply wp
apply clarsimp
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
apply (drule get_tcb_SomeD)
apply (rule_tac new_st=Inactive in tro_tcb_receive, simp_all)
apply (fastforce simp add: st_tcb_at_def obj_at_def)
done
crunch pred_tcb: do_ipc_transfer "pred_tcb_at proj P t"
(wp: crunch_wps transfer_caps_loop_pres make_fault_message_inv simp: zipWithM_x_mapM)
lemma set_untyped_cap_as_full_not_untyped:
"\<lbrace> P and K(\<not> is_untyped_cap cap') \<rbrace>
set_untyped_cap_as_full cap cap' slot
\<lbrace> \<lambda>rv. P\<rbrace>"
unfolding set_untyped_cap_as_full_def
apply wp
apply (rule hoare_pre_cont)
apply wpsimp+
done
lemma cap_insert_integrity_autarch_not_untyped:
"\<lbrace>integrity aag X st and
K (\<not> is_untyped_cap cap \<and> is_subject aag (fst dest_slot))\<rbrace>
cap_insert cap src_slot dest_slot
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (simp add:cap_insert_def)
apply (wp set_original_integrity_autarch cap_insert_ext_extended.list_integ_lift
cap_insert_list_integrity update_cdt_fun_upd_integrity_autarch gets_inv
set_cap_integrity_autarch set_untyped_cap_as_full_not_untyped assert_inv)
apply fastforce
done
(* FIXME MOVE *)
lemma pred_tcb_atE:
assumes hyp: "pred_tcb_at proj pred t s"
obtains tcb where "kheap s t = Some (TCB tcb)" and " pred (proj (tcb_to_itcb tcb))"
using hyp by (fastforce elim:obj_atE simp:pred_tcb_at_def)
lemma set_thread_state_blocked_on_reply_respects:
"\<lbrace> integrity aag X st and
st_tcb_at (send_blocked_on ep) thread and
pred_tcb_at id (\<lambda>itcb. allowed_call_blocked ep (itcb_state itcb)) thread
and K(aag_has_auth_to aag Receive ep)\<rbrace>
set_thread_state thread BlockedOnReply
\<lbrace>\<lambda>_. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (rule hoare_pre)
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply wp
apply clarsimp
apply (erule integrity_trans)
apply (clarsimp simp:integrity_def dest!:get_tcb_SomeD elim!: pred_tcb_atE)
apply (rule tro_tcb_receive[where new_st=BlockedOnReply])
by fastforce+
lemma setup_caller_cap_integrity_recv:
"\<lbrace>integrity aag X st and valid_mdb
and st_tcb_at (send_blocked_on ep) sender and
pred_tcb_at id (\<lambda>itcb. allowed_call_blocked ep (itcb_state itcb)) sender
and K (aag_has_auth_to aag Receive ep \<and> is_subject aag receiver
\<and> (grant \<longrightarrow> is_subject aag sender))\<rbrace>
setup_caller_cap sender receiver grant
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (rule hoare_pre)
apply (unfold setup_caller_cap_def)
apply (wp cap_insert_integrity_autarch_not_untyped set_thread_state_blocked_on_reply_respects)
by fastforce
lemma pred_tcb_atI:
"\<lbrakk>kheap s t = Some (TCB tcb) ;pred (proj (tcb_to_itcb tcb))\<rbrakk> \<Longrightarrow> pred_tcb_at proj pred t s"
by (fastforce simp:pred_tcb_at_def obj_at_def)
(* FIXME MOVE *)
abbreviation
sender_can_call :: "sender_payload \<Rightarrow> bool"
where
"sender_can_call pl \<equiv> sender_can_grant pl \<or> sender_can_grant_reply pl"
lemma receive_ipc_base_integrity:
notes do_nbrecv_failed_transfer_def[simp]
shows "\<lbrace>pas_refined aag and integrity aag X st and invs
and ko_at (Endpoint ep) epptr
and K (is_subject aag receiver
\<and> (pasSubject aag, Receive, pasObjectAbs aag epptr) \<in> pasPolicy aag
\<and> (\<forall>auth \<in> cap_rights_to_auth rights True. aag_has_auth_to aag auth epptr))\<rbrace>
receive_ipc_base aag receiver ep epptr rights is_blocking
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (clarsimp simp: thread_get_def get_thread_state_def cong: endpoint.case_cong)
apply (rule hoare_pre)
apply (wp set_endpoinintegrity set_thread_state_running_respects
setup_caller_cap_integrity_recv[where ep = epptr]
do_ipc_transfer_integrity_autarch
set_thread_state_integrity_autarch[where param_a=receiver]
sts_receive_Inactive_respects[where ep = epptr]
as_user_integrity_autarch
| wpc | simp)+
apply (rename_tac list tcb data)
apply (rule_tac Q="\<lambda>rv s. integrity aag X st s
\<and> valid_mdb s
\<and> is_subject aag receiver
\<and> (sender_can_call data \<longrightarrow> AllowGrant \<in> rights
\<longrightarrow> is_subject aag (hd list))
\<and> (sender_can_grant data \<longrightarrow> is_subject aag (hd list))
\<and> st_tcb_at (send_blocked_on epptr) (hd list) s
\<and> st_tcb_at (\<lambda>st. st = BlockedOnSend epptr data) (hd list) s"
in hoare_strengthen_post[rotated])
apply (fastforce simp: st_tcb_at_def obj_at_def get_tcb_rev call_blocked_def
allowed_call_blocked_def
dest!: get_tcb_SomeD
elim: pred_tcb_atI)
apply (wp do_ipc_transfer_integrity_autarch do_ipc_transfer_pred_tcb set_endpoinintegrity
get_simple_ko_wp
set_thread_state_integrity_autarch[where param_a=receiver]
hoare_vcg_imp_lift [OF set_simple_ko_get_tcb, unfolded disj_not1]
hoare_vcg_all_lift as_user_integrity_autarch
| wpc | simp)+
apply clarsimp
apply (subgoal_tac "ep_at epptr s \<and> (\<exists>auth. aag_has_auth_to aag auth epptr
\<and> (auth = Receive \<or> auth = SyncSend \<or> auth = Reset))")
prefer 2
apply (fastforce simp: obj_at_def is_ep)
apply simp
apply (thin_tac "ep_at epptr s \<and> (\<exists>auth. aag_has_auth_to aag auth epptr
\<and> (auth = Receive \<or> auth = SyncSend \<or> auth = Reset))")
apply (clarsimp simp: st_tcb_def2 a_type_def)
apply (frule_tac t="hd x" in sym_ref_endpoint_sendD[OF invs_sym_refs],assumption,force)
apply (clarsimp simp: get_tcb_def elim!: pred_tcb_atE)
apply (intro conjI;
(force elim: receive_ipc_valid_ep_helper receive_ipc_sender_can_grant_helper)?)
apply (intro impI)
apply (frule_tac x= "hd x" in pas_refined_mem[rotated,where auth=Call])
apply (rule sta_ts)
apply (simp add: thread_states_def tcb_states_of_state_def get_tcb_def)
apply (simp add: cap_rights_to_auth_def)
apply (rule aag_has_auth_to_Control_eq_owns[THEN iffD1],assumption)
apply (erule aag_wellformed_grant_Control_to_send_by_reply[OF _ _ _ pas_refined_wellformed];force)
done
lemma receive_ipc_integrity_autarch:
"\<lbrace>pas_refined aag and integrity aag X st and invs
and K (is_subject aag receiver
\<and> pas_cap_cur_auth aag cap \<and> AllowRead \<in> cap_rights cap)\<rbrace>
receive_ipc receiver cap is_blocking
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: receive_ipc_def split: cap.splits)
apply clarsimp
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (rule hoare_seq_ext[OF _ gbn_sp])
apply (case_tac ntfnptr, simp_all)
(* old receive case, not bound *)
apply (rule hoare_pre, wp receive_ipc_base_integrity)
apply (fastforce simp:aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (case_tac "isActive ntfn", simp_all)
(* new ntfn-binding case *)
apply (rule hoare_pre, wp complete_signal_integrity, clarsimp)
(* old receive case, bound ntfn not active *)
apply (rule hoare_pre, wp receive_ipc_base_integrity)
apply (fastforce simp:aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)
done
subsubsection\<open>Non-autarchy: the sender is running\<close>
text\<open>
If the sender is running (i.e. it arrived last at the IPC rendezvous), then we
need this auxiliary machinery to show that the sequence of TCB updates ends up
in the tcb_send, tcb_call or tcb_reply case of @{term integrity_obj}.
This machinery is used for both send_ipc and do_reply_transfer.
The sender can update an IPC receiver's context as much as it likes,
provided that it eventually changes the thread state to Running.
\<close>
datatype tcb_respects_state = TRContext | TRFinal | TRFinalOrCall | TRReplyContext
inductive
tcb_in_ipc for aag tst l' epptr ko ko'
where
tii_lrefl: "\<lbrakk> l' = pasSubject aag \<rbrakk> \<Longrightarrow> tcb_in_ipc aag tst l' epptr ko ko'"
| tii_context: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
can_receive_ipc (tcb_state tcb);
\<exists>ctxt'. tcb' = tcb \<lparr>tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb)\<rparr>;
tst = TRContext\<rbrakk>
\<Longrightarrow> tcb_in_ipc aag tst l' epptr ko ko'"
| tii_final: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
receive_blocked_on epptr (tcb_state tcb);
\<exists>ctxt'. tcb' = tcb \<lparr> tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb)
, tcb_state := Structures_A.Running\<rparr>;
aag_has_auth_to aag SyncSend epptr;
tst = TRFinal \<or> tst = TRFinalOrCall\<rbrakk>
\<Longrightarrow> tcb_in_ipc aag tst l' epptr ko ko'"
| tii_call: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
ep_recv_blocked epptr (tcb_state tcb);
\<exists>ctxt'. tcb' = tcb \<lparr> tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb),
tcb_state := Structures_A.Running,
tcb_caller := ReplyCap caller False R\<rparr>;
is_subject aag caller;
aag_has_auth_to aag Call epptr;
tst = TRFinal\<rbrakk>
\<Longrightarrow> tcb_in_ipc aag tst l' epptr ko ko'"
| tii_reply: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
\<exists>ctxt'. tcb' = tcb \<lparr> tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb),
tcb_fault := None,
tcb_state := Structures_A.Running\<rparr>;
(pasSubject aag, Reply, l') \<in> pasPolicy aag;
tcb_state tcb = BlockedOnReply;
tst = TRFinal\<rbrakk>
\<Longrightarrow> tcb_in_ipc aag tst l' epptr ko ko'"
lemmas tii_subject[simp] = tii_lrefl [OF refl]
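(* Informal reading of the rules above (a summary, not part of the proof):
   - TRContext:     only the receiver's register context has been modified so far
                    (tii_context); this is the state in which the message transfer
                    itself takes place.
   - TRFinalOrCall: in addition, the thread state has been set to Running
                    (tii_final), which already suffices for a plain send.
   - TRFinal:       reached from TRFinalOrCall by weakening
                    (integrity_tcb_in_ipc_no_call below), via tii_call once a reply
                    cap to the (subject) caller has been installed, or via tii_reply
                    for a completed reply transfer.
   TRReplyContext is not constrained by any of the rules above. *)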
definition integrity_tcb_in_ipc
:: "'a PAS \<Rightarrow> obj_ref set \<Rightarrow> obj_ref \<Rightarrow> obj_ref \<Rightarrow> tcb_respects_state \<Rightarrow>
det_ext state \<Rightarrow> det_ext state \<Rightarrow> bool"
where
"integrity_tcb_in_ipc aag X thread epptr tst st \<equiv> \<lambda>s.
\<not> is_subject aag thread \<and> pas_refined aag st \<and>
(integrity aag X st (s\<lparr>kheap := (kheap s)(thread := kheap st thread),
machine_state :=
(machine_state s)\<lparr>underlying_memory :=
(\<lambda>p. if p \<in> auth_ipc_buffers st thread then
underlying_memory (machine_state st) p
else underlying_memory (machine_state s) p) \<rparr>\<rparr>)
\<and> (tcb_in_ipc aag tst (pasObjectAbs aag thread) epptr (kheap st thread) (kheap s thread)))"
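(* How this invariant is used below (a sketch of the proof strategy, not a lemma):
   when the receiver is not owned by the subject, integrity_tcb_in_ipc_refl
   establishes the invariant in state TRContext; the *_respects_in_ipc rules
   preserve it across the transfer and only ever advance the tcb_respects_state
   (e.g. set_thread_state_running_respects_in_ipc takes TRContext to TRFinalOrCall,
   setup_caller_cap_respects_in_ipc_reply takes TRFinalOrCall to TRFinal); finally
   integrity_tcb_in_ipc_final turns the invariant in state TRFinal back into
   "integrity aag X st".  See the last branch of send_ipc_integrity_autarch. *)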
lemma tcb_context_no_change:
"\<exists>ctxt. tcb = tcb\<lparr> tcb_arch := arch_tcb_context_set ctxt (tcb_arch tcb)\<rparr>"
apply (cases tcb, clarsimp)
apply (case_tac tcb_arch)
apply (auto simp: arch_tcb_context_set_def)
done
lemma auth_ipc_buffers_mem_Write:
"\<lbrakk> x \<in> auth_ipc_buffers s thread; pas_refined aag s; valid_objs s; is_subject aag thread \<rbrakk>
\<Longrightarrow> aag_has_auth_to aag Write x"
apply (clarsimp simp add: auth_ipc_buffers_member_def)
apply (drule (1) cap_cur_auth_caps_of_state)
apply simp
apply (clarsimp simp: aag_cap_auth_def cap_auth_conferred_def
vspace_cap_rights_to_auth_def vm_read_write_def
is_page_cap_def
split: if_split_asm)
apply (auto dest: ipcframe_subset_page)
done
lemma integrity_tcb_in_ipc_final:
"\<lbrakk> integrity_tcb_in_ipc aag X thread epptr TRFinal st s \<rbrakk> \<Longrightarrow> integrity aag X st s"
unfolding integrity_tcb_in_ipc_def
apply clarsimp
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
apply (rule conjI)
apply (erule tcb_in_ipc.cases; simp)
apply (fastforce intro!: tro_tcb_send simp: direct_send_def)
apply (fastforce intro!: tro_tcb_call simp: direct_call_def)
apply clarsimp
apply (fastforce intro!: tro_tcb_reply tcb.equality simp: direct_reply_def)
\<comment> \<open>memory integrity obligations\<close>
apply clarsimp
apply (cases "is_subject aag thread")
apply (rule trm_write)
apply (solves\<open>simp\<close>)
\<comment> \<open>doesn't own\<close>
apply (erule tcb_in_ipc.cases, simp_all)[1]
apply clarsimp
apply (rule trm_ipc [where p' = thread])
apply (simp add: tcb_states_of_state_def get_tcb_def)
apply (simp add: tcb_states_of_state_def get_tcb_def)
apply (simp add: auth_ipc_buffers_def get_tcb_def
split: option.split_asm cap.split_asm arch_cap.split_asm if_split_asm split del: if_split)
apply simp
apply clarsimp
apply (rule trm_ipc [where p' = thread])
apply (simp add: tcb_states_of_state_def get_tcb_def split:thread_state.splits)
apply (simp add: tcb_states_of_state_def get_tcb_def)
apply (simp add: auth_ipc_buffers_def get_tcb_def
split: option.split_asm cap.split_asm arch_cap.split_asm if_split_asm split del: if_split)
apply simp
apply clarsimp
apply (rule trm_ipc [where p' = thread])
apply (simp add: tcb_states_of_state_def get_tcb_def split:thread_state.splits)
apply (simp add: tcb_states_of_state_def get_tcb_def)
apply (simp add: auth_ipc_buffers_def get_tcb_def
split: option.split_asm cap.split_asm arch_cap.split_asm if_split_asm split del: if_split)
apply simp
done
lemma update_tcb_context_in_ipc:
"\<lbrakk> integrity_tcb_in_ipc aag X thread epptr TRContext st s;
get_tcb thread s = Some tcb;
tcb' = tcb\<lparr>tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb)\<rparr>\<rbrakk>
\<Longrightarrow> integrity_tcb_in_ipc aag X thread epptr TRContext st
(s\<lparr>kheap := (kheap s)(thread \<mapsto> TCB tcb')\<rparr>)"
unfolding integrity_tcb_in_ipc_def
apply (elim conjE)
apply (intro conjI)
apply assumption+
apply (erule integrity_trans)
apply (simp cong: if_cong)
apply clarsimp
apply (erule tcb_in_ipc.cases, simp_all)
apply (auto intro!: tii_context[OF refl refl] tii_lrefl[OF refl] tcb_context_no_change
dest!: get_tcb_SomeD simp: arch_tcb_context_set_def)
done
lemma update_tcb_state_in_ipc:
"\<lbrakk> integrity_tcb_in_ipc aag X thread epptr TRContext st s;
receive_blocked_on epptr (tcb_state tcb); aag_has_auth_to aag SyncSend epptr;
get_tcb thread s = Some tcb; tcb' = tcb\<lparr>tcb_state := Structures_A.thread_state.Running\<rparr> \<rbrakk>
\<Longrightarrow> integrity_tcb_in_ipc aag X thread epptr TRFinalOrCall st
(s\<lparr>kheap := (kheap s)(thread \<mapsto> TCB tcb')\<rparr>)"
unfolding integrity_tcb_in_ipc_def
apply (elim conjE)
apply (intro conjI)
apply assumption+
apply (erule integrity_trans)
apply (simp cong: if_cong)
apply clarsimp
apply (erule tcb_in_ipc.cases, simp_all)
apply (drule get_tcb_SomeD)
apply (rule tii_final[OF refl refl])
apply (solves\<open>clarsimp\<close>)
apply (elim exE, intro exI tcb.equality; solves \<open>simp\<close>)
apply fastforce
apply fastforce
done
lemma as_user_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X thread epptr TRContext st\<rbrace>
as_user thread m
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X thread epptr TRContext st\<rbrace>"
apply (simp add: as_user_def set_object_def get_object_def)
apply (wp gets_the_wp get_wp put_wp mapM_x_wp'
| wpc
| simp split del: if_split add: zipWithM_x_mapM_x split_def store_word_offs_def)+
apply (clarsimp simp: st_tcb_def2 tcb_at_def fun_upd_def[symmetric])
apply (auto elim: update_tcb_context_in_ipc)
done
lemma set_message_info_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X thread epptr TRContext st\<rbrace>
set_message_info thread m
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X thread epptr TRContext st\<rbrace>"
unfolding set_message_info_def
by (wp as_user_respects_in_ipc)
lemma mul_add_word_size_lt_msg_align_bits_ofnat:
"\<lbrakk> p < 2 ^ (msg_align_bits - 2); k < 4 \<rbrakk>
\<Longrightarrow> of_nat p * of_nat word_size + k < (2 :: word32) ^ msg_align_bits"
unfolding word_size_def
apply simp
apply (rule is_aligned_add_less_t2n[where n=2])
apply (simp_all add: msg_align_bits word_bits_conv
is_aligned_word_size_2[simplified word_size_def, simplified])
apply (erule word_less_power_trans_ofnat [where k = 2 and m=9, simplified], simp)
done
lemmas ptr_range_off_off_mems =
ptr_range_add_memI [OF _ mul_word_size_lt_msg_align_bits_ofnat]
ptr_range_add_memI [OF _ mul_add_word_size_lt_msg_align_bits_ofnat,
simplified add.assoc [symmetric]]
lemma store_word_offs_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st and
K ((\<not> is_subject aag receiver \<longrightarrow> auth_ipc_buffers st receiver = ptr_range buf msg_align_bits)
\<and> is_aligned buf msg_align_bits \<and> r < 2 ^ (msg_align_bits - 2))\<rbrace>
store_word_offs buf r v
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: store_word_offs_def storeWord_def pred_conj_def)
apply (rule hoare_pre)
apply (wp dmo_wp)
apply (unfold integrity_tcb_in_ipc_def)
apply (elim conjE)
apply (intro impI conjI)
apply assumption+
apply (erule integrity_trans)
apply (clarsimp simp: ptr_range_off_off_mems integrity_def is_aligned_mask [symmetric]
cong: imp_cong )
apply simp
done
crunch respects_in_ipc: set_extra_badge "integrity_tcb_in_ipc aag X receiver epptr TRContext st"
(wp: store_word_offs_respects_in_ipc)
lemma set_object_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K (is_subject aag ptr)\<rbrace>
set_object ptr obj
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (simp add: integrity_tcb_in_ipc_def)
apply (rule hoare_pre, wp)
apply (wpsimp wp: set_object_wp)
apply (simp only: pred_conj_def)
apply (elim conjE)
apply (intro conjI ; (solves \<open>simp\<close>)?)
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
done
lemma set_cap_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K (is_subject aag (fst ptr))\<rbrace>
set_cap cap ptr
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (simp add: set_cap_def split_def)
apply (wp set_object_respects_in_ipc_autarch get_object_wp
| wpc)+
apply simp
done
lemma set_original_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K (is_subject aag (fst slot))\<rbrace>
set_original slot orig
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (wp set_original_wp)
apply (clarsimp simp: integrity_tcb_in_ipc_def)
apply (simp add: integrity_def
tcb_states_of_state_def get_tcb_def map_option_def
split del: if_split cong: if_cong)
by (fastforce intro :integrity_cdt_direct)
lemma update_cdt_fun_upd_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and K (is_subject aag (fst slot))\<rbrace>
update_cdt (\<lambda>cdt. cdt (slot := v cdt))
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: update_cdt_def set_cdt_def)
apply wp
apply (clarsimp simp: integrity_tcb_in_ipc_def integrity_def
tcb_states_of_state_def get_tcb_def
split del: if_split cong: if_cong)
by (fastforce intro :integrity_cdt_direct)
lemma set_untyped_cap_as_full_integrity_tcb_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st and
K (is_subject aag (fst src_slot))\<rbrace>
set_untyped_cap_as_full src_cap new_cap src_slot
\<lbrace>\<lambda>ya. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_pre)
apply (clarsimp simp: set_untyped_cap_as_full_def)
apply (intro conjI impI)
apply (wp set_cap_respects_in_ipc_autarch | simp)+
done
lemma cap_insert_ext_integrity_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st and K(is_subject aag (fst src_slot))
and K(is_subject aag (fst dest_slot))\<rbrace>
cap_insert_ext src_parent src_slot dest_slot src_p dest_p
\<lbrace>\<lambda>yd. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: integrity_tcb_in_ipc_def split del: if_split)
apply (unfold integrity_def)
apply (simp only: integrity_cdt_list_as_list_integ)
apply (rule hoare_lift_Pf[where f="ekheap"])
apply (clarsimp simp: integrity_tcb_in_ipc_def integrity_def
tcb_states_of_state_def get_tcb_def
split del: if_split cong: if_cong)
including no_pre
apply wp
apply (rule hoare_vcg_conj_lift)
apply (simp add: list_integ_def del: split_paired_All)
apply (fold list_integ_def)
apply (wp cap_insert_list_integrity | simp | force)+
done
lemma cap_inserintegrity_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and K (is_subject aag (fst dest_slot) \<and> is_subject aag (fst src_slot))\<rbrace>
cap_insert new_cap src_slot dest_slot
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: cap_insert_def cong: if_cong)
apply (rule hoare_pre)
apply (wp set_original_respects_in_ipc_autarch
set_untyped_cap_as_full_integrity_tcb_in_ipc_autarch
update_cdt_fun_upd_respects_in_ipc_autarch
set_cap_respects_in_ipc_autarch get_cap_wp
cap_insert_ext_integrity_in_ipc_autarch
| simp split del: if_split)+
done
lemma transfer_caps_loop_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and valid_objs and valid_mdb
and (\<lambda>s. (\<forall>slot \<in> set slots. real_cte_at slot s)
\<and> (\<forall>x \<in> set caps. s \<turnstile> fst x
\<and> cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp = fst x) (snd x) s
\<and> real_cte_at (snd x) s))
and K ((\<forall>cap \<in> set caps. is_subject aag (fst (snd cap)))
\<and> (\<forall>slot \<in> set slots. is_subject aag (fst slot))
\<and> (\<not> is_subject aag receiver \<longrightarrow>
auth_ipc_buffers st receiver = ptr_range buffer msg_align_bits)
\<and> is_aligned buffer msg_align_bits
\<and> n + length caps < 6 \<and> distinct slots)\<rbrace>
transfer_caps_loop ep buffer n caps slots mi
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_gen_asm)
apply (wp transfer_caps_loop_pres_dest cap_inserintegrity_in_ipc_autarch
set_extra_badge_respects_in_ipc
| simp
| simp add: msg_align_bits buffer_cptr_index_def msg_max_length_def
| blast)+
apply (auto simp: cte_wp_at_caps_of_state)
done
lemma transfer_caps_respects_in_ipc:
"\<lbrace>pas_refined aag
and integrity_tcb_in_ipc aag X receiver epptr TRContext st
and valid_objs and valid_mdb
and tcb_at receiver
and (\<lambda>s. (\<forall>x \<in> set caps. s \<turnstile> fst x)
\<and> (\<forall>x \<in> set caps. cte_wp_at (\<lambda>cp. fst x \<noteq> cap.NullCap \<longrightarrow> cp = fst x) (snd x) s
\<and> real_cte_at (snd x) s))
and K ((\<not> null caps \<longrightarrow> is_subject aag receiver)
\<and> (\<forall>cap \<in> set caps. is_subject aag (fst (snd cap)))
\<and> (\<not> is_subject aag receiver \<longrightarrow>
case_option True (\<lambda>buf'. auth_ipc_buffers st receiver
= ptr_range buf' msg_align_bits) recv_buf)
\<and> (case_option True (\<lambda>buf'. is_aligned buf' msg_align_bits) recv_buf)
\<and> length caps < 6)\<rbrace>
transfer_caps mi caps endpoint receiver recv_buf
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_gen_asm)
apply (cases recv_buf)
apply (simp add: transfer_caps_def, wp, simp)
apply (cases caps)
apply (simp add: transfer_caps_def del: get_receive_slots.simps, wp, simp)
apply (simp add: transfer_caps_def del: get_receive_slots.simps)
apply (wp transfer_caps_loop_respects_in_ipc_autarch
get_receive_slots_authorised
hoare_vcg_all_lift
| wpc
| rule hoare_drop_imps
| simp add: null_def del: get_receive_slots.simps)+
done
lemma copy_mrs_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and K ((\<not> is_subject aag receiver \<longrightarrow>
case_option True (\<lambda>buf'. auth_ipc_buffers st receiver
= ptr_range buf' msg_align_bits) rbuf)
\<and> (case_option True (\<lambda>buf'. is_aligned buf' msg_align_bits) rbuf)
\<and> unat n < 2 ^ (msg_align_bits - 2))\<rbrace>
copy_mrs sender sbuf receiver rbuf n
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: copy_mrs_def)
apply (wp as_user_respects_in_ipc store_word_offs_respects_in_ipc
mapM_wp'
hoare_vcg_const_imp_lift hoare_vcg_all_lift
| wpc
| fastforce split: if_split_asm simp: length_msg_registers)+
done
lemma do_normal_transfer_respects_in_ipc:
notes lec_valid_cap[wp del]
shows
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and pas_refined aag
and valid_objs and valid_mdb
and st_tcb_at can_receive_ipc receiver
and (\<lambda>s. grant \<longrightarrow> is_subject aag sender
\<and> is_subject aag receiver)
and K ((\<not> is_subject aag receiver \<longrightarrow>
(case recv_buf of None \<Rightarrow> True
| Some buf' \<Rightarrow> auth_ipc_buffers st receiver
= ptr_range buf' msg_align_bits)) \<and>
(case recv_buf of None \<Rightarrow> True | Some buf' \<Rightarrow> is_aligned buf' msg_align_bits))\<rbrace>
do_normal_transfer sender sbuf epopt badge grant receiver recv_buf
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: do_normal_transfer_def)
apply (wp as_user_respects_in_ipc set_message_info_respects_in_ipc
transfer_caps_respects_in_ipc copy_mrs_respects_in_ipc get_mi_valid'
lookup_extra_caps_authorised lookup_extra_caps_length get_mi_length
hoare_vcg_const_Ball_lift hoare_vcg_conj_lift_R hoare_vcg_const_imp_lift
lec_valid_cap'
| rule hoare_drop_imps
| simp)+
apply (auto simp: null_def intro: st_tcb_at_tcb_at)
done
lemma set_mrs_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st and
K ((\<not> is_subject aag receiver \<longrightarrow> (case recv_buf of None \<Rightarrow> True | Some buf' \<Rightarrow> auth_ipc_buffers st receiver = ptr_range buf' msg_align_bits)) \<and>
(case recv_buf of None \<Rightarrow> True | Some buf' \<Rightarrow> is_aligned buf' msg_align_bits))\<rbrace>
set_mrs receiver recv_buf msgs
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: set_mrs_def set_object_def get_object_def)
apply (wp mapM_x_wp' store_word_offs_respects_in_ipc
| wpc
| simp split del: if_split add: zipWithM_x_mapM_x split_def)+
apply (clarsimp simp add: set_zip nth_append simp: msg_align_bits msg_max_length_def
split: if_split_asm)
apply (simp add: length_msg_registers)
apply arith
apply simp
apply wp+
apply (clarsimp simp: arch_tcb_set_registers_def)
by (rule update_tcb_context_in_ipc [unfolded fun_upd_def]; fastforce)
lemma do_fault_transfer_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st and
K ((\<not> is_subject aag receiver \<longrightarrow> (case recv_buf of None \<Rightarrow> True | Some buf' \<Rightarrow> auth_ipc_buffers st receiver = ptr_range buf' msg_align_bits)) \<and>
(case recv_buf of None \<Rightarrow> True | Some buf' \<Rightarrow> is_aligned buf' msg_align_bits))\<rbrace>
do_fault_transfer badge sender receiver recv_buf
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: do_fault_transfer_def split_def)
apply (wp as_user_respects_in_ipc set_message_info_respects_in_ipc set_mrs_respects_in_ipc
| wpc
| simp
| rule hoare_drop_imps)+
done
lemma lookup_ipc_buffer_ptr_range_in_ipc:
"\<lbrace>valid_objs and integrity_tcb_in_ipc aag X thread epptr tst st\<rbrace>
lookup_ipc_buffer True thread
\<lbrace>\<lambda>rv s. \<not> is_subject aag thread \<longrightarrow>
(case rv of None \<Rightarrow> True
| Some buf' \<Rightarrow> auth_ipc_buffers st thread = ptr_range buf' msg_align_bits) \<rbrace>"
unfolding lookup_ipc_buffer_def
apply (rule hoare_pre)
apply (wp get_cap_wp thread_get_wp' | wpc)+
apply (clarsimp simp: cte_wp_at_caps_of_state ipc_buffer_has_auth_def get_tcb_ko_at [symmetric])
apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"])
apply (simp add: dom_tcb_cap_cases)
apply (clarsimp simp: auth_ipc_buffers_def get_tcb_ko_at [symmetric] integrity_tcb_in_ipc_def)
apply (drule get_tcb_SomeD)
apply (erule(1) valid_objsE)
apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def case_bool_if
split: if_split_asm)
apply (erule tcb_in_ipc.cases; clarsimp simp: get_tcb_def vm_read_write_def)
done
lemma lookup_ipc_buffer_aligned:
"\<lbrace>valid_objs\<rbrace>
lookup_ipc_buffer True thread
\<lbrace>\<lambda>rv s. (case rv of None \<Rightarrow> True | Some buf' \<Rightarrow> is_aligned buf' msg_align_bits) \<rbrace>"
unfolding lookup_ipc_buffer_def
apply (rule hoare_pre)
apply (wp get_cap_wp thread_get_wp' | wpc)+
apply (clarsimp simp: cte_wp_at_caps_of_state get_tcb_ko_at [symmetric])
apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"])
apply (simp add: dom_tcb_cap_cases)
apply (frule (1) caps_of_state_valid_cap)
apply (clarsimp simp: valid_cap_simps cap_aligned_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_andI1)
apply (drule (1) valid_tcb_objs)
apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def
split: if_splits)
apply (rule order_trans [OF _ pbfs_atleast_pageBits])
apply (simp add: msg_align_bits pageBits_def)
done
lemma do_ipc_transfer_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and pas_refined aag
and valid_objs and valid_mdb
and st_tcb_at can_receive_ipc receiver
and (\<lambda>s. grant \<longrightarrow> is_subject aag sender
\<and> is_subject aag receiver)
\<rbrace>
do_ipc_transfer sender epopt badge grant receiver
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: do_ipc_transfer_def)
apply (wp do_normal_transfer_respects_in_ipc do_fault_transfer_respects_in_ipc
lookup_ipc_buffer_ptr_range_in_ipc lookup_ipc_buffer_aligned
hoare_vcg_conj_lift
| wpc
| simp
| rule hoare_drop_imps)+
apply (auto intro: st_tcb_at_tcb_at)
done
lemma sts_ext_running_noop:
"\<lbrace>P and st_tcb_at (runnable) receiver\<rbrace> set_thread_state_ext receiver \<lbrace>\<lambda>_. P\<rbrace>"
apply (simp add: set_thread_state_ext_def get_thread_state_def thread_get_def
| wp set_scheduler_action_wp)+
apply (clarsimp simp add: st_tcb_at_def obj_at_def get_tcb_def)
done
lemma set_thread_state_running_respects_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st and st_tcb_at(receive_blocked_on epptr) receiver and K(aag_has_auth_to aag SyncSend epptr)\<rbrace>
set_thread_state receiver Structures_A.thread_state.Running
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRFinalOrCall st\<rbrace>"
apply (simp add: set_thread_state_def)
apply (wpsimp wp: set_object_wp sts_ext_running_noop)
apply (auto simp: st_tcb_at_def obj_at_def get_tcb_def
get_tcb_rev update_tcb_state_in_ipc
cong: if_cong
elim: update_tcb_state_in_ipc[unfolded fun_upd_def])
done
lemma set_endpoinintegrity_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and K (aag_has_auth_to aag SyncSend epptr)\<rbrace>
set_endpoint epptr ep'
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRContext st\<rbrace>"
apply (simp add: set_simple_ko_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp split: Structures_A.kernel_object.splits
simp: obj_at_def is_tcb is_ep integrity_tcb_in_ipc_def
partial_inv_def a_type_def)
apply (intro impI conjI)
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
apply clarsimp
apply (erule tcb_in_ipc.cases, simp_all)
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
apply (fastforce intro: tro_ep)
done
(* FIXME: move *)
lemma valid_ep_recv_dequeue:
"\<lbrakk> ko_at (Endpoint (Structures_A.endpoint.RecvEP (t # ts))) epptr s;
valid_objs s; sym_refs (state_refs_of s) \<rbrakk>
\<Longrightarrow> valid_ep (case ts of [] \<Rightarrow> Structures_A.endpoint.IdleEP
| b # bs \<Rightarrow> Structures_A.endpoint.RecvEP ts) s"
unfolding valid_objs_def valid_obj_def valid_ep_def obj_at_def
apply (drule bspec)
apply (auto split: list.splits)
done
lemma integrity_tcb_in_ipc_refl:
"\<lbrakk> st_tcb_at can_receive_ipc receiver s; \<not> is_subject aag receiver; pas_refined aag s\<rbrakk>
\<Longrightarrow> integrity_tcb_in_ipc aag X receiver epptr TRContext s s"
unfolding integrity_tcb_in_ipc_def
apply (clarsimp simp: st_tcb_def2)
apply (rule tii_context [OF get_tcb_SomeD get_tcb_SomeD], assumption+)
apply (rule tcb_context_no_change)
apply simp
done
(* stronger *)
(* MOVE *)
lemma ep_queued_st_tcb_at'':
"\<And>P. \<lbrakk>ko_at (Endpoint ep) ptr s; (t, rt) \<in> ep_q_refs_of ep;
valid_objs s; sym_refs (state_refs_of s);
\<And>pl pl'. (rt = EPSend \<and> P (Structures_A.BlockedOnSend ptr pl)) \<or>
(rt = EPRecv \<and> P (Structures_A.BlockedOnReceive ptr pl')) \<rbrakk>
\<Longrightarrow> st_tcb_at P t s"
apply (case_tac ep, simp_all)
apply (frule (1) sym_refs_ko_atD, fastforce simp: st_tcb_at_def obj_at_def refs_of_rev)+
done
subsubsection \<open>Inserting the reply cap\<close>
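(* These lemmas cover the Call variant of the rendezvous: the subject (the caller)
   installs a reply cap to itself in the caller slot (tcb_cnode_index 3) of the
   non-subject receiver.  This is the step that takes the in-IPC invariant from
   TRFinalOrCall to TRFinal via rule tii_call, after which
   integrity_tcb_in_ipc_final applies. *)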
lemma integrity_tcb_in_ipc_no_call:
"integrity_tcb_in_ipc aag X receiver epptr TRFinalOrCall st s
\<Longrightarrow> integrity_tcb_in_ipc aag X receiver epptr TRFinal st s"
unfolding integrity_tcb_in_ipc_def tcb_in_ipc.simps by clarsimp
lemma set_original_respects_in_ipc_reply:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K(st_tcb_at (\<lambda>st. direct_call {pasSubject aag} aag epptr st) receiver st)\<rbrace>
set_original (receiver, tcb_cnode_index 3) orig
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (wp set_original_wp)
apply (clarsimp simp: integrity_tcb_in_ipc_def)
apply (simp add: integrity_def tcb_states_of_state_def get_tcb_def
split del: if_split cong: if_cong)
apply (fold get_tcb_def tcb_states_of_state_def)
apply (clarsimp simp:st_tcb_at_tcb_states_of_state)
apply (rule integrity_cdt_change_allowed)
apply (rule cca_reply; force)
done
lemma cap_insert_ext_integrity_in_ipc_reply:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K(is_subject aag (fst src_slot)
\<and> st_tcb_at (\<lambda>st. direct_call {pasSubject aag} aag epptr st) receiver st)\<rbrace>
cap_insert_ext src_parent src_slot (receiver, tcb_cnode_index 3) src_p dest_p
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: integrity_tcb_in_ipc_def split del: if_split)
apply (unfold integrity_def)
apply (simp only: integrity_cdt_list_as_list_integ)
apply (clarsimp simp: integrity_tcb_in_ipc_def integrity_def
tcb_states_of_state_def get_tcb_def
split del: if_split cong: if_cong)
apply wp
apply (simp add: list_integ_def del: split_paired_All)
apply (fold list_integ_def get_tcb_def tcb_states_of_state_def)
apply (wp cap_insert_list_integrity)
apply simp
apply (simp add: list_integ_def del: split_paired_All)
apply (fold list_integ_def get_tcb_def tcb_states_of_state_def)
apply (clarsimp simp: st_tcb_at_tcb_states_of_state)
apply (rule cca_reply; force)
done
lemma update_cdt_wp:
"\<lbrace>\<lambda>s. P (s\<lparr> cdt := f (cdt s) \<rparr>)\<rbrace>
update_cdt f
\<lbrace>\<lambda>_. P \<rbrace>"
by (wpsimp simp: update_cdt_def set_cdt_def)
lemma update_cdt_reply_in_ipc:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st
and K(st_tcb_at (\<lambda>st. direct_call {pasSubject aag} aag epptr st) receiver st)\<rbrace>
update_cdt (\<lambda>cdt. cdt ((receiver, tcb_cnode_index 3) := val cdt))
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (wp update_cdt_wp)
apply (clarsimp simp: integrity_tcb_in_ipc_def)
apply (simp add: integrity_def tcb_states_of_state_def get_tcb_def
split del: if_split cong: if_cong)
apply (fold get_tcb_def tcb_states_of_state_def)
apply (clarsimp simp:st_tcb_at_tcb_states_of_state)
apply (rule integrity_cdt_change_allowed)
apply (rule cca_reply; force)
done
lemma spec_valid_direct:
"( P s \<Longrightarrow> s \<turnstile> \<lbrace> \<top> \<rbrace> f \<lbrace> Q \<rbrace>) \<Longrightarrow> s \<turnstile> \<lbrace>P\<rbrace> f \<lbrace> Q \<rbrace>"
by (simp add: spec_valid_def valid_def)
lemma set_cap_respects_in_ipc_reply:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRFinalOrCall st
and K(st_tcb_at (\<lambda>st. direct_call {pasSubject aag} aag epptr st) receiver st
\<and> is_subject aag caller)\<rbrace>
set_cap (ReplyCap caller False R) (receiver, tcb_cnode_index 3)
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRFinal st\<rbrace>"
unfolding set_cap_def
apply simp
apply (rule hoare_seq_ext[OF _ get_object_sp])
including no_pre
apply (wp set_object_wp)
apply (rule use_spec')
apply (rule spec_valid_direct)
apply (clarsimp simp:tcb_at_def get_tcb_def dest!:ko_atD split:kernel_object.splits)
apply (simp add: spec_valid_def valid_def return_def)
unfolding integrity_tcb_in_ipc_def
apply (clarsimp simp:st_tcb_at_tcb_states_of_state )
apply (clarsimp simp:tcb_states_of_state_def direct_call_def dest!:get_tcb_SomeD)
by (erule tcb_in_ipc.cases; (force intro:tii_call))
lemma cap_insert_reply_cap_respects_in_ipc:
"\<lbrace> integrity_tcb_in_ipc aag X receiver epptr TRFinalOrCall st and
K (st_tcb_at (direct_call {pasSubject aag} aag epptr) receiver st \<and>
is_subject aag caller \<and> is_subject aag (fst master_slot))\<rbrace>
cap_insert
(ReplyCap caller False R)
master_slot (receiver, tcb_cnode_index 3)
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr TRFinal st\<rbrace>"
unfolding cap_insert_def
apply (rule hoare_pre)
apply (wp add:set_original_respects_in_ipc_reply)
apply simp
apply (wp cap_insert_ext_integrity_in_ipc_reply update_cdt_reply_in_ipc
set_cap_respects_in_ipc_reply set_untyped_cap_as_full_not_untyped get_cap_wp)+
by fastforce
lemma set_scheduler_action_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>
set_scheduler_action action
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
unfolding set_scheduler_action_def
by (wpsimp simp: integrity_tcb_in_ipc_def integrity_def tcb_states_of_state_def get_tcb_def)
lemma exists_cons_append:
"\<exists>xs. xs @ ys = zs \<Longrightarrow> \<exists>xs. xs @ ys = z # zs"
by auto
lemma tcb_sched_action_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>
tcb_sched_action tcb_sched_enqueue target
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (simp add: tcb_sched_action_def)
apply wp
apply (clarsimp simp: integrity_def integrity_tcb_in_ipc_def tcb_states_of_state_def get_tcb_def
integrity_ready_queues_def pas_refined_def tcb_domain_map_wellformed_aux_def
tcb_at_def get_etcb_def tcb_sched_enqueue_def etcb_at_def
split: option.splits)
apply (fastforce intro: exists_cons_append)
done
crunches possible_switch_to, set_thread_state
for respects_in_ipc_autarch: "integrity_tcb_in_ipc aag X receiver epptr ctxt st"
(wp: tcb_sched_action_respects_in_ipc_autarch ignore: tcb_sched_action)
lemma setup_caller_cap_respects_in_ipc_reply:
"\<lbrace> integrity_tcb_in_ipc aag X receiver epptr TRFinalOrCall st
and K (is_subject aag sender \<and> st_tcb_at (direct_call {pasSubject aag} aag epptr) receiver st) \<rbrace>
setup_caller_cap sender receiver grant
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr TRFinal st \<rbrace>"
unfolding setup_caller_cap_def
by (wpsimp wp: cap_insert_reply_cap_respects_in_ipc set_thread_state_respects_in_ipc_autarch)
lemma send_ipc_integrity_autarch:
"\<lbrace>integrity aag X st and pas_refined aag
and invs
and is_subject aag \<circ> cur_thread
and obj_at (\<lambda>ep. can_grant \<longrightarrow> (\<forall>r \<in> refs_of ep. snd r = EPRecv \<longrightarrow> is_subject aag (fst r))) epptr
and K (is_subject aag sender \<and> aag_has_auth_to aag SyncSend epptr \<and>
(can_grant_reply \<longrightarrow> aag_has_auth_to aag Call epptr))\<rbrace>
send_ipc block call badge can_grant can_grant_reply sender epptr
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: send_ipc_def)
apply (rule hoare_seq_ext[OF _ get_simple_ko_sp])
apply (case_tac ep)
\<comment> \<open>IdleEP\<close>
apply simp
apply (rule hoare_pre)
apply (wp set_endpoinintegrity set_thread_state_integrity_autarch
| wpc | simp)+
apply (fastforce simp: obj_at_def is_ep) \<comment> \<open>ep_at and has_auth\<close>
\<comment> \<open>SendEP\<close>
apply simp
apply (rule hoare_pre)
apply (wp set_endpoinintegrity set_thread_state_integrity_autarch
| wpc | simp)+
apply (fastforce simp: obj_at_def is_ep) \<comment> \<open>ep_at and has_auth\<close>
\<comment> \<open>WaitingEP\<close>
apply (rename_tac list)
apply simp
apply (case_tac "is_subject aag (hd list)") \<comment> \<open>autarch or not on receiver side\<close>
apply clarsimp
apply (rule hoare_pre)
apply (wp setup_caller_cap_integrity_autarch set_thread_state_integrity_autarch thread_get_wp'
| wpc)+
apply (rule_tac Q="\<lambda>rv s. integrity aag X st s\<and> (can_grant \<longrightarrow> is_subject aag (hd list))"
in hoare_strengthen_post[rotated])
apply simp+
apply (wp set_thread_state_integrity_autarch thread_get_wp'
do_ipc_transfer_integrity_autarch
hoare_vcg_all_lift hoare_drop_imps set_endpoinintegrity
| wpc | simp add: get_thread_state_def split del: if_split
del: hoare_True_E_R)+
apply (fastforce simp: a_type_def obj_at_def is_ep elim: send_ipc_valid_ep_helper)
\<comment> \<open>we don't own head of queue\<close>
apply clarsimp
apply (rule use_spec') \<comment> \<open>Name initial state\<close>
apply (simp add: spec_valid_def) \<comment> \<open>no imp rule?\<close>
apply (rule hoare_pre)
apply (wpc, wp)
apply (rename_tac list s receiver queue)
apply (rule_tac Q = "\<lambda>_ s'. integrity aag X st s \<and>
integrity_tcb_in_ipc aag X receiver epptr TRFinal s s'" in hoare_post_imp)
apply (fastforce dest!: integrity_tcb_in_ipc_final elim!: integrity_trans)
apply (wp setup_caller_cap_respects_in_ipc_reply
set_thread_state_respects_in_ipc_autarch[where param_b = Inactive]
hoare_vcg_if_lift static_imp_wp possible_switch_to_respects_in_ipc_autarch
set_thread_state_running_respects_in_ipc do_ipc_transfer_respects_in_ipc thread_get_inv
set_endpoinintegrity_in_ipc
| wpc
| strengthen integrity_tcb_in_ipc_no_call
| wp hoare_drop_imps
| simp add:get_thread_state_def)+
apply (clarsimp intro:integrity_tcb_in_ipc_refl)
apply (frule_tac t=x in sym_ref_endpoint_recvD[OF invs_sym_refs],assumption,simp)
apply (clarsimp simp:st_tcb_at_tcb_states_of_state_eq st_tcb_at_tcb_states_of_state direct_call_def)
apply (subgoal_tac "\<not> can_grant")
apply (force intro!: integrity_tcb_in_ipc_refl
simp: st_tcb_at_tcb_states_of_state
elim: send_ipc_valid_ep_helper)
apply (force elim:obj_at_ko_atE)
done
section\<open>Faults\<close>
(* FIXME: move *)
lemma valid_tcb_fault_update:
"\<lbrakk> valid_tcb p t s; valid_fault fault \<rbrakk> \<Longrightarrow> valid_tcb p (t\<lparr>tcb_fault := Some fault\<rparr>) s"
by (simp add: valid_tcb_def ran_tcb_cap_cases)
lemma thread_set_fault_pas_refined:
"\<lbrace>pas_refined aag\<rbrace>
thread_set (tcb_fault_update (\<lambda>_. Some fault)) thread
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (wp send_ipc_pas_refined thread_set_pas_refined
thread_set_refs_trivial thread_set_obj_at_impossible
| simp)+
done
lemma owns_ep_owns_receivers':
"\<lbrakk> (\<forall>auth. aag_has_auth_to aag auth epptr); pas_refined aag s; valid_objs s;
sym_refs (state_refs_of s); ko_at (Endpoint ep) epptr s; (t, EPRecv) \<in> ep_q_refs_of ep\<rbrakk>
\<Longrightarrow> is_subject aag t"
apply (drule (1) ep_rcv_queued_st_tcb_at [where P = "receive_blocked_on epptr"])
apply clarsimp
apply clarsimp
apply clarsimp
apply (rule refl)
apply (drule st_tcb_at_to_thread_states)
apply (clarsimp simp: receive_blocked_on_def2)
apply (drule spec [where x = Grant])
apply (frule aag_wellformed_grant_Control_to_recv [OF _ _ pas_refined_wellformed])
apply (rule pas_refined_mem [OF sta_ts])
apply fastforce
apply assumption
apply assumption
apply (erule (1) aag_Control_into_owns)
done
lemma send_fault_ipc_pas_refined:
"\<lbrace>pas_refined aag
and invs
and is_subject aag \<circ> cur_thread
and K (valid_fault fault)
and K (is_subject aag thread)\<rbrace>
send_fault_ipc thread fault
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: send_fault_ipc_def Let_def lookup_cap_def split_def)
apply (wp send_ipc_pas_refined thread_set_fault_pas_refined thread_set_tcb_fault_set_invs
thread_set_refs_trivial thread_set_obj_at_impossible
get_cap_wp thread_set_valid_objs''
hoare_vcg_conj_lift hoare_vcg_ex_lift hoare_vcg_all_lift
| wpc
| rule hoare_drop_imps
| simp add: split_def split del: if_split)+
apply (rule_tac Q'="\<lambda>rv s. pas_refined aag s
\<and> is_subject aag (cur_thread s)
\<and> invs s
\<and> valid_fault fault
\<and> is_subject aag (fst (fst rv))"
in hoare_post_imp_R[rotated])
apply (fastforce dest!: cap_auth_caps_of_state
simp: invs_valid_objs invs_sym_refs cte_wp_at_caps_of_state aag_cap_auth_def
cap_auth_conferred_def cap_rights_to_auth_def AllowSend_def)
apply (wp get_cap_auth_wp[where aag=aag] lookup_slot_for_thread_authorised
| simp add: lookup_cap_def split_def)+
done
lemma handle_fault_pas_refined:
"\<lbrace>pas_refined aag
and invs
and is_subject aag \<circ> cur_thread
and K (valid_fault fault)
and K (is_subject aag thread)\<rbrace>
handle_fault thread fault
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: handle_fault_def)
apply (wp set_thread_state_pas_refined send_fault_ipc_pas_refined
| simp add: handle_double_fault_def)+
done
lemma thread_set_tcb_fault_update_valid_mdb:
"\<lbrace>valid_mdb\<rbrace>
thread_set (tcb_fault_update (\<lambda>_. Some fault)) thread
\<lbrace>\<lambda>rv. valid_mdb\<rbrace>"
apply(rule thread_set_mdb)
apply(clarsimp simp: tcb_cap_cases_def)
apply auto
done
(* FIXME: MOVE *)
lemma obj_at_conj_distrib:
"obj_at (\<lambda>ko. P ko \<and> Q ko) p s = (obj_at (\<lambda>ko. P ko) p s \<and> obj_at (\<lambda>ko. Q ko) p s)"
by (auto simp: obj_at_def)
lemma send_fault_ipc_integrity_autarch:
"\<lbrace>pas_refined aag
and invs
and integrity aag X st
and is_subject aag \<circ> cur_thread
and K (valid_fault fault)
and K (is_subject aag thread)\<rbrace>
send_fault_ipc thread fault
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: send_fault_ipc_def Let_def)
apply (wp send_ipc_integrity_autarch
thread_set_integrity_autarch thread_set_fault_pas_refined
thread_set_valid_objs'' thread_set_refs_trivial
thread_set_tcb_fault_update_valid_mdb
thread_set_tcb_fault_set_invs
| wpc
| simp add: is_obj_defs)+
(* 14 subgoals *)
apply (rename_tac word1 word2 set)
apply (rule_tac R="\<lambda>rv s. ep_at word1 s" in hoare_post_add)
apply (simp only: obj_at_conj_distrib[symmetric] flip: conj_assoc)
apply (wp thread_set_obj_at_impossible thread_set_tcb_fault_set_invs
get_cap_auth_wp[where aag=aag]
| simp add: lookup_cap_def is_obj_defs split_def)+
(* down to 3 : normal indentation *)
apply (rule_tac Q'="\<lambda>rv s. integrity aag X st s \<and> pas_refined aag s
\<and> invs s
\<and> valid_fault fault
\<and> is_subject aag (cur_thread s)
\<and> is_subject aag (fst (fst rv))"
in hoare_post_imp_R[rotated])
apply (clarsimp simp: invs_valid_objs invs_sym_refs cte_wp_at_caps_of_state obj_at_def)
apply (frule(1) caps_of_state_valid)
apply (clarsimp simp: valid_cap_def is_ep aag_cap_auth_def cap_auth_conferred_def
cap_rights_to_auth_def AllowSend_def
elim!: obj_atE)
apply (intro conjI ; fastforce ?)
apply (clarsimp simp:ep_q_refs_of_def split:endpoint.splits)
apply (frule(1) pas_refined_ep_recv, simp add:obj_at_def,assumption)
apply (frule(1) aag_wellformed_grant_Control_to_recv[OF _ _ pas_refined_wellformed,rotated],
blast)
apply (simp add:aag_has_auth_to_Control_eq_owns)
apply (wp lookup_slot_for_thread_authorised)+
apply simp
done
lemma handle_fault_integrity_autarch:
"\<lbrace>pas_refined aag
and integrity aag X st
and is_subject aag \<circ> cur_thread
and invs
and K (valid_fault fault)
and K (is_subject aag thread)\<rbrace>
handle_fault thread fault
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: handle_fault_def)
apply (wp set_thread_state_integrity_autarch send_fault_ipc_integrity_autarch
| simp add: handle_double_fault_def)+
done
section\<open>Replies\<close>
crunch pas_refined[wp]: handle_fault_reply "pas_refined aag"
lemma handle_fault_reply_respects:
"\<lbrace>integrity aag X st and K (is_subject aag thread)\<rbrace>
handle_fault_reply fault thread x y
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (cases fault, simp_all)
apply (wp as_user_integrity_autarch
| simp add: handle_arch_fault_reply_def arch_get_sanitise_register_info_def)+
done
lemma tcb_st_to_auth_Restart_Inactive [simp]:
"tcb_st_to_auth (if P then Restart else Inactive) = {}"
by simp
lemma do_reply_transfer_pas_refined:
"\<lbrace>pas_refined aag
and invs and K (is_subject aag sender)
and K ((grant \<longrightarrow> is_subject aag receiver) \<and> is_subject aag (fst slot))\<rbrace>
do_reply_transfer sender receiver slot grant
\<lbrace>\<lambda>rv. pas_refined aag\<rbrace>"
apply (simp add: do_reply_transfer_def)
apply (rule hoare_pre)
apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined
thread_set_pas_refined K_valid
| wpc
| simp add: thread_get_def split del: if_split)+
(* otherwise simp does too much *)
apply (rule hoare_strengthen_post, rule gts_inv)
apply (rule impI)
apply assumption
apply auto
done
lemma update_tcb_state_in_ipc_reply:
"\<lbrakk> integrity_tcb_in_ipc aag X thread epptr TRContext st s;
tcb_state tcb = BlockedOnReply; aag_has_auth_to aag Reply thread; tcb_fault tcb = None;
get_tcb thread s = Some tcb; tcb' = tcb\<lparr>tcb_state := Structures_A.thread_state.Running\<rparr>
\<rbrakk> \<Longrightarrow>
integrity_tcb_in_ipc aag X thread epptr TRFinal st
(s \<lparr> kheap := (kheap s)(thread \<mapsto> TCB tcb') \<rparr>)"
unfolding integrity_tcb_in_ipc_def
apply (elim conjE)
apply (intro conjI)
apply assumption+
apply (erule integrity_trans)
apply (simp cong: if_cong)
apply clarsimp
apply (erule tcb_in_ipc.cases, simp_all)
apply (drule get_tcb_SomeD)
apply (rule tii_reply[OF refl refl])
apply (elim exE, intro exI tcb.equality; solves \<open>simp\<close>)
apply auto
done
abbreviation "fault_tcb_at \<equiv> pred_tcb_at itcb_fault"
lemma fault_tcb_atI:
"\<lbrakk>kheap s ptr = Some (TCB tcb); P (tcb_fault tcb) \<rbrakk> \<Longrightarrow> fault_tcb_at P ptr s"
by (fastforce simp:pred_tcb_at_def obj_at_def)
lemma fault_tcb_atE:
assumes hyp:"fault_tcb_at P ptr s"
obtains tcb where "kheap s ptr = Some (TCB tcb)" and "P (tcb_fault tcb)"
using hyp by (fastforce simp:pred_tcb_at_def elim: obj_atE)
lemma set_thread_state_running_respects_in_ipc_reply:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr TRContext st
and st_tcb_at awaiting_reply receiver
and fault_tcb_at ((=) None) receiver
and K (aag_has_auth_to aag Reply receiver)\<rbrace>
set_thread_state receiver Structures_A.thread_state.Running
\<lbrace>\<lambda>rv. integrity_tcb_in_ipc aag X receiver epptr TRFinal st\<rbrace>"
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply (wp sts_ext_running_noop)
apply (auto simp: st_tcb_at_def obj_at_def get_tcb_def
cong: if_cong
elim!: fault_tcb_atE elim: update_tcb_state_in_ipc_reply[unfolded fun_upd_def])
done
end
context is_extended begin
interpretation Arch . (*FIXME: arch_split*)
lemma list_integ_lift_in_ipc:
assumes li:
"\<lbrace>list_integ (cdt_change_allowed aag {pasSubject aag} (cdt st) (tcb_states_of_state st)) st and Q\<rbrace>
f
\<lbrace>\<lambda>_. list_integ (cdt_change_allowed aag {pasSubject aag} (cdt st) (tcb_states_of_state st)) st\<rbrace>"
assumes ekh: "\<And>P. \<lbrace>\<lambda>s. P (ekheap s)\<rbrace> f \<lbrace>\<lambda>rv s. P (ekheap s)\<rbrace>"
assumes rq: "\<And>P. \<lbrace> \<lambda>s. P (ready_queues s) \<rbrace> f \<lbrace> \<lambda>rv s. P (ready_queues s) \<rbrace>"
shows "\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st and Q\<rbrace> f \<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
apply (unfold integrity_tcb_in_ipc_def integrity_def[abs_def])
apply (simp del:split_paired_All)
apply (rule hoare_pre)
apply (simp only: integrity_cdt_list_as_list_integ)
apply (rule hoare_lift_Pf2[where f="ekheap"])
apply (simp add: tcb_states_of_state_def get_tcb_def)
apply (wp li[simplified tcb_states_of_state_def get_tcb_def] ekh rq)+
apply (simp only: integrity_cdt_list_as_list_integ)
apply (simp add: tcb_states_of_state_def get_tcb_def)
done
end
context begin interpretation Arch . (*FIXME: arch_split*)
lemma fast_finalise_reply_respects_in_ipc_autarch:
"\<lbrace> integrity_tcb_in_ipc aag X receiver epptr ctxt st and K (is_reply_cap cap) \<rbrace>
fast_finalise cap final
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st \<rbrace>"
by (rule hoare_gen_asm) (fastforce simp: is_cap_simps)
lemma empty_slot_list_integrity':
notes split_paired_All[simp del]
shows
"\<lbrace>list_integ P st and (\<lambda>s . cdt_list s slot = []) and K(P slot)\<rbrace> empty_slot_ext slot slot_p \<lbrace>\<lambda>_. list_integ P st\<rbrace>"
apply (simp add: empty_slot_ext_def split del: if_split)
apply (wp update_cdt_list_wp)
apply (intro impI conjI allI | simp add: list_filter_replace_list list_filter_remove split: option.splits | elim conjE | simp add: list_integ_def)+
done
lemma tcb_state_of_states_cdt_update_behind_kheap[simp]:
"tcb_states_of_state (kheap_update g (cdt_update f s)) = tcb_states_of_state (kheap_update g s)"
by (simp add:tcb_states_of_state_def get_tcb_def)
lemma set_cdt_empty_slot_respects_in_ipc_autarch:
"\<lbrace> integrity_tcb_in_ipc aag X receiver epptr ctxt st and (\<lambda>s. m =cdt s)
and (\<lambda>s. descendants_of slot m = {}) and K(is_subject aag (fst slot))\<rbrace>
set_cdt ((\<lambda>p. if m p = Some slot then m slot else m p)(slot := None))
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st \<rbrace>"
unfolding set_cdt_def
apply wp
apply (simp add: integrity_tcb_in_ipc_def integrity_def)
apply (force simp:no_children_empty_desc[symmetric])
done
lemma reply_cap_no_children':
"\<lbrakk> valid_mdb s; caps_of_state s p = Some (ReplyCap t False r) \<rbrakk> \<Longrightarrow> \<forall>p' .cdt s p' \<noteq> Some p"
using reply_cap_no_children ..
lemma valid_list_empty:
"\<lbrakk> valid_list_2 list m; descendants_of slot m = {}\<rbrakk> \<Longrightarrow> list slot = []"
unfolding valid_list_2_def
apply (drule no_children_empty_desc[THEN iffD2])
apply (rule classical)
by (fastforce simp del:split_paired_All split_paired_Ex simp add:neq_Nil_conv)
lemma empty_slot_respects_in_ipc_autarch:
"\<lbrace>integrity_tcb_in_ipc aag X receiver epptr ctxt st and valid_mdb and valid_list
and cte_wp_at is_reply_cap slot and K (is_subject aag (fst slot))\<rbrace>
empty_slot slot NullCap
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st\<rbrace>"
unfolding empty_slot_def apply simp
apply (wp add: set_cap_respects_in_ipc_autarch set_original_respects_in_ipc_autarch)
apply (wp empty_slot_extended.list_integ_lift_in_ipc empty_slot_list_integrity')
apply simp
apply wp+
apply (wp set_cdt_empty_slot_respects_in_ipc_autarch)
apply (simp add: set_cdt_def)
apply wp
apply wp
apply wp
apply (wp get_cap_wp)
apply (clarsimp simp: integrity_tcb_in_ipc_def cte_wp_at_caps_of_state is_cap_simps)
apply (drule(1) reply_cap_no_children')
by (force dest: valid_list_empty simp: no_children_empty_desc simp del: split_paired_All)
lemma cte_delete_one_respects_in_ipc_autharch:
"\<lbrace> integrity_tcb_in_ipc aag X receiver epptr ctxt st and valid_mdb and valid_list and
cte_wp_at is_reply_cap slot and K (is_subject aag (fst slot)) \<rbrace>
cap_delete_one slot
\<lbrace>\<lambda>_. integrity_tcb_in_ipc aag X receiver epptr ctxt st \<rbrace>"
unfolding cap_delete_one_def
apply (wp empty_slot_respects_in_ipc_autarch
fast_finalise_reply_respects_in_ipc_autarch get_cap_wp)
by (fastforce simp:cte_wp_at_caps_of_state is_cap_simps)
text \<open>The special case of fault reply needs different machinery than the *_in_ipc infrastructure
because there is no @{term underlying_memory} modification.\<close>
datatype tcb_respects_fault_state = TRFContext | TRFRemoveFault | TRFFinal
inductive tcb_in_fault_reply for aag tst l' ko ko'
where
tifr_lrefl: "\<lbrakk> l' = pasSubject aag \<rbrakk> \<Longrightarrow> tcb_in_fault_reply aag tst l' ko ko'"
| tifr_context: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
\<exists>ctxt'. tcb' = tcb \<lparr>tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb)\<rparr>;
(pasSubject aag, Reply, l') \<in> pasPolicy aag;
tcb_state tcb = BlockedOnReply;
tcb_fault tcb = Some fault;
tst = TRFContext\<rbrakk>
\<Longrightarrow> tcb_in_fault_reply aag tst l' ko ko'"
| tifr_remove_fault: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
\<exists>ctxt'. tcb' = tcb \<lparr> tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb)
, tcb_fault := None\<rparr>;
(pasSubject aag, Reply, l') \<in> pasPolicy aag;
tcb_state tcb = BlockedOnReply;
tcb_fault tcb = Some fault;
tst = TRFRemoveFault\<rbrakk>
\<Longrightarrow> tcb_in_fault_reply aag tst l' ko ko'"
| tifr_reply: "\<lbrakk> ko = Some (TCB tcb);
ko' = Some (TCB tcb');
\<exists>ctxt'. tcb' = tcb \<lparr> tcb_arch := arch_tcb_context_set ctxt' (tcb_arch tcb),
tcb_fault := None,
tcb_state := new_st\<rparr>;
new_st = Structures_A.Restart \<or> new_st = Structures_A.Inactive;
(pasSubject aag, Reply, l') \<in> pasPolicy aag;
tcb_state tcb = BlockedOnReply;
tcb_fault tcb = Some fault;
tst = TRFFinal\<rbrakk>
\<Longrightarrow> tcb_in_fault_reply aag tst l' ko ko'"
definition integrity_tcb_in_fault_reply ::
"'a PAS \<Rightarrow> obj_ref set \<Rightarrow> obj_ref \<Rightarrow> tcb_respects_fault_state \<Rightarrow> det_ext state
\<Rightarrow> det_ext state \<Rightarrow> bool"
where
"integrity_tcb_in_fault_reply aag X thread tst st \<equiv> \<lambda>s.
\<not> is_subject aag thread \<and> pas_refined aag st \<and> \<comment> \<open>more or less convenience\<close>
(integrity aag X st (s\<lparr>kheap := (kheap s)(thread := kheap st thread)\<rparr>)
\<and> (tcb_in_fault_reply aag tst (pasObjectAbs aag thread) (kheap st thread) (kheap s thread)))"
lemma integrity_tcb_in_fault_reply_final:
"\<lbrakk> integrity_tcb_in_fault_reply aag X thread TRFFinal st s \<rbrakk> \<Longrightarrow> integrity aag X st s"
unfolding integrity_tcb_in_fault_reply_def
apply clarsimp
apply (erule integrity_trans)
apply (clarsimp simp: integrity_def)
apply (erule tcb_in_fault_reply.cases; clarsimp)
apply (fastforce intro!:tcb.equality tro_tcb_reply)
done
lemma set_scheduler_action_respects_in_fault_reply:
"\<lbrace>integrity_tcb_in_fault_reply aag X receiver ctxt st\<rbrace>
set_scheduler_action action
\<lbrace>\<lambda>_. integrity_tcb_in_fault_reply aag X receiver ctxt st\<rbrace>"
unfolding set_scheduler_action_def
by (wpsimp simp: integrity_tcb_in_fault_reply_def integrity_def
tcb_states_of_state_def get_tcb_def)
crunches set_thread_state_ext
for respects_in_fault_reply:"integrity_tcb_in_fault_reply aag X receiver ctxt st"
lemma as_user_respects_in_fault_reply:
"\<lbrace>integrity_tcb_in_fault_reply aag X thread TRFContext st\<rbrace>
as_user thread m
\<lbrace>\<lambda>rv. integrity_tcb_in_fault_reply aag X thread TRFContext st\<rbrace>"
apply (simp add: as_user_def)
apply (wpsimp wp: set_object_wp)
apply (clarsimp simp: integrity_tcb_in_fault_reply_def)
apply (erule tcb_in_fault_reply.cases; clarsimp dest!: get_tcb_SomeD)
apply (rule tifr_context[OF refl refl])
apply (intro exI tcb.equality; simp add: arch_tcb_context_set_def)
by fastforce+
lemma handle_fault_reply_respects_in_fault_reply:
"\<lbrace>integrity_tcb_in_fault_reply aag X thread TRFContext st\<rbrace>
handle_fault_reply f thread label mrs
\<lbrace>\<lambda>_. integrity_tcb_in_fault_reply aag X thread TRFContext st\<rbrace>"
by (cases f;
wpsimp simp: handle_arch_fault_reply_def arch_get_sanitise_register_info_def
wp: as_user_respects_in_fault_reply)
lemma thread_set_no_fault_respects_in_fault_reply:
"\<lbrace>integrity_tcb_in_fault_reply aag X thread TRFContext st\<rbrace>
thread_set (\<lambda>tcb. tcb \<lparr> tcb_fault := None \<rparr>) thread
\<lbrace>\<lambda>_. integrity_tcb_in_fault_reply aag X thread TRFRemoveFault st\<rbrace>"
apply (simp add: thread_set_def)
apply (wp set_thread_state_ext_respects_in_fault_reply set_object_wp)
apply (clarsimp simp: integrity_tcb_in_fault_reply_def)
apply (erule tcb_in_fault_reply.cases; clarsimp dest!: get_tcb_SomeD)
apply (rule tifr_remove_fault[OF refl refl])
apply (intro exI tcb.equality ; simp add: arch_tcb_context_set_def)
by fastforce+
lemma set_thread_state_respects_in_fault_reply:
"tst = Restart \<or> tst = Inactive \<Longrightarrow>
\<lbrace>integrity_tcb_in_fault_reply aag X thread TRFRemoveFault st\<rbrace>
set_thread_state thread tst
\<lbrace>\<lambda>_. integrity_tcb_in_fault_reply aag X thread TRFFinal st\<rbrace>"
apply (simp add: set_thread_state_def)
apply (wp set_thread_state_ext_respects_in_fault_reply set_object_wp)
apply (clarsimp simp: integrity_tcb_in_fault_reply_def)
apply (erule tcb_in_fault_reply.cases; clarsimp dest!: get_tcb_SomeD)
apply (rule tifr_reply[OF refl refl])
apply (intro exI tcb.equality; simp add: arch_tcb_context_set_def)
by fastforce+
lemma integrity_tcb_in_fault_reply_refl:
"\<lbrakk> st_tcb_at awaiting_reply receiver s; fault_tcb_at (flip (\<noteq>) None) receiver s;
aag_has_auth_to aag Reply receiver;
\<not> is_subject aag receiver; pas_refined aag s \<rbrakk>
\<Longrightarrow> integrity_tcb_in_fault_reply aag X receiver TRFContext s s"
unfolding integrity_tcb_in_fault_reply_def
apply (clarsimp elim!: pred_tcb_atE)
apply (rule tifr_context[OF refl refl])
apply (rule tcb_context_no_change)
apply fastforce+
done
thm cte_wp_at_emptyableD[simplified cte_wp_at_caps_of_state]
lemma emptyable_not_master:
"\<lbrakk> valid_objs s; caps_of_state s slot = Some cap; \<not> is_master_reply_cap cap\<rbrakk>
\<Longrightarrow> emptyable slot s"
apply (rule emptyable_cte_wp_atD[rotated 2])
apply (intro allI impI, assumption)
by (fastforce simp:is_cap_simps cte_wp_at_caps_of_state)+
lemma do_reply_transfer_respects:
"\<lbrace>pas_refined aag
and integrity aag X st
and einvs \<comment> \<open>cap_delete_one\<close>
and tcb_at sender
and cte_wp_at (is_reply_cap_to receiver) slot
and K (is_subject aag sender)
and K (aag_has_auth_to aag Reply receiver)
and K (is_subject aag (fst slot) \<and> (grant \<longrightarrow> is_subject aag receiver))\<rbrace>
do_reply_transfer sender receiver slot grant
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: do_reply_transfer_def thread_get_def get_thread_state_def)
apply (rule hoare_seq_ext[OF _ assert_get_tcb_sp];force?)
apply (rule hoare_seq_ext[OF _ assert_sp])
apply (rule hoare_seq_ext[OF _ assert_get_tcb_sp];force?)
apply wpc
\<comment> \<open>No fault case\<close>
apply (rule hoare_vcg_if_split[where P= "is_subject aag receiver" and f=f and g=f for f,
simplified if_cancel])
\<comment> \<open>receiver is a subject\<close>
apply ((wp set_thread_state_integrity_autarch thread_set_integrity_autarch
handle_fault_reply_respects do_ipc_transfer_integrity_autarch
do_ipc_transfer_pas_refined
| simp
| intro conjI impI)+)[1]
\<comment> \<open>receiver is not a subject\<close>
apply (rule use_spec') \<comment> \<open>Name initial state\<close>
apply (simp add: spec_valid_def) \<comment> \<open>no imp rule?\<close>
apply (rule_tac Q = "\<lambda>_ s'. integrity aag X st s \<and>
integrity_tcb_in_ipc aag X receiver _ TRFinal s s'" in hoare_post_imp)
apply (fastforce dest!: integrity_tcb_in_ipc_final elim!: integrity_trans)
apply ((wp possible_switch_to_respects_in_ipc_autarch
set_thread_state_running_respects_in_ipc_reply
cte_delete_one_respects_in_ipc_autharch cap_delete_one_reply_st_tcb_at
do_ipc_transfer_pred_tcb do_ipc_transfer_respects_in_ipc
do_ipc_transfer_non_null_cte_wp_at2
| simp add: is_cap_simps is_reply_cap_to_def
| clarsimp)+)[1]
\<comment> \<open>fault case\<close>
apply (rule hoare_vcg_if_split[where P= "is_subject aag receiver" and f=f and g=f for f,
simplified if_cancel])
\<comment> \<open>receiver is a subject\<close>
apply ((wp set_thread_state_integrity_autarch thread_set_integrity_autarch
handle_fault_reply_respects
| simp
| intro conjI impI)+)[1]
\<comment> \<open>receiver is not a subject\<close>
apply (rule hoare_seq_ext, simp)
apply (rule use_spec') \<comment> \<open>Name initial state\<close>
apply (simp add: spec_valid_def) \<comment> \<open>no imp rule?\<close>
apply wp
apply (rule_tac Q = "\<lambda>_ s'. integrity aag X st s \<and>
integrity_tcb_in_fault_reply aag X receiver TRFFinal s s'"
in hoare_post_imp)
apply (fastforce dest!: integrity_tcb_in_fault_reply_final elim!: integrity_trans)
apply (wp set_thread_state_respects_in_fault_reply
thread_set_no_fault_respects_in_fault_reply
handle_fault_reply_respects_in_fault_reply
| simp)+
apply force
apply (strengthen integrity_tcb_in_fault_reply_refl)+
apply (wp cap_delete_one_reply_st_tcb_at)
\<comment> \<open>the end\<close>
by (force simp: st_tcb_at_tcb_states_of_state cte_wp_at_caps_of_state is_cap_simps
is_reply_cap_to_def
dest!: tcb_states_of_state_kheapD get_tcb_SomeD tcb_atD ko_atD
intro: tcb_atI fault_tcb_atI emptyable_not_master
intro!: integrity_tcb_in_ipc_refl tcb_states_of_state_kheapI
elim!: fault_tcb_atE)
lemma reply_from_kernel_integrity_autarch:
"\<lbrace>integrity aag X st and pas_refined aag and valid_objs and K (is_subject aag thread)\<rbrace>
reply_from_kernel thread x
\<lbrace>\<lambda>rv. integrity aag X st\<rbrace>"
apply (simp add: reply_from_kernel_def split_def)
apply (wp set_message_info_integrity_autarch set_mrs_integrity_autarch as_user_integrity_autarch | simp)+
done
end
end
|
(* Author: Tobias Nipkow, 2007 *)
section \<open>Lists as vectors\<close>
theory ListVector
imports MainRLT
begin
text\<open>\noindent
A vector-space-like structure of lists and arithmetic operations on them.
It is only a vector space if restricted to lists of the same length;
a small worked example of the zero-padding behaviour of addition is given
after the basic addition lemmas below.\<close>
text\<open>Multiplication with a scalar:\<close>
abbreviation scale :: "('a::times) \<Rightarrow> 'a list \<Rightarrow> 'a list" (infix "*\<^sub>s" 70)
where "x *\<^sub>s xs \<equiv> map ((*) x) xs"
lemma scale1[simp]: "(1::'a::monoid_mult) *\<^sub>s xs = xs"
by (induct xs) simp_all
subsection \<open>\<open>+\<close> and \<open>-\<close>\<close>
fun zipwith0 :: "('a::zero \<Rightarrow> 'b::zero \<Rightarrow> 'c) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> 'c list"
where
"zipwith0 f [] [] = []" |
"zipwith0 f (x#xs) (y#ys) = f x y # zipwith0 f xs ys" |
"zipwith0 f (x#xs) [] = f x 0 # zipwith0 f xs []" |
"zipwith0 f [] (y#ys) = f 0 y # zipwith0 f [] ys"
instantiation list :: ("{zero, plus}") plus
begin
definition
list_add_def: "(+) = zipwith0 (+)"
instance ..
end
instantiation list :: ("{zero, uminus}") uminus
begin
definition
list_uminus_def: "uminus = map uminus"
instance ..
end
instantiation list :: ("{zero,minus}") minus
begin
definition
list_diff_def: "(-) = zipwith0 (-)"
instance ..
end
lemma zipwith0_Nil[simp]: "zipwith0 f [] ys = map (f 0) ys"
by(induct ys) simp_all
lemma list_add_Nil[simp]: "[] + xs = (xs::'a::monoid_add list)"
by (induct xs) (auto simp:list_add_def)
lemma list_add_Nil2[simp]: "xs + [] = (xs::'a::monoid_add list)"
by (induct xs) (auto simp:list_add_def)
lemma list_add_Cons[simp]: "(x#xs) + (y#ys) = (x+y)#(xs+ys)"
by(auto simp:list_add_def)
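text \<open>
  Editorial sketch, not part of the original development: a concrete instance showing
  that \<open>+\<close> pads the shorter operand with zeros, which is why lists of different
  lengths can still be added (but do not form a vector space). The proof only unfolds
  \<open>list_add_def\<close> and the \<open>zipwith0\<close> equations.
\<close>
lemma "[1, 2, 3] + [10, 20] = [11, 22, 3 :: int]"
by (simp add: list_add_def)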
lemma list_diff_Nil[simp]: "[] - xs = -(xs::'a::group_add list)"
by (induct xs) (auto simp:list_diff_def list_uminus_def)
lemma list_diff_Nil2[simp]: "xs - [] = (xs::'a::group_add list)"
by (induct xs) (auto simp:list_diff_def)
lemma list_diff_Cons_Cons[simp]: "(x#xs) - (y#ys) = (x-y)#(xs-ys)"
by (induct xs) (auto simp:list_diff_def)
lemma list_uminus_Cons[simp]: "-(x#xs) = (-x)#(-xs)"
by (induct xs) (auto simp:list_uminus_def)
lemma self_list_diff:
"xs - xs = replicate (length(xs::'a::group_add list)) 0"
by(induct xs) simp_all
lemma list_add_assoc: fixes xs :: "'a::monoid_add list"
shows "(xs+ys)+zs = xs+(ys+zs)"
apply(induct xs arbitrary: ys zs)
apply simp
apply(case_tac ys)
apply(simp)
apply(simp)
apply(case_tac zs)
apply(simp)
apply(simp add: add.assoc)
done
subsection "Inner product"
definition iprod :: "'a::ring list \<Rightarrow> 'a list \<Rightarrow> 'a" ("\<langle>_,_\<rangle>") where
"\<langle>xs,ys\<rangle> = (\<Sum>(x,y) \<leftarrow> zip xs ys. x*y)"
lemma iprod_Nil[simp]: "\<langle>[],ys\<rangle> = 0"
by(simp add: iprod_def)
lemma iprod_Nil2[simp]: "\<langle>xs,[]\<rangle> = 0"
by(simp add: iprod_def)
lemma iprod_Cons[simp]: "\<langle>x#xs,y#ys\<rangle> = x*y + \<langle>xs,ys\<rangle>"
by(simp add: iprod_def)
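text \<open>
  Editorial sketch, not part of the original development: two concrete instances of the
  inner-product notation. The second shows that, since \<open>\<langle>_,_\<rangle>\<close> is built on \<open>zip\<close>,
  the tail of the longer list is silently ignored.
\<close>
lemma "\<langle>[1, 2, 3], [2, 0, 1]\<rangle> = (5 :: int)"
by simp
lemma "\<langle>[1, 2, 3], [2, 0]\<rangle> = (2 :: int)"
by simp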
lemma iprod0_if_coeffs0: "\<forall>c\<in>set cs. c = 0 \<Longrightarrow> \<langle>cs,xs\<rangle> = 0"
apply(induct cs arbitrary:xs)
apply simp
apply(case_tac xs) apply simp
apply auto
done
lemma iprod_uminus[simp]: "\<langle>-xs,ys\<rangle> = -\<langle>xs,ys\<rangle>"
by(simp add: iprod_def uminus_sum_list_map o_def split_def map_zip_map list_uminus_def)
lemma iprod_left_add_distrib: "\<langle>xs + ys,zs\<rangle> = \<langle>xs,zs\<rangle> + \<langle>ys,zs\<rangle>"
apply(induct xs arbitrary: ys zs)
apply (simp add: o_def split_def)
apply(case_tac ys)
apply simp
apply(case_tac zs)
apply (simp)
apply(simp add: distrib_right)
done
lemma iprod_left_diff_distrib: "\<langle>xs - ys, zs\<rangle> = \<langle>xs,zs\<rangle> - \<langle>ys,zs\<rangle>"
apply(induct xs arbitrary: ys zs)
apply (simp add: o_def split_def)
apply(case_tac ys)
apply simp
apply(case_tac zs)
apply (simp)
apply(simp add: left_diff_distrib)
done
lemma iprod_assoc: "\<langle>x *\<^sub>s xs, ys\<rangle> = x * \<langle>xs,ys\<rangle>"
apply(induct xs arbitrary: ys)
apply simp
apply(case_tac ys)
apply (simp)
apply (simp add: distrib_left mult.assoc)
done
end
|
{-# OPTIONS --cubical --safe #-}
module Data.Bits.Equatable where
open import Data.Bits
open import Prelude
_≡ᴮ_ : Bits → Bits → Bool
[] ≡ᴮ [] = true
(0∷ xs) ≡ᴮ (0∷ ys) = xs ≡ᴮ ys
(1∷ xs) ≡ᴮ (1∷ ys) = xs ≡ᴮ ys
_ ≡ᴮ _ = false
open import Relation.Nullary.Discrete.FromBoolean
sound-== : ∀ n m → T (n ≡ᴮ m) → n ≡ m
sound-== [] [] p i = []
sound-== (0∷ n) (0∷ m) p i = 0∷ sound-== n m p i
sound-== (1∷ n) (1∷ m) p i = 1∷ sound-== n m p i
complete-== : ∀ n → T (n ≡ᴮ n)
complete-== [] = tt
complete-== (0∷ n) = complete-== n
complete-== (1∷ n) = complete-== n
_≟_ : Discrete Bits
_≟_ = from-bool-eq _≡ᴮ_ sound-== complete-==
|
State Before: α : Type u_1
β : Type u_2
ι : Type ?u.398685
mα : MeasurableSpace α
mβ : MeasurableSpace β
κ : { x // x ∈ kernel α β }
f : β → ℝ≥0∞
g : α → β
a : α
hg : Measurable g
hf : Measurable f
s : Set β
hs : MeasurableSet s
inst✝ : Decidable (g a ∈ s)
⊢ (∫⁻ (x : β) in s, f x ∂↑(deterministic g hg) a) = if g a ∈ s then f (g a) else 0
State After: no goals
Tactic: rw [kernel.deterministic_apply, set_lintegral_dirac' hf hs]
|
module Bautzen.Game.Core
import Bautzen.Combats
import Bautzen.GameUnit
import Bautzen.Pos
import Bautzen.Terrain
import Data.Fin
import Data.Nat
%default total
||| * see section 8.2
public export
data CombatPhase : Type where
NoCombat : CombatPhase
AssignTacticalSupport : (side : Side) -> (combat : CombatState) -> CombatPhase
AssignStrategicSupport : (side : Side) -> (combat : CombatState) -> CombatPhase
Resolve : (combat : CombatState) -> CombatPhase
ApplyLosses : (side : Side) -> (combat : CombatState) -> CombatPhase
public export
data GameSegment : Type where
Supply : GameSegment
Move : GameSegment
Combat : (phase : CombatPhase) -> GameSegment
GameEnd : GameSegment
public export
Show GameSegment where
show Supply = "Supply"
show Move = "Move"
show (Combat _) = "Combat"
show GameEnd = "GameEnd"
public export
record GameState where
constructor MkGameState
turn : Fin 6
side : Side
segment : GameSegment
units : List (GameUnit, Pos)
public export
Show GameState where
show (MkGameState turn side segment units) =
"GameState: turn=" ++
show (finToInteger turn) ++
", side=" ++ show side ++
", " ++ show segment ++
", units=" ++ show units
public export
data GameError : Type where
NoSuchUnits : (unitName : List String) -> GameError
NotYourTurn : (side : Side) -> GameError
EnemyInHex : (unit : GameUnit) -> (hex : Pos) -> GameError
MoveFromZocToZoc : (unit : GameUnit) -> (to : Pos) -> GameError
ForbiddenTerrain : (from : Pos) -> (to : Pos) -> GameError
InvalidMove : (from : Pos) -> (to : Pos) -> GameError
NotEnoughMPs : (unit : GameUnit) -> (from : Pos)-> (to : Pos) -> (mp : Nat) -> GameError
NotAdjacentTo : (units : List GameUnit) -> (target : Pos) -> GameError
NothingToAttack : (target : Pos) -> GameError
AttackingOwnUnits : (units : List GameUnit) -> (target : Pos) -> GameError
NotSupportingUnits: (units : List GameUnit) -> GameError
NotInSupportRange: (units : List GameUnit) -> GameError
NotInChainOfCommand : (units : List GameUnit) -> GameError
NoSupplyColumnThere : (hex : Pos) -> GameError
NoStepsToLose : (side : Side) -> GameError
CombatInProgress : (side : Side) -> GameError
GameHasEnded : GameError
public export
Show GameError where
show (NoSuchUnits unitNames) = "No such units: " ++ show unitNames
show (NotYourTurn side) = "Not your turn: " ++ show side
show (EnemyInHex unit hex) = "Target hex is occupied by enemy: " ++ show hex
show (MoveFromZocToZoc unit to) = "Cannot move from a ZoC to a ZoC: " ++ show to
show (ForbiddenTerrain from to) = "Unit cannot enter terrain: " ++ show from ++ " -> " ++ show to
show (InvalidMove from to) = "Move is invalid: " ++ show from ++ " -> " ++ show to
show (NotEnoughMPs unit from to mp) = "Unit has not enough MPs: " ++ show mp ++ ", " ++ show from ++ " -> " ++ show to
show (NotAdjacentTo units target) = "Units are not adjacent to target hex: " ++ show units ++ " -> " ++ show target
show (NotInSupportRange units) = "Units are not in support range: " ++ show units
show (NotSupportingUnits units) = "Units are not support units (HQ or Artillery): " ++ show units
show (NotInChainOfCommand units) = "HQ cannot provide support to other formation's units: " ++ show units
show (NoSupplyColumnThere hex) = "No supply column there: " ++ show hex
show (NoStepsToLose side) = "No steps to lose for " ++ show side
show (NothingToAttack target) = "Attacked hex is empty: " ++ show target
show (AttackingOwnUnits units target) = "Attacking own units: " ++ show units ++ " -> " ++ show target
show (CombatInProgress side) = "Combat in progress for: " ++ show side
show GameHasEnded = "Game has ended"
public export
data Command : (segment : GameSegment) -> Type where
MoveTo : (unitName : String) -> (to : Pos) -> Command Move
AttackWith : (unitNames : List String) -> (target : Pos) -> Command (Combat NoCombat)
NextSegment : Command segment
TacticalSupport : (unitNames : List String) -> Command (Combat $ AssignTacticalSupport side combatState)
ResolveCombat : (combatState : CombatState) -> Command (Combat $ Resolve combatState)
LoseStep : (unitName : String) -> Command (Combat $ ApplyLosses side combatState)
public export
Show (Command segment) where
show (MoveTo unitName to) = "MoveTo " ++ unitName ++ " -> " ++ show to
show (AttackWith unitNames target) = "AttackWith " ++ show unitNames ++ " -> " ++ show target
show NextSegment = "NextSegment"
show (TacticalSupport unitNames) = "TacticalSupport " ++ show unitNames
show (ResolveCombat state) = "ResolveCombat " ++ show state
show (LoseStep unitName) = "LoseStep " ++ show unitName
public export
data Event : Type where
||| Unit has moved from some position to some other position
Moved : (unit : GameUnit) -> (from : Pos) -> (to : Pos) -> (cost : Cost)
-> { auto prf : LTE (toNat cost) (currentMP unit) }
-> Event
||| Some attackers have engaged combat with some defenders on the given target hex.
|||
||| The target hex can be inferred from the `defenders`' position, eg. all `defenders`
||| should be in the same hex.
|||
||| @attackers list of attacker's units and their positions
||| @defenders list of defender's units and their positions
||| @target the attacked position
CombatEngaged : (attackers : List (GameUnit, Pos)) -> (defenders : List (GameUnit, Pos)) -> (target : Pos) -> Event
||| Units provide tactical support for some side in current combat
|||
||| @supportedSide the side which is given support
||| @supportUnits units and positions that provide support
TacticalSupportProvided : (supportedSide : Side) -> (supportUnits : List (GameUnit, Pos)) -> Event
||| A supply column is used to provide support to units engaged in a combat
|||
||| @supportedSide the side which is given support
||| @hex the position of the supply column
SupplyColumnUsed : (supportedSide : Side) -> (hex : Pos) -> Event
||| Combat has been resolved yielding the given losses
|||
||| @state the state of the combat (before any loss has been applied)
||| @losses losses to apply on engaged units
CombatResolved : (state : CombatState) -> (losses : Losses) -> Event
||| Given unit has lost a step
|||
||| Depending on the current state of the unit, this can either
||| either reduce the unit or destroy it.
|||
||| @side the side the unit is part of (useful to look for the unit in
||| the combat state
||| @unit the unit to apply a step loss to
||| @remainingLosses losses remaining to apply
StepLost : (side : Side) -> (unit : GameUnit) -> (remainingLosses : Losses) -> Event
||| The segment has been advanced one step.
|||
||| @from the previous segment
||| @to the new segment
SegmentChanged : (from : GameSegment) -> (to : GameSegment) -> Event
||| Axis turn is over, move to Allies turn
AxisTurnDone : Event
||| Turn ended, start a new turn
TurnEnded : Fin 6 -> Event
||| Game has ended
GameEnded : Event
public export
Show Event where
show (Moved unit from to cost) = "Moved " ++ name unit ++ " from " ++ show from ++ " to " ++ show to ++ " for " ++ show (toNat cost) ++ " mps"
show (CombatEngaged atk def tgt) = "CombatEngaged " ++ show (map (GameUnit.name . fst) atk) ++ " -> " ++ show (map (GameUnit.name . fst) def) ++ " @ " ++ show tgt
show (TacticalSupportProvided side units) = "TacticalSupportProvided " ++ show (map (GameUnit.name . fst) units) ++ " -> " ++ show side
show (SupplyColumnUsed side hex) = "SupplyColumnUsed " ++ show side ++ " @ " ++ show hex
show (CombatResolved state losses) = "CombatResolved " ++ show state ++ " : " ++ show losses
show (StepLost side unit remain) = "Step Lost " ++ show unit ++ " @ " ++ show side ++ " (" ++ show remain++")"
show (SegmentChanged from to) = "Segment Changed " ++ show from ++ " -> " ++ show to
show AxisTurnDone = "Axis Turn Over"
show (TurnEnded n) = "Turn Ended: " ++ show (finToNat n)
show GameEnded = "Game Ended"
public export
data Game : Type where
MkGame : (events : List Event) -> (curState : GameState) -> (gameMap : Map) -> Game
public export
Show Game where
show (MkGame events state gameMap) = "Game: " ++ show events ++ "\n" ++ show state ++ "\n" ++ show gameMap
public export
curSegment : Game -> GameSegment
curSegment (MkGame _ (MkGameState _ _ segment _) _) = segment
public export
data QueryError : Type where
NoSupplyPathFor : (unitName: String) -> (pos : Pos) -> QueryError
UnitDoesNotExist : (unitName: String) -> QueryError
|
The polynomial $0$ is the zero polynomial, the polynomial $c$ is the constant polynomial $c$, and the polynomial $x$ is the linear polynomial $x$. |
function pass = test_equiOption( pref )
% Test funqui (the 'equi' option) in 2D
if ( nargin < 1 )
pref = chebfunpref;
end
tol = 100*pref.cheb2Prefs.chebfun2eps;
% Canonical domain:
dom = [-1 1 -1 1];
f = @(x,y) cos(x+y);
x = linspace(dom(1),dom(2),100);
y = linspace(dom(3),dom(4),100);
[xx, yy] = meshgrid( x, y );
A = f(xx, yy) ;
g = chebfun2( A , dom, 'equi' );
h = chebfun2( f, dom);
pass(1) = norm( h - g ) < tol ;
% Rectangular domain:
dom = [-1 2 -2 1];
f = @(x,y) cos(x+y);
x = linspace(dom(1),dom(2),100);
y = linspace(dom(3),dom(4),100);
[xx, yy] = meshgrid( x, y );
A = f(xx, yy) ;
g = chebfun2( A , dom, 'equi' );
h = chebfun2( f, dom);
pass(2) = norm( h - g ) < tol ;
% Nonsymmetric function:
dom = [-1 2 -2 1];
f = @(x,y) cos(x+2*y);
x = linspace(dom(1),dom(2),100);
y = linspace(dom(3),dom(4),100);
[xx, yy] = meshgrid( x, y );
A = f(xx, yy) ;
g = chebfun2( A , dom, 'equi' );
h = chebfun2( f, dom);
pass(3) = norm( h - g ) < tol ;
% Small domain;
h = 1e-3;
dom = [1-h 1+h 1-2*h 1+2*h];
f = @(x,y) cos(x+2*y);
x = linspace(dom(1),dom(2),100);
y = linspace(dom(3),dom(4),100);
[xx, yy] = meshgrid( x, y );
A = f(xx, yy) ;
g = chebfun2( A , dom, 'equi' );
h = chebfun2( f, dom);
pass(4) = norm( h - g ) < tol ;
end |
subroutine z_vermom_finvol(nmmax ,kmax ,icx ,icy ,u0 , &
& v0 ,w0 ,vicww ,rxz ,ryz , &
& guu ,gvv ,guv ,gvu ,guz , &
& gvz ,gsqs ,kfs ,kcs ,aak ,bbk , &
& cck ,ddk ,kfuz0 ,kfvz0 ,kfsz0 , &
& kfsmin ,kfsmx0 ,kcshyd ,dzs0 ,dzu0 , &
& dzv0 ,w1 ,p0 ,zk ,gdp )
!----- GPL ---------------------------------------------------------------------
!
! Copyright (C) Stichting Deltares, 2011-2016.
!
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation version 3.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
!
! contact: [email protected]
! Stichting Deltares
! P.O. Box 177
! 2600 MH Delft, The Netherlands
!
! All indications and logos of, and references to, "Delft3D" and "Deltares"
! are registered trademarks of Stichting Deltares, and remain the property of
! Stichting Deltares. All rights reserved.
!
!-------------------------------------------------------------------------------
! $Id: z_vermom_finvol.f90 5820 2016-02-10 09:15:02Z jagers $
! $HeadURL: https://svn.oss.deltares.nl/repos/delft3d/tags/6686/src/engines_gpl/flow2d3d/packages/kernel/src/compute/z_vermom_finvol.f90 $
!!--description-----------------------------------------------------------------
!
! Vertical momentum equation. Integration for
! full timestep. w0 is vertical velocity at
! end of previous non-hydrostatic timestep.
!
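! Editor's note (not part of the original Deltares sources): as far as can be
! inferred from the terms assembled below, the routine builds a finite-volume
! discretisation of the vertical momentum equation
!
!   dw/dt + d(u w)/dx + d(v w)/dy + d(w w)/dz =
!       - (1/rho) dp/dz + d(rxz)/dx + d(ryz)/dy + d/dz( nu dw/dz )
!
! with turbulent stresses rxz = nu (dw/dx + du/dz) and ryz = nu (dw/dy + dv/dz).
! This is a reading aid only; consult the Delft3D-FLOW documentation for the
! authoritative formulation.
!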
!!--pseudo code and references--------------------------------------------------
! NONE
!!--declarations----------------------------------------------------------------
use precision
use globaldata
!
implicit none
!
type(globdat),target :: gdp
!
! The following list of pointer parameters is used to point inside the gdp structure
! They replace the include igd / include igp lines
!
real(fp), pointer :: hdt
real(fp), pointer :: rhow
real(fp), pointer :: vicmol
integer , pointer :: m1_nhy
integer , pointer :: m2_nhy
integer , pointer :: n1_nhy
integer , pointer :: n2_nhy
!
! Global variables
!
integer , intent(in) :: icx
integer , intent(in) :: icy
integer , intent(in) :: kmax ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: nmmax ! Description and declaration in dimens.igs
integer , dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: kcs ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: kfs ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: kfsmx0 ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: kfsmin ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: kcshyd ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: kfsz0 ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: kfuz0 ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: kfvz0 ! Description and declaration in esm_alloc_int.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: guu ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: guv ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: gvu ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: gvv ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: guz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: gvz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub) , intent(in) :: gsqs ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) :: aak
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) :: bbk
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) :: cck
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) :: ddk
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) , intent(in) :: vicww ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) , intent(in) :: w0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, 0:kmax) :: w1 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: dzs0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: dzu0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: dzv0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: p0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) :: rxz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) :: ryz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: u0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nmlb:gdp%d%nmub, kmax) , intent(in) :: v0 ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(0:kmax) , intent(in) :: zk ! Description and declaration in esm_alloc_real.f90
!
! Local variables
!
integer :: ddb
integer :: icxy
integer :: ifkx
integer :: ifky
integer :: ikenx
integer :: ikeny
integer :: k
integer :: kfad
integer :: ku
integer :: kuu
integer :: kd
integer :: m
integer :: maxk
integer :: ndelta
integer :: ndm
integer :: ndmd
integer :: ndmu
integer :: nm
integer :: nmd
integer :: nmst
integer :: nmstart
integer :: nmu
integer :: num
integer :: numd
integer :: numu
real(fp) :: advecx
real(fp) :: advecy
real(fp) :: advecz
real(fp) :: adza
real(fp) :: adzc
real(fp) :: bi
real(fp) :: cuu
real(fp) :: cvv
real(fp) :: ddza
real(fp) :: ddzb
real(fp) :: ddzc
real(fp) :: dt
real(fp) :: dz
real(fp) :: dzdo
real(fp) :: dzu
real(fp) :: dzup
real(fp) :: dzv
real(fp) :: geta
real(fp) :: gksi
real(fp) :: uuu
real(fp) :: viscow
real(fp) :: visk
real(fp) :: viskup
real(fp) :: vix
real(fp) :: viy
real(fp) :: vvv
real(fp) :: wdo
real(fp) :: wup
real(fp) :: www
real(fp) :: ddkadx = 0.0_fp
real(fp) :: ddkady = 0.0_fp
real(fp) :: ddkadz = 0.0_fp
integer :: kadx = 1
integer :: kady = 1
integer :: kadz = 1
real(fp) :: area ! area of flux interface
real(fp) :: uavg0 ! transport velocity at interface, east
real(fp) :: vavg0 ! transport velocity at interface, north
real(fp) :: wavg0 ! transport velocity at interface, top
real(fp) :: wzeta ! conservation correction
real(fp) :: thvert ! theta coefficient for vertical terms
real(fp) :: pcoef ! temporary value for coefficient pressure derivative
real(fp) :: voltemp ! work variable
!
!! executable statements -------------------------------------------------------
!
m1_nhy => gdp%gdnonhyd%m1_nhy
m2_nhy => gdp%gdnonhyd%m2_nhy
n1_nhy => gdp%gdnonhyd%n1_nhy
n2_nhy => gdp%gdnonhyd%n2_nhy
rhow => gdp%gdphysco%rhow
vicmol => gdp%gdphysco%vicmol
hdt => gdp%gdnumeco%hdt
!
ddb = gdp%d%ddbound
icxy = max(icx,icy)
dt = 2.0_fp * hdt
thvert = 0.0_fp
!
ndelta = n2_nhy - n1_nhy
nmstart = (n1_nhy + ddb) + (m1_nhy - 1 + ddb)*icxy
!
! Array initialisation
!
aak = 0.0_fp
bbk = 1.0_fp
cck = 0.0_fp
ddk = 0.0_fp
w1 = 0.0_fp
!
! Turbulent stresses rxz, ryz
!
do m = m1_nhy, m2_nhy
nmst = nmstart + (m - m1_nhy)*icxy
do nm = nmst, nmst + ndelta
nmu = nm + icx
num = nm + icy
rxz(nm,kmax) = 0.0_fp
ryz(nm,kmax) = 0.0_fp
do k = 1, kmax-1
ku = k + 1
kd = k - 1
ifkx = kfuz0(nm,k) * kfuz0(nm,ku) * kfsz0(nm,k) * kfsz0(nmu,k)
ifky = kfvz0(nm,k) * kfvz0(nm,ku) * kfsz0(nm,k) * kfsz0(num,k)
vix = 0.5_fp * (vicww(nm,k)+vicww(nmu,k))
viy = 0.5_fp * (vicww(nm,k)+vicww(num,k))
dzu = 0.5_fp * (zk(ku)-zk(kd))
dzv = 0.5_fp * (zk(ku)-zk(kd))
if (ifkx == 1) then
rxz(nm,k) = vix * ( (w0(nmu,k )-w0(nm,k))/gvu(nm) &
& + (u0(nm ,ku)-u0(nm,k))/dzu )
else
rxz(nm,k) = 0.0_fp
endif
if (ifky == 1) then
ryz(nm,k) = viy * ( (w0(num,k )-w0(nm,k))/guv(nm) &
& + (v0(nm ,ku)-v0(nm,k))/dzv )
else
ryz(nm,k) = 0.0_fp
endif
enddo
enddo
enddo
!
! Set up the complete system for the vertical velocities
!
do m = m1_nhy, m2_nhy
nmst = nmstart + (m - m1_nhy)*icxy
do nm = nmst, nmst + ndelta
if (kfs(nm)*kcs(nm) == 1) then
nmd = nm - icx
ndm = nm - icy
ndmd = nm - icx - icy
nmu = nm + icx
num = nm + icy
numu = nm + icx + icy
ndmu = nm + icx - icy
numd = nm - icx + icy
gksi = gvu(nm)
geta = guu(nm)
!
! Loop over internal layers
!
do k = kfsmin(nm), kfsmx0(nm)-1
ku = k + 1
kuu = k + 2
kd = k - 1
dz = 0.5_fp * (zk(ku)-zk(kd))
!
! Initialize system
!
bbk(nm,k) = 1.0_fp/dt
ddk(nm,k) = w0(nm,k)/dt
!
! Horizontal viscosity
!
viscow = (rxz(nm,k)-rxz(nmd,k)) / (0.5_fp*(gvv(nm)+gvv(ndm))) &
& + (ryz(nm,k)-ryz(ndm,k)) / (0.5_fp*(guu(nm)+gvv(nmd)))
!
ddk(nm,k) = ddk(nm,k) + viscow
!
! Vertical viscosity (rzz)
!
viskup = 0.5_fp * (2.0_fp*vicmol + vicww(nm, k) + vicww(nm, ku))
visk = 0.5_fp * (2.0_fp*vicmol + vicww(nm, k) + vicww(nm, kd))
dzup = zk(ku) - zk(k)
dzdo = zk(k ) - zk(kd)
dz = 0.5_fp * (dzup+dzdo)
ddza = visk / (dzdo*dz)
ddzc = viskup / (dzup*dz)
ddzb = -ddza - ddzc
aak(nm,k) = aak(nm,k) - ddza
bbk(nm,k) = bbk(nm,k) - ddzb
cck(nm,k) = cck(nm,k) - ddzc
!
! Eq. for velocity under bottom (w=0)
!
if (k == kfsmin(nm)) then
aak(nm,kd) = 0.0_fp
bbk(nm,kd) = 1.0_fp
cck(nm,kd) = 0.0_fp
ddk(nm,kd) = 0.0_fp
endif
!
! Eq. for velocity above free surface (const. extrapolation)
!
if (k == kfsmx0(nm)-1) then
aak(nm,ku) = -1.0_fp
bbk(nm,ku) = 1.0_fp
cck(nm,ku) = 0.0_fp
ddk(nm,ku) = 0.0_fp
endif
enddo
endif
enddo
enddo
!
! Finite volume approach for horizontal advection: u dw/dx + v dw/dy
!
do nm = 1, nmmax
nmd = nm - icx
ndm = nm - icy
ndmd = nm - icx - icy
nmu = nm + icx
num = nm + icy
numu = nm + icx + icy
ndmu = nm + icx - icy
numd = nm - icx + icy
!
! For all flooded w points, excluding water level boundaries
!
if (kcs(nm)*kfs(nm) == 1) then
do k = kfsmin(nm), kfsmx0(nm)-1
ku = k + 1
kd = k - 1
dz = 0.5_fp * (zk(ku)-zk(kd))
!
! Note: system coefficients already initialized above
!
! Advective fluxes
!
ddkadx = 0.0_fp
ddkady = 0.0_fp
ddkadz = 0.0_fp
kadx = 1
kady = 1
kadz = 1
!
if (kfsz0(nmd,k)*kfsz0(nmd,ku) == 1) then
!
! Internal / water level boundary (west)
!
uavg0 = 0.5_fp * (u0(nmd,k)+u0(nmd,ku))
area = guu(nmd) * 0.5_fp*(dzu0(nmd,k)+dzu0(nmd,ku))
!
! Water level boundary
!
if (kcs(nmd) == 2) then
!
! Constant extrapolation outwards
!
if ( uavg0 < 0.0_fp ) then
ddkadx = ddkadx + area*uavg0*w0(nm,k)
else
!
! No advection if inward flow
!
kadx = 0
endif
else
!
! Internal flux
!
ddkadx = ddkadx + area*( 0.5_fp*(uavg0+abs(uavg0))*w0(nmd,k) + 0.5_fp*(uavg0-abs(uavg0))*w0(nm,k) )
endif
elseif( k >= kfsmx0(nmd) ) then ! .and. k < kfsmx0(nmu) ) then
!
! Vertical free surface section (west)
!
! REMARK: is the if part necessary?
!
! The horizontal velocities are always available, dzu0 not
!
area = guu(nmd) * 0.5_fp*(dzs0(nm,k)+dzs0(nm,k+1)) !(dzu0(nmd,k)+dzu0(nmd,k+1))
uavg0 = (u0(nmd,k)+u0(nmd,ku)) !/ (max (1, kfuz0(nmd,k)+kfuz0(nmd,ku)) )
!
! Constant extrapolation outwards
!
if( uavg0 < 0.0_fp ) then
ddkadx = ddkadx + area*uavg0*w0(nm,k)
else
!
! No advection so flux west => flux east
!
kadx = 0
endif
endif
!
if (kfsz0(nmu,k)*kfsz0(nmu,ku) == 1) then
!
! East
!
uavg0 = 0.5_fp * (u0(nm,k)+u0(nm,ku))
area = guu(nm) * 0.5_fp*(dzu0(nm,k)+dzu0(nm,ku))
if (kcs(nmu) == 2) then
!
! Constant extrapolation outwards
!
if (uavg0 > 0.0_fp) then
ddkadx = ddkadx - area*uavg0*w0(nm,k)
elseif ( kfuz0(nmd,k)*kfuz0(nmd,ku) == 1 ) then
!
! No advection if inwards flow
!
kadx = 0
endif
else
ddkadx = ddkadx - area*( 0.5_fp*(uavg0+abs(uavg0))*w0(nm,k) + 0.5_fp*(uavg0-abs(uavg0))*w0(nmu,k) )
endif
elseif( k >= kfsmx0(nmu) ) then ! .and. k < kfsmx0(nmd) ) then
!
! East vertical section
! Note: in case of a single column of water no horizontal advection (???)
!
! REMARK: is the if part necessary?
!
! The horizontal velocities are always available, dzu0 not
!
area = guu(nm) * 0.5_fp*(dzs0(nm,k)+dzs0(nm,k+1)) !(dzu0(nm,k)+dzu0(nm,k+1))
uavg0 = (u0(nm,k)+u0(nm,ku)) !/ (max (1, kfuz0(nm,k)+kfuz0(nm,ku)) )
!
! Constant extrapolation outwards
!
if( uavg0 > 0.0_fp ) then
ddkadx = ddkadx - area*uavg0*w0(nm,k)
else
!
! No advection so flux east => flux west
!
kadx = 0
endif
endif
if (k == kfsmx0(nm)-1) then
!
! Vertical (top)
!
! Obtain surface velocity from continuity equation
!
wzeta = w0(nm,k) + (u0(nmd,k+1)*dzu0(nmd,k+1)*guu(nmd)-u0(nm,k+1)*dzu0(nm,k+1)*guu(nm))/gsqs(nm)
wavg0 = 0.5_fp*(wzeta+w0(nm,k))
! if( wavg0 > 0.0 ) then
ddkadz = ddkadz - (1.0_fp-thvert)*gsqs(nm)*( 0.5_fp*(wavg0+abs(wavg0))*w0(nm,k) + 0.5_fp*(wavg0-abs(wavg0))*wzeta )
! else
! !
! ! No advection in case of inwards flow
! !
! kadz = 0
! endif
else
wavg0 = 0.5_fp*(w0(nm,k)+w0(nm,ku))
ddkadz = ddkadz - gsqs(nm)*(1.0_fp-thvert)*( 0.5_fp*(wavg0+abs(wavg0))*w0(nm,k) + 0.5_fp*(wavg0-abs(wavg0))*w0(nm,ku) )
endif
!
! Vertical (bottom)
!
if (k > kfsmin(nm)) then
wavg0 = 0.5_fp*(w0(nm,k)+w0(nm,kd))
ddkadz = ddkadz + gsqs(nm)*(1.0_fp-thvert)*( 0.5_fp*(wavg0+abs(wavg0))*w0(nm,kd) + 0.5_fp*(wavg0-abs(wavg0))*w0(nm,k) )
else
!
! In the bottom layer the lower volume face is located at the center of the mass volume
!
wavg0 = 0.5_fp*(w0(nm,k)) ! w=0 at bottom
ddkadz = ddkadz + gsqs(nm)*(1.0_fp-thvert)*( 0.5_fp*(wavg0+abs(wavg0))*w0(nm,kd) + 0.5_fp*(wavg0-abs(wavg0))*w0(nm,k) )
endif
!
! Update system coefficients
!
ddk(nm,k) = ddk(nm,k) + ( ddkadx*real(kadx,fp) + ddkady*real(kady,fp) + ddkadz*real(kadz,fp) ) / ( gsqs(nm)*0.5_fp*(dzs0(nm,ku)+dzs0(nm,k)) )
!
! Pressure
!
pcoef = 1.0_fp / dz
ddk(nm, k) = ddk(nm,k) - pcoef*(p0(nm,ku)-p0(nm,k))/ rhow
enddo
endif
enddo
!
! SOLUTION PROCEDURE SYSTEM OF EQUATIONS
!
do m = m1_nhy, m2_nhy
nmst = nmstart + (m-m1_nhy)*icxy
do nm = nmst, nmst+ndelta
if (kfsmx0(nm)-kfsmin(nm)>0 .and. kcs(nm)==1) then
maxk = kfsmin(nm) - 1
bi = 1.0_fp / bbk(nm, maxk)
bbk(nm,maxk) = 1.0_fp
cck(nm,maxk) = cck(nm,maxk) * bi
ddk(nm,maxk) = ddk(nm,maxk) * bi
endif
enddo
enddo
do m = m1_nhy, m2_nhy
nmst = nmstart + (m-m1_nhy)*icxy
do nm = nmst, nmst+ndelta
if (kfs(nm)*kcs(nm) == 1) then
do k = kfsmin(nm), kfsmx0(nm)
bi = 1.0_fp / (bbk(nm,k)-aak(nm,k)*cck(nm,k-1))
cck(nm,k) = cck(nm,k) * bi
ddk(nm,k) = (ddk(nm,k)-aak(nm,k)*ddk(nm,k-1)) * bi
enddo
endif
enddo
enddo
do m = m1_nhy, m2_nhy
nmst = nmstart + (m-m1_nhy)*icxy
do nm = nmst, nmst+ndelta
if (kcs(nm) == 1) then
if (kfsmx0(nm) >= kfsmin(nm)) then
w1(nm,kfsmx0(nm)) = ddk(nm,kfsmx0(nm))
endif
endif
enddo
enddo
do m = m1_nhy, m2_nhy
nmst = nmstart + (m-m1_nhy)*icxy
do nm = nmst, nmst+ndelta
if (kcs(nm) == 1) then
do k = kfsmx0(nm)-1, kfsmin(nm)-1, -1
ddk(nm,k) = ddk(nm,k) - cck(nm,k)*ddk(nm,k+1)
w1(nm ,k) = ddk(nm,k)
enddo
endif
enddo
enddo
end subroutine z_vermom_finvol
|
Lasker and Capablanca both worried that chess would suffer a "draw death" as top-level players drew more and more of their games. More recently, Fischer agreed, saying that the game has become played out. All three advocated changing the rules of chess to minimize the number of drawn games. Lasker suggested scoring less than half a point for a draw, and more than half a point for stalemating the opponent's king. Capablanca in the 1920s proposed Capablanca chess, a chess variant played on a larger board and with additional pieces. Fischer advocated Fischer Random Chess, another chess variant, in which the initial position of the pieces is determined at random.
|
\section{Introduction}
The following document outlines the procedure for assigning priorities to IT tasks.
The final list of current IT priorities and the assignee of each task is kept on a password-protected site; this document is therefore only a reference for the process the team uses to decide which activities will be executed first.
|
Retail management is a field of management that deals with running supermarkets and hypermarkets and with selling goods and services to consumers. It is a strong career choice for those with an interest in commodities, sales markets, the diversity of businesses and marketing research. A retail manager trained through retail management courses understands how to deliver in-demand goods at an affordable price and within the prescribed time. As part of the upsurge in the retail industry, companies are adopting new marketing strategies to diversify their business and win over customers, leading to greater demand for retail management courses. Retailing involves coordinating business activities from the design of a product to its delivery and post-delivery service. Retail management courses make students well versed in areas like monitoring retail orders, analyzing supply and problem solving.
Retail management is offered at both certificate and diploma level. A Bachelor of Arts in materials management also falls under retail management. Postgraduate degrees in retail management and in retailing & logistics management, as well as an MBA in retail management, are available. The Post Graduate Diploma in Management (PGDM) in retail management is a two-year course in great demand.
The certificate and diploma programs, which run for one to two years, require a pass in plus two (higher secondary) and basic education. Graduate-level admission to retail management courses requires a pass in higher secondary education in any field. For admission to the master's degree program, a student must hold a bachelor's degree from a reputed institute in retail management or a retail-related subject.
The master's and PGDM programs in retail management also require a valid score in CAT, MAT, XAT or ATMA. Each college sets its own admission rules, which are final in the selection of candidates. Basic knowledge of retail management is helpful for candidates pursuing higher education in the retail management sector.
India is one of the top ten rising retail markets in the world, so plenty of retail jobs are available at all levels. Retail management requires a thorough awareness of brands and marketing strategies to win the customer. Major areas that provide job opportunities for retail management graduates include supply chains, advertising agencies, departmental stores, supermarkets and many more. Manufacturing companies use retail managers to help customers understand their products and to reach customers directly. The insurance and banking sectors require skilled managers to bring their products directly to customers. Along with multinational companies, the education and health care industries also absorb a large number of retail managers. The pay package in the retail industry depends on factors such as the company, job profile, area of work, experience and the type of retail management course pursued. Among retail management professionals, those with an MBA in retail management are in great demand in areas like business research and market research. As the supermarket and hypermarket business keeps expanding, the demand for floor personnel and retail managers is always high.
\section{Education}
\cventry{2015-2017}{MSc Econometrics}{UvA}{Amsterdam}{Big data business analytics\\ Focus on Machine learning}{}
|
[STATEMENT]
lemma distinct_rotate[simp]: "distinct(rotate n xs) = distinct xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distinct (rotate n xs) = distinct xs
[PROOF STEP]
by (induct n) (simp_all add:rotate_def) |
/-
Copyright (c) 2014 Parikshit Khanna. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro,
Scott Morrison
-/
import data.list.count
import data.list.infix
import algebra.order.monoid.min_max
/-!
# Lattice structure of lists
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file proves basic properties about `list.disjoint`, `list.union`, `list.inter` and
`list.bag_inter`, which are defined in core Lean and `data.list.defs`.
`l₁ ∪ l₂` is the list where all elements of `l₁` have been inserted in `l₂` in order. For example,
`[0, 0, 1, 2, 2, 3] ∪ [4, 3, 3, 0] = [1, 2, 4, 3, 3, 0]`
`l₁ ∩ l₂` is the list of elements of `l₁` in order which are in `l₂`. For example,
`[0, 0, 1, 2, 2, 3] ∩ [4, 3, 3, 0] = [0, 0, 3]`
`bag_inter l₁ l₂` is the list of elements that are in both `l₁` and `l₂`, counted with multiplicity
and in the order they appear in `l₁`. As opposed to `list.inter`, `list.bag_inter` copes well with
multiplicity. For example,
`bag_inter [0, 1, 2, 3, 2, 1, 0] [1, 0, 1, 4, 3] = [0, 1, 3, 1]`
-/
open nat
namespace list
variables {α : Type*} {l l₁ l₂ : list α} {p : α → Prop} {a : α}
/-! ### `disjoint` -/
section disjoint
lemma disjoint.symm (d : disjoint l₁ l₂) : disjoint l₂ l₁ := λ a i₂ i₁, d i₁ i₂
lemma disjoint_comm : disjoint l₁ l₂ ↔ disjoint l₂ l₁ := ⟨disjoint.symm, disjoint.symm⟩
lemma disjoint_left : disjoint l₁ l₂ ↔ ∀ ⦃a⦄, a ∈ l₁ → a ∉ l₂ := iff.rfl
lemma disjoint_right : disjoint l₁ l₂ ↔ ∀ ⦃a⦄, a ∈ l₂ → a ∉ l₁ := disjoint_comm
lemma disjoint_iff_ne : disjoint l₁ l₂ ↔ ∀ a ∈ l₁, ∀ b ∈ l₂, a ≠ b :=
by simp only [disjoint_left, imp_not_comm, forall_eq']
lemma disjoint_of_subset_left (ss : l₁ ⊆ l) (d : disjoint l l₂) : disjoint l₁ l₂ := λ x m, d (ss m)
lemma disjoint_of_subset_right (ss : l₂ ⊆ l) (d : disjoint l₁ l) : disjoint l₁ l₂ :=
λ x m m₁, d m (ss m₁)
lemma disjoint_of_disjoint_cons_left {l₁ l₂} : disjoint (a :: l₁) l₂ → disjoint l₁ l₂ :=
disjoint_of_subset_left (list.subset_cons _ _)
lemma disjoint_of_disjoint_cons_right {l₁ l₂} : disjoint l₁ (a :: l₂) → disjoint l₁ l₂ :=
disjoint_of_subset_right (list.subset_cons _ _)
@[simp] lemma disjoint_nil_left (l : list α) : disjoint [] l := λ a, (not_mem_nil a).elim
@[simp] lemma disjoint_nil_right (l : list α) : disjoint l [] :=
by { rw disjoint_comm, exact disjoint_nil_left _ }
@[simp, priority 1100] lemma singleton_disjoint : disjoint [a] l ↔ a ∉ l :=
by { simp only [disjoint, mem_singleton, forall_eq], refl }
@[simp, priority 1100] lemma disjoint_singleton : disjoint l [a] ↔ a ∉ l :=
by rw [disjoint_comm, singleton_disjoint]
@[simp] lemma disjoint_append_left : disjoint (l₁ ++ l₂) l ↔ disjoint l₁ l ∧ disjoint l₂ l :=
by simp only [disjoint, mem_append, or_imp_distrib, forall_and_distrib]
@[simp] lemma disjoint_append_right : disjoint l (l₁ ++ l₂) ↔ disjoint l l₁ ∧ disjoint l l₂ :=
disjoint_comm.trans $ by simp only [disjoint_comm, disjoint_append_left]
@[simp] lemma disjoint_cons_left : disjoint (a :: l₁) l₂ ↔ a ∉ l₂ ∧ disjoint l₁ l₂ :=
(@disjoint_append_left _ l₂ [a] l₁).trans $ by simp only [singleton_disjoint]
@[simp] lemma disjoint_cons_right : disjoint l₁ (a :: l₂) ↔ a ∉ l₁ ∧ disjoint l₁ l₂ :=
disjoint_comm.trans $ by simp only [disjoint_comm, disjoint_cons_left]
lemma disjoint_of_disjoint_append_left_left (d : disjoint (l₁ ++ l₂) l) : disjoint l₁ l :=
(disjoint_append_left.1 d).1
lemma disjoint_of_disjoint_append_left_right (d : disjoint (l₁ ++ l₂) l) : disjoint l₂ l :=
(disjoint_append_left.1 d).2
lemma disjoint_of_disjoint_append_right_left (d : disjoint l (l₁ ++ l₂)) : disjoint l l₁ :=
(disjoint_append_right.1 d).1
lemma disjoint_of_disjoint_append_right_right (d : disjoint l (l₁ ++ l₂)) : disjoint l l₂ :=
(disjoint_append_right.1 d).2
lemma disjoint_take_drop {m n : ℕ} (hl : l.nodup) (h : m ≤ n) : disjoint (l.take m) (l.drop n) :=
begin
induction l generalizing m n,
case list.nil : m n
{ simp },
case list.cons : x xs xs_ih m n
{ cases m; cases n; simp only [disjoint_cons_left, mem_cons_iff, disjoint_cons_right, drop,
true_or, eq_self_iff_true, not_true, false_and,
disjoint_nil_left, take],
{ cases h },
cases hl with _ _ h₀ h₁, split,
{ intro h, exact h₀ _ (mem_of_mem_drop h) rfl, },
solve_by_elim [le_of_succ_le_succ] { max_depth := 4 } },
end
end disjoint
variable [decidable_eq α]
/-! ### `union` -/
section union
@[simp] lemma nil_union (l : list α) : [] ∪ l = l := rfl
@[simp] lemma cons_union (l₁ l₂ : list α) (a : α) : a :: l₁ ∪ l₂ = insert a (l₁ ∪ l₂) := rfl
@[simp] lemma mem_union : a ∈ l₁ ∪ l₂ ↔ a ∈ l₁ ∨ a ∈ l₂ :=
by induction l₁; simp only [nil_union, not_mem_nil, false_or, cons_union, mem_insert_iff,
mem_cons_iff, or_assoc, *]
lemma mem_union_left (h : a ∈ l₁) (l₂ : list α) : a ∈ l₁ ∪ l₂ := mem_union.2 (or.inl h)
lemma mem_union_right (l₁ : list α) (h : a ∈ l₂) : a ∈ l₁ ∪ l₂ := mem_union.2 (or.inr h)
lemma sublist_suffix_of_union : ∀ l₁ l₂ : list α, ∃ t, t <+ l₁ ∧ t ++ l₂ = l₁ ∪ l₂
| [] l₂ := ⟨[], by refl, rfl⟩
| (a :: l₁) l₂ := let ⟨t, s, e⟩ := sublist_suffix_of_union l₁ l₂ in
if h : a ∈ l₁ ∪ l₂
then ⟨t, sublist_cons_of_sublist _ s, by simp only [e, cons_union, insert_of_mem h]⟩
else ⟨a::t, s.cons_cons _, by simp only [cons_append, cons_union, e, insert_of_not_mem h];
split; refl⟩
lemma suffix_union_right (l₁ l₂ : list α) : l₂ <:+ l₁ ∪ l₂ :=
(sublist_suffix_of_union l₁ l₂).imp (λ a, and.right)
lemma union_sublist_append (l₁ l₂ : list α) : l₁ ∪ l₂ <+ l₁ ++ l₂ :=
let ⟨t, s, e⟩ := sublist_suffix_of_union l₁ l₂ in
e ▸ (append_sublist_append_right _).2 s
lemma forall_mem_union : (∀ x ∈ l₁ ∪ l₂, p x) ↔ (∀ x ∈ l₁, p x) ∧ (∀ x ∈ l₂, p x) :=
by simp only [mem_union, or_imp_distrib, forall_and_distrib]
lemma forall_mem_of_forall_mem_union_left (h : ∀ x ∈ l₁ ∪ l₂, p x) : ∀ x ∈ l₁, p x :=
(forall_mem_union.1 h).1
lemma forall_mem_of_forall_mem_union_right
(h : ∀ x ∈ l₁ ∪ l₂, p x) : ∀ x ∈ l₂, p x :=
(forall_mem_union.1 h).2
end union
/-! ### `inter` -/
section inter
@[simp] lemma inter_nil (l : list α) : [] ∩ l = [] := rfl
@[simp] lemma inter_cons_of_mem (l₁ : list α) (h : a ∈ l₂) :
(a :: l₁) ∩ l₂ = a :: (l₁ ∩ l₂) :=
if_pos h
@[simp] lemma inter_cons_of_not_mem (l₁ : list α) (h : a ∉ l₂) :
(a :: l₁) ∩ l₂ = l₁ ∩ l₂ :=
if_neg h
lemma mem_of_mem_inter_left : a ∈ l₁ ∩ l₂ → a ∈ l₁ := mem_of_mem_filter
lemma mem_of_mem_inter_right : a ∈ l₁ ∩ l₂ → a ∈ l₂ := of_mem_filter
lemma mem_inter_of_mem_of_mem : a ∈ l₁ → a ∈ l₂ → a ∈ l₁ ∩ l₂ :=
mem_filter_of_mem
@[simp] lemma mem_inter : a ∈ l₁ ∩ l₂ ↔ a ∈ l₁ ∧ a ∈ l₂ := mem_filter
lemma inter_subset_left (l₁ l₂ : list α) : l₁ ∩ l₂ ⊆ l₁ := filter_subset _
lemma inter_subset_right (l₁ l₂ : list α) : l₁ ∩ l₂ ⊆ l₂ := λ a, mem_of_mem_inter_right
lemma subset_inter {l l₁ l₂ : list α} (h₁ : l ⊆ l₁) (h₂ : l ⊆ l₂) : l ⊆ l₁ ∩ l₂ :=
λ a h, mem_inter.2 ⟨h₁ h, h₂ h⟩
lemma inter_eq_nil_iff_disjoint : l₁ ∩ l₂ = [] ↔ disjoint l₁ l₂ :=
by { simp only [eq_nil_iff_forall_not_mem, mem_inter, not_and], refl }
lemma forall_mem_inter_of_forall_left (h : ∀ x ∈ l₁, p x)
(l₂ : list α) :
∀ x, x ∈ l₁ ∩ l₂ → p x :=
ball.imp_left (λ x, mem_of_mem_inter_left) h
lemma forall_mem_inter_of_forall_right (l₁ : list α)
(h : ∀ x ∈ l₂, p x) :
∀ x, x ∈ l₁ ∩ l₂ → p x :=
ball.imp_left (λ x, mem_of_mem_inter_right) h
@[simp] lemma inter_reverse {xs ys : list α} : xs.inter ys.reverse = xs.inter ys :=
by simp only [list.inter, mem_reverse]
end inter
/-! ### `bag_inter` -/
section bag_inter
@[simp] lemma nil_bag_inter (l : list α) : [].bag_inter l = [] :=
by cases l; refl
@[simp] lemma bag_inter_nil (l : list α) : l.bag_inter [] = [] :=
by cases l; refl
@[simp] lemma cons_bag_inter_of_pos (l₁ : list α) (h : a ∈ l₂) :
(a :: l₁).bag_inter l₂ = a :: l₁.bag_inter (l₂.erase a) :=
by cases l₂; exact if_pos h
@[simp] lemma cons_bag_inter_of_neg (l₁ : list α) (h : a ∉ l₂) :
(a :: l₁).bag_inter l₂ = l₁.bag_inter l₂ :=
begin
cases l₂, {simp only [bag_inter_nil]},
simp only [erase_of_not_mem h, list.bag_inter, if_neg h]
end
@[simp] lemma mem_bag_inter {a : α} : ∀ {l₁ l₂ : list α}, a ∈ l₁.bag_inter l₂ ↔ a ∈ l₁ ∧ a ∈ l₂
| [] l₂ := by simp only [nil_bag_inter, not_mem_nil, false_and]
| (b :: l₁) l₂ := begin
by_cases b ∈ l₂,
{ rw [cons_bag_inter_of_pos _ h, mem_cons_iff, mem_cons_iff, mem_bag_inter],
by_cases ba : a = b,
{ simp only [ba, h, eq_self_iff_true, true_or, true_and] },
{ simp only [mem_erase_of_ne ba, ba, false_or] } },
{ rw [cons_bag_inter_of_neg _ h, mem_bag_inter, mem_cons_iff, or_and_distrib_right],
symmetry, apply or_iff_right_of_imp,
rintro ⟨rfl, h'⟩, exact h.elim h' }
end
@[simp] lemma count_bag_inter {a : α} :
∀ {l₁ l₂ : list α}, count a (l₁.bag_inter l₂) = min (count a l₁) (count a l₂)
| [] l₂ := by simp
| l₁ [] := by simp
| (b :: l₁) l₂ :=
begin
by_cases hb : b ∈ l₂,
{ rw [cons_bag_inter_of_pos _ hb, count_cons', count_cons', count_bag_inter, count_erase,
← min_add_add_right],
by_cases ab : a = b,
{ rw [if_pos ab, tsub_add_cancel_of_le],
rwa [succ_le_iff, count_pos, ab] },
{ rw [if_neg ab, tsub_zero, add_zero, add_zero] } },
{ rw [cons_bag_inter_of_neg _ hb, count_bag_inter],
by_cases ab : a = b,
{ rw [← ab] at hb, rw [count_eq_zero.2 hb, min_zero, min_zero] },
{ rw [count_cons_of_ne ab] } },
end
lemma bag_inter_nil_iff_inter_nil : ∀ l₁ l₂ : list α, l₁.bag_inter l₂ = [] ↔ l₁ ∩ l₂ = []
| [] l₂ := by simp
| (b :: l₁) l₂ :=
begin
by_cases h : b ∈ l₂; simp [h],
exact bag_inter_nil_iff_inter_nil l₁ l₂
end
end bag_inter
end list
|
#include <faucet/Asio.hpp>
#include <stdexcept>
#include <boost/thread.hpp>
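// Lazily creates a single global io_service, keeps it alive with a work object,
// and runs it on a dedicated "butler" thread until shutdown() is called.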
void Asio::startup() {
if(ioService != 0) {
return;
}
ioService = new boost::asio::io_service();
work = new boost::asio::io_service::work(*ioService);
butler = new boost::thread([]{ioService->run();});
}
boost::asio::io_service &Asio::getIoService() {
if(ioService == 0) {
throw std::runtime_error("Attempted to access io_service before startup or after shutdown.");
}
return *ioService;
}
void Asio::shutdown() {
if(ioService == 0) {
return;
}
delete work;
ioService->stop();
butler->join();
delete butler;
delete ioService;
work = 0;
butler = 0;
ioService = 0;
}
boost::asio::io_service *Asio::ioService = 0;
boost::asio::io_service::work *Asio::work = 0;
boost::thread *Asio::butler = 0;
|
// Copyright (c) 2015-2018, CNRS
// Authors: Justin Carpentier <[email protected]>
#ifndef __multicontact_api_math_search_hpp__
#define __multicontact_api_math_search_hpp__
#include <Eigen/Dense>
namespace multicontact_api
{
namespace math
{
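// Binary search in a sorted Eigen vector: returns the index of `value` if present,
// otherwise the index of the last element smaller than `value` (-1 if none).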
template<class DenseDerived>
Eigen::DenseIndex binarySearch(const Eigen::DenseBase<DenseDerived> & vector,
const typename DenseDerived::Scalar & value)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseDerived);
int left_id = 0;
int right_id = static_cast<int>(vector.size()) - 1;
while(left_id <= right_id)
{
int middle_id = left_id + (right_id - left_id)/2;
if(vector[middle_id] < value)
left_id = middle_id+1;
else if(vector[middle_id] > value)
right_id = middle_id-1;
else
return middle_id;
}
return left_id-1;
}
}
}
#endif // ifndef __multicontact_api_math_search_hpp__
|
#### Writing to a file
Evaluate the function sin(x) at 100 points on the interval [0, 10] and save the values of x and sin(x) to a file (e.g. csv).
a) pure Python
```python
xmin = 0
xmax = 10
N = 100
```
```python
import math
f = open("data.txt", "w")
for i in range(0, N):
x = xmin + i*(xmax-xmin)/(N-1)
y = math.sin(x)
f.write("%f %f\n"%(x,y))
f.close()
```
An alternative way of working with the file
```python
import math
with open("data.csv", "w") as f:
for i in range(0, 100):
x = xmin + i*(xmax-xmin)/(N-1)
y = math.sin(x)
f.write(f"{x:f} {y:f}\n")
```
b) with numpy it is noticeably simpler
```python
import numpy as np
x = np.linspace(0, 10, 100)
y = np.sin(x)
np.savetxt("data.txt", np.vstack((x, y)).T)
```
#### Reading from a file into an array
Load the data saved in the previous exercise and plot the function on screen
a) pure Python
```python
data = []
f = open("data.csv")
for line in f:
data.append(list(map(float, line.strip().split(" "))))
```
b) using numpy
```python
data = np.loadtxt("data.csv")
```
Plotting
```python
import matplotlib.pyplot as plt
plt.plot(data[:,0], data[:,1])
```
### Floating point arithmetic
Representation of decimal numbers in binary: print the number 0.1 to 19 decimal places
```python
"%.19f"%0.1
```
'0.1000000000000000056'
Addition and subtraction.
Compute $1+10^{-15}$ and $1+10^{-16}$
```python
1+1e-15
```
1.000000000000001
```python
1+1e-16
```
1.0
How many decimal significant digits does a float64 with a 53-bit mantissa have?
```python
np.log10(2**53)
```
15.954589770191003
Computations are therefore carried out with a precision of roughly 16 significant digits. The relative rounding error grows when, for example, we compute the difference of two numbers of similar magnitude:
Compute $1.001-1$
```python
1.001-1
```
0.0009999999999998899
Because of rounding error, the order of arithmetic operations also matters (they are evaluated left to right):
Compute $10^{-15}+1-1$ and $1-1+10^{-15}$
```python
1-1+1e-15
```
1e-15
```python
1e-15+1-1
```
1.1102230246251565e-15
A poorly designed algorithm can lead to "amplification" of the error; such an algorithm is called numerically unstable. See the following example:
Compute the terms of the following sequence for $j=1\ldots10$:
$$x_0 = 1$$
$$x_j = (1.01 - x_{j-1})\cdot 100$$
and compare with the values computed "on paper" (one possible solution is sketched after the code cell below)
```python
# exercise 1
```
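A minimal sketch of one possible solution to exercise 1 (an illustrative answer, not part of the original notebook):
```python
x = 1.0                      # x_0 = 1
for j in range(1, 11):
    x = (1.01 - x) * 100     # x_j = (1.01 - x_{j-1}) * 100
    print(j, x)
```
On paper every term equals 1, but in floating point the rounding error of `1.01 - x` is multiplied by 100 at each step, so the computed values drift away from 1 very quickly.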
#### Summing numerical series
Compute
$$y = \sum_{i=0}^{1000} \frac{1}{10^{-6} + i}$$
- "in order"
- in reverse order, i.e. from the smallest terms
- using the library functions `sum` and `math.fsum`
and compare the results
```python
x = np.arange(1001)
y = 1/(1e-6 + x)
```
in order:
```python
s = 0
for i in y: s += i
s
```
1000007.4854692194
in reverse:
```python
s = 0
for i in y[::-1]: s += i
s
```
1000007.4854692166
The built-in generic function sum adds the terms in order. It can sum any object with a defined addition operation:
```python
sum(y)
```
1000007.4854692194
The function `math.fsum` is optimized for accuracy. In this case it agrees with the sum taken from the smallest terms:
```python
math.fsum(y)
```
1000007.4854692166
The function `np.sum` is optimized for speed and is relatively accurate:
```python
np.sum(y)
```
1000007.4854692165
Note: the exact result can easily be verified by a symbolic computation with the sympy library, which can evaluate the sum numerically to arbitrary precision. First we define our expression symbolically:
```python
import sympy as sp
si = sp.Symbol("si")
series = sp.Sum(1/(sp.S(10)**(-6) + si), (si, 0, 1000))
series
```
$\displaystyle \sum_{si=0}^{1000} \frac{1}{si + \frac{1}{1000000}}$
and evaluate it to 30 significant digits. We see that in this case the summation "from the smallest terms" is sufficiently accurate.
```python
sp.Sum(1/(sp.S(10)**(-6) + si), (si, 0, 1000)).evalf(30)
```
$\displaystyle 1000007.48546921661698028641805$
### Comparing numbers
Determine whether the following numbers are equal:
$1+ 10^{-16}$ and $1$;
$(1.01-1)\cdot100$ and $1$
```python
1+1e-16 == 1
```
True
```python
1 == (1.01 - 1)*100
```
False
We see that because of rounding error, equal numbers may appear different and vice versa. This must be kept in mind, and comparison with a tolerance (absolute and relative, see e.g. the function `np.isclose` and its documentation) should be used when needed
```python
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
```
```python
isclose(1, (1.01 - 1)*100)
```
True
It is preferable, however, to use the library functions `np.isclose` or `math.isclose`, which guarantee correct behaviour with infinities and NaN...
```python
np.isclose(1, (1.01 - 1)*100)
```
True
#### "Speciální" čísla: nula, nekonečno NaN
Poznámka bokem k typům v Pythonu: Python je dynamicky typovaný, takže jméno proměnné není nijak spjato s datovým typem, jako třeba v C/C++. Typ proměnné nebo výrazu můžeme zjistit příkazem `type`
```python
x = 0
type(x)
```
int
```python
x = 0.0
type(x)
```
float
And one more slightly tricky note about numeric types in Python: compute `1/0` and `1/np.sin(0)`. Why do the results differ?
```python
1/0
```
```python
1/np.sin(0)
```
<ipython-input-62-21b4b538a0f5>:1: RuntimeWarning: divide by zero encountered in double_scalars
1/np.sin(0)
inf
It is caused by the types of the numbers; try type(0), type(0.0) and type(np.sin(0))
```python
type(0)
```
int
```python
type(0.0)
```
float
```python
type(np.sin(0))
```
numpy.float64
The problem lies in the different float types. A numpy float allows division by zero: it only emits a warning and correctly returns inf. This numpy behaviour can be configured (a sketch follows after the next cell).
A 64-bit numpy zero can be created with `np.float64(0)`, and this data type can also represent the special values NaN (not a number, an undefined expression), +inf, -inf (infinities), and +0, -0 (signed zero)
```python
-np.float64(0), np.nan, -np.inf, np.inf
```
(-0.0, nan, -inf, inf)
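A brief illustration of configuring the error handling with `np.seterr` (the chosen setting is just an example, not part of the original notebook):
```python
old = np.seterr(divide="raise")   # make division by zero raise FloatingPointError
try:
    1 / np.float64(0)
except FloatingPointError as e:
    print("caught:", e)
np.seterr(**old)                  # restore the previous error handling
```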
Examples of some computations with NaN and inf values:
```python
1/np.float64(0), -1/np.float64(0)
```
<ipython-input-52-18dbe72427c2>:1: RuntimeWarning: divide by zero encountered in double_scalars
1/np.float64(0), -1/np.float64(0)
(inf, -inf)
```python
0/np.float64(0)
```
<ipython-input-53-f117ac9e30c2>:1: RuntimeWarning: invalid value encountered in double_scalars
0/np.float64(0)
nan
By definition, NaN is not greater than, less than, or equal to any number:
```python
np.nan > 3, np.nan <= 3, np.nan == np.nan
```
(False, False, False)
```python
np.inf == np.inf
```
True
```python
-np.float64(0) == np.float64(0)
```
True
```python
np.arctan(np.inf)/np.pi
```
0.5
### Root finding
Implement root finding using the bisection method.
Find the root of sin(x) between 3 and 4 by bisection (one possible implementation is sketched just after the function stub below)
```python
a, b = 3, 4
f = np.sin
```
```python
def bisect(f, a, b, maxiter=53):
# exercise 2: define the body of this function
```
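A minimal sketch of one possible body for exercise 2 (an illustrative answer, not part of the original notebook; it assumes `f(a)` and `f(b)` have opposite signs):
```python
def bisect(f, a, b, maxiter=53):
    # assumes f(a) and f(b) bracket a root (opposite signs)
    for _ in range(maxiter):
        c = 0.5 * (a + b)
        if f(a) * f(c) <= 0:
            b = c
        else:
            a = c
    return 0.5 * (a + b)
```
With `maxiter=53` the bracket shrinks to roughly one unit in the last place of a double, matching the output shown below.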
```python
bisect(f, 3, 4)
```
3.141592653589793
Newton's method
```python
def newton(f, df, a):
for i in range(10):
a_new = a - f(a)/df(a)
if a_new == a:
return a
a = a_new
```
```python
df = np.cos
```
```python
newton(f, df, 4.8)
```
15.707963267948966
### Minimization
Newton's method
```python
def newton_min(f, df, ddf, a):
for i in range(10):
a_new = a - df(a)/ddf(a)
if a_new == a:
if ddf(a) > 0:
return a
else:
raise(RuntimeError("Method did not converge to minimum"))
a = a_new
```
```python
def f(x): return x**2 + x
def df(x): return 2*x + 1
def ddf(x): return 2
```
```python
newton_min(f, df, ddf, 1)
```
-0.5
```python
x = np.linspace(-2, 2)
plt.plot(x, f(x))
plt.ylim(ymax=2)
plt.grid()
```
```python
```
|
State Before: l : Type ?u.432380
m : Type ?u.432383
n : Type u_1
o : Type ?u.432389
m' : o → Type ?u.432394
n' : o → Type ?u.432399
R : Type ?u.432402
S : Type ?u.432405
α : Type v
β : Type w
γ : Type ?u.432412
inst✝² : Semiring α
inst✝¹ : DecidableEq n
inst✝ : Fintype n
a : α
i : n
⊢ ↑(scalar n) a i i = a State After: no goals Tactic: simp only [coe_scalar, Matrix.smul_apply, one_apply_eq, smul_eq_mul, mul_one] |
State Before: α : Type u_1
inst✝ : LinearOrderedField α
a b c d : α
n : ℤ
hn : Odd n
⊢ a ^ n < 0 ↔ a < 0 State After: case intro
α : Type u_1
inst✝ : LinearOrderedField α
a b c d : α
n k : ℤ
hk : n = 2 * k + 1
⊢ a ^ n < 0 ↔ a < 0 Tactic: cases' hn with k hk State Before: case intro
α : Type u_1
inst✝ : LinearOrderedField α
a b c d : α
n k : ℤ
hk : n = 2 * k + 1
⊢ a ^ n < 0 ↔ a < 0 State After: no goals Tactic: simpa only [hk, two_mul] using zpow_bit1_neg_iff |
Formal statement is: proposition compact_eq_seq_compact_metric: "compact (S :: 'a::metric_space set) \<longleftrightarrow> seq_compact S" Informal statement is: A metric space is compact if and only if it is sequentially compact. |
Formal statement is: lemma INT_decseq_offset: assumes "decseq F" shows "(\<Inter>i. F i) = (\<Inter>i\<in>{n..}. F i)" Informal statement is: If $F$ is a decreasing sequence of sets, then $\bigcap_{i=1}^\infty F_i = \bigcap_{i=n}^\infty F_i$ for any $n$. |
{-# OPTIONS --universe-polymorphism #-}
module Categories.Bifunctor where
open import Level
open import Data.Product using (_,_; swap)
open import Categories.Category
open import Categories.Functor public
open import Categories.Product
Bifunctor : ∀ {o ℓ e} {o′ ℓ′ e′} {o′′ ℓ′′ e′′} → Category o ℓ e → Category o′ ℓ′ e′ → Category o′′ ℓ′′ e′′ → Set (o ⊔ ℓ ⊔ e ⊔ o′ ⊔ ℓ′ ⊔ e′ ⊔ o′′ ⊔ ℓ′′ ⊔ e′′)
Bifunctor C D E = Functor (Product C D) E
overlap-× : ∀ {o ℓ e} {o′₁ ℓ′₁ e′₁} {o′₂ ℓ′₂ e′₂} {C : Category o ℓ e} {D₁ : Category o′₁ ℓ′₁ e′₁} {D₂ : Category o′₂ ℓ′₂ e′₂} (H : Bifunctor D₁ D₂ C) {o″ ℓ″ e″} {E : Category o″ ℓ″ e″} (F : Functor E D₁) (G : Functor E D₂) → Functor E C
overlap-× H F G = H ∘ (F ※ G)
reduce-× : ∀ {o ℓ e} {o′₁ ℓ′₁ e′₁} {o′₂ ℓ′₂ e′₂} {C : Category o ℓ e} {D₁ : Category o′₁ ℓ′₁ e′₁} {D₂ : Category o′₂ ℓ′₂ e′₂} (H : Bifunctor D₁ D₂ C) {o″₁ ℓ″₁ e″₁} {E₁ : Category o″₁ ℓ″₁ e″₁} (F : Functor E₁ D₁) {o″₂ ℓ″₂ e″₂} {E₂ : Category o″₂ ℓ″₂ e″₂} (G : Functor E₂ D₂) → Bifunctor E₁ E₂ C
reduce-× H F G = H ∘ (F ⁂ G)
flip-bifunctor : ∀ {o ℓ e} {o′ ℓ′ e′} {o′′ ℓ′′ e′′} {C : Category o ℓ e} → {D : Category o′ ℓ′ e′} → {E : Category o′′ ℓ′′ e′′} → Bifunctor C D E → Bifunctor D C E
flip-bifunctor {C = C} {D = D} {E = E} b = _∘_ b (Swap {C = C} {D = D})
|
module precision
!----- GPL ---------------------------------------------------------------------
!
! Copyright (C) Stichting Deltares, 2011-2016.
!
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation version 3.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
!
! contact: [email protected]
! Stichting Deltares
! P.O. Box 177
! 2600 MH Delft, The Netherlands
!
! All indications and logos of, and references to, "Delft3D" and "Deltares"
! are registered trademarks of Stichting Deltares, and remain the property of
! Stichting Deltares. All rights reserved.
!
!-------------------------------------------------------------------------------
! $Id: precision.f90 5717 2016-01-12 11:35:24Z mourits $
! $HeadURL: https://svn.oss.deltares.nl/repos/delft3d/tags/6686/src/tools_gpl/kubint/packages/kubint_f/src/precision.f90 $
!!--description-----------------------------------------------------------------
!
! This module contains the parameters used to switch easily from
! single precision mode to double precision mode.
!
!
! See also precision.h file for C-code (DD)
! See also tri-dyn.igd file for connection with esm
!
! sp: single precision
! hp: high (or double) precision
! fp: flexible precision, single or double
! fp is the used precision
!
! SWITCHING FROM SINGLE PRECISION FP TO DOUBLE PRECISION:
! 1) File libsrc\flow_modsrc\precision.f90
! - Comment out the following line:
! integer, parameter :: fp=sp
! - Activate the following line:
! integer, parameter :: fp=hp
! 2) File include\flow\tri-dyn.igd
! - Comment out the following line:
! equivalence ( r(0), rbuf(0))
! - Activate the following line:
! equivalence ( r(0), dbuf(0))
! 3) File include\hydra\precision.h
! - Comment out the following line:
! #undef FLOW_DOUBLE_PRECISION
! - Activate the following line:
! #define FLOW_DOUBLE_PRECISION
!
! SWITCHING FROM SINGLE PRECISION BODSED/DPS TO DOUBLE PRECISION:
! 1) File libsrc\flow_modsrc\precision.f90
! - Comment out the following line:
! integer, parameter :: prec=sp
! - Activate the following line:
! integer, parameter :: prec=hp
! 2) File include\flow\tri-dyn.igd
! - Comment out the following line:
! equivalence ( d(0), rbuf(0))
! - Activate the following line:
! equivalence ( d(0), dbuf(0))
! 3) File libsrc\flow_dd\hyexth\precision.h
! - Comment out the following line:
! #undef PREC_DOUBLE_PRECISION
! - Activate the following line:
! #define PREC_DOUBLE_PRECISION
!
!!--pseudo code and references--------------------------------------------------
! NONE
!!--declarations----------------------------------------------------------------
implicit none
!
! parameters, used in conversions: sp=single precision, hp=high (double) precision
!
integer, parameter :: sp=kind(1.0e00)
integer, parameter :: hp=kind(1.0d00)
!
! double precision integers:
!
integer, parameter :: long = SELECTED_INT_KIND(16)
!
! fp is the generally used precision in Delft3D-FLOW
!
integer, parameter :: fp=hp
!integer, parameter :: fp=sp
!
! prec is used to switch bodsed/dps from sp to hp
!
integer, parameter :: prec=hp
!integer, parameter :: prec=sp
end module precision
|
#-*- coding: utf-8 -*-
import json
import requests
import csv
import warnings
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from flask import Flask, request
app = Flask(__name__)
enr_cate_id = []
enr_cate_id.append('02')
enr_cate_id.append('03')
enr_cate_id.append('04')
enr_cate_id.append('05')
enr_cate_id.append('06')
enr_cate_id.append('07')
enr_cate_id.append('08')
enr_cate_id.append('09')
enr_cate_id.append('10')
enr_cate_id.append('12')
enr_cate_id.append('14')
enr_cate_id.append('15')
enr_cate_id.append('16')
enr_cate_id.append('18')
enr_cate_id.append('21')
enr_cate_id.append('22')
enr_cate_id.append('23')
enr_cate_id.append('24')
enr_cate_id.append('86')
enr_cate_id.append('90')
enr_cate_id.append('91')
enr_cate_id.append('93')
enr_cate_id.append('95')
clf = joblib.load('filename.pkl')
corpus = joblib.load('vec.dic')
vectorizer = CountVectorizer(min_df=1, tokenizer=lambda x: list(x), ngram_range=(2, 4))
vectorizer.fit_transform(corpus)
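# The classifier works on character 2- to 4-gram counts of the product title and predicts an index into enr_cate_id.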
transform = vectorizer.transform([u'자동 물걸레 청소기'])
transform1 = vectorizer.transform([u'에센스 커버 팩트 리미티드 패키지'])
print enr_cate_id[clf.predict(transform)[0]]
print enr_cate_id[clf.predict(transform1)[0]]
print enr_cate_id[clf.predict(vectorizer.transform([u'로이드미 블루투스 차량용 충전기']))[0]]
@app.route('/predict', methods=['GET', 'POST'])
def getPredict():
post_id = request.args.get('data')
print post_id
transform = vectorizer.transform([post_id])
result = enr_cate_id[clf.predict(transform)[0]]
if result == "02":
output = "영상,디카"
elif result == "03":
output = "디지털"
elif result == "04":
output = "컴퓨터"
elif result == "05":
output = "생활가전"
elif result == "06":
output = "주방가전"
elif result == "07":
output = "부품"
elif result == "08":
output = "화장품"
elif result == "09":
output = "스포츠"
elif result == "10":
output = "유아,완구"
elif result == "12":
output = "가구"
elif result == "14":
output = "패션,잡화"
elif result == "15":
output = "식품"
elif result == "16":
output = "생활,취미"
elif result == "18":
output = "문구,사무"
elif result == "21":
output = "자동차용품"
elif result == "22":
output = "액세서리"
elif result == "24":
output = "계절가전"
elif result == "86":
output = "백화점관"
elif result == "90":
output = "메인배너"
elif result == "91":
output = "여행"
elif result == "93":
output = "도서"
elif result == "95":
output = "음반,기타"
return output
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
#print loaded.transform([u'자동 물걸레 청소기'])
|
module TestTypeDef
using Compat
using Compat.Test
using CategoricalArrays
using CategoricalArrays: DefaultRefType, level, reftype, leveltype, catvalue, iscatvalue
@testset "CategoricalPool, a b c order" begin
pool = CategoricalPool(
[
"a",
"b",
"c"
],
Dict(
"a" => DefaultRefType(1),
"b" => DefaultRefType(2),
"c" => DefaultRefType(3),
)
)
@test iscatvalue(Int) == false
@test iscatvalue(Any) == false
@test iscatvalue(Missing) == false
@test isa(pool, CategoricalPool)
@test isa(pool.index, Vector)
@test length(pool.index) == 3
@test pool.index[1] == "a"
@test pool.index[2] == "b"
@test pool.index[3] == "c"
@test isa(pool.invindex, Dict)
@test length(pool.invindex) == 3
@test pool.invindex["a"] === DefaultRefType(1)
@test pool.invindex["b"] === DefaultRefType(2)
@test pool.invindex["c"] === DefaultRefType(3)
@test isa(pool.order, Vector{DefaultRefType})
@test length(pool.order) == 3
@test pool.order[1] === DefaultRefType(1)
@test pool.order[2] === DefaultRefType(2)
@test pool.order[3] === DefaultRefType(3)
# leveltype() only accepts categorical value type
@test_throws ArgumentError leveltype("abc")
@test_throws ArgumentError leveltype(String)
@test_throws ArgumentError leveltype(1.0)
@test_throws ArgumentError leveltype(Int)
for i in 1:3
x = catvalue(i, pool)
@test iscatvalue(x)
@test iscatvalue(typeof(x))
@test eltype(x) === Char
@test eltype(typeof(x)) === Char
@test leveltype(x) === String
@test leveltype(typeof(x)) === String
@test reftype(x) === DefaultRefType
@test reftype(typeof(x)) === DefaultRefType
@test x isa CategoricalArrays.CategoricalString{DefaultRefType}
@test isa(level(x), DefaultRefType)
@test level(x) === DefaultRefType(i)
@test isa(CategoricalArrays.pool(x), CategoricalPool)
@test CategoricalArrays.pool(x) === pool
@test typeof(x)(x) === x
end
end
@testset "CategoricalPool, c b a order" begin
pool = CategoricalPool(
[
"a",
"b",
"c"
],
Dict(
"a" => DefaultRefType(1),
"b" => DefaultRefType(2),
"c" => DefaultRefType(3),
),
[
DefaultRefType(3),
DefaultRefType(2),
DefaultRefType(1),
]
)
@test isa(pool, CategoricalPool)
@test isa(pool.index, Vector)
@test length(pool.index) == 3
@test pool.index[1] == "a"
@test pool.index[2] == "b"
@test pool.index[3] == "c"
@test isa(pool.invindex, Dict)
@test length(pool.invindex) == 3
@test pool.invindex["a"] === DefaultRefType(1)
@test pool.invindex["b"] === DefaultRefType(2)
@test pool.invindex["c"] === DefaultRefType(3)
@test isa(pool.order, Vector{DefaultRefType})
@test length(pool.order) == 3
@test pool.order[1] === DefaultRefType(3)
@test pool.order[2] === DefaultRefType(2)
@test pool.order[3] === DefaultRefType(1)
for i in 1:3
y = catvalue(i, pool)
@test iscatvalue(y)
@test isa(level(y), DefaultRefType)
@test level(y) === DefaultRefType(i)
@test isa(CategoricalArrays.pool(y), CategoricalPool)
@test CategoricalArrays.pool(y) === pool
@test typeof(y)(y) === y
end
end
end
|
lemma continuous_on_tendsto_compose: assumes f_cont: "continuous_on s f" and g: "(g \<longlongrightarrow> l) F" and l: "l \<in> s" and ev: "\<forall>\<^sub>Fx in F. g x \<in> s" shows "((\<lambda>x. f (g x)) \<longlongrightarrow> f l) F" |
[STATEMENT]
lemma mydiv_mult_leq: "0 < k \<Longrightarrow> l\<le>k \<Longrightarrow> mydiv (l*A) k \<le> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < k; l \<le> k\<rbrakk> \<Longrightarrow> mydiv (l * A) k \<le> A
[PROOF STEP]
by(simp add: mydiv_le_E) |
-- | An implementation of the authoritative PCM
module Relation.Ternary.Separation.Construct.Market where
open import Level hiding (Lift)
open import Data.Product
open import Relation.Unary
open import Relation.Binary hiding (_⇒_)
open import Relation.Binary.PropositionalEquality as P
open import Relation.Ternary.Separation
open import Relation.Ternary.Separation.Morphisms
module _ {ℓ} (A : Set ℓ) where
data Market : Set ℓ where
offer : (l : A) → Market
demand : (r : A) → Market
module _ {ℓ} {A : Set ℓ} {{ sep : RawSep A }} {{ _ : IsSep sep }} where
data Split : Market A → Market A → Market A → Set ℓ where
offerₗ : {r l₁ l₂ : A} (σ : l₂ ⊎ r ≣ l₁) → Split (offer l₁) (demand r) (offer l₂)
offerᵣ : {r l₁ l₂ : A} (σ : r ⊎ l₂ ≣ l₁) → Split (demand r) (offer l₁) (offer l₂)
demand : {r₁ r₂ r : A} (σ : r₁ ⊎ r₂ ≣ r) → Split (demand r₁) (demand r₂) (demand r)
comm : ∀ {Φ₁ Φ₂ Φ} → Split Φ₁ Φ₂ Φ → Split Φ₂ Φ₁ Φ
comm (demand p) = demand (⊎-comm p)
comm (offerₗ σ) = offerᵣ (⊎-comm σ)
comm (offerᵣ σ) = offerₗ (⊎-comm σ)
assoc : ∀ {a b ab c abc} → Split a b ab → Split ab c abc → ∃ λ bc → (Split a bc abc) × (Split b c bc)
assoc (offerₗ σ₁) (offerₗ σ₂) =
let _ , σ₃ , σ₄ = ⊎-assoc σ₂ σ₁ in -, offerₗ σ₃ , demand (⊎-comm σ₄)
assoc (offerᵣ σ₁) (offerₗ σ₂) =
let _ , σ₃ , σ₄ = ⊎-unassoc σ₁ σ₂ in -, offerᵣ σ₃ , offerₗ σ₄
assoc (demand σ₁) (offerᵣ σ₂) =
let _ , σ₃ , σ₄ = ⊎-assoc (⊎-comm σ₁) σ₂ in -, offerᵣ σ₄ , offerᵣ σ₃
assoc (demand σ₁) (demand σ₂) =
let _ , σ₃ , σ₄ = ⊎-assoc σ₁ σ₂ in -, demand σ₃ , demand σ₄
instance market-raw-sep : RawSep (Market A)
RawSep._⊎_≣_ market-raw-sep = Split
instance market-has-sep : IsSep market-raw-sep
market-has-sep = record
{ ⊎-comm = comm
; ⊎-assoc = assoc
}
instance market-sep : Separation _
market-sep = record
{ isSep = market-has-sep }
module _ {a} {A : Set a} {{r : RawSep A}} {u} {{ s : IsUnitalSep r u }} where
module U = IsUnitalSep
instance market-is-unital : IsUnitalSep market-raw-sep (demand ε)
U.isSep market-is-unital = market-has-sep
U.⊎-idˡ market-is-unital {offer l} = offerᵣ ⊎-idˡ
U.⊎-idˡ market-is-unital {demand r} = demand ⊎-idˡ
U.⊎-id⁻ˡ market-is-unital (offerᵣ σ) = cong offer (sym (⊎-id⁻ˡ σ))
U.⊎-id⁻ˡ market-is-unital (demand σ) = cong demand (⊎-id⁻ˡ σ)
module _ {a} {{ s : MonoidalSep a }} where
open MonoidalSep s using () renaming (Carrier to A)
matching : ∀ {a b : A} {c d} → (demand a) ⊎ (offer b) ≣ c → (demand (d ∙ a)) ⊎ (offer (d ∙ b)) ≣ c
matching (offerᵣ σ) = offerᵣ (⊎-∙ₗ σ)
module _ {ℓ} {A : Set ℓ} {{_ : RawSep A}} where
private
variable
ℓv : Level
P Q : Pred (A × A) ℓv
[_]Completes : A → (A × A) → Set ℓ
[_]Completes x (y , z) = x ⊎ z ≣ y
data ● {p} (P : Pred (A × A) p) : Pred (Market A) (ℓ ⊔ p) where
lift : ∀ {xs l₂} → P xs → [ l₂ ]Completes xs → ● P (offer l₂)
●-map : ∀[ P ⇒ Q ] → ∀[ ● P ⇒ ● Q ]
●-map f (lift px le) = lift (f px) le
module _ {a} {A : Set a} {{r : RawSep A}} {u} {{s₁ : IsUnitalSep r u}} where
open Morphism
instance market : Morphism A (Market A)
j market = demand
j-⊎ market s = demand s
j-⊎⁻ market (demand σ) = -, refl , σ
module _ {a} {A : Set a} {{r : RawSep A}} {u} {{s₁ : IsUnitalSep r u}} where
open import Relation.Ternary.Separation.Construct.Product
open Morphism (market {A = A})
data ○ {p} (P : Pred (A × A) p) : Pred (Market A) (p) where
lift : ∀ {xs} → P (ε , xs) → ○ P (demand xs)
module _ {p q} {P : Pred A p} {Q : Pred (A × A) q} where
○≺●ₗ : ∀[ P ⇒ⱼ ● Q ─✴ ● (Π₂ P ✴ Q) ]
app (○≺●ₗ px) (lift qx σ₂) (offerᵣ σ₁) with ⊎-assoc (⊎-comm σ₁) σ₂
... | _ , σ₃ , σ₄ = lift (snd px ×⟨ ⊎-idˡ , σ₄ ⟩ qx ) σ₃
○≺●ᵣ : ∀[ ● (Π₂ P ✴ Q) ⇒ J P ✴ ● Q ]
○≺●ᵣ (lift (snd px ×⟨ σₗ , σᵣ ⟩ qx) σ₂) with ⊎-id⁻ˡ σₗ
... | refl with ⊎-unassoc σ₂ σᵣ
... | _ , σ₃ , σ₄ = inj px ×⟨ offerᵣ (⊎-comm σ₃) ⟩ lift qx σ₄
{- Complete with respect to a certain element -}
module _ {a} {A : Set a} {{r : RawSep A}} {u} {{ s : IsUnitalSep r u }} where
open import Relation.Ternary.Separation.Construct.Product
open Morphism (market {A = A})
record _◑_ {p q} (P : Pred A p) (Q : Pred (A × A) q) (Φ : A × A) : Set (a ⊔ p ⊔ q) where
constructor _◑⟨_⟩_
field
{Φp Φq} : _
px : P Φp
inc : proj₁ Φ ⊎ Φp ≣ Φq
qx : Q (Φq , proj₂ Φ)
-- the following cannot be proven unfortunately
-- _ : ∀[ (P ◑ Q₁) ✴ Q₂ ⇒ P ◑ (Q₁ ✴ Q₂) ]
absorb : ∀ {p q} {P : Pred A p} {Q : Pred (A × A) q} →
∀[ P ⇒ⱼ ● Q ─✴ ● (P ◑ Q) ]
app (absorb px) (lift qx k) (offerᵣ σ) with ⊎-assoc (⊎-comm σ) k
... | _ , σ₂ , σ₃ with ⊎-unassoc σ₂ (⊎-comm σ₃)
... | _ , σ₄ , σ₅ = lift (px ◑⟨ σ₅ ⟩ qx) σ₄
expell : ∀ {p q} {P : Pred A p} {Q : Pred (A × A) q} →
∀[ ● (P ◑ Q) ⇒ J P ✴ ● Q ]
expell (lift (px ◑⟨ τ₁ ⟩ qx) k) with ⊎-unassoc (⊎-comm τ₁) k
... | _ , τ₃ , τ₄ = (inj px) ×⟨ offerᵣ τ₃ ⟩ (lift qx τ₄)
{- Completion preserving updates -}
module _ {a} {A : Set a} {{r : RawSep A}} {u} {{ s : IsUnitalSep r u }} where
open import Relation.Ternary.Separation.Construct.Product
record ⟰_ {p} (P : Pred (A × A) p) (Φᵢ : A × A) : Set (a ⊔ p) where
constructor complete
field
updater : ∀ {Φⱼ Φₖ} →
Φᵢ ⊎ Φⱼ ≣ (Φₖ , Φₖ) →
∃₂ λ Φₗ Φ → Φₗ ⊎ Φⱼ ≣ (Φ , Φ) × P Φₗ
open ⟰_ public
●-update : ∀ {p q} {P : Pred (A × A) p} {Q : Pred (A × A) q} →
∀[ ○ (P ─✴ ⟰ Q) ⇒ ● P ─✴ ● Q ]
app (●-update (lift f)) (lift px σ₁) (offerᵣ σ₂) with ⊎-assoc (⊎-comm σ₂) σ₁
... | _ , σ₃ , σ₄ with updater (app f px (⊎-idˡ , σ₄)) (⊎-idʳ , ⊎-comm σ₃)
... | _ , _ , (σ₅ , σ₆) , qx with ⊎-id⁻ʳ σ₅
... | refl = lift qx (⊎-comm σ₆)
|
#include <common.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
void export_vector_table()
{
class_<std::vector<parser_t::table_t> >("VectorTable")
.def(vector_indexing_suite<std::vector<parser_t::table_t> >())
;
}
|
function LofOffsetPointing(arg0::Frame, arg1::BodyShape, arg2::AttitudeProvider, arg3::Vector3D)
return LofOffsetPointing((Frame, BodyShape, AttitudeProvider, Vector3D), arg0, arg1, arg2, arg3)
end
function get_attitude(obj::LofOffsetPointing, arg0::FieldPVCoordinatesProvider, arg1::FieldAbsoluteDate, arg2::Frame)
return jcall(obj, "getAttitude", FieldAttitude, (FieldPVCoordinatesProvider, FieldAbsoluteDate, Frame), arg0, arg1, arg2)
end
function get_attitude(obj::LofOffsetPointing, arg0::PVCoordinatesProvider, arg1::AbsoluteDate, arg2::Frame)
return jcall(obj, "getAttitude", Attitude, (PVCoordinatesProvider, AbsoluteDate, Frame), arg0, arg1, arg2)
end
function get_target_pv(obj::LofOffsetPointing, arg0::FieldPVCoordinatesProvider, arg1::FieldAbsoluteDate, arg2::Frame)
return jcall(obj, "getTargetPV", TimeStampedFieldPVCoordinates, (FieldPVCoordinatesProvider, FieldAbsoluteDate, Frame), arg0, arg1, arg2)
end
function get_target_pv(obj::LofOffsetPointing, arg0::PVCoordinatesProvider, arg1::AbsoluteDate, arg2::Frame)
return jcall(obj, "getTargetPV", TimeStampedPVCoordinates, (PVCoordinatesProvider, AbsoluteDate, Frame), arg0, arg1, arg2)
end
|
%-------------------------
% Software developer resume in Latex
% Author : Anuj Karn
% License : MIT
%------------------------
\documentclass[letterpaper,11pt]{article}
\usepackage{latexsym}
\usepackage[empty]{fullpage}
\usepackage{titlesec}
\usepackage{marvosym}
\usepackage[usenames,dvipsnames]{color}
\usepackage{verbatim}
\usepackage{enumitem}
\usepackage[hidelinks]{hyperref}
\usepackage{fancyhdr}
\usepackage[english]{babel}
\pagestyle{fancy}
\fancyhf{} % clear all header and footer fields
\fancyfoot{}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
% Adjust margins
\addtolength{\oddsidemargin}{-0.5in}
\addtolength{\evensidemargin}{-0.5in}
\addtolength{\textwidth}{1in}
\addtolength{\topmargin}{-.5in}
\addtolength{\textheight}{1.0in}
\urlstyle{same}
\raggedbottom
\raggedright
\setlength{\tabcolsep}{0in}
% Sections formatting
\titleformat{\section}{
\vspace{2pt}\scshape\raggedright\large
}{}{0em}{}[\color{black}\titlerule \vspace{2pt}]
%-------------------------
% Custom commands
\newcommand{\resumeItem}[2]{
\item\small{
\textbf{#1}{: #2 \vspace{-2pt}}
}
}
\newcommand{\resumeSubheading}[4]{
\vspace{-1pt}\item
\begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r}
\textbf{#1} & #2 \\
\textit{\small#3} & \textit{\small #4} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubItem}[2]{\resumeItem{#1}{#2}\vspace{-4pt}}
\renewcommand{\labelitemii}{$\circ$}
\newcommand{\resumeSubHeadingListStart}{\begin{itemize}[leftmargin=*]}
\newcommand{\resumeSubHeadingListEnd}{\end{itemize}}
\newcommand{\resumeItemListStart}{\begin{itemize}}
\newcommand{\resumeItemListEnd}{\end{itemize}\vspace{-5pt}}
%-------------------------------------------
%%%%%% CV STARTS HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%----------HEADING-----------------
\begin{tabular*}{\textwidth}{c @{\extracolsep{\fill}}c c} %That's what she said%
\href{mailto:[email protected]}{[email protected]} & \textbf{\href{https://anujkarn.com.np/}{\huge Anuj Karn}} & GitHub: \href{https://github.com/anujkarn002}{github.com/anujkarn002}\\
Kathmandu, Nepal & \href{https://anujkarn.com.np/}{anujkarn.com.np} & Linked In: \href{https://linkedin.com/in/anujkarn002}{linkedin.com/in/anujkarn002}\\
\end{tabular*}
%-----------EXPERIENCE-----------------
\section{Experience}
\resumeSubHeadingListStart
\resumeSubheading
{\href{http://sunya.health}{Sunya Health}}{Lalitpur, Nepal}
{Software Developer}{Dec 2019 - present}
\resumeItemListStart
\resumeItem{Mobile \& Web apps}
{Design \& implement Android, and Web app with a team of 3 (ReactJS, JavaScript, Java, Django, OpenCV)}
\resumeItem{Code Conversion}
{Convert OpenCV algorithm from python to java (Python, Java, OpenCV)}
\resumeItem{Algorithm Tweaking}
{Made changes in algorithm to increase accuracy (Python, Java, OpenCV)}
\resumeItemListEnd
\resumeSubHeadingListEnd
%-----------PROJECTS-----------------
\section{Projects}
\resumeSubHeadingListStart
\resumeSubItem{\href{http://github.com/anujkarn002/adminpanel}{Admin Panel}}
{Admin Panel for e-commerce using php from scratch (PHP, MySQL)}
\resumeSubItem{\href{https://github.com/anujkarn002/designyourway}{Design Your Way}}
{A template for sapper app using tailwind css deployable to firebase.}
\resumeSubHeadingListEnd
%-----------EDUCATION-----------------
\section{Education}
\resumeSubHeadingListStart
\resumeSubheading
{GED}{Kathmandu, Nepal}
{US High School Equivalent}{Oct. 2019 }
\resumeSubHeadingListEnd
%-----------INTERESTS-----------------
\section{Interests}
{Sketching, Robotics, Personal Productivity, Entrepreneurship, Artificial Intelligence, Iron Man (Movies)}
%-------------------------------------------
\end{document}
|
from pygeos import lib
import numpy as np
__all__ = ["STRtree"]
class STRtree:
"""A query-only R-tree created using the Sort-Tile-Recursive (STR)
algorithm.
For two-dimensional spatial data. The actual tree will be constructed at the first
query.
Parameters
----------
geometries : array_like
leafsize : int
the maximum number of child nodes that a node can have
Examples
--------
>>> import pygeos
>>> geoms = pygeos.points(np.arange(10), np.arange(10))
>>> tree = pygeos.STRtree(geoms)
>>> tree.query(pygeos.box(2, 2, 4, 4)).tolist()
[2, 3, 4]
"""
def __init__(self, geometries, leafsize=5):
self._tree = lib.STRtree(np.asarray(geometries, dtype=np.object), leafsize)
def __len__(self):
return self._tree.count
def query(self, envelope):
"""Return all items whose extent intersect the given envelope.
Parameters
----------
envelope : Geometry
The envelope of the geometry is taken automatically.
"""
return self._tree.query(envelope)
@property
def geometries(self):
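"""Array of the geometries used to construct the tree."""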
return self._tree.geometries
|
subroutine setcurrentdatetime(timnow, gdp)
!----- GPL ---------------------------------------------------------------------
!
! Copyright (C) Stichting Deltares, 2011-2016.
!
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation version 3.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
!
! contact: [email protected]
! Stichting Deltares
! P.O. Box 177
! 2600 MH Delft, The Netherlands
!
! All indications and logos of, and references to, "Delft3D" and "Deltares"
! are registered trademarks of Stichting Deltares, and remain the property of
! Stichting Deltares. All rights reserved.
!
!-------------------------------------------------------------------------------
! $Id: setcurrentdatetime.f90 5717 2016-01-12 11:35:24Z mourits $
! $HeadURL: https://svn.oss.deltares.nl/repos/delft3d/tags/6686/src/engines_gpl/flow2d3d/packages/kernel/src/general/setcurrentdatetime.f90 $
!!--description-----------------------------------------------------------------
! Set the current time administration in the gdp structure (timsec, timmin,
! timhr, date_time and i_date_time), based on the half-time-step counter
! timnow and the reference Julian day julday
!
!!--pseudo code and references--------------------------------------------------
! NONE
!!--declarations----------------------------------------------------------------
use precision
!
use globaldata
!
implicit none
!
! Global variables
!
type(globdat),target :: gdp
real(fp) :: timnow
!
! The following list of pointer parameters is used to point inside the gdp structure
!
real(fp) , pointer :: hdt
real(fp) , pointer :: timsec
real(fp) , pointer :: timmin
real(hp) , pointer :: dtimmin
real(fp) , pointer :: timhr ! Current timestep (in hours) TIMNOW * 2 * HDT / 3600.
integer , pointer :: julday
character(24) , pointer :: date_time
integer, dimension(:) , pointer :: i_date_time
!
!
! Local variables
!
integer :: idate ! Current date, based on JULDAY and TIMSEC
integer :: iday ! Days in IDATE
integer :: ifrac ! Seconds fraction (4 digits) in TIMSEC
integer :: ihour ! Hours in ITIME
integer :: imin ! Minutes in ITIME
integer :: imon ! Months in IDATE
integer :: isec ! Seconds in ITIME
integer :: itime ! Current time without seconds fraction, based on JULDAY and TIMSEC
integer :: iyear ! Years in IDATE
integer(long) :: itimsec ! int(TIMSEC), + 1 when rounding the fraction to 4 digits results in exactly one second
!
!! executable statements -------------------------------------------------------
!
hdt => gdp%gdnumeco%hdt
timsec => gdp%gdinttim%timsec
timmin => gdp%gdinttim%timmin
dtimmin => gdp%gdinttim%dtimmin
timhr => gdp%gdinttim%timhr
julday => gdp%gdinttim%julday
date_time => gdp%gdinttim%date_time
i_date_time => gdp%gdinttim%i_date_time
!
timsec = timnow * 2.0_fp * hdt
timmin = timsec / 60.0_fp
dtimmin = real(timsec, hp) / 60.0_hp
timhr = timsec / 3600.0_fp
!
itimsec = int(timsec,long)
ifrac = nint((timsec - real(itimsec,fp))*10000.0_fp)
if (ifrac > 9999) then
ifrac = ifrac - 10000
itimsec = itimsec + 1
endif
call timdat(julday, real(itimsec, fp), idate, itime)
iyear = idate/10000
imon = (idate - iyear*10000)/100
iday = idate - iyear*10000 - imon*100
ihour = itime/10000
imin = (itime - ihour*10000)/100
isec = itime - ihour*10000 - imin*100
write (date_time, '(i4.4,2(a1,i2.2),a1,3(i2.2,a1),i4.4)') &
& iyear, '-', imon, '-', iday, ' ', ihour, ':', imin, ':', isec, '.', ifrac
i_date_time(1) = iyear
i_date_time(2) = imon
i_date_time(3) = iday
i_date_time(4) = ihour
i_date_time(5) = imin
i_date_time(6) = isec
end subroutine setcurrentdatetime
|
%%
% Visual display of convolutive kernels.
addpath('../toolbox/');
addpath('../toolbox/img/');
addpath('../toolbox/export_fig/');
rep = 'results/rkhs/';
[~,~] = mkdir(rep);
normalize = @(x)x/sum(x(:));
if not(exist('kernel'))
kernel = 'gaussian-medium';
kernel = 'gaussian-large';
kernel = 'gaussian-small';
kernel = 'energy-dist';
end
% grid points for display
q = 512;
% with padding
q1 = q*8;
% input
rand('state', 1243);
N = 20;
mu = zeros(q,q);
I = randperm(q*q);
mu(I(1:N)) = 1/N;
nu = zeros(q,q);
I = randperm(q*q);
nu(I(1:N)) = 1/N;
mu = load_image('shape-1',q);
nu = load_image('shape-2',q);
mu = normalize(rescale(-mu));
nu = normalize(rescale(-nu));
xi = mu-nu;
% compute sqrt kernel
u = 2 * [0:q1/2, -q1/2+1:-1]' / q1;
[Y,X] = meshgrid(u,u);
D = sqrt(X.^2+Y.^2);
% soft max
s=1/5;
sm = @(t)s*log( 1+exp(t/s) );
switch kernel
case 'energy-dist'
r = .3;
K = max(r-D,0);
K = sm(r-D);
case 'gaussian-small'
sigma = .005;
K = exp( -D.^2./(2*sigma^2) );
case 'gaussian-medium'
sigma = .02;
K = exp( -D.^2./(2*sigma^2) );
case 'gaussian-large'
sigma = .05;
K = exp( -D.^2./(2*sigma^2) );
end
Kh = fft2(K);
% Kh = max(real(Kh),0);
% for display
K1 = real( ifft2(sqrt(Kh)) );
K1 = fftshift(K1);
K1 = K1(end/2-q/2:end/2+q/2-1, end/2-q/2:end/2+q/2-1);
% zero padding
U = zeros(q1);
U(1:q,1:q) = xi;
xiC = real( ifft2( fft2(U) .* sqrt(Kh) ) );
xiC = xiC(1:q,1:q);
mu1 = mu/max(mu(:));
nu1 = nu/max(nu(:));
imagesc( cat(3, 2-nu1, 2-mu1-nu1, 2-mu1 )/2 );
axis image; axis off;
colormap jet(256);
% saveas(gcf, [rep 'input.png'], 'png');
export_fig([rep 'input.png'], '-m3');
disp_distrib(xiC);
axis image; axis off;
% saveas(gcf, [rep kernel '.png'], 'png');
export_fig([rep kernel '.png'], '-m3');
if strcmp(kernel, 'energy-dist')
t = linspace(-1,1,q);
[y,x] = meshgrid(t,t);
r = .5*1e-3;
A = -1 ./ sqrt(r + x.^2+y.^2); A = A/max(abs(A(:)));
disp_distrib( A, 40 );
else
disp_distrib(-K1, 40);
end
axis image; axis off;
% saveas(gcf, [rep kernel '-kernel.png'], 'png');
export_fig([rep kernel '-kernel.png'], '-m3');
|
DV := NumberTheory[Divisors]:
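# ghost_coeff(n)(x) is the n-th ghost component of a (big) Witt vector: w_n(x) = add(d*x[d]^(n/d), d | n).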
ghost_coeff := (n) -> proc(x)
local d;
return add(d * x[d]^(n/d), d in DV(n));
end:
ghost_vector := (n::posint) -> proc(x)
local m;
return [seq(ghost_coeff(m)(x),m=1..n)];
end:
unghost_vector := (n::posint) -> proc(w)
local x,m,d;
x := []:
for m from 1 to n do
x := [op(x),expand((w[m] - add(d*x[d]^(m/d),d in DV(m) minus {m}))/m)];
od:
return x;
end:
witt_add_term := proc(n::posint)
option remember;
local x,y,u,d,rel;
for d in DV(n) minus {n} do
u[d] := witt_add_term(d)(x,y);
od:
rel := ghost_coeff(n)(u) - ghost_coeff(n)(x) - ghost_coeff(n)(y);
u[n] := expand(solve(rel,u[n]));
return unapply(u[n],x,y);
end:
witt_add := (n::posint) -> proc(x,y)
local m;
return [seq(witt_add_term(m)(x,y),m=1..n)];
end:
witt_mult_term := proc(n::posint)
option remember;
local x,y,u,d,rel;
for d in DV(n) minus {n} do
u[d] := witt_mult_term(d)(x,y);
od:
rel := ghost_coeff(n)(u) - ghost_coeff(n)(x) * ghost_coeff(n)(y);
u[n] := expand(solve(rel,u[n]));
return unapply(u[n],x,y);
end:
witt_mult := (n::posint) -> proc(x,y)
local m;
return [seq(witt_mult_term(m)(x,y),m=1..n)];
end:
witt_series := (n::posint) -> proc(x)
local m;
return expand(rem(mul(1 - x[m]*t^m,m=1..n),t^(n+1),t));
end:
witt_unseries := (n::posint) -> proc(f)
local c,g,d;
c := []:
g := f;
for d from 1 to n do
c := [op(c),-coeff(g,t,d)];
g := expand(convert(series(g/(1-c[d]*t^d),t=0,n+1),polynom,t));
od:
return c;
end:
ghost_p_coeff := (n) -> proc(x)
local i;
return add(p^i * x[i]^(p^(n-i)), i=0..n);
end:
ghost_p_vector := (n::nonnegint) -> proc(x)
local i;
return table([seq(i = ghost_p_coeff(i)(x),i=0..n-1)]);
end:
ghost_p_add := (n::nonnegint) -> proc(a,b)
local i;
return table([seq(i=expand(a[i]+b[i]),i=0..n-1)]);
end:
ghost_p_sub := (n::nonnegint) -> proc(a,b)
local i;
return table([seq(i=expand(a[i]-b[i]),i=0..n-1)]);
end:
ghost_p_mult := (n::nonnegint) -> proc(a,b)
local i;
return table([seq(i=expand(a[i]*b[i]),i=0..n-1)]);
end:
ghost_p_is_equal := (n::nonnegint) -> proc(a,b)
local i;
return `and`(seq(evalb(simplify(a[i]-b[i])=0),i=0..n-1));
end:
unghost_p_vector := (n::nonnegint) -> proc(w)
local x,m,k;
x := table():
for m from 0 to n-1 do
x[m] := expand((w[m] - add(p^k*x[k]^(p^(m-k)),k=0..m-1))/p^m);
od:
return eval(x);
end:
witt_p_add_term := proc(n::nonnegint)
option remember;
local x,y,u,m,rel;
u := table():
for m from 0 to n-1 do
u[m] := witt_p_add_term(m)(x,y);
od:
rel := ghost_p_coeff(n)(u) - ghost_p_coeff(n)(x) - ghost_p_coeff(n)(y);
u[n] := expand(solve(rel,u[n]));
return unapply(u[n],x,y);
end:
witt_p_add := (n::posint) -> proc(x,y)
local m;
return table([seq(m=witt_p_add_term(m)(x,y),m=0..n-1)]);
end:
witt_p_mult_term := proc(n::nonnegint)
option remember;
local x,y,u,m,rel;
u := table():
for m from 0 to n-1 do
u[m] := witt_p_mult_term(m)(x,y);
od:
rel := ghost_p_coeff(n)(u) - ghost_p_coeff(n)(x) * ghost_p_coeff(n)(y);
u[n] := expand(solve(rel,u[n]));
return unapply(u[n],x,y);
end:
witt_p_mult := (n::posint) -> proc(x,y)
local m;
return table([seq(m=witt_p_mult_term(m)(x,y),m=0..n-1)]);
end: |
If $a$ and $b$ are coprime and $b \neq 1$, then there exist integers $x$ and $y$ such that $a x = b y + 1$. |
[STATEMENT]
lemma construct_tree_post:
assumes "y \<le> v"
and "construct_tree_inv v x y D R"
shows "construct_tree_post x y D R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
have "v*x\<^sup>T \<le> D\<^sup>T\<^sup>\<star>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v * x\<^sup>T \<le> D\<^sup>T\<^sup>\<star>
[PROOF STEP]
by (metis (no_types, lifting) assms(2) conv_contrav conv_invol conv_iso star_conv)
[PROOF STATE]
proof (state)
this:
v * x\<^sup>T \<le> D\<^sup>T\<^sup>\<star>
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
hence 1: "v \<le> D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
using this:
v * x\<^sup>T \<le> D\<^sup>T\<^sup>\<star>
goal (1 subgoal):
1. v \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
using assms point_def ss423bij
[PROOF STATE]
proof (prove)
using this:
v * x\<^sup>T \<le> D\<^sup>T\<^sup>\<star>
y \<le> v
construct_tree_inv v x y D R
point ?x \<equiv> is_vector ?x \<and> bijective ?x
bijective ?x \<Longrightarrow> (?y * ?x\<^sup>T \<le> ?z) = (?y \<le> ?z * ?x)
goal (1 subgoal):
1. v \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
v \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
hence 2: "D\<^sup>T*1 \<le> D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
using this:
v \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. D\<^sup>T * 1 \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
using assms le_supE
[PROOF STATE]
proof (prove)
using this:
v \<le> D\<^sup>T\<^sup>\<star> * x
y \<le> v
construct_tree_inv v x y D R
\<lbrakk>?a + ?b \<le> ?x; \<lbrakk>?a \<le> ?x; ?b \<le> ?x\<rbrakk> \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. D\<^sup>T * 1 \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
D\<^sup>T * 1 \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
have "D\<^sup>\<star>*y \<le> D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D\<^sup>\<star> * y \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
proof (rule star_inductl, rule sup.boundedI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. y \<le> D\<^sup>T\<^sup>\<star> * x
2. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
show "y \<le> D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
using 1 assms order.trans
[PROOF STATE]
proof (prove)
using this:
v \<le> D\<^sup>T\<^sup>\<star> * x
y \<le> v
construct_tree_inv v x y D R
\<lbrakk>?a \<le> ?b; ?b \<le> ?c\<rbrakk> \<Longrightarrow> ?a \<le> ?c
goal (1 subgoal):
1. y \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
have "D*(D\<^sup>T\<^sup>\<star>*x) = D*x + D*D\<^sup>T\<^sup>+*x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) = D * x + D * D\<^sup>T\<^sup>+ * x
[PROOF STEP]
by (metis conway.dagger_unfoldl_distr distrib_left mult_assoc)
[PROOF STATE]
proof (state)
this:
D * (D\<^sup>T\<^sup>\<star> * x) = D * x + D * D\<^sup>T\<^sup>+ * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
D * (D\<^sup>T\<^sup>\<star> * x) = D * x + D * D\<^sup>T\<^sup>+ * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
have "... = D*D\<^sup>T\<^sup>+*x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D * x + D * D\<^sup>T\<^sup>+ * x = D * D\<^sup>T\<^sup>+ * x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
y \<le> v
construct_tree_inv v x y D R
goal (1 subgoal):
1. D * x + D * D\<^sup>T\<^sup>+ * x = D * D\<^sup>T\<^sup>+ * x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
D * x + D * D\<^sup>T\<^sup>+ * x = D * D\<^sup>T\<^sup>+ * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
D * x + D * D\<^sup>T\<^sup>+ * x = D * D\<^sup>T\<^sup>+ * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
have "... \<le> 1'*D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D * D\<^sup>T\<^sup>+ * x \<le> 1' * D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
by (metis assms(2) is_inj_def mult_assoc mult_isor)
[PROOF STATE]
proof (state)
this:
D * D\<^sup>T\<^sup>+ * x \<le> 1' * D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
D * (D\<^sup>T\<^sup>\<star> * x) \<le> 1' * D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
show "D*(D\<^sup>T\<^sup>\<star>*x) \<le> D\<^sup>T\<^sup>\<star>*x"
[PROOF STATE]
proof (prove)
using this:
D * (D\<^sup>T\<^sup>\<star> * x) \<le> 1' * D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
D * (D\<^sup>T\<^sup>\<star> * x) \<le> D\<^sup>T\<^sup>\<star> * x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
D\<^sup>\<star> * y \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
thus "construct_tree_post x y D R"
[PROOF STATE]
proof (prove)
using this:
D\<^sup>\<star> * y \<le> D\<^sup>T\<^sup>\<star> * x
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
using 2 assms
[PROOF STATE]
proof (prove)
using this:
D\<^sup>\<star> * y \<le> D\<^sup>T\<^sup>\<star> * x
D\<^sup>T * 1 \<le> D\<^sup>T\<^sup>\<star> * x
y \<le> v
construct_tree_inv v x y D R
goal (1 subgoal):
1. construct_tree_post x y D R
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
construct_tree_post x y D R
goal:
No subgoals!
[PROOF STEP]
qed |
module LineListModule
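  ! -- LineListModule defines LineListType, a list of character strings
  !    backed by ListType, with procedures to add, retrieve, count, clear,
  !    and test membership of lines, plus the same_lines comparison.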
use CharacterContainerModule, only: CharacterContainerType, &
CastAsCharacterContainerType, ConstructCharacterContainer
use ConstantsModule, only: LENBIGLINE
use InputOutputModule, only: same_word
use ListModule, only: ListType
private
public :: LineListType, same_lines
type :: LineListType
type(ListType), pointer, private :: List => null()
contains
procedure :: InitializeLineList
procedure :: AddLine
procedure :: Clear
procedure :: CountLines
procedure :: GetLine
procedure :: Includes
end type LineListType
contains
! Type-bound procedures for LineListType
subroutine InitializeLineList(this)
implicit none
! dummy
class(LineListType) :: this
!
allocate(this%List)
!
return
end subroutine InitializeLineList
subroutine AddLine(this, line)
implicit none
! dummy
class(LineListType) :: this
character(len=*), intent(in) :: line
! local
class(*), pointer :: obj => null()
type(CharacterContainerType), pointer :: charCont => null()
!
call ConstructCharacterContainer(charCont, line)
obj => charCont
call this%List%Add(obj)
!
return
end subroutine AddLine
subroutine GetLine(this, indx, line)
implicit none
! dummy
class(LineListType) :: this
integer, intent(in) :: indx
character(len=*) :: line
! local
class(*), pointer :: obj => null()
type(CharacterContainerType), pointer :: charCont => null()
!
obj => this%List%GetItem(indx)
charCont => CastAsCharacterContainerType(obj)
line = charCont%charstring
!
return
end subroutine GetLine
integer function CountLines(this)
implicit none
! dummy
class(LineListType) :: this
!
CountLines = this%List%Count()
!
return
end function CountLines
subroutine Clear(this, destroy)
implicit none
! dummy
class(LineListType) :: this
logical, intent(in), optional :: destroy
! local
logical :: destroylocal
!
if (present(destroy)) then
destroylocal = destroy
else
destroylocal = .false.
endif
!
call this%List%Clear(destroylocal)
!
return
end subroutine Clear
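  ! -- Includes returns .true. if line matches an entry in the list.
  !    When caseSensitive is .false., entries are compared with same_word.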
function Includes(this, line, caseSensitive) result(incl)
implicit none
! dummy
class(LineListType), intent(in) :: this
character(len=*), intent(in) :: line
logical, intent(in) :: caseSensitive
logical :: incl
! local
integer :: i, n
character(len=LENBIGLINE) :: linelocal
!
incl = .false.
n = this%List%Count()
do i=1,n
call this%GetLine(i,linelocal)
if (caseSensitive) then
if (line == linelocal) then
incl = .true.
exit
endif
else
if (same_word(line,linelocal)) then
incl = .true.
exit
endif
endif
enddo
!
return
end function Includes
! Non-type-bound procedures
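  ! -- same_lines returns .true. when listA and listB hold the same number
  !    of lines and corresponding lines are identical (case-sensitive).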
logical function same_lines(listA, listB)
implicit none
! dummy
class(LineListType) :: listA, listB
! local
integer :: i, kListA, kListB
character(len=200) :: lineA, lineB
!
kListA = listA%CountLines()
kListB = listB%CountLines()
!
same_lines = .true.
if (kListA==kListB) then
do i=1,kListA
call listA%GetLine(i, lineA)
call listB%GetLine(i, lineB)
if (lineA /= lineB) then
same_lines = .false.
exit
endif
enddo
else
same_lines = .false.
endif
!
return
end function same_lines
end module LineListModule
|
(** * Equiv: Program Equivalence *)
Set Warnings "-notation-overridden,-parsing,-deprecated-hint-without-locality".
From PLF Require Import Maps.
From Coq Require Import Bool.Bool.
From Coq Require Import Arith.Arith.
From Coq Require Import Init.Nat.
From Coq Require Import Arith.PeanoNat. Import Nat.
From Coq Require Import Arith.EqNat.
From Coq Require Import Lia.
From Coq Require Import Lists.List. Import ListNotations.
From Coq Require Import Logic.FunctionalExtensionality.
From PLF Require Export Imp.
(** *** Before You Get Started:
- Create a fresh directory for this volume. (Do not try to mix the
files from this volume with files from _Logical Foundations_ in
the same directory: the result will not make you happy.) You
can either start with an empty directory and populate it with
the files listed below, or else download the whole PLF zip file
and unzip it.
- The new directory should contain at least the following files:
- [Imp.v] (make sure it is the one from the PLF distribution,
not the one from LF: they are slightly different);
- [Maps.v] (ditto)
- [Equiv.v] (this file)
- [_CoqProject], containing the following line:
-Q . PLF
- If you see errors like this...
Compiled library PLF.Maps (in file
/Users/.../plf/Maps.vo) makes inconsistent assumptions
over library Coq.Init.Logic
... it may mean something went wrong with the above steps.
Doing "[make clean]" (or manually removing everything except
[.v] and [_CoqProject] files) may help.
- If you are using VSCode with the VSCoq extension, you'll then
want to open a new window in VSCode, click [Open Folder > plf],
and run [make]. If you get an error like "Cannot find a
physical path...", it may be because you didn't open plf
directly (you instead opened a folder containing both lf and
plf, for example). *)
(** *** Advice for Working on Exercises:
- Most of the Coq proofs we ask you to do in this chapter are
similar to proofs that we've provided. Before starting to work
on exercises, take the time to work through our proofs (both
informally and in Coq) and make sure you understand them in
detail. This will save you a lot of time.
- The Coq proofs we're doing now are sufficiently complicated that
it is more or less impossible to complete them by random
experimentation or following your nose. You need to start with
an idea about why the property is true and how the proof is
going to go. The best way to do this is to write out at least a
sketch of an informal proof on paper -- one that intuitively
convinces you of the truth of the theorem -- before starting to
work on the formal one. Alternately, grab a friend and try to
convince them that the theorem is true; then try to formalize
your explanation.
- Use automation to save work! The proofs in this chapter can get
pretty long if you try to write out all the cases explicitly. *)
(* ################################################################# *)
(** * Behavioral Equivalence *)
(** In an earlier chapter, we investigated the correctness of a very
simple program transformation: the [optimize_0plus] function. The
programming language we were considering was the first version of
the language of arithmetic expressions -- with no variables -- so
in that setting it was very easy to define what it means for a
program transformation to be correct: it should always yield a
program that evaluates to the same number as the original.
To talk about the correctness of program transformations for the
full Imp language -- in particular, assignment -- we need to
consider the role of mutable state and develop a more
sophisticated notion of correctness, which we'll call _behavioral
equivalence_. *)
(* ================================================================= *)
(** ** Definitions *)
(** For [aexp]s and [bexp]s with variables, the definition we want is
clear: Two [aexp]s or [bexp]s are "behaviorally equivalent" if
they evaluate to the same result in every state. *)
Definition aequiv (a1 a2 : aexp) : Prop :=
forall (st : state),
aeval st a1 = aeval st a2.
Definition bequiv (b1 b2 : bexp) : Prop :=
forall (st : state),
beval st b1 = beval st b2.
(** Here are some simple examples of equivalences of arithmetic
and boolean expressions. *)
Theorem aequiv_example:
aequiv
<{ X - X }>
<{ 0 }>.
Proof.
intros st. simpl. lia.
Qed.
Theorem bequiv_example:
bequiv
<{ X - X = 0 }>
<{ true }>.
Proof.
intros st. unfold beval.
rewrite aequiv_example. reflexivity.
Qed.
(** For commands, the situation is a little more subtle. We
can't simply say "two commands are behaviorally equivalent if they
evaluate to the same ending state whenever they are started in the
same initial state," because some commands, when run in some
starting states, don't terminate in any final state at all!
What we need instead is this: two commands are behaviorally
equivalent if, for any given starting state, they either (1) both
diverge or (2) both terminate in the same final state. A compact
way to express this is "if the first one terminates in a
particular state then so does the second, and vice versa." *)
Definition cequiv (c1 c2 : com) : Prop :=
forall (st st' : state),
(st =[ c1 ]=> st') <-> (st =[ c2 ]=> st').
Definition refines (c1 c2 : com) : Prop :=
forall (st st' : state),
(st =[ c1 ]=> st') -> (st =[ c2 ]=> st').
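(** The next little example is an editorial addition (it is not part of the
    original chapter text); it simply checks that [refines] is reflexive.
    Note that two commands are equivalent exactly when each refines the
    other. *)
Example refines_refl : forall c,
  refines c c.
Proof. unfold refines. intros. assumption. Qed.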
(* ================================================================= *)
(** ** Simple Examples *)
(** For examples of command equivalence, let's start by looking at
some trivial program equivalences involving [skip]: *)
Theorem skip_left : forall c,
cequiv
<{ skip; c }>
c.
Proof.
(* WORKED IN CLASS *)
intros c st st'.
split; intros H.
- (* -> *)
inversion H. subst.
inversion H2. subst.
assumption.
- (* <- *)
apply E_Seq with st.
apply E_Skip.
assumption.
Qed.
(** **** Exercise: 2 stars, standard (skip_right)
Prove that adding a [skip] after a command results in an
equivalent program *)
Theorem skip_right : forall c,
cequiv
<{ c ; skip }>
c.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** Similarly, here is a simple equivalence that optimizes [if]
commands: *)
Theorem if_true_simple : forall c1 c2,
cequiv
<{ if true then c1 else c2 end }>
c1.
Proof.
intros c1 c2.
split; intros H.
- (* -> *)
inversion H; subst. assumption. discriminate.
- (* <- *)
apply E_IfTrue. reflexivity. assumption. Qed.
(** Of course, no (human) programmer would write a conditional whose
condition is literally [true]. But they might write one whose
condition is _equivalent_ to true:
_Theorem_: If [b] is equivalent to [true], then [if b then c1
else c2 end] is equivalent to [c1].
_Proof_:
- ([->]) We must show, for all [st] and [st'], that if [st =[
if b then c1 else c2 end ]=> st'] then [st =[ c1 ]=> st'].
Proceed by cases on the rules that could possibly have been
used to show [st =[ if b then c1 else c2 end ]=> st'], namely
[E_IfTrue] and [E_IfFalse].
- Suppose the final rule in the derivation of [st =[ if b
then c1 else c2 end ]=> st'] was [E_IfTrue]. We then have,
by the premises of [E_IfTrue], that [st =[ c1 ]=> st'].
This is exactly what we set out to prove.
- On the other hand, suppose the final rule in the derivation
of [st =[ if b then c1 else c2 end ]=> st'] was [E_IfFalse].
We then know that [beval st b = false] and [st =[ c2 ]=> st'].
Recall that [b] is equivalent to [true], i.e., forall [st],
[beval st b = beval st <{true}> ]. In particular, this means
that [beval st b = true], since [beval st <{true}> = true]. But
this is a contradiction, since [E_IfFalse] requires that
[beval st b = false]. Thus, the final rule could not have
been [E_IfFalse].
- ([<-]) We must show, for all [st] and [st'], that if
[st =[ c1 ]=> st'] then
[st =[ if b then c1 else c2 end ]=> st'].
Since [b] is equivalent to [true], we know that [beval st b] =
[beval st <{true}> ] = [true]. Together with the assumption that
[st =[ c1 ]=> st'], we can apply [E_IfTrue] to derive
[st =[ if b then c1 else c2 end ]=> st']. []
Here is the formal version of this proof: *)
Theorem if_true: forall b c1 c2,
bequiv b <{true}> ->
cequiv
<{ if b then c1 else c2 end }>
c1.
Proof.
intros b c1 c2 Hb.
split; intros H.
- (* -> *)
inversion H. subst.
+ (* b evaluates to true *)
assumption.
+ (* b evaluates to false (contradiction) *)
unfold bequiv in Hb. simpl in Hb.
rewrite Hb in H5.
discriminate.
- (* <- *)
apply E_IfTrue; try assumption.
unfold bequiv in Hb. simpl in Hb.
apply Hb. Qed.
(** **** Exercise: 2 stars, standard, especially useful (if_false) *)
Theorem if_false : forall b c1 c2,
bequiv b <{false}> ->
cequiv
<{ if b then c1 else c2 end }>
c2.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (swap_if_branches)
Show that we can swap the branches of an [if] if we also negate its
condition. *)
Theorem swap_if_branches : forall b c1 c2,
cequiv
<{ if b then c1 else c2 end }>
<{ if ~ b then c2 else c1 end }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** For [while] loops, we can give a similar pair of theorems. A loop
whose guard is equivalent to [false] is equivalent to [skip],
while a loop whose guard is equivalent to [true] is equivalent to
[while true do skip end] (or any other non-terminating program). *)
(** The first of these facts is easy. *)
Theorem while_false : forall b c,
bequiv b <{false}> ->
cequiv
<{ while b do c end }>
<{ skip }>.
Proof.
intros b c Hb. split; intros H.
- (* -> *)
inversion H. subst.
+ (* E_WhileFalse *)
apply E_Skip.
+ (* E_WhileTrue *)
rewrite Hb in H2. discriminate.
- (* <- *)
inversion H. subst.
apply E_WhileFalse.
apply Hb. Qed.
(** **** Exercise: 2 stars, advanced, optional (while_false_informal)
Write an informal proof of [while_false].
(* FILL IN HERE *)
*)
(** [] *)
(** To prove the second fact, we need an auxiliary lemma stating that
[while] loops whose guards are equivalent to [true] never
terminate. *)
(** _Lemma_: If [b] is equivalent to [true], then it cannot be
the case that [st =[ while b do c end ]=> st'].
_Proof_: Suppose that [st =[ while b do c end ]=> st']. We show,
by induction on a derivation of [st =[ while b do c end ]=> st'],
that this assumption leads to a contradiction. The only two cases
to consider are [E_WhileFalse] and [E_WhileTrue]; the others
are contradictory.
- Suppose [st =[ while b do c end ]=> st'] is proved using rule
[E_WhileFalse]. Then by assumption [beval st b = false]. But
this contradicts the assumption that [b] is equivalent to
[true].
- Suppose [st =[ while b do c end ]=> st'] is proved using rule
[E_WhileTrue]. We must have that:
1. [beval st b = true],
2. there is some [st0] such that [st =[ c ]=> st0] and
[st0 =[ while b do c end ]=> st'],
3. and we are given the induction hypothesis that
[st0 =[ while b do c end ]=> st'] leads to a contradiction,
We obtain a contradiction by 2 and 3. [] *)
Lemma while_true_nonterm : forall b c st st',
bequiv b <{true}> ->
~( st =[ while b do c end ]=> st' ).
Proof.
(* WORKED IN CLASS *)
intros b c st st' Hb.
intros H.
remember <{ while b do c end }> as cw eqn:Heqcw.
induction H;
(* Most rules don't apply; we rule them out by inversion: *)
inversion Heqcw; subst; clear Heqcw.
(* The two interesting cases are the ones for while loops: *)
- (* E_WhileFalse *) (* contradictory -- b is always true! *)
unfold bequiv in Hb.
(* [rewrite] is able to instantiate the quantifier in [st] *)
rewrite Hb in H. discriminate.
- (* E_WhileTrue *) (* immediate from the IH *)
apply IHceval2. reflexivity. Qed.
(** **** Exercise: 2 stars, standard, optional (while_true_nonterm_informal)
Explain what the lemma [while_true_nonterm] means in English.
(* FILL IN HERE *)
*)
(** [] *)
(** **** Exercise: 2 stars, standard, especially useful (while_true)
Prove the following theorem. _Hint_: You'll want to use
[while_true_nonterm] here. *)
Theorem while_true : forall b c,
bequiv b <{true}> ->
cequiv
<{ while b do c end }>
<{ while true do skip end }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** A more interesting fact about [while] commands is that any number
of copies of the body can be "unrolled" without changing meaning.
Loop unrolling is an important transformation in any real
compiler, so its correctness is of more than academic interest! *)
Theorem loop_unrolling : forall b c,
cequiv
<{ while b do c end }>
<{ if b then c ; while b do c end else skip end }>.
Proof.
(* WORKED IN CLASS *)
intros b c st st'.
split; intros Hce.
- (* -> *)
inversion Hce; subst.
+ (* loop doesn't run *)
apply E_IfFalse. assumption. apply E_Skip.
+ (* loop runs *)
apply E_IfTrue. assumption.
apply E_Seq with (st' := st'0). assumption. assumption.
- (* <- *)
inversion Hce; subst.
+ (* loop runs *)
inversion H5; subst.
apply E_WhileTrue with (st' := st'0).
assumption. assumption. assumption.
+ (* loop doesn't run *)
inversion H5; subst. apply E_WhileFalse. assumption. Qed.
(** **** Exercise: 2 stars, standard, optional (seq_assoc) *)
Theorem seq_assoc : forall c1 c2 c3,
cequiv <{(c1;c2);c3}> <{c1;(c2;c3)}>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** Proving program properties involving assignments is one place
where the fact that program states are treated extensionally
(e.g., [x !-> m x ; m] and [m] are equal maps) comes in handy. *)
Theorem identity_assignment : forall x,
cequiv
<{ x := x }>
<{ skip }>.
Proof.
intros.
split; intro H; inversion H; subst; clear H.
- (* -> *)
rewrite t_update_same.
apply E_Skip.
- (* <- *)
assert (Hx : st' =[ x := x ]=> (x !-> st' x ; st')).
{ apply E_Asgn. reflexivity. }
rewrite t_update_same in Hx.
apply Hx.
Qed.
(** **** Exercise: 2 stars, standard, especially useful (assign_aequiv) *)
Theorem assign_aequiv : forall (X : string) (a : aexp),
aequiv <{ X }> a ->
cequiv <{ skip }> <{ X := a }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard (equiv_classes) *)
(** Given the following programs, group together those that are
equivalent in Imp. Your answer should be given as a list of lists,
where each sub-list represents a group of equivalent programs. For
example, if you think programs (a) through (h) are all equivalent
to each other, but not to (i), your answer should look like this:
[ [prog_a;prog_b;prog_c;prog_d;prog_e;prog_f;prog_g;prog_h] ;
[prog_i] ]
Write down your answer below in the definition of
[equiv_classes]. *)
Definition prog_a : com :=
<{ while ~(X <= 0) do
X := X + 1
end }>.
Definition prog_b : com :=
<{ if (X = 0) then
X := X + 1;
Y := 1
else
Y := 0
end;
X := X - Y;
Y := 0 }>.
Definition prog_c : com :=
<{ skip }> .
Definition prog_d : com :=
<{ while X <> 0 do
X := (X * Y) + 1
end }>.
Definition prog_e : com :=
<{ Y := 0 }>.
Definition prog_f : com :=
<{ Y := X + 1;
while X <> Y do
Y := X + 1
end }>.
Definition prog_g : com :=
<{ while true do
skip
end }>.
Definition prog_h : com :=
<{ while X <> X do
X := X + 1
end }>.
Definition prog_i : com :=
<{ while X <> Y do
X := Y + 1
end }>.
Definition equiv_classes : list (list com)
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
(* Do not modify the following line: *)
Definition manual_grade_for_equiv_classes : option (nat*string) := None.
(** [] *)
(* ################################################################# *)
(** * Properties of Behavioral Equivalence *)
(** We next consider some fundamental properties of program
equivalence. *)
(* ================================================================= *)
(** ** Behavioral Equivalence Is an Equivalence *)
(** First, let's verify that the equivalences on [aexps], [bexps], and
[com]s really are _equivalences_ -- i.e., that they are reflexive,
symmetric, and transitive. The proofs are all easy. *)
Lemma refl_aequiv : forall (a : aexp),
aequiv a a.
Proof.
intros a st. reflexivity. Qed.
Lemma sym_aequiv : forall (a1 a2 : aexp),
aequiv a1 a2 -> aequiv a2 a1.
Proof.
intros a1 a2 H. intros st. symmetry. apply H. Qed.
Lemma trans_aequiv : forall (a1 a2 a3 : aexp),
aequiv a1 a2 -> aequiv a2 a3 -> aequiv a1 a3.
Proof.
unfold aequiv. intros a1 a2 a3 H12 H23 st.
rewrite (H12 st). rewrite (H23 st). reflexivity. Qed.
Lemma refl_bequiv : forall (b : bexp),
bequiv b b.
Proof.
unfold bequiv. intros b st. reflexivity. Qed.
Lemma sym_bequiv : forall (b1 b2 : bexp),
bequiv b1 b2 -> bequiv b2 b1.
Proof.
unfold bequiv. intros b1 b2 H. intros st. symmetry. apply H. Qed.
Lemma trans_bequiv : forall (b1 b2 b3 : bexp),
bequiv b1 b2 -> bequiv b2 b3 -> bequiv b1 b3.
Proof.
unfold bequiv. intros b1 b2 b3 H12 H23 st.
rewrite (H12 st). rewrite (H23 st). reflexivity. Qed.
Lemma refl_cequiv : forall (c : com),
cequiv c c.
Proof.
unfold cequiv. intros c st st'. reflexivity. Qed.
Lemma sym_cequiv : forall (c1 c2 : com),
cequiv c1 c2 -> cequiv c2 c1.
Proof.
unfold cequiv. intros c1 c2 H st st'.
rewrite H. reflexivity.
Qed.
Lemma trans_cequiv : forall (c1 c2 c3 : com),
cequiv c1 c2 -> cequiv c2 c3 -> cequiv c1 c3.
Proof.
unfold cequiv. intros c1 c2 c3 H12 H23 st st'.
rewrite H12. apply H23.
Qed.
(* ================================================================= *)
(** ** Behavioral Equivalence Is a Congruence *)
(** Less obviously, behavioral equivalence is also a _congruence_.
That is, the equivalence of two subprograms implies the
equivalence of the larger programs in which they are embedded:
aequiv a a'
-------------------------
cequiv (x := a) (x := a')
cequiv c1 c1'
cequiv c2 c2'
--------------------------
cequiv (c1;c2) (c1';c2')
... and so on for the other forms of commands. *)
(** (Note that we are using the inference rule notation here not
as part of an inductive definition, but simply to write down some
valid implications in a readable format. We prove these
implications below.) *)
(** We will see a concrete example of why these congruence
properties are important in the following section (in the proof of
[fold_constants_com_sound]), but the main idea is that they allow
us to replace a small part of a large program with an equivalent
small part and know that the whole large programs are equivalent
_without_ doing an explicit proof about the parts that didn't
change -- i.e., the "proof burden" of a small change to a large
program is proportional to the size of the change, not the
program! *)
Theorem CAsgn_congruence : forall x a a',
aequiv a a' ->
cequiv <{x := a}> <{x := a'}>.
Proof.
intros x a a' Heqv st st'.
split; intros Hceval.
- (* -> *)
inversion Hceval. subst. apply E_Asgn.
rewrite Heqv. reflexivity.
- (* <- *)
inversion Hceval. subst. apply E_Asgn.
rewrite Heqv. reflexivity. Qed.
(** The congruence property for loops is a little more interesting,
since it requires induction.
_Theorem_: Equivalence is a congruence for [while] -- that is, if
[b] is equivalent to [b'] and [c] is equivalent to [c'], then
[while b do c end] is equivalent to [while b' do c' end].
_Proof_: Suppose [b] is equivalent to [b'] and [c] is
equivalent to [c']. We must show, for every [st] and [st'], that
[st =[ while b do c end ]=> st'] iff [st =[ while b' do c'
end ]=> st']. We consider the two directions separately.
- ([->]) We show that [st =[ while b do c end ]=> st'] implies
[st =[ while b' do c' end ]=> st'], by induction on a
derivation of [st =[ while b do c end ]=> st']. The only
nontrivial cases are when the final rule in the derivation is
[E_WhileFalse] or [E_WhileTrue].
- [E_WhileFalse]: In this case, the form of the rule gives us
[beval st b = false] and [st = st']. But then, since
[b] and [b'] are equivalent, we have [beval st b' =
false], and [E_WhileFalse] applies, giving us
[st =[ while b' do c' end ]=> st'], as required.
- [E_WhileTrue]: The form of the rule now gives us [beval st
b = true], with [st =[ c ]=> st'0] and [st'0 =[ while
b do c end ]=> st'] for some state [st'0], with the
induction hypothesis [st'0 =[ while b' do c' end ]=>
st'].
Since [c] and [c'] are equivalent, we know that [st =[
c' ]=> st'0]. And since [b] and [b'] are equivalent,
we have [beval st b' = true]. Now [E_WhileTrue] applies,
giving us [st =[ while b' do c' end ]=> st'], as
required.
- ([<-]) Similar. [] *)
Theorem CWhile_congruence : forall b b' c c',
bequiv b b' -> cequiv c c' ->
cequiv <{ while b do c end }> <{ while b' do c' end }>.
Proof.
(* WORKED IN CLASS *)
(* We will prove one direction in an "assert"
in order to reuse it for the converse. *)
assert (A: forall (b b' : bexp) (c c' : com) (st st' : state),
bequiv b b' -> cequiv c c' ->
st =[ while b do c end ]=> st' ->
st =[ while b' do c' end ]=> st').
{ unfold bequiv,cequiv.
intros b b' c c' st st' Hbe Hc1e Hce.
remember <{ while b do c end }> as cwhile
eqn:Heqcwhile.
induction Hce; inversion Heqcwhile; subst.
+ (* E_WhileFalse *)
apply E_WhileFalse. rewrite <- Hbe. apply H.
+ (* E_WhileTrue *)
apply E_WhileTrue with (st' := st').
* (* show loop runs *) rewrite <- Hbe. apply H.
* (* body execution *)
apply (Hc1e st st'). apply Hce1.
* (* subsequent loop execution *)
apply IHHce2. reflexivity. }
intros. split.
- apply A; assumption.
- apply A.
+ apply sym_bequiv. assumption.
+ apply sym_cequiv. assumption.
Qed.
(** **** Exercise: 3 stars, standard, optional (CSeq_congruence) *)
Theorem CSeq_congruence : forall c1 c1' c2 c2',
cequiv c1 c1' -> cequiv c2 c2' ->
cequiv <{ c1;c2 }> <{ c1';c2' }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (CIf_congruence) *)
Theorem CIf_congruence : forall b b' c1 c1' c2 c2',
bequiv b b' -> cequiv c1 c1' -> cequiv c2 c2' ->
cequiv <{ if b then c1 else c2 end }>
<{ if b' then c1' else c2' end }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** For example, here are two equivalent programs and a proof of their
equivalence... *)
Example congruence_example:
cequiv
(* Program 1: *)
<{ X := 0;
if (X = 0) then Y := 0
else Y := 42 end }>
(* Program 2: *)
<{ X := 0;
if (X = 0) then Y := X - X (* <--- Changed here *)
else Y := 42 end }>.
Proof.
apply CSeq_congruence.
- apply refl_cequiv.
- apply CIf_congruence.
+ apply refl_bequiv.
+ apply CAsgn_congruence. unfold aequiv. simpl.
symmetry. apply minus_diag.
+ apply refl_cequiv.
Qed.
(** **** Exercise: 3 stars, advanced (not_congr)
We've shown that the [cequiv] relation is both an equivalence and
a congruence on commands. Can you think of a relation on commands
that is an equivalence but _not_ a congruence? Write down the
relation (formally), together with an informal sketch of a proof
that it is an equivalence but not a congruence. *)
(* FILL IN HERE *)
(* Do not modify the following line: *)
Definition manual_grade_for_not_congr : option (nat*string) := None.
(** [] *)
(* ################################################################# *)
(** * Program Transformations *)
(** A _program transformation_ is a function that takes a program as
input and produces a modified program as output. Compiler
optimizations such as constant folding are a canonical example,
but there are many others. *)
(** A program transformation is said to be _sound_ if it preserves the
behavior of the original program. *)
Definition atrans_sound (atrans : aexp -> aexp) : Prop :=
forall (a : aexp),
aequiv a (atrans a).
Definition btrans_sound (btrans : bexp -> bexp) : Prop :=
forall (b : bexp),
bequiv b (btrans b).
Definition ctrans_sound (ctrans : com -> com) : Prop :=
forall (c : com),
cequiv c (ctrans c).
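(** As an added sanity check (not in the original text), note that the
    identity transformation is trivially sound: *)
Example identity_ctrans_sound :
  ctrans_sound (fun c => c).
Proof. unfold ctrans_sound. intros c. apply refl_cequiv. Qed.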
(* ================================================================= *)
(** ** The Constant-Folding Transformation *)
(** An expression is _constant_ if it contains no variable references.
Constant folding is an optimization that finds constant
expressions and replaces them by their values. *)
Fixpoint fold_constants_aexp (a : aexp) : aexp :=
match a with
| ANum n => ANum n
| AId x => AId x
| <{ a1 + a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2)
with
| (ANum n1, ANum n2) => ANum (n1 + n2)
| (a1', a2') => <{ a1' + a2' }>
end
| <{ a1 - a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2)
with
| (ANum n1, ANum n2) => ANum (n1 - n2)
| (a1', a2') => <{ a1' - a2' }>
end
| <{ a1 * a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2)
with
| (ANum n1, ANum n2) => ANum (n1 * n2)
| (a1', a2') => <{ a1' * a2' }>
end
end.
Example fold_aexp_ex1 :
fold_constants_aexp <{ (1 + 2) * X }>
= <{ 3 * X }>.
Proof. reflexivity. Qed.
(** Note that this version of constant folding doesn't do other
"obvious" things like eliminating trivial additions (e.g.,
rewriting [0 + X] to just [X]): we are focusing attention on a
single optimization for the sake of simplicity.
It is not hard to incorporate other ways of simplifying
expressions -- the definitions and proofs just get longer. We'll
consider optimizations in the exercises. *)
Example fold_aexp_ex2 :
fold_constants_aexp <{ X - ((0 * 6) + Y) }> = <{ X - (0 + Y) }>.
Proof. reflexivity. Qed.
(** Not only can we lift [fold_constants_aexp] to [bexp]s (in the
[BEq], [BNeq], and [BLe] cases); we can also look for constant
_boolean_ expressions and evaluate them in-place as well. *)
Fixpoint fold_constants_bexp (b : bexp) : bexp :=
match b with
| <{true}> => <{true}>
| <{false}> => <{false}>
| <{ a1 = a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2) with
| (ANum n1, ANum n2) =>
if n1 =? n2 then <{true}> else <{false}>
| (a1', a2') =>
<{ a1' = a2' }>
end
| <{ a1 <> a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2) with
| (ANum n1, ANum n2) =>
if negb (n1 =? n2) then <{true}> else <{false}>
| (a1', a2') =>
<{ a1' <> a2' }>
end
| <{ a1 <= a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2) with
| (ANum n1, ANum n2) =>
if n1 <=? n2 then <{true}> else <{false}>
| (a1', a2') =>
<{ a1' <= a2' }>
end
| <{ a1 > a2 }> =>
match (fold_constants_aexp a1,
fold_constants_aexp a2) with
| (ANum n1, ANum n2) =>
if n1 <=? n2 then <{false}> else <{true}>
| (a1', a2') =>
<{ a1' > a2' }>
end
| <{ ~ b1 }> =>
match (fold_constants_bexp b1) with
| <{true}> => <{false}>
| <{false}> => <{true}>
| b1' => <{ ~ b1' }>
end
| <{ b1 && b2 }> =>
match (fold_constants_bexp b1,
fold_constants_bexp b2) with
| (<{true}>, <{true}>) => <{true}>
| (<{true}>, <{false}>) => <{false}>
| (<{false}>, <{true}>) => <{false}>
| (<{false}>, <{false}>) => <{false}>
| (b1', b2') => <{ b1' && b2' }>
end
end.
Example fold_bexp_ex1 :
fold_constants_bexp <{ true && ~(false && true) }>
= <{ true }>.
Proof. reflexivity. Qed.
Example fold_bexp_ex2 :
fold_constants_bexp <{ (X = Y) && (0 = (2 - (1 + 1))) }>
= <{ (X = Y) && true }>.
Proof. reflexivity. Qed.
(** To fold constants in a command, we apply the appropriate folding
functions on all embedded expressions. *)
Fixpoint fold_constants_com (c : com) : com :=
match c with
| <{ skip }> =>
<{ skip }>
| <{ x := a }> =>
<{ x := (fold_constants_aexp a) }>
| <{ c1 ; c2 }> =>
<{ fold_constants_com c1 ; fold_constants_com c2 }>
| <{ if b then c1 else c2 end }> =>
match fold_constants_bexp b with
| <{true}> => fold_constants_com c1
| <{false}> => fold_constants_com c2
| b' => <{ if b' then fold_constants_com c1
else fold_constants_com c2 end}>
end
| <{ while b do c1 end }> =>
match fold_constants_bexp b with
| <{true}> => <{ while true do skip end }>
| <{false}> => <{ skip }>
| b' => <{ while b' do (fold_constants_com c1) end }>
end
end.
Example fold_com_ex1 :
fold_constants_com
(* Original program: *)
<{ X := 4 + 5;
Y := X - 3;
if ((X - Y) = (2 + 4)) then skip
else Y := 0 end;
if (0 <= (4 - (2 + 1))) then Y := 0
else skip end;
while (Y = 0) do
X := X + 1
end }>
= (* After constant folding: *)
<{ X := 9;
Y := X - 3;
if ((X - Y) = 6) then skip
else Y := 0 end;
Y := 0;
while (Y = 0) do
X := X + 1
end }>.
Proof. reflexivity. Qed.
(* ================================================================= *)
(** ** Soundness of Constant Folding *)
(** Now we need to show that what we've done is correct. *)
(** Here's the proof for arithmetic expressions: *)
Theorem fold_constants_aexp_sound :
atrans_sound fold_constants_aexp.
Proof.
unfold atrans_sound. intros a. unfold aequiv. intros st.
induction a; simpl;
(* ANum and AId follow immediately *)
try reflexivity;
(* APlus, AMinus, and AMult follow from the IH
and the observation that
aeval st (<{ a1 + a2 }>)
= (aeval st a1) + (aeval st a2)
= aeval st (ANum ((aeval st a1) + (aeval st a2)))
(and similarly for AMinus/minus and AMult/mult) *)
try (destruct (fold_constants_aexp a1);
destruct (fold_constants_aexp a2);
rewrite IHa1; rewrite IHa2; reflexivity). Qed.
(** **** Exercise: 3 stars, standard, optional (fold_bexp_Eq_informal)
Here is an informal proof of the [BEq] case of the soundness
argument for boolean expression constant folding. Read it
carefully and compare it to the formal proof that follows. Then
fill in the [BLe] case of the formal proof (without looking at the
[BEq] case, if possible).
_Theorem_: The constant folding function for booleans,
[fold_constants_bexp], is sound.
_Proof_: We must show that [b] is equivalent to [fold_constants_bexp b],
for all boolean expressions [b]. Proceed by induction on [b]. We
show just the case where [b] has the form [a1 = a2].
In this case, we must show
beval st <{ a1 = a2 }>
= beval st (fold_constants_bexp <{ a1 = a2 }>).
There are two cases to consider:
- First, suppose [fold_constants_aexp a1 = ANum n1] and
[fold_constants_aexp a2 = ANum n2] for some [n1] and [n2].
In this case, we have
fold_constants_bexp <{ a1 = a2 }>
= if n1 =? n2 then <{true}> else <{false}>
and
beval st <{a1 = a2}>
= (aeval st a1) =? (aeval st a2).
By the soundness of constant folding for arithmetic
expressions (Lemma [fold_constants_aexp_sound]), we know
aeval st a1
= aeval st (fold_constants_aexp a1)
= aeval st (ANum n1)
= n1
and
aeval st a2
= aeval st (fold_constants_aexp a2)
= aeval st (ANum n2)
= n2,
so
beval st <{a1 = a2}>
= (aeval st a1) =? (aeval st a2)
= n1 =? n2.
Also, it is easy to see (by considering the cases [n1 = n2] and
[n1 <> n2] separately) that
beval st (if n1 =? n2 then <{true}> else <{false}>)
= if n1 =? n2 then beval st <{true}> else beval st <{false}>
= if n1 =? n2 then true else false
= n1 =? n2.
So
beval st (<{ a1 = a2 }>)
= n1 =? n2
= beval st (if n1 =? n2 then <{true}> else <{false}>),
as required.
- Otherwise, one of [fold_constants_aexp a1] and
[fold_constants_aexp a2] is not a constant. In this case, we
must show
beval st <{a1 = a2}>
= beval st (<{ (fold_constants_aexp a1) =
(fold_constants_aexp a2) }>),
which, by the definition of [beval], is the same as showing
(aeval st a1) =? (aeval st a2)
= (aeval st (fold_constants_aexp a1)) =?
(aeval st (fold_constants_aexp a2)).
But the soundness of constant folding for arithmetic
expressions ([fold_constants_aexp_sound]) gives us
aeval st a1 = aeval st (fold_constants_aexp a1)
aeval st a2 = aeval st (fold_constants_aexp a2),
completing the case. [] *)
Theorem fold_constants_bexp_sound:
btrans_sound fold_constants_bexp.
Proof.
unfold btrans_sound. intros b. unfold bequiv. intros st.
induction b;
(* true and false are immediate *)
try reflexivity.
- (* BEq *)
simpl.
remember (fold_constants_aexp a1) as a1' eqn:Heqa1'.
remember (fold_constants_aexp a2) as a2' eqn:Heqa2'.
replace (aeval st a1) with (aeval st a1') by
(subst a1'; rewrite <- fold_constants_aexp_sound; reflexivity).
replace (aeval st a2) with (aeval st a2') by
(subst a2'; rewrite <- fold_constants_aexp_sound; reflexivity).
destruct a1'; destruct a2'; try reflexivity.
(* The only interesting case is when both a1 and a2
become constants after folding *)
simpl. destruct (n =? n0); reflexivity.
- (* BNeq *)
simpl.
remember (fold_constants_aexp a1) as a1' eqn:Heqa1'.
remember (fold_constants_aexp a2) as a2' eqn:Heqa2'.
replace (aeval st a1) with (aeval st a1') by
(subst a1'; rewrite <- fold_constants_aexp_sound; reflexivity).
replace (aeval st a2) with (aeval st a2') by
(subst a2'; rewrite <- fold_constants_aexp_sound; reflexivity).
destruct a1'; destruct a2'; try reflexivity.
(* The only interesting case is when both a1 and a2
become constants after folding *)
simpl. destruct (n =? n0); reflexivity.
- (* BLe *)
(* FILL IN HERE *) admit.
- (* BGt *)
(* FILL IN HERE *) admit.
- (* BNot *)
simpl. remember (fold_constants_bexp b) as b' eqn:Heqb'.
rewrite IHb.
destruct b'; reflexivity.
- (* BAnd *)
simpl.
remember (fold_constants_bexp b1) as b1' eqn:Heqb1'.
remember (fold_constants_bexp b2) as b2' eqn:Heqb2'.
rewrite IHb1. rewrite IHb2.
destruct b1'; destruct b2'; reflexivity.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (fold_constants_com_sound)
Complete the [while] case of the following proof. *)
Theorem fold_constants_com_sound :
ctrans_sound fold_constants_com.
Proof.
unfold ctrans_sound. intros c.
induction c; simpl.
- (* skip *) apply refl_cequiv.
- (* := *) apply CAsgn_congruence.
apply fold_constants_aexp_sound.
- (* ; *) apply CSeq_congruence; assumption.
- (* if *)
assert (bequiv b (fold_constants_bexp b)). {
apply fold_constants_bexp_sound. }
destruct (fold_constants_bexp b) eqn:Heqb;
try (apply CIf_congruence; assumption).
(* (If the optimization doesn't eliminate the if, then the
result is easy to prove from the IH and
[fold_constants_bexp_sound].) *)
+ (* b always true *)
apply trans_cequiv with c1; try assumption.
apply if_true; assumption.
+ (* b always false *)
apply trans_cequiv with c2; try assumption.
apply if_false; assumption.
- (* while *)
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Soundness of (0 + n) Elimination, Redux *)
(** **** Exercise: 4 stars, standard, optional (optimize_0plus_var)
Recall the definition [optimize_0plus] from the [Imp] chapter
of _Logical Foundations_:
Fixpoint optimize_0plus (a:aexp) : aexp :=
match a with
| ANum n =>
ANum n
| <{ 0 + a2 }> =>
optimize_0plus a2
| <{ a1 + a2 }> =>
<{ (optimize_0plus a1) + (optimize_0plus a2) }>
| <{ a1 - a2 }> =>
<{ (optimize_0plus a1) - (optimize_0plus a2) }>
| <{ a1 * a2 }> =>
<{ (optimize_0plus a1) * (optimize_0plus a2) }>
end.
Note that this function is defined over the old version of [aexp]s,
without states.
Write a new version of this function that deals with variables (by
leaving them alone), plus analogous ones for [bexp]s and commands:
optimize_0plus_aexp
optimize_0plus_bexp
optimize_0plus_com
*)
Fixpoint optimize_0plus_aexp (a : aexp) : aexp
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Fixpoint optimize_0plus_bexp (b : bexp) : bexp
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Fixpoint optimize_0plus_com (c : com) : com
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_optimize_0plus:
optimize_0plus_com
<{ while X <> 0 do X := 0 + X - 1 end }>
= <{ while X <> 0 do X := X - 1 end }>.
Proof.
(* FILL IN HERE *) Admitted.
(** Prove that these three functions are sound, as we did for
[fold_constants_*]. Make sure you use the congruence lemmas in the
proof of [optimize_0plus_com] -- otherwise it will be _long_! *)
Theorem optimize_0plus_aexp_sound:
atrans_sound optimize_0plus_aexp.
Proof.
(* FILL IN HERE *) Admitted.
Theorem optimize_0plus_bexp_sound :
btrans_sound optimize_0plus_bexp.
Proof.
(* FILL IN HERE *) Admitted.
Theorem optimize_0plus_com_sound :
ctrans_sound optimize_0plus_com.
Proof.
(* FILL IN HERE *) Admitted.
(** Finally, let's define a compound optimizer on commands that first
folds constants (using [fold_constants_com]) and then eliminates
[0 + n] terms (using [optimize_0plus_com]). *)
Definition optimizer (c : com) := optimize_0plus_com (fold_constants_com c).
(** Prove that this optimizer is sound. *)
Theorem optimizer_sound :
ctrans_sound optimizer.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Proving Inequivalence *)
(** Next, let's look at some programs that are _not_ equivalent. *)
(** Suppose that [c1] is a command of the form
X := a1; Y := a2
and [c2] is the command
X := a1; Y := a2'
where [a2'] is formed by substituting [a1] for all occurrences
of [X] in [a2].
For example, [c1] and [c2] might be:
c1 = (X := 42 + 53;
Y := Y + X)
c2 = (X := 42 + 53;
Y := Y + (42 + 53))
Clearly, this _particular_ [c1] and [c2] are equivalent. Is this
true in general? *)
(** We will see in a moment that it is not, but it is worthwhile
to pause, now, and see if you can find a counter-example on your
own. *)
(** More formally, here is the function that substitutes an arithmetic
expression [u] for each occurrence of a given variable [x] in
another expression [a]: *)
Fixpoint subst_aexp (x : string) (u : aexp) (a : aexp) : aexp :=
match a with
| ANum n =>
ANum n
| AId x' =>
if String.eqb x x' then u else AId x'
| <{ a1 + a2 }> =>
<{ (subst_aexp x u a1) + (subst_aexp x u a2) }>
| <{ a1 - a2 }> =>
<{ (subst_aexp x u a1) - (subst_aexp x u a2) }>
| <{ a1 * a2 }> =>
<{ (subst_aexp x u a1) * (subst_aexp x u a2) }>
end.
Example subst_aexp_ex :
subst_aexp X <{42 + 53}> <{Y + X}>
= <{ Y + (42 + 53)}>.
Proof. simpl. reflexivity. Qed.
(** And here is the property we are interested in, expressing the
claim that commands [c1] and [c2] as described above are
always equivalent. *)
Definition subst_equiv_property : Prop := forall x1 x2 a1 a2,
cequiv <{ x1 := a1; x2 := a2 }>
<{ x1 := a1; x2 := subst_aexp x1 a1 a2 }>.
(** Sadly, the property does _not_ always hold.
Here is a counterexample:
X := X + 1; Y := X
If we perform the substitution, we get
X := X + 1; Y := X + 1
which clearly isn't equivalent. *)
Theorem subst_inequiv :
~ subst_equiv_property.
Proof.
unfold subst_equiv_property.
intros Contra.
(* Here is the counterexample: assuming that [subst_equiv_property]
holds allows us to prove that these two programs are
equivalent... *)
remember <{ X := X + 1;
Y := X }>
as c1.
remember <{ X := X + 1;
Y := X + 1 }>
as c2.
assert (cequiv c1 c2) by (subst; apply Contra).
clear Contra.
(* ... allows us to show that the command [c2] can terminate
in two different final states:
st1 = (Y !-> 1 ; X !-> 1)
st2 = (Y !-> 2 ; X !-> 1). *)
remember (Y !-> 1 ; X !-> 1) as st1.
remember (Y !-> 2 ; X !-> 1) as st2.
assert (H1 : empty_st =[ c1 ]=> st1);
assert (H2 : empty_st =[ c2 ]=> st2);
try (subst;
apply E_Seq with (st' := (X !-> 1));
apply E_Asgn; reflexivity).
clear Heqc1 Heqc2.
apply H in H1.
clear H.
(* Finally, we use the fact that evaluation is deterministic
to obtain a contradiction. *)
assert (Hcontra : st1 = st2)
by (apply (ceval_deterministic c2 empty_st); assumption).
clear H1 H2.
assert (Hcontra' : st1 Y = st2 Y)
by (rewrite Hcontra; reflexivity).
subst. discriminate. Qed.
(** **** Exercise: 4 stars, standard, optional (better_subst_equiv)
The equivalence we had in mind above was not complete nonsense --
in fact, it was actually almost right. To make it correct, we
just need to exclude the case where the variable [X] occurs in the
right-hand side of the first assignment statement. *)
Inductive var_not_used_in_aexp (x : string) : aexp -> Prop :=
| VNUNum : forall n, var_not_used_in_aexp x (ANum n)
| VNUId : forall y, x <> y -> var_not_used_in_aexp x (AId y)
| VNUPlus : forall a1 a2,
var_not_used_in_aexp x a1 ->
var_not_used_in_aexp x a2 ->
var_not_used_in_aexp x (<{ a1 + a2 }>)
| VNUMinus : forall a1 a2,
var_not_used_in_aexp x a1 ->
var_not_used_in_aexp x a2 ->
var_not_used_in_aexp x (<{ a1 - a2 }>)
| VNUMult : forall a1 a2,
var_not_used_in_aexp x a1 ->
var_not_used_in_aexp x a2 ->
var_not_used_in_aexp x (<{ a1 * a2 }>).
Lemma aeval_weakening : forall x st a ni,
var_not_used_in_aexp x a ->
aeval (x !-> ni ; st) a = aeval st a.
Proof.
(* FILL IN HERE *) Admitted.
(** Using [var_not_used_in_aexp], formalize and prove a correct version
of [subst_equiv_property]. *)
(* FILL IN HERE
[] *)
(** **** Exercise: 3 stars, standard (inequiv_exercise)
Prove that an infinite loop is not equivalent to [skip] *)
Theorem inequiv_exercise:
~ cequiv <{ while true do skip end }> <{ skip }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Extended Exercise: Nondeterministic Imp *)
(** As we have seen (in theorem [ceval_deterministic] in the [Imp]
chapter), Imp's evaluation relation is deterministic. However,
_non_-determinism is an important part of the definition of many
real programming languages. For example, in many imperative
languages (such as C and its relatives), the order in which
function arguments are evaluated is unspecified. The program
fragment
x = 0;
f(++x, x)
might call [f] with arguments [(1, 0)] or [(1, 1)], depending how
the compiler chooses to order things. This can be a little
confusing for programmers, but it gives the compiler writer useful
freedom.
In this exercise, we will extend Imp with a simple
nondeterministic command and study how this change affects
program equivalence. The new command has the syntax [HAVOC X],
where [X] is an identifier. The effect of executing [HAVOC X] is
to assign an _arbitrary_ number to the variable [X],
nondeterministically. For example, after executing the program:
HAVOC Y;
Z := Y * 2
the value of [Y] can be any number, while the value of [Z] is
twice that of [Y] (so [Z] is always even). Note that we are not
saying anything about the _probabilities_ of the outcomes -- just
that there are (infinitely) many different outcomes that can
possibly happen after executing this nondeterministic code.
In a sense, a variable on which we do [HAVOC] roughly corresponds
to an uninitialized variable in a low-level language like C. After
the [HAVOC], the variable holds a fixed but arbitrary number. Most
sources of nondeterminism in language definitions are there
precisely because programmers don't care which choice is made (and
so it is good to leave it open to the compiler to choose whichever
will run faster).
We call this new language _Himp_ (``Imp extended with [HAVOC]''). *)
Module Himp.
(** To formalize Himp, we first add a clause to the definition of
commands. *)
Inductive com : Type :=
| CSkip : com
| CAsgn : string -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com
| CHavoc : string -> com. (* <--- NEW *)
Notation "'havoc' l" := (CHavoc l)
(in custom com at level 60, l constr at level 0).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
(** **** Exercise: 2 stars, standard (himp_ceval)
Now, we must extend the operational semantics. We have provided
a template for the [ceval] relation below, specifying the big-step
semantics. What rule(s) must be added to the definition of [ceval]
to formalize the behavior of the [HAVOC] command? *)
Reserved Notation "st '=[' c ']=>' st'"
(at level 40, c custom com at level 99, st constr,
st' constr at next level).
Inductive ceval : com -> state -> state -> Prop :=
| E_Skip : forall st,
st =[ skip ]=> st
| E_Asgn : forall st a n x,
aeval st a = n ->
st =[ x := a ]=> (x !-> n ; st)
| E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
st =[ c1 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
st =[ c2 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> st
| E_WhileTrue : forall st st' st'' b c,
beval st b = true ->
st =[ c ]=> st' ->
st' =[ while b do c end ]=> st'' ->
st =[ while b do c end ]=> st''
(* FILL IN HERE *)
where "st =[ c ]=> st'" := (ceval c st st').
(** As a sanity check, the following claims should be provable for
your definition: *)
Example havoc_example1 : empty_st =[ havoc X ]=> (X !-> 0).
Proof.
(* FILL IN HERE *) Admitted.
Example havoc_example2 :
empty_st =[ skip; havoc Z ]=> (Z !-> 42).
Proof.
(* FILL IN HERE *) Admitted.
(* Do not modify the following line: *)
Definition manual_grade_for_Check_rule_for_HAVOC : option (nat*string) := None.
(** [] *)
(** Finally, we repeat the definition of command equivalence from above: *)
Definition cequiv (c1 c2 : com) : Prop := forall st st' : state,
st =[ c1 ]=> st' <-> st =[ c2 ]=> st'.
(** Let's apply this definition to prove some nondeterministic
programs equivalent / inequivalent. *)
(** **** Exercise: 3 stars, standard (havoc_swap)
Are the following two programs equivalent? *)
Definition pXY :=
<{ havoc X ; havoc Y }>.
Definition pYX :=
<{ havoc Y; havoc X }>.
(** If you think they are equivalent, prove it. If you think they are
not, prove that. *)
Theorem pXY_cequiv_pYX :
cequiv pXY pYX \/ ~cequiv pXY pYX.
Proof.
(* Hint: You may want to use [t_update_permute] at some point,
in which case you'll probably be left with [X <> Y] as a
hypothesis. You can use [discriminate] to discharge this. *)
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, standard, optional (havoc_copy)
Are the following two programs equivalent? *)
Definition ptwice :=
<{ havoc X; havoc Y }>.
Definition pcopy :=
<{ havoc X; Y := X }>.
(** If you think they are equivalent, then prove it. If you think they
are not, then prove that. (Hint: You may find the [assert] tactic
useful.) *)
Theorem ptwice_cequiv_pcopy :
cequiv ptwice pcopy \/ ~cequiv ptwice pcopy.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(** The definition of program equivalence we are using here has some
subtle consequences on programs that may loop forever. What
[cequiv] says is that the set of possible _terminating_ outcomes
of two equivalent programs is the same. However, in a language
with nondeterminism, like Himp, some programs always terminate,
some programs always diverge, and some programs can
nondeterministically terminate in some runs and diverge in
others. The final part of the following exercise illustrates this
phenomenon.
*)
(** **** Exercise: 4 stars, advanced (p1_p2_term)
Consider the following commands: *)
Definition p1 : com :=
<{ while ~ (X = 0) do
havoc Y;
X := X + 1
end }>.
Definition p2 : com :=
<{ while ~ (X = 0) do
skip
end }>.
(** Intuitively, [p1] and [p2] have the same termination behavior:
either they loop forever, or they terminate in the same state they
started in. We can capture the termination behavior of [p1] and
[p2] individually with these lemmas: *)
Lemma p1_may_diverge : forall st st', st X <> 0 ->
~ st =[ p1 ]=> st'.
Proof. (* FILL IN HERE *) Admitted.
Lemma p2_may_diverge : forall st st', st X <> 0 ->
~ st =[ p2 ]=> st'.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, advanced (p1_p2_equiv)
Use these two lemmas to prove that [p1] and [p2] are actually
equivalent. *)
Theorem p1_p2_equiv : cequiv p1 p2.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, advanced (p3_p4_inequiv)
Prove that the following programs are _not_ equivalent. (Hint:
What should the value of [Z] be when [p3] terminates? What about
[p4]?) *)
Definition p3 : com :=
<{ Z := 1;
while X <> 0 do
havoc X;
havoc Z
end }>.
Definition p4 : com :=
<{ X := 0;
Z := 1 }>.
Theorem p3_p4_inequiv : ~ cequiv p3 p4.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 5 stars, advanced, optional (p5_p6_equiv)
Prove that the following commands are equivalent. (Hint: As
mentioned above, our definition of [cequiv] for Himp only takes
into account the sets of possible terminating configurations: two
programs are equivalent if and only if the set of possible terminating
states is the same for both programs when given a same starting state
[st]. If [p5] terminates, what should the final state be? Conversely,
is it always possible to make [p5] terminate?) *)
Definition p5 : com :=
<{ while X <> 1 do
havoc X
end }>.
Definition p6 : com :=
<{ X := 1 }>.
Theorem p5_p6_equiv : cequiv p5 p6.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
End Himp.
(* ################################################################# *)
(** * Additional Exercises *)
(** **** Exercise: 3 stars, standard, optional (swap_noninterfering_assignments)
(Hint: You'll need [functional_extensionality] for this one.) *)
Theorem swap_noninterfering_assignments: forall l1 l2 a1 a2,
l1 <> l2 ->
var_not_used_in_aexp l1 a2 ->
var_not_used_in_aexp l2 a1 ->
cequiv
<{ l1 := a1; l2 := a2 }>
<{ l2 := a2; l1 := a1 }>.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, standard, optional (for_while_equiv)
This exercise extends the optional [add_for_loop] exercise from
the [Imp] chapter, where you were asked to extend the language
of commands with C-style [for] loops. Prove that the command:
for (c1; b; c2) {
c3
}
is equivalent to:
c1;
while b do
c3;
c2
end
*)
(* FILL IN HERE
[] *)
(** **** Exercise: 4 stars, advanced, optional (capprox)
In this exercise we define an asymmetric variant of program
equivalence we call _program approximation_. We say that a
program [c1] _approximates_ a program [c2] when, for each of
the initial states for which [c1] terminates, [c2] also terminates
and produces the same final state. Formally, program approximation
is defined as follows: *)
Definition capprox (c1 c2 : com) : Prop := forall (st st' : state),
st =[ c1 ]=> st' -> st =[ c2 ]=> st'.
(** For example, the program
c1 = while X <> 1 do
X := X - 1
end
approximates [c2 = X := 1], but [c2] does not approximate [c1]
since [c1] does not terminate when [X = 0] but [c2] does. If two
programs approximate each other in both directions, then they are
equivalent. *)
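(** The following small lemma is an editorial addition (it is not one of
    the exercises below): [capprox] is reflexive, since every command
    approximates itself. *)
Lemma capprox_refl : forall c, capprox c c.
Proof. unfold capprox. intros. assumption. Qed.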
(** Find two programs [c3] and [c4] such that neither approximates
the other. *)
Definition c3 : com
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Definition c4 : com
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem c3_c4_different : ~ capprox c3 c4 /\ ~ capprox c4 c3.
Proof. (* FILL IN HERE *) Admitted.
(** Find a program [cmin] that approximates every other program. *)
Definition cmin : com
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem cmin_minimal : forall c, capprox cmin c.
Proof. (* FILL IN HERE *) Admitted.
(** Finally, find a non-trivial property which is preserved by
program approximation (when going from left to right). *)
Definition zprop (c : com) : Prop
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem zprop_preserving : forall c c',
zprop c -> capprox c c' -> zprop c'.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(* 2022-08-08 17:31 *)
|
lemma pos_convex_function: fixes f :: "real \<Rightarrow> real" assumes "convex C" and leq: "\<And>x y. x \<in> C \<Longrightarrow> y \<in> C \<Longrightarrow> f' x * (y - x) \<le> f y - f x" shows "convex_on C f" |
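An added informal gloss (not part of the lemma statement; it reads $f'$ as an arbitrary function supplying subgradient slopes): the assumption is the subgradient inequality $f'(x)\,(y - x) \le f(y) - f(x)$. For $x, y \in C$ and $t \in [0,1]$, the point $z = t x + (1-t) y$ lies in $C$ by convexity; applying the assumption at $z$ against $x$ and against $y$ and combining with weights $t$ and $1-t$ gives $t f(x) + (1-t) f(y) \ge f(z) + f'(z)\,\bigl(t(x-z) + (1-t)(y-z)\bigr) = f(z)$, which is exactly convexity of $f$ on $C$.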
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Callum Sutton, Yury Kudryashov
-/
import algebra.hom.equiv.basic
import algebra.hom.units
/-!
# Multiplicative and additive equivalence acting on units.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
variables {F α β A B M N P Q G H : Type*}
/-- A group is isomorphic to its group of units. -/
@[to_additive "An additive group is isomorphic to its group of additive units"]
def to_units [group G] : G ≃* Gˣ :=
{ to_fun := λ x, ⟨x, x⁻¹, mul_inv_self _, inv_mul_self _⟩,
inv_fun := coe,
left_inv := λ x, rfl,
right_inv := λ u, units.ext rfl,
map_mul' := λ x y, units.ext rfl }
@[simp, to_additive] lemma coe_to_units [group G] (g : G) :
(to_units g : G) = g := rfl
namespace units
variables [monoid M] [monoid N] [monoid P]
/-- A multiplicative equivalence of monoids defines a multiplicative equivalence
of their groups of units. -/
def map_equiv (h : M ≃* N) : Mˣ ≃* Nˣ :=
{ inv_fun := map h.symm.to_monoid_hom,
left_inv := λ u, ext $ h.left_inv u,
right_inv := λ u, ext $ h.right_inv u,
.. map h.to_monoid_hom }
@[simp]
lemma map_equiv_symm (h : M ≃* N) : (map_equiv h).symm = map_equiv h.symm :=
rfl
@[simp]
lemma coe_map_equiv (h : M ≃* N) (x : Mˣ) : (map_equiv h x : N) = h x :=
rfl
/-- Left multiplication by a unit of a monoid is a permutation of the underlying type. -/
@[to_additive "Left addition of an additive unit is a permutation of the underlying type.",
simps apply {fully_applied := ff}]
def mul_left (u : Mˣ) : equiv.perm M :=
{ to_fun := λx, u * x,
inv_fun := λx, ↑u⁻¹ * x,
left_inv := u.inv_mul_cancel_left,
right_inv := u.mul_inv_cancel_left }
@[simp, to_additive]
lemma mul_left_symm (u : Mˣ) : u.mul_left.symm = u⁻¹.mul_left :=
equiv.ext $ λ x, rfl
@[to_additive]
lemma mul_left_bijective (a : Mˣ) : function.bijective ((*) a : M → M) :=
(mul_left a).bijective
/-- Right multiplication by a unit of a monoid is a permutation of the underlying type. -/
@[to_additive "Right addition of an additive unit is a permutation of the underlying type.",
simps apply {fully_applied := ff}]
def mul_right (u : Mˣ) : equiv.perm M :=
{ to_fun := λx, x * u,
inv_fun := λx, x * ↑u⁻¹,
left_inv := λ x, mul_inv_cancel_right x u,
right_inv := λ x, inv_mul_cancel_right x u }
@[simp, to_additive]
lemma mul_right_symm (u : Mˣ) : u.mul_right.symm = u⁻¹.mul_right :=
equiv.ext $ λ x, rfl
@[to_additive]
lemma mul_right_bijective (a : Mˣ) : function.bijective ((* a) : M → M) :=
(mul_right a).bijective
end units
namespace equiv
section group
variables [group G]
/-- Left multiplication in a `group` is a permutation of the underlying type. -/
@[to_additive "Left addition in an `add_group` is a permutation of the underlying type."]
protected def mul_left (a : G) : perm G := (to_units a).mul_left
@[simp, to_additive]
lemma coe_mul_left (a : G) : ⇑(equiv.mul_left a) = (*) a := rfl
/-- Extra simp lemma that `dsimp` can use. `simp` will never use this. -/
@[simp, nolint simp_nf,
to_additive "Extra simp lemma that `dsimp` can use. `simp` will never use this."]
lemma mul_left_symm_apply (a : G) : ((equiv.mul_left a).symm : G → G) = (*) a⁻¹ := rfl
@[simp, to_additive]
lemma mul_left_symm (a : G) : (equiv.mul_left a).symm = equiv.mul_left a⁻¹ :=
ext $ λ x, rfl
@[to_additive]
lemma _root_.group.mul_left_bijective (a : G) : function.bijective ((*) a) :=
(equiv.mul_left a).bijective
/-- Right multiplication in a `group` is a permutation of the underlying type. -/
@[to_additive "Right addition in an `add_group` is a permutation of the underlying type."]
protected def mul_right (a : G) : perm G := (to_units a).mul_right
@[simp, to_additive]
lemma coe_mul_right (a : G) : ⇑(equiv.mul_right a) = λ x, x * a := rfl
@[simp, to_additive]
lemma mul_right_symm (a : G) : (equiv.mul_right a).symm = equiv.mul_right a⁻¹ :=
ext $ λ x, rfl
/-- Extra simp lemma that `dsimp` can use. `simp` will never use this. -/
@[simp, nolint simp_nf,
to_additive "Extra simp lemma that `dsimp` can use. `simp` will never use this."]
lemma mul_right_symm_apply (a : G) : ((equiv.mul_right a).symm : G → G) = λ x, x * a⁻¹ := rfl
@[to_additive]
lemma _root_.group.mul_right_bijective (a : G) : function.bijective (* a) :=
(equiv.mul_right a).bijective
/-- A version of `equiv.mul_left a b⁻¹` that is defeq to `a / b`. -/
@[to_additive /-" A version of `equiv.add_left a (-b)` that is defeq to `a - b`. "-/, simps]
protected def div_left (a : G) : G ≃ G :=
{ to_fun := λ b, a / b,
inv_fun := λ b, b⁻¹ * a,
left_inv := λ b, by simp [div_eq_mul_inv],
right_inv := λ b, by simp [div_eq_mul_inv] }
@[to_additive]
lemma div_left_eq_inv_trans_mul_left (a : G) :
equiv.div_left a = (equiv.inv G).trans (equiv.mul_left a) :=
ext $ λ _, div_eq_mul_inv _ _
/-- A version of `equiv.mul_right a⁻¹ b` that is defeq to `b / a`. -/
@[to_additive /-" A version of `equiv.add_right (-a) b` that is defeq to `b - a`. "-/, simps]
protected def div_right (a : G) : G ≃ G :=
{ to_fun := λ b, b / a,
inv_fun := λ b, b * a,
left_inv := λ b, by simp [div_eq_mul_inv],
right_inv := λ b, by simp [div_eq_mul_inv] }
@[to_additive]
lemma div_right_eq_mul_right_inv (a : G) : equiv.div_right a = equiv.mul_right a⁻¹ :=
ext $ λ _, div_eq_mul_inv _ _
end group
end equiv
/-- In a `division_comm_monoid`, `equiv.inv` is a `mul_equiv`. There is a variant of this
`mul_equiv.inv' G : G ≃* Gᵐᵒᵖ` for the non-commutative case. -/
@[to_additive "When the `add_group` is commutative, `equiv.neg` is an `add_equiv`.", simps apply]
def mul_equiv.inv (G : Type*) [division_comm_monoid G] : G ≃* G :=
{ to_fun := has_inv.inv,
inv_fun := has_inv.inv,
map_mul' := mul_inv,
..equiv.inv G }
@[simp] lemma mul_equiv.inv_symm (G : Type*) [division_comm_monoid G] :
(mul_equiv.inv G).symm = mul_equiv.inv G := rfl
|
/-
Copyright (c) 2022 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import category_theory.idempotents.karoubi
import category_theory.additive.basic
/-!
# Biproducts in the idempotent completion of a preadditive category
In this file, we define an instance expressing that if `C` is an additive category,
then `karoubi C` is also an additive category.
We also obtain that for all `P : karoubi C` where `C` is a preadditive category, there
is a canonical isomorphism `P ⊞ P.complement ≅ (to_karoubi C).obj P.X` in the category
`karoubi C` where `P.complement` is the formal direct factor of `P.X` corresponding to
the idempotent endomorphism `𝟙 P.X - P.p`.
-/
noncomputable theory
open category_theory.category
open category_theory.limits
open category_theory.preadditive
universes v
namespace category_theory
namespace idempotents
namespace karoubi
variables {C : Type*} [category.{v} C] [preadditive C]
namespace biproducts
/-- The `bicone` used in order to obtain the existence of
the biproduct of a functor `J ⥤ karoubi C` when the category `C` is additive. -/
@[simps]
def bicone [has_finite_biproducts C] {J : Type v} [decidable_eq J] [fintype J]
(F : J → karoubi C) : bicone F :=
{ X :=
{ X := biproduct (λ j, (F j).X),
p := biproduct.map (λ j, (F j).p),
idem := begin
ext j,
simp only [biproduct.ι_map_assoc, biproduct.ι_map],
slice_lhs 1 2 { rw (F j).idem, },
end, },
π := λ j,
{ f := biproduct.map (λ j, (F j).p) ≫ bicone.π _ j,
comm := by simp only [assoc, biproduct.bicone_π, biproduct.map_π,
biproduct.map_π_assoc, (F j).idem], },
ι := λ j,
{ f := (by exact bicone.ι _ j) ≫ biproduct.map (λ j, (F j).p),
comm := by rw [biproduct.ι_map, ← assoc, ← assoc, (F j).idem,
assoc, biproduct.ι_map, ← assoc, (F j).idem], },
ι_π := λ j j', begin
split_ifs,
{ subst h,
simp only [biproduct.bicone_ι, biproduct.ι_map, biproduct.bicone_π,
biproduct.ι_π_self_assoc, comp, category.assoc, eq_to_hom_refl, id_eq,
biproduct.map_π, (F j).idem], },
{ simpa only [hom_ext, biproduct.ι_π_ne_assoc _ h, assoc,
biproduct.map_π, biproduct.map_π_assoc, zero_comp, comp], },
end, }
end biproducts
lemma karoubi_has_finite_biproducts [has_finite_biproducts C] :
has_finite_biproducts (karoubi C) :=
{ has_biproducts_of_shape := λ J hJ₁ hJ₂,
{ has_biproduct := λ F, begin
letI := hJ₂,
apply has_biproduct_of_total (biproducts.bicone F),
ext1, ext1,
simp only [id_eq, comp_id, biproducts.bicone_X_p, biproduct.ι_map],
rw [sum_hom, comp_sum, finset.sum_eq_single j], rotate,
{ intros j' h1 h2,
simp only [biproduct.ι_map, biproducts.bicone_ι_f, biproducts.bicone_π_f,
assoc, comp, biproduct.map_π],
slice_lhs 1 2 { rw biproduct.ι_π, },
split_ifs,
{ exfalso, exact h2 h.symm, },
{ simp only [zero_comp], } },
{ intro h,
exfalso,
simpa only [finset.mem_univ, not_true] using h, },
{ simp only [biproducts.bicone_π_f, comp,
biproduct.ι_map, assoc, biproducts.bicone_ι_f, biproduct.map_π],
slice_lhs 1 2 { rw biproduct.ι_π, },
split_ifs, swap, { exfalso, exact h rfl, },
simp only [eq_to_hom_refl, id_comp, (F j).idem], },
end, } }
instance {D : Type*} [category D] [additive_category D] : additive_category (karoubi D) :=
{ to_preadditive := infer_instance,
to_has_finite_biproducts := karoubi_has_finite_biproducts }
/-- `P.complement` is the formal direct factor of `P.X` given by the idempotent
endomorphism `𝟙 P.X - P.p` -/
@[simps]
def complement (P : karoubi C) : karoubi C :=
{ X := P.X,
p := 𝟙 _ - P.p,
idem := idem_of_id_sub_idem P.p P.idem, }
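-- Expository note (added here, not from the original source): thinking of `P` as the formal
-- image of the idempotent `P.p` inside `P.X`, the object `P.complement` is the formal image of
-- `𝟙 P.X - P.p`, and the binary biproduct constructed below reassembles the two into `P.X`;
-- this is made precise by `decomposition` further down.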
instance (P : karoubi C) : has_binary_biproduct P P.complement :=
has_binary_biproduct_of_total
{ X := P.X,
fst := P.decomp_id_p,
snd := P.complement.decomp_id_p,
inl := P.decomp_id_i,
inr := P.complement.decomp_id_i,
inl_fst' := P.decomp_id.symm,
inl_snd' := begin
simp only [decomp_id_i_f, decomp_id_p_f, complement_p, comp_sub, comp,
hom_ext, quiver.hom.add_comm_group_zero_f, P.idem],
erw [comp_id, sub_self],
end,
inr_fst' := begin
simp only [decomp_id_i_f, complement_p, decomp_id_p_f, sub_comp, comp,
hom_ext, quiver.hom.add_comm_group_zero_f, P.idem],
erw [id_comp, sub_self],
end,
inr_snd' := P.complement.decomp_id.symm, }
(by simp only [hom_ext, ← decomp_p, quiver.hom.add_comm_group_add_f,
to_karoubi_map_f, id_eq, coe_p, complement_p, add_sub_cancel'_right])
/-- A formal direct factor `P : karoubi C` of an object `P.X : C` in a
preadditive category is actually a direct factor of the image `(to_karoubi C).obj P.X`
of `P.X` in the category `karoubi C` -/
def decomposition (P : karoubi C) : P ⊞ P.complement ≅ (to_karoubi _).obj P.X :=
{ hom := biprod.desc P.decomp_id_i P.complement.decomp_id_i,
inv := biprod.lift P.decomp_id_p P.complement.decomp_id_p,
hom_inv_id' := begin
ext1,
{ simp only [← assoc, biprod.inl_desc, comp_id, biprod.lift_eq, comp_add,
← decomp_id, id_comp, add_right_eq_self],
convert zero_comp,
ext,
simp only [decomp_id_i_f, decomp_id_p_f, complement_p, comp_sub, comp,
quiver.hom.add_comm_group_zero_f, P.idem],
erw [comp_id, sub_self], },
{ simp only [← assoc, biprod.inr_desc, biprod.lift_eq, comp_add,
← decomp_id, comp_id, id_comp, add_left_eq_self],
convert zero_comp,
ext,
simp only [decomp_id_i_f, decomp_id_p_f, complement_p, sub_comp, comp,
quiver.hom.add_comm_group_zero_f, P.idem],
erw [id_comp, sub_self], }
end,
inv_hom_id' := begin
rw biprod.lift_desc,
simp only [← decomp_p],
ext,
dsimp only [complement, to_karoubi],
simp only [quiver.hom.add_comm_group_add_f, add_sub_cancel'_right, id_eq],
end, }
end karoubi
end idempotents
end category_theory
|
// Copyright (c) 2014, German Neuroinformatics Node (G-Node)
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted under the terms of the BSD License. See
// LICENSE file in the root of the Project.
#ifndef CLI_DUMP_H
#define CLI_DUMP_H
#include <nix/hydra/multiArray.hpp>
#include <nix.hpp>
#include <nix/base/types.hpp>
#include <nix/base/Entity.hpp>
#include <nix/base/NamedEntity.hpp>
#include <nix/base/EntityWithMetadata.hpp>
#include <nix/base/EntityWithSources.hpp>
#include <Cli.hpp>
#include <modules/IModule.hpp>
#include <string>
#include <fstream>
#include <iostream>
#include <cstdlib>
#include <cstring> // for std::strcpy used below
#include <cmath>
#include <ctime>
#include <boost/program_options.hpp>
namespace po = boost::program_options;
namespace cli {
namespace module {
const char *const DATA_OPTION = "data";
const char *const PLOT_OPTION = "plot";
class plot_script {
private:
char* script;
static const char* plot_file;
public:
template<typename T1, typename T2>
plot_script(T1 minval, T2 maxval, size_t nx, size_t ny, std::string file_path) {
std::string str_script = (std::string() +
"#!/usr/bin/gnuplot" + "\n\n" +
"set terminal x11" + "\n" +
"set title \"2D data plot\"" + "\n" +
"unset key" + "\n" +
"set tic scale 0" + "\n\n" +
"# Color runs from white to green" + "\n" +
"set palette rgbformula -7,-7,2" + "\n" +
"set cbrange [MINVAL:MAXVAL]" + "\n" +
"set cblabel \"Score\"" + "\n" +
"unset cbtics" + "\n\n" +
"set xrange [-0.5:NX.5]" + "\n"
"set yrange [-0.5:NY.5]" + "\n\n"
"set view map" + "\n"
"splot 'FILE' matrix with image" + "\n"
"pause -1" + "\n");
std::ifstream f(plot_file);
if (f.good()) {
str_script = std::string((std::istreambuf_iterator<char>(f)),
std::istreambuf_iterator<char>());
}
// round min & max to precision of two digits beyond magnitude of their difference,
// if their difference is < 2. Round them to ints otherwise.
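// Worked example with hypothetical values: minval = 0.237, maxval = 0.981 gives a
// difference of ~0.744, so multiplier = abs(round(1 / 0.744)) * 100 = 100 and the
// values become 0.24 and 0.98; for a large difference (say 10), round(1 / 10) = 0,
// so the multiplier is 0 and both values are simply rounded to integers.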
double multiplier = abs( round(1 / (maxval - minval)) ) * 100;
minval = (multiplier != 0) ? round(minval * multiplier) / multiplier : round(minval);
maxval = (multiplier != 0) ? round(maxval * multiplier) / multiplier : round(maxval);
str_script.replace(str_script.find("MINVAL"), std::string("MINVAL").length(), nix::util::numToStr(minval));
str_script.replace(str_script.find("MAXVAL"), std::string("MAXVAL").length(), nix::util::numToStr(maxval));
str_script.replace(str_script.find("NX"), std::string("NX").length(), nix::util::numToStr(nx));
str_script.replace(str_script.find("NY"), std::string("NY").length(), nix::util::numToStr(ny));
str_script.replace(str_script.find("FILE"), std::string("FILE").length(), file_path);
script = (char*) malloc (str_script.size() + 1); // +1 for the null terminator copied by strcpy
std::strcpy(script, str_script.c_str());
}
std::string str() {
return std::string(script);
}
~plot_script() {
free(script);
}
};
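// Hypothetical usage sketch (illustration only; the real invocation lives in the
// corresponding .cpp module):
//   plot_script script(0.0, 1.0, 64, 64, "/tmp/data.dat");
//   std::ofstream("/tmp/plot.gnu") << script.str();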
class yamlstream {
static const char* indent_str;
static const char* scalar_start;
static const char* scalar_end;
static const char* sequ_start;
static const char* sequ_end;
static const char* item_str;
size_t level;
std::stringstream &sstream;
/**
* @brief apply indentation on sstream if last char is "\n"
*
* Apply indentation on sstream if last char is "\n"
*
* @return void
*/
void indent_if();
/**
* @brief append newline to sstream if it does not already end with one
*
* Append a newline ("\n") to sstream if its last char is not "\n"
*
* @return void
*/
void endl_if();
/**
* @brief return item_str if and only if level is not zero
*
* Return the item string if and only if we are not outputting to
* the base level, but to some sub level
*
* @return string item_str
*/
std::string item();
/**
* @brief start yaml sequence & increase indent level
*
* Put the defined sequ_start into the stream and increase
* indentation level.
*
* @return self
*/
yamlstream& operator++();
/**
* @brief start yaml sequence & increase indent level
*
* Put the defined sequ_start into the stream and increase
* indentation level.
*
* @return self
*/
yamlstream operator++(int);
/**
* @brief end yaml sequence & decrease indent level
*
* Put the defined sequ_end into the stream and decrease
* indentation level.
*
* @return self
*/
yamlstream operator--();
/**
* @brief end yaml sequence & decrease indent level
*
* Put the defined sequ_end into the stream and decrease
* indentation level.
*
* @return self
*/
yamlstream operator--(int);
/**
* @brief put yaml indentation into stream
*
* Put n_indent times the defined indent into the stream
*
* @return self
*/
yamlstream& operator[](const size_t n_indent);
/**
* @brief convert unix epoch time to local time string
*
* Convert unix epoch time to local time string
*
* @return string with the given time as local time
*/
std::string t(const time_t &tm);
public:
/**
* @brief default ctor
*
* The default constructor.
*/
yamlstream(std::stringstream &sstream) : level(0), sstream(sstream) {};
/**
* @brief return stringstream as string
*
* Return string from stringstream
*
* @return string string content of the stream
*/
std::string str();
/**
* @brief default output into stringstream
*
* Use the default stringstream output.
*
* @param t parameter of any given type T
* @return self
*/
template<typename T>
yamlstream& operator<<(const T &t) {
indent_if();
sstream << t;
return *this;
}
/**
* @brief vector output into stringstream
*
* Output vector elements in inline yaml sequence style.
*
* @param t vector of any given type T
* @return self
*/
template<typename T>
yamlstream& operator<<(const std::vector<T> &t) {
indent_if();
if (t.size()) {
sstream << "[";
size_t n_out = 0;
for (auto &el : t) {
// count elements so that duplicate values do not break the ", " separators
sstream << el << (++n_out < t.size() ? ", " : "");
}
sstream << "]";
}
return *this;
}
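// For illustration: streaming a std::vector<int>{1, 2, 3} appends "[1, 2, 3]" to the
// stream, while an empty vector appends nothing at all.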
/**
* @brief NDSize output into stringstream
*
* Build vector of sizes and output them as vector.
*
* @param t NDSize class
* @return self
*/
yamlstream& operator<<(const nix::NDSize &t);
/**
* @brief boost::optional output into stringstream
*
* De-reference boost::optional if and only if it is set and output
* content (or empty string if not set) to stream.
*
* @param t boost::optional
* @return self
*/
template<typename T>
yamlstream& operator<<(const boost::optional<T> &t) {
indent_if();
auto opt = nix::util::deRef(t);
sstream << opt;
return *this;
}
/**
* @brief pointer to stringstream output into stringstream
*
* Output via pointer to stringstream.
*
* @param ps pointer to stringstream
* @return self
*/
yamlstream& operator<<(std::stringstream& (*ps)(std::stringstream&))
{
indent_if();
sstream << ps;
return *this;
}
/**
* @brief Entity output into stringstream
*
* Output base Entity to stringstream.
*
* @param entity nix base Entity
* @return self
*/
template<typename T>
yamlstream& operator<<(const nix::base::Entity<T> &entity) {
(*this)
<< "id" << scalar_start << "" << entity.id() << scalar_end
<< "createdAt" << scalar_start << t(entity.createdAt()) << scalar_end
<< "updatedAt" << scalar_start << t(entity.updatedAt()) << scalar_end;
return *this;
}
/**
* @brief NamedEntity output into stringstream
*
* Output base NamedEntity to stringstream.
*
* @param entity nix base NamedEntity
* @return self
*/
template<typename T>
yamlstream& operator<<(const nix::base::NamedEntity<T> &namedEntity) {
(*this)
<< static_cast<nix::base::Entity<T>>(namedEntity)
<< "name" << scalar_start << namedEntity.name() << scalar_end
<< "type" << scalar_start << namedEntity.type() << scalar_end
<< "definition" << scalar_start << namedEntity.definition() << scalar_end;
return *this;
}
/**
* @brief EntityWithMetadata output into stringstream
*
* Output base EntityWithMetadata to stringstream.
*
* @param entity nix base EntityWithMetadata
* @return self
*/
template<typename T>
yamlstream& operator<<(const nix::base::EntityWithMetadata<T> &entityWithMetadata) {
(*this)
<< static_cast<nix::base::NamedEntity<T>>(entityWithMetadata)
<< "metadata"; ++(*this) << entityWithMetadata.metadata(); --(*this);
return *this;
}
/**
* @brief EntityWithSources output into stringstream
*
* Output base EntityWithSources to stringstream.
*
* @param entity nix base EntityWithSources
* @return self
*/
template<typename T>
yamlstream& operator<<(const nix::base::EntityWithSources<T> &entityWithSources) {
(*this)
<< static_cast<nix::base::EntityWithMetadata<T>>(entityWithSources)
<< "sourceCount" << scalar_start << entityWithSources.sourceCount() << scalar_end;
// NOTE: don't output sources as those are handled by derived frontend entity
return *this;
}
/**
* @brief Value output into stringstream
*
* Output Value to stringstream.
*
* @param entity nix value
* @return self
*/
yamlstream& operator<<(const nix::Value &value);
/**
* @brief Property output into stringstream
*
* Output Property to stringstream.
*
* @param entity nix Property
* @return self
*/
yamlstream& operator<<(const nix::Property &property);
/**
* @brief Source output into stringstream
*
* Output Source to stringstream.
*
* @param entity nix Source
* @return self
*/
yamlstream& operator<<(const nix::Source &source);
/**
* @brief Section output into stringstream
*
* Output Section to stringstream.
*
* @param entity nix Section
* @return self
*/
yamlstream& operator<<(const nix::Section &section);
/**
* @brief SetDimension output into stringstream
*
* Output SetDimension to stringstream.
*
* @param entity nix SetDimension
* @return self
*/
yamlstream& operator<<(const nix::SetDimension &dim);
/**
* @brief SampledDimension output into stringstream
*
* Output SampledDimension to stringstream.
*
* @param entity nix SampledDimension
* @return self
*/
yamlstream& operator<<(const nix::SampledDimension &dim);
/**
* @brief RangeDimension output into stringstream
*
* Output RangeDimension to stringstream.
*
* @param entity nix RangeDimension
* @return self
*/
yamlstream& operator<<(const nix::RangeDimension &dim);
/**
* @brief Dimension output into stringstream
*
* Output Dimension to stringstream.
*
* @param entity nix Dimension
* @return self
*/
yamlstream& operator<<(const nix::Dimension &dim);
/**
* @brief DataArray output into stringstream
*
* Output DataArray to stringstream.
*
* @param entity nix DataArray
* @return self
*/
yamlstream& operator<<(const nix::DataArray &data_array);
/**
* @brief Feature output into stringstream
*
* Output Feature to stringstream.
*
* @param entity nix Feature
* @return self
*/
yamlstream& operator<<(const nix::Feature &feature);
/**
* @brief Tag output into stringstream
*
* Output Tag to stringstream.
*
* @param entity nix Tag
* @return self
*/
yamlstream& operator<<(const nix::Tag &tag);
/**
* @brief MultiTag output into stringstream
*
* Output MultiTag to stringstream.
*
* @param entity nix MultiTag
* @return self
*/
yamlstream& operator<<(const nix::MultiTag &multi_tag);
/**
* @brief Block output into stringstream
*
* Output Block to stringstream.
*
* @param entity nix Block
* @return self
*/
yamlstream& operator<<(const nix::Block &block);
/**
* @brief File output into stringstream
*
* Output File to stringstream.
*
* @param entity nix File
* @return self
*/
yamlstream& operator<<(const nix::File &file);
};
class Dump : virtual public IModule {
yamlstream yaml;
std::stringstream sstream;
public:
Dump() : yaml(yamlstream(sstream)) {}
static const char* module_name;
std::string name() const {
return std::string(module_name);
}
void load(po::options_description &desc) const;
std::string call(const po::variables_map &vm, const po::options_description &desc);
};
} // namespace module
} // namespace cli
#endif
|
[STATEMENT]
lemma in_degree_eq_card_arc_blocks: "in_degree G v = card (arc_to_block ` (in_arcs G v))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. in_degree G v = card (arc_to_block ` in_arcs G v)
[PROOF STEP]
apply (simp add: in_degree_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card (in_arcs G v) = card (arc_to_block ` in_arcs G v)
[PROOF STEP]
using no_multi_arcs arc_to_block_is_inj_in_arcs
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?e1.0 \<in> arcs G; ?e2.0 \<in> arcs G; arc_to_ends G ?e1.0 = arc_to_ends G ?e2.0\<rbrakk> \<Longrightarrow> ?e1.0 = ?e2.0
inj_on arc_to_block (in_arcs G ?v)
goal (1 subgoal):
1. card (in_arcs G v) = card (arc_to_block ` in_arcs G v)
[PROOF STEP]
by (simp add: card_image) |
Formal statement is: lemma continuous_on_closure_norm_le: fixes f :: "'a::metric_space \<Rightarrow> 'b::real_normed_vector" assumes "continuous_on (closure s) f" and "\<forall>y \<in> s. norm(f y) \<le> b" and "x \<in> (closure s)" shows "norm (f x) \<le> b" Informal statement is: If $f$ is a continuous function on the closure of a set $S$ and $f$ is bounded on $S$, then $f$ is bounded on the closure of $S$. |
FUNCTION:NAME
:BEGIN hello, world
-- @@stderr --
dtrace: script 'test/demo/intro/hello.d' matched 1 probe
|
(*
Author: Wenda Li <[email protected] / [email protected]>
*)
section \<open>Extra lemmas related to polynomials\<close>
theory CC_Polynomials_Extra imports
Winding_Number_Eval.Missing_Algebraic
Winding_Number_Eval.Missing_Transcendental
Sturm_Tarski.PolyMisc
Budan_Fourier.BF_Misc
"Polynomial_Interpolation.Ring_Hom_Poly" (*Move to the standard distribution?*)
begin
subsection \<open>Misc\<close>
lemma poly_linepath_comp':
fixes a::"'a::{real_normed_vector,comm_semiring_0,real_algebra_1}"
shows "poly p (linepath a b t) = poly (p \<circ>\<^sub>p [:a, b-a:]) (of_real t)"
by (auto simp add:poly_pcompose linepath_def scaleR_conv_of_real algebra_simps)
lemma path_poly_comp[intro]:
fixes p::"'a::real_normed_field poly"
shows "path g \<Longrightarrow> path (poly p o g)"
apply (elim path_continuous_image)
by (auto intro:continuous_intros)
lemma cindex_poly_noroot:
assumes "a<b" "\<forall>x. a<x \<and> x<b \<longrightarrow> poly p x\<noteq>0"
shows "cindex_poly a b q p = 0"
unfolding cindex_poly_def
apply (rule sum.neutral)
using assms by (auto intro:jump_poly_not_root)
subsection \<open>More polynomial homomorphism interpretations\<close>
interpretation of_real_poly_hom:map_poly_inj_idom_hom of_real ..
interpretation Re_poly_hom:map_poly_comm_monoid_add_hom Re
by unfold_locales simp_all
interpretation Im_poly_hom:map_poly_comm_monoid_add_hom Im
by unfold_locales simp_all
subsection \<open>More about @{term order}\<close>
lemma order_normalize[simp]:"order x (normalize p) = order x p"
by (metis dvd_normalize_iff normalize_eq_0_iff order_1 order_2 order_unique_lemma)
lemma order_gcd:
assumes "p\<noteq>0" "q\<noteq>0"
shows "order x (gcd p q) = min (order x p) (order x q)"
proof -
define xx op oq where "xx=[:- x, 1:]" and "op = order x p" and "oq = order x q"
obtain pp where pp:"p = xx ^ op * pp" "\<not> xx dvd pp"
using order_decomp[OF \<open>p\<noteq>0\<close>,of x,folded xx_def op_def] by auto
obtain qq where qq:"q = xx ^ oq * qq" "\<not> xx dvd qq"
using order_decomp[OF \<open>q\<noteq>0\<close>,of x,folded xx_def oq_def] by auto
define pq where "pq = gcd pp qq"
have p_unfold:"p = (pq * xx ^ (min op oq)) * ((pp div pq) * xx ^ (op - min op oq))"
and [simp]:"coprime xx (pp div pq)" and "pp\<noteq>0"
proof -
have "xx ^ op = xx ^ (min op oq) * xx ^ (op - min op oq)"
by (simp flip:power_add)
moreover have "pp = pq * (pp div pq)"
unfolding pq_def by simp
ultimately show "p = (pq * xx ^ (min op oq)) * ((pp div pq) * xx ^ (op - min op oq))"
unfolding pq_def pp by(auto simp:algebra_simps)
show "coprime xx (pp div pq)"
apply (rule prime_elem_imp_coprime[OF
prime_elem_linear_poly[of 1 "-x",simplified],folded xx_def])
using \<open>pp = pq * (pp div pq)\<close> pp(2) by auto
qed (use pp \<open>p\<noteq>0\<close> in auto)
have q_unfold:"q = (pq * xx ^ (min op oq)) * ((qq div pq) * xx ^ (oq - min op oq))"
and [simp]:"coprime xx (qq div pq)"
proof -
have "xx ^ oq = xx ^ (min op oq) * xx ^ (oq - min op oq)"
by (simp flip:power_add)
moreover have "qq = pq * (qq div pq)"
unfolding pq_def by simp
ultimately show "q = (pq * xx ^ (min op oq)) * ((qq div pq) * xx ^ (oq - min op oq))"
unfolding pq_def qq by(auto simp:algebra_simps)
show "coprime xx (qq div pq)"
apply (rule prime_elem_imp_coprime[OF
prime_elem_linear_poly[of 1 "-x",simplified],folded xx_def])
using \<open>qq = pq * (qq div pq)\<close> qq(2) by auto
qed
have "gcd p q=normalize (pq * xx ^ (min op oq))"
proof -
have "coprime (pp div pq * xx ^ (op - min op oq)) (qq div pq * xx ^ (oq - min op oq))"
proof (cases "op>oq")
case True
then have "oq - min op oq = 0" by auto
moreover have "coprime (xx ^ (op - min op oq)) (qq div pq)" by auto
moreover have "coprime (pp div pq) (qq div pq)"
apply (rule div_gcd_coprime[of pp qq,folded pq_def])
using \<open>pp\<noteq>0\<close> by auto
ultimately show ?thesis by auto
next
case False
then have "op - min op oq = 0" by auto
moreover have "coprime (pp div pq) (xx ^ (oq - min op oq))"
by (auto simp:coprime_commute)
moreover have "coprime (pp div pq) (qq div pq)"
apply (rule div_gcd_coprime[of pp qq,folded pq_def])
using \<open>pp\<noteq>0\<close> by auto
ultimately show ?thesis by auto
qed
then show ?thesis unfolding p_unfold q_unfold
apply (subst gcd_mult_left)
by auto
qed
then have "order x (gcd p q) = order x pq + order x (xx ^ (min op oq))"
apply simp
apply (subst order_mult)
using assms(1) p_unfold by auto
also have "... = order x (xx ^ (min op oq))"
using pp(2) qq(2) unfolding pq_def xx_def
by (auto simp add: order_0I poly_eq_0_iff_dvd)
also have "... = min op oq"
unfolding xx_def by (rule order_power_n_n)
also have "... = min (order x p) (order x q)" unfolding op_def oq_def by simp
finally show ?thesis .
qed
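(* Illustrative comment (not used by any proof below): with
p = [:-1, 1:]^2 * [:-2, 1:] and q = [:-1, 1:] * [:-2, 1:]^3 we have order 1 p = 2 and
order 1 q = 1, so the lemma above gives order 1 (gcd p q) = min 2 1 = 1. *)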
lemma pderiv_power: "pderiv (p ^ n) = smult (of_nat n) (p ^ (n-1)) * pderiv p"
apply (cases n)
using pderiv_power_Suc by auto
(*TODO: to replace the one (with the same name) in the library, as this version does
not require 'a to be a field?*)
lemma order_pderiv:
fixes p::"'a::{idom,semiring_char_0} poly"
assumes "p\<noteq>0" "poly p x=0"
shows "order x p = Suc (order x (pderiv p))" using assms
proof -
define xx op where "xx=[:- x, 1:]" and "op = order x p"
have "op \<noteq>0" unfolding op_def using assms order_root by blast
obtain pp where pp:"p = xx ^ op * pp" "\<not> xx dvd pp"
using order_decomp[OF \<open>p\<noteq>0\<close>,of x,folded xx_def op_def] by auto
have p_der:"pderiv p = smult (of_nat op) (xx^(op -1)) * pp + xx^op*pderiv pp"
unfolding pp(1) by (auto simp:pderiv_mult pderiv_power xx_def algebra_simps pderiv_pCons)
have "xx^(op -1) dvd (pderiv p)"
unfolding p_der
by (metis One_nat_def Suc_pred assms(1) assms(2) dvd_add dvd_mult_right dvd_triv_left
neq0_conv op_def order_root power_Suc smult_dvd_cancel)
moreover have "\<not> xx^op dvd (pderiv p)"
proof
assume "xx ^ op dvd pderiv p"
then have "xx ^ op dvd smult (of_nat op) (xx^(op -1) * pp)"
unfolding p_der by (simp add: dvd_add_left_iff)
then have "xx ^ op dvd (xx^(op -1)) * pp"
apply (elim dvd_monic[rotated])
using \<open>op\<noteq>0\<close> by (auto simp:lead_coeff_power xx_def)
then have "xx ^ (op-1) * xx dvd (xx^(op -1))"
using \<open>\<not> xx dvd pp\<close> by (simp add: \<open>op \<noteq> 0\<close> mult.commute power_eq_if)
then have "xx dvd 1"
using assms(1) pp(1) by auto
then show False unfolding xx_def by (meson assms(1) dvd_trans one_dvd order_decomp)
qed
ultimately have "op - 1 = order x (pderiv p)"
using order_unique_lemma[of x "op-1" "pderiv p",folded xx_def] \<open>op\<noteq>0\<close>
by auto
then show ?thesis using \<open>op\<noteq>0\<close> unfolding op_def by auto
qed
subsection \<open>More about @{term rsquarefree}\<close>
lemma rsquarefree_0[simp]: "\<not> rsquarefree 0"
unfolding rsquarefree_def by simp
lemma rsquarefree_times:
assumes "rsquarefree (p*q)"
shows "rsquarefree q" using assms
proof (induct p rule:poly_root_induct_alt)
case 0
then show ?case by simp
next
case (no_proots p)
then have [simp]:"p\<noteq>0" "q\<noteq>0" "\<And>a. order a p = 0"
using order_0I by auto
have "order a (p * q) = 0 \<longleftrightarrow> order a q = 0"
"order a (p * q) = 1 \<longleftrightarrow> order a q = 1"
for a
subgoal by (subst order_mult) auto
subgoal by (subst order_mult) auto
done
then show ?case using \<open>rsquarefree (p * q)\<close>
unfolding rsquarefree_def by simp
next
case (root a p)
define pq aa where "pq = p * q" and "aa = [:- a, 1:]"
have [simp]:"pq\<noteq>0" "aa\<noteq>0" "order a aa=1"
subgoal using pq_def root.prems by auto
subgoal by (simp add: aa_def)
subgoal by (metis aa_def order_power_n_n power_one_right)
done
have "rsquarefree (aa * pq)"
unfolding aa_def pq_def using root(2) by (simp add:algebra_simps)
then have "rsquarefree pq"
unfolding rsquarefree_def by (auto simp add:order_mult)
from root(1)[OF this[unfolded pq_def]] show ?case .
qed
lemma rsquarefree_smult_iff:
assumes "s\<noteq>0"
shows "rsquarefree (smult s p) \<longleftrightarrow> rsquarefree p"
unfolding rsquarefree_def using assms by (auto simp add:order_smult)
lemma card_proots_within_rsquarefree:
assumes "rsquarefree p"
shows "proots_count p s = card (proots_within p s)" using assms
proof (induct rule:poly_root_induct[of _ "\<lambda>x. x\<in>s"])
case 0
then have False by simp
then show ?case by simp
next
case (no_roots p)
then show ?case
by (metis all_not_in_conv card.empty proots_count_def proots_within_iff sum.empty)
next
case (root a p)
have "proots_count ([:a, - 1:] * p) s = 1 + proots_count p s"
apply (subst proots_count_times)
subgoal using root.prems rsquarefree_def by blast
subgoal by (metis (no_types, opaque_lifting) add.inverse_inverse add.inverse_neutral
minus_pCons proots_count_pCons_1_iff proots_count_uminus root.hyps(1))
done
also have "... = 1 + card (proots_within p s)"
proof -
have "rsquarefree p" using \<open>rsquarefree ([:a, - 1:] * p)\<close>
by (elim rsquarefree_times)
from root(2)[OF this] show ?thesis by simp
qed
also have "... = card (proots_within ([:a, - 1:] * p) s)" unfolding proots_within_times
proof (subst card_Un_disjoint)
have [simp]:"p\<noteq>0" using root.prems by auto
show "finite (proots_within [:a, - 1:] s)" "finite (proots_within p s)"
by auto
show " 1 + card (proots_within p s) = card (proots_within [:a, - 1:] s)
+ card (proots_within p s)"
using \<open>a \<in> s\<close>
apply (subst proots_within_pCons_1_iff)
by simp
have "poly p a\<noteq>0"
proof (rule ccontr)
assume "\<not> poly p a \<noteq> 0"
then have "order a p >0" by (simp add: order_root)
moreover have "order a [:a,-1:] = 1"
by (metis (no_types, opaque_lifting) add.inverse_inverse add.inverse_neutral minus_pCons
order_power_n_n order_uminus power_one_right)
ultimately have "order a ([:a, - 1:] * p) > 1"
apply (subst order_mult)
subgoal using root.prems by auto
subgoal by auto
done
then show False using \<open>rsquarefree ([:a, - 1:] * p)\<close>
unfolding rsquarefree_def using gr_implies_not0 less_not_refl2 by blast
qed
then show " proots_within [:a, - 1:] s \<inter> proots_within p s = {}"
using proots_within_pCons_1_iff(2) by auto
qed
finally show ?case .
qed
lemma rsquarefree_gcd_pderiv:
fixes p::"'a::{factorial_ring_gcd,semiring_gcd_mult_normalize,semiring_char_0} poly"
assumes "p\<noteq>0"
shows "rsquarefree (p div (gcd p (pderiv p)))"
proof (cases "pderiv p = 0")
case True
have "poly (unit_factor p) x \<noteq>0" for x
using unit_factor_is_unit[OF \<open>p\<noteq>0\<close>]
by (meson assms dvd_trans order_decomp poly_eq_0_iff_dvd unit_factor_dvd)
then have "order x (unit_factor p) = 0" for x
using order_0I by blast
then show ?thesis using True \<open>p\<noteq>0\<close> unfolding rsquarefree_def by simp
next
case False
define q where "q = p div (gcd p (pderiv p))"
have "q\<noteq>0" unfolding q_def by (simp add: assms dvd_div_eq_0_iff)
have order_pq:"order x p = order x q + min (order x p) (order x (pderiv p))"
for x
proof -
have *:"p = q * gcd p (pderiv p)"
unfolding q_def by simp
show ?thesis
apply (subst *)
using \<open>q\<noteq>0\<close> \<open>p\<noteq>0\<close> \<open>pderiv p\<noteq>0\<close> by (simp add:order_mult order_gcd)
qed
have "order x q = 0 \<or> order x q=1" for x
proof (cases "poly p x=0")
case True
from order_pderiv[OF \<open>p\<noteq>0\<close> this]
have "order x p = order x (pderiv p) + 1" by simp
then show ?thesis using order_pq[of x] by auto
next
case False
then have "order x p = 0" by (simp add: order_0I)
then have "order x q = 0" using order_pq[of x] by simp
then show ?thesis by simp
qed
then show ?thesis using \<open>q\<noteq>0\<close> unfolding rsquarefree_def q_def
by auto
qed
lemma poly_gcd_pderiv_iff:
fixes p::"'a::{semiring_char_0,factorial_ring_gcd,semiring_gcd_mult_normalize} poly"
shows "poly (p div (gcd p (pderiv p))) x =0 \<longleftrightarrow> poly p x=0"
proof (cases "pderiv p=0")
case True
then obtain a where "p=[:a:]" using pderiv_iszero by auto
then show ?thesis by (auto simp add: unit_factor_poly_def)
next
case False
then have "p\<noteq>0" using pderiv_0 by blast
define q where "q = p div (gcd p (pderiv p))"
have "q\<noteq>0" unfolding q_def by (simp add: \<open>p\<noteq>0\<close> dvd_div_eq_0_iff)
have order_pq:"order x p = order x q + min (order x p) (order x (pderiv p))" for x
proof -
have *:"p = q * gcd p (pderiv p)"
unfolding q_def by simp
show ?thesis
apply (subst *)
using \<open>q\<noteq>0\<close> \<open>p\<noteq>0\<close> \<open>pderiv p\<noteq>0\<close> by (simp add:order_mult order_gcd)
qed
have "order x q =0 \<longleftrightarrow> order x p = 0"
proof (cases "poly p x=0")
case True
from order_pderiv[OF \<open>p\<noteq>0\<close> this]
have "order x p = order x (pderiv p) + 1" by simp
then show ?thesis using order_pq[of x] by auto
next
case False
then have "order x p = 0" by (simp add: order_0I)
then have "order x q = 0" using order_pq[of x] by simp
then show ?thesis using \<open>order x p = 0\<close> by simp
qed
then show ?thesis
apply (fold q_def)
unfolding order_root using \<open>p\<noteq>0\<close> \<open>q\<noteq>0\<close> by auto
qed
subsection \<open>Composition of a polynomial and a circular path\<close>
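(* Expository note (added for illustration): the lemma below is the half-angle
(Weierstrass) substitution in disguise. Writing u = tan (pi*t) and th = 2*pi*t, one has
cos th = (1 - u^2) / (1 + u^2) and sin th = 2*u / (1 + u^2), hence
z0 + r * exp (i*th) = ((z0 - r)*u + (z0 + r)*i) / (u + i),
which is exactly where the coefficients of q1 and q2 come from. *)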
lemma poly_circlepath_tan_eq:
fixes z0::complex and r::real and p::"complex poly"
defines "q1\<equiv> fcompose p [:(z0+r)*\<i>,z0-r:] [:\<i>,1:]" and "q2 \<equiv> [:\<i>,1:] ^ degree p"
assumes "0\<le>t" "t\<le>1" "t\<noteq>1/2"
shows "poly p (circlepath z0 r t) = poly q1 (tan (pi*t)) / poly q2 (tan (pi*t))"
(is "?L = ?R")
proof -
have "?L = poly p (z0+ r*exp (2 * of_real pi * \<i> * t))"
unfolding circlepath by simp
also have "... = ?R"
proof -
define f where "f = (poly p \<circ> (\<lambda>x::real. z0 + r * exp (\<i> * x)))"
have f_eq:"f t = ((\<lambda>x::real. poly q1 x / poly q2 x) o (\<lambda>x. tan (x/2)) ) t"
when "cos (t / 2) \<noteq> 0" for t
proof -
have "f t = poly p (z0 + r * (cos t + \<i> * sin t)) "
unfolding f_def exp_Euler by (auto simp add:cos_of_real sin_of_real)
also have "... = poly p ((\<lambda>x. ((z0-r)*x+(z0+r)*\<i>) / (\<i>+x)) (tan (t/2)))"
proof -
define tt where "tt=complex_of_real (tan (t / 2))"
define rr where "rr = complex_of_real r"
have "cos t = (1-tt*tt) / (1 + tt * tt)"
"sin t = 2*tt / (1 + tt * tt)"
unfolding sin_tan_half[of "t/2",simplified] cos_tan_half[of "t/2",OF that, simplified] tt_def
by (auto simp add:power2_eq_square)
moreover have "1 + tt * tt \<noteq> 0" unfolding tt_def
apply (fold of_real_mult)
by (metis (no_types, opaque_lifting) mult_numeral_1 numeral_One of_real_add of_real_eq_0_iff
of_real_numeral sum_squares_eq_zero_iff zero_neq_one)
ultimately have "z0 + r * ( (cos t) + \<i> * (sin t))
=(z0*(1+tt*tt)+rr*(1-tt*tt)+\<i>*rr*2*tt ) / (1 + tt * tt) "
apply (fold rr_def,simp add:add_divide_distrib)
by (simp add:algebra_simps)
also have "... = ((z0-rr)*tt+z0*\<i>+rr*\<i>) / (tt + \<i>)"
proof -
have "tt + \<i> \<noteq> 0"
using \<open>1 + tt * tt \<noteq> 0\<close>
by (metis i_squared neg_eq_iff_add_eq_0 square_eq_iff)
then show ?thesis
using \<open>1 + tt * tt \<noteq> 0\<close> by (auto simp add:divide_simps algebra_simps)
qed
finally have "z0 + r * ( (cos t) + \<i> * (sin t)) = ((z0-rr)*tt+z0*\<i>+rr*\<i>) / (tt + \<i>)" .
then show ?thesis unfolding tt_def rr_def
by (auto simp add:algebra_simps power2_eq_square)
qed
also have "... = (poly p o ((\<lambda>x. ((z0-r)*x+(z0+r)*\<i>) / (\<i>+x)) o (\<lambda>x. tan (x/2)) )) t"
unfolding comp_def by (auto simp:tan_of_real)
also have "... = ((\<lambda>x::real. poly q1 x / poly q2 x) o (\<lambda>x. tan (x/2)) ) t"
unfolding q2_def q1_def
apply (subst fcompose_poly[symmetric])
subgoal for x
apply simp
by (metis Re_complex_of_real add_cancel_right_left complex_i_not_zero imaginary_unit.sel(1) plus_complex.sel(1) rcis_zero_arg rcis_zero_mod)
subgoal by (auto simp:tan_of_real algebra_simps)
done
finally show ?thesis .
qed
have "cos (pi * t) \<noteq>0" unfolding cos_zero_iff_int2
proof
assume "\<exists>i. pi * t = real_of_int i * pi + pi / 2"
then obtain i where "pi * t = real_of_int i * pi + pi / 2" by auto
then have "pi * t=pi * (real_of_int i + 1 / 2)" by (simp add:algebra_simps)
then have "t=real_of_int i + 1 / 2" by auto
then show False using \<open>0\<le>t\<close> \<open>t\<le>1\<close> \<open>t\<noteq>1/2\<close> by auto
qed
from f_eq[of "2*pi*t",simplified,OF this]
show "?thesis"
unfolding f_def comp_def by (auto simp add:algebra_simps)
qed
finally show ?thesis .
qed
subsection \<open>Combining two real polynomials into a complex one\<close>
definition cpoly_of:: "real poly \<Rightarrow> real poly \<Rightarrow> complex poly" where
"cpoly_of pR pI = map_poly of_real pR + smult \<i> (map_poly of_real pI)"
lemma cpoly_of_eq_0_iff[iff]:
"cpoly_of pR pI = 0 \<longleftrightarrow> pR = 0 \<and> pI = 0"
proof -
have "pR = 0 \<and> pI = 0" when "cpoly_of pR pI = 0"
proof -
have "complex_of_real (coeff pR n) + \<i> * complex_of_real (coeff pI n) = 0" for n
using that unfolding poly_eq_iff cpoly_of_def by (auto simp:coeff_map_poly)
then have "coeff pR n = 0 \<and> coeff pI n = 0" for n
by (metis Complex_eq Im_complex_of_real Re_complex_of_real complex.sel(1) complex.sel(2)
of_real_0)
then show ?thesis unfolding poly_eq_iff by auto
qed
then show ?thesis by (auto simp:cpoly_of_def)
qed
lemma cpoly_of_decompose:
"p = cpoly_of (map_poly Re p) (map_poly Im p)"
unfolding cpoly_of_def
apply (induct p)
by (auto simp add:map_poly_pCons map_poly_map_poly complex_eq)
lemma cpoly_of_dist_right:
"cpoly_of (pR*g) (pI*g) = cpoly_of pR pI * (map_poly of_real g)"
unfolding cpoly_of_def by (simp add: distrib_right)
lemma poly_cpoly_of_real:
"poly (cpoly_of pR pI) (of_real x) = Complex (poly pR x) (poly pI x)"
unfolding cpoly_of_def by (simp add: Complex_eq)
lemma poly_cpoly_of_real_iff:
shows "poly (cpoly_of pR pI) (of_real t) =0 \<longleftrightarrow> poly pR t = 0 \<and> poly pI t=0 "
unfolding poly_cpoly_of_real using Complex_eq_0 by blast
lemma order_cpoly_gcd_eq:
assumes "pR\<noteq>0 \<or> pI\<noteq>0"
shows "order t (cpoly_of pR pI) = order t (gcd pR pI)"
proof -
define g where "g = gcd pR pI"
have [simp]:"g\<noteq>0" unfolding g_def using assms by auto
obtain pr pi where pri: "pR = pr * g" "pI = pi * g" "coprime pr pi"
unfolding g_def using assms(1) gcd_coprime_exists \<open>g \<noteq> 0\<close> g_def by blast
then have "pr \<noteq>0 \<or> pi \<noteq>0" using assms mult_zero_left by blast
have "order t (cpoly_of pR pI) = order t (cpoly_of pr pi * (map_poly of_real g))"
unfolding pri cpoly_of_dist_right by simp
also have "... = order t (cpoly_of pr pi) + order t g"
apply (subst order_mult)
using \<open>pr \<noteq>0 \<or> pi \<noteq>0\<close> by (auto simp:map_poly_order_of_real)
also have "... = order t g"
proof -
have "poly (cpoly_of pr pi) t \<noteq>0" unfolding poly_cpoly_of_real_iff
using \<open>coprime pr pi\<close> coprime_poly_0 by blast
then have "order t (cpoly_of pr pi) = 0" by (simp add: order_0I)
then show ?thesis by auto
qed
finally show ?thesis unfolding g_def .
qed
lemma cpoly_of_times:
shows "cpoly_of pR pI * cpoly_of qR qI = cpoly_of (pR * qR - pI * qI) (pI*qR+pR*qI)"
proof -
define PR PI where "PR = map_poly complex_of_real pR"
and "PI = map_poly complex_of_real pI"
define QR QI where "QR = map_poly complex_of_real qR"
and "QI = map_poly complex_of_real qI"
show ?thesis
unfolding cpoly_of_def
by (simp add:algebra_simps of_real_poly_hom.hom_minus smult_add_right
flip: PR_def PI_def QR_def QI_def)
qed
lemma map_poly_Re_cpoly[simp]:
"map_poly Re (cpoly_of pR pI) = pR"
unfolding cpoly_of_def smult_map_poly
apply (simp add:map_poly_map_poly Re_poly_hom.hom_add comp_def)
by (metis coeff_map_poly leading_coeff_0_iff)
lemma map_poly_Im_cpoly[simp]:
"map_poly Im (cpoly_of pR pI) = pI"
unfolding cpoly_of_def smult_map_poly
apply (simp add:map_poly_map_poly Im_poly_hom.hom_add comp_def)
by (metis coeff_map_poly leading_coeff_0_iff)
end |
If $l$ is an accumulation point of the range of $f$, then there exists a subsequence of $f$ that converges to $l$. |
-- simply-typed λ-calculus w/ DeBruijn indices
module LC where
open import Agda.Primitive
open import Agda.Builtin.Bool
open import Data.Bool.Properties hiding (≤-trans ; <-trans ; ≤-refl ; <-irrefl)
open import Data.Empty
open import Data.Nat renaming (_+_ to _+ᴺ_ ; _≤_ to _≤ᴺ_ ; _≥_ to _≥ᴺ_ ; _<_ to _<ᴺ_ ; _>_ to _>ᴺ_)
open import Data.Nat.Properties renaming (_<?_ to _<ᴺ?_)
open import Data.Integer renaming (_+_ to _+ᶻ_ ; _≤_ to _≤ᶻ_ ; _≥_ to _≥ᶻ_ ; _<_ to _<ᶻ_ ; _>_ to _>ᶻ_)
open import Data.Integer.Properties using (⊖-≥ ; 0≤n⇒+∣n∣≡n ; +-monoˡ-≤)
open import Data.List
open import Data.List.Relation.Unary.All
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary
open import Relation.Nullary.Decidable
open import Relation.Nullary.Negation
open import Auxiliary
module defs where
data Exp : Set where
Var : ℕ → Exp
Abs : Exp → Exp
App : Exp → Exp → Exp
data Ty : Set where
Fun : Ty → Ty → Ty
-- typing
Env = List Ty
data _∶_∈_ : ℕ → Ty → Env → Set where
here : {T : Ty} {Γ : Env} → 0 ∶ T ∈ (T ∷ Γ)
there : {n : ℕ} {T₁ T₂ : Ty} {Γ : Env} → n ∶ T₁ ∈ Γ → (ℕ.suc n) ∶ T₁ ∈ (T₂ ∷ Γ)
data _⊢_∶_ : Env → Exp → Ty → Set where
TVar : {n : ℕ} {Γ : Env} {T : Ty} → n ∶ T ∈ Γ → Γ ⊢ (Var n) ∶ T
TAbs : {Γ : Env} {T₁ T₂ : Ty} {e : Exp} → (T₁ ∷ Γ) ⊢ e ∶ T₂ → Γ ⊢ (Abs e) ∶ (Fun T₁ T₂)
TApp : {Γ : Env} {T₁ T₂ : Ty} {e₁ e₂ : Exp} → Γ ⊢ e₁ ∶ (Fun T₁ T₂) → Γ ⊢ e₂ ∶ T₁ → Γ ⊢ (App e₁ e₂) ∶ T₂
-- denotational semantics
module denotational where
open defs
Valᵈ : Ty → Set
Valᵈ (Fun Ty₁ Ty₂) = (Valᵈ Ty₁) → (Valᵈ Ty₂)
access : {n : ℕ} {Γ : Env} {T : Ty} → n ∶ T ∈ Γ → All Valᵈ Γ → Valᵈ T
access here (V ∷ Γ) = V
access (there J) (V ∷ Γ) = access J Γ
eval : {Γ : Env} {T : Ty} {e : Exp} → Γ ⊢ e ∶ T → All Valᵈ Γ → Valᵈ T
eval (TVar c) Val-Γ = access c Val-Γ
eval (TAbs TJ) Val-Γ = λ V → eval TJ (V ∷ Val-Γ)
eval (TApp TJ TJ₁) Val-Γ = (eval TJ Val-Γ) (eval TJ₁ Val-Γ)
-- operational semantics (call-by-value)
module operational where
open defs
-- shifting, required to avoid variable-capturing in substitution
-- see Pierce 2002, pg. 78/79
↑_,_[_] : ℤ → ℕ → Exp → Exp
↑ d , c [ Var x ]
with (x <ᴺ? c)
... | yes p = Var x
... | no ¬p = Var (∣ (ℤ.pos x) +ᶻ d ∣) -- should always be positive anyway
↑ d , c [ Abs t ] = Abs (↑ d , (ℕ.suc c) [ t ])
↑ d , c [ App t t₁ ] = App (↑ d , c [ t ]) (↑ d , c [ t₁ ])
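-- worked example (added for illustration):
-- ↑ + 1 , 0 [ Abs (App (Var 0) (Var 1)) ] evaluates to Abs (App (Var 0) (Var 2)),
-- since under the Abs the cutoff becomes 1, so the bound Var 0 stays put while the
-- free Var 1 is shifted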
-- shifting in range [n, m]; by def. m < n implies no shift
↑[_,_]_[_] : ℕ → ℕ → ℤ → Exp → Exp
↑[ n , m ] d [ Var x ]
with (x <ᴺ? n)
... | yes p = Var x
... | no ¬p
with (m <ᴺ? x)
... | yes p' = Var x
... | no ¬p' = Var (∣ (ℤ.pos x) +ᶻ d ∣)
↑[ n , m ] d [ Abs e ] = Abs (↑[ ℕ.suc n , ℕ.suc m ] d [ e ])
↑[ n , m ] d [ App e e₁ ] = App (↑[ n , m ] d [ e ]) (↑[ n , m ] d [ e₁ ])
-- shorthands
↑¹[_] : Exp → Exp
↑¹[ e ] = ↑ (ℤ.pos 1) , 0 [ e ]
↑⁻¹[_] : Exp → Exp
↑⁻¹[ e ] = ↑ (ℤ.negsuc 0) , 0 [ e ]
-- substitution
-- see Pierce 2002, pg. 80
[_↦_]_ : ℕ → Exp → Exp → Exp
[ k ↦ s ] Var x
with (Data.Nat._≟_ x k)
... | yes p = s
... | no ¬p = Var x
[ k ↦ s ] Abs t = Abs ([ ℕ.suc k ↦ ↑¹[ s ] ] t)
[ k ↦ s ] App t t₁ = App ([ k ↦ s ] t) ([ k ↦ s ] t₁)
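-- worked example (added for illustration):
-- [ 0 ↦ Var 5 ] App (Var 0) (Abs (Var 0)) evaluates to App (Var 5) (Abs (Var 0)),
-- since under the Abs the target index becomes 1 and the substitute is shifted to
-- Var 6, leaving the bound Var 0 untouched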
data Val : Exp → Set where
VFun : {e : Exp} → Val (Abs e)
-- reduction relation
data _⇒_ : Exp → Exp → Set where
ξ-App1 : {e₁ e₁' e₂ : Exp} → e₁ ⇒ e₁' → App e₁ e₂ ⇒ App e₁' e₂
ξ-App2 : {e e' v : Exp} → Val v → e ⇒ e' → App v e ⇒ App v e'
β-App : {e v : Exp} → Val v → (App (Abs e) v) ⇒ (↑⁻¹[ ([ 0 ↦ ↑¹[ v ] ] e) ])
---- properties & lemmas
--- properties of shifting
↑-var-refl : {d : ℤ} {c : ℕ} {x : ℕ} {le : ℕ.suc x ≤ᴺ c} → ↑ d , c [ Var x ] ≡ Var x
↑-var-refl {d} {c} {x} {le}
with (x <ᴺ? c)
... | no ¬p = contradiction le ¬p
... | yes p = refl
↑[]-var-refl-< : {n m x : ℕ} {d : ℤ} {le : x <ᴺ n} → ↑[ n , m ] d [ Var x ] ≡ Var x
↑[]-var-refl-< {n} {m} {x} {d} {le}
with (x <ᴺ? n)
... | yes p = refl
... | no ¬p = contradiction le ¬p
↑[]-var-refl-> : {n m x : ℕ} {d : ℤ} {le : m <ᴺ x} → ↑[ n , m ] d [ Var x ] ≡ Var x
↑[]-var-refl-> {n} {m} {x} {d} {le}
with (x <ᴺ? n)
... | yes p = refl
... | no p
with (m <ᴺ? x)
... | no ¬q = contradiction le ¬q
... | yes q = refl
↑[]-var-shift : {n m x : ℕ} {d : ℤ} (le1 : n ≤ᴺ x) (le2 : x ≤ᴺ m) → ↑[ n , m ] d [ Var x ] ≡ Var (∣ (ℤ.pos x) +ᶻ d ∣)
↑[]-var-shift {n} {m} {x} {d} le1 le2
with x <ᴺ? n
... | yes p = contradiction p (<⇒≱ (s≤s le1))
... | no ¬p
with m <ᴺ? x
... | yes p' = contradiction p' (<⇒≱ (s≤s le2))
... | no ¬p' = refl
↑¹-var : {x : ℕ} → ↑¹[ Var x ] ≡ Var (ℕ.suc x)
↑¹-var {zero} = refl
↑¹-var {ℕ.suc x}
rewrite (sym (n+1≡sucn{x +ᴺ 1}))
| (sym (n+1≡sucn{x}))
= cong ↑¹[_] (↑¹-var{x})
↑⁻¹ₖ[↑¹ₖ[s]]≡s : {e : Exp} {k : ℕ} → ↑ -[1+ 0 ] , k [ ↑ + 1 , k [ e ] ] ≡ e
↑⁻¹ₖ[↑¹ₖ[s]]≡s {Var x} {k}
with (x <ᴺ? k)
-- x < k
-- => ↑⁻¹ₖ(↑¹ₖ(Var x)) = ↑⁻¹ₖ(Var x) = Var x
... | yes p = ↑-var-refl{ -[1+ 0 ]}{k}{x}{p}
-- x ≥ k
-- => ↑⁻¹ₖ(↑¹ₖ(Var x)) = ↑⁻¹ₖ(Var |x + 1|) = Var (||x + 1| - 1|) = Var x
... | no ¬p
with (¬[x≤k]⇒¬[sucx≤k] ¬p)
... | ¬p'
with (x +ᴺ 1) <ᴺ? k
... | yes pp = contradiction pp ¬p'
... | no ¬pp
rewrite (∣nℕ+1⊖1∣≡n{x})
= refl
↑⁻¹ₖ[↑¹ₖ[s]]≡s {Abs e} {k} = cong Abs ↑⁻¹ₖ[↑¹ₖ[s]]≡s
↑⁻¹ₖ[↑¹ₖ[s]]≡s {App e e₁} = cong₂ App ↑⁻¹ₖ[↑¹ₖ[s]]≡s ↑⁻¹ₖ[↑¹ₖ[s]]≡s
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] : {k l : ℤ} {c : ℕ} {s : Exp} → l ≥ᶻ +0 → ↑ k , c [ ↑ l , c [ s ] ] ≡ ↑ (l +ᶻ k) , c [ s ]
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] {k} {l} {c} {Var x} ge
with x <ᴺ? c
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] {k} {l} {c} {Var x} ge | no ¬p
with ∣ + x +ᶻ l ∣ <ᴺ? c
... | yes q = contradiction q (<⇒≱ (n≤m⇒n<sucm (≤-trans (≮⇒≥ ¬p) (m≥0⇒∣n+m∣≥n ge))))
... | no ¬q
rewrite (0≤n⇒+∣n∣≡n{+ x +ᶻ l} (m≥0⇒n+m≥0 ge))
| (Data.Integer.Properties.+-assoc (+_ x) l k)
= refl
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] {k} {l} {c} {Var x} ge | yes p
with x <ᴺ? c
... | yes p' = refl
... | no ¬p' = contradiction p ¬p'
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] {k} {l} {c} {Abs s} le = cong Abs (↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s]{k}{l}{ℕ.suc c}{s} le)
↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s] {k} {l} {c} {App s s₁} le = cong₂ App (↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s]{k}{l}{c}{s} le) (↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s]{k}{l}{c}{s₁} le)
↑k,q[↑l,c[s]]≡↑l+k,c[s] : {k l : ℤ} {q c : ℕ} {s : Exp} → + q ≤ᶻ + c +ᶻ l → c ≤ᴺ q → ↑ k , q [ ↑ l , c [ s ] ] ≡ ↑ (l +ᶻ k) , c [ s ]
↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {Var x} ge₁ ge₂
with x <ᴺ? c
... | yes p
with x <ᴺ? q
... | yes p' = refl
... | no ¬p' = contradiction (≤-trans p ge₂) ¬p'
↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {Var x} ge₁ ge₂
| no ¬p
with ∣ + x +ᶻ l ∣ <ᴺ? q
... | yes p' = contradiction p' (≤⇒≯ (+a≤b⇒a≤∣b∣{q}{+ x +ᶻ l} (Data.Integer.Properties.≤-trans ge₁ ((Data.Integer.Properties.+-monoˡ-≤ l (+≤+ (≮⇒≥ ¬p)))))))
... | no ¬p'
rewrite (0≤n⇒+∣n∣≡n{+ x +ᶻ l} (Data.Integer.Properties.≤-trans (+≤+ z≤n) ((Data.Integer.Properties.≤-trans ge₁ ((Data.Integer.Properties.+-monoˡ-≤ l (+≤+ (≮⇒≥ ¬p))))))))
| (Data.Integer.Properties.+-assoc (+_ x) l k)
= refl
↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {Abs s} ge₁ ge₂ = cong Abs (↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {ℕ.suc q} {ℕ.suc c} {s} (+q≤+c+l⇒+1q≤+1c+l{q}{c}{l} ge₁) (s≤s ge₂))
↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {App s s₁} ge₁ ge₂ = cong₂ App (↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {s} ge₁ ge₂) (↑k,q[↑l,c[s]]≡↑l+k,c[s] {k} {l} {q} {c} {s₁} ge₁ ge₂)
--- properties of substitution
subst-trivial : {x : ℕ} {s : Exp} → [ x ↦ s ] Var x ≡ s
subst-trivial {x} {s}
with x Data.Nat.≟ x
... | no ¬p = contradiction refl ¬p
... | yes p = refl
var-subst-refl : {n m : ℕ} {neq : n ≢ m} {e : Exp} → [ n ↦ e ] (Var m) ≡ (Var m)
var-subst-refl {n} {m} {neq} {e}
with Data.Nat._≟_ n m
| map′ (≡ᵇ⇒≡ m n) (≡⇒≡ᵇ m n) (Data.Bool.Properties.T? (m ≡ᵇ n))
... | yes p | _ = contradiction p neq
... | no ¬p | yes q = contradiction q (≢-sym ¬p)
... | no ¬p | no ¬q = refl
--- properties and manipulation of environments
var-env-< : {Γ : Env} {T : Ty} {n : ℕ} (j : n ∶ T ∈ Γ) → n <ᴺ (length Γ)
var-env-< {.(T ∷ _)} {T} {.0} here = s≤s z≤n
var-env-< {.(_ ∷ _)} {T} {.(ℕ.suc _)} (there j) = s≤s (var-env-< j)
-- type to determine whether var type judgement in env. (Δ ++ Γ) is in Δ or Γ
data extract-env-or {Δ Γ : Env} {T : Ty} {x : ℕ} : Set where
in-Δ : x ∶ T ∈ Δ → extract-env-or
-- x ≥ length Δ makes sure that x really is in Γ; e.g.
-- x = 1, Δ = (S ∷ T), Γ = (T ∷ Γ'); here 1 ∶ T ∈ Δ as well as (1 ∸ 2) ≡ 0 ∶ T ∈ Γ
in-Γ : (x ≥ᴺ length Δ) → (x ∸ length Δ) ∶ T ∈ Γ → extract-env-or
extract : {Δ Γ : Env} {T : Ty} {x : ℕ} (j : x ∶ T ∈ (Δ ++ Γ)) → extract-env-or{Δ}{Γ}{T}{x}
extract {[]} {Γ} {T} {x} j = in-Γ z≤n j
extract {x₁ ∷ Δ} {Γ} {.x₁} {.0} here = in-Δ here
extract {x₁ ∷ Δ} {Γ} {T} {ℕ.suc x} (there j)
with extract {Δ} {Γ} {T} {x} j
... | in-Δ j' = in-Δ (there j')
... | in-Γ ge j'' = in-Γ (s≤s ge) j''
ext-behind : {Δ Γ : Env} {T : Ty} {x : ℕ} → x ∶ T ∈ Δ → x ∶ T ∈ (Δ ++ Γ)
ext-behind here = here
ext-behind (there j) = there (ext-behind j)
ext-front : {n : ℕ} {Γ Δ : Env} {S : Ty} → n ∶ S ∈ Γ → (n +ᴺ (length Δ)) ∶ S ∈ (Δ ++ Γ)
ext-front {n} {Γ} {[]} {S} j
rewrite (n+length[]≡n{A = Ty}{n = n})
= j
ext-front {n} {Γ} {T ∷ Δ} {S} j
rewrite (+-suc n (foldr (λ _ → ℕ.suc) 0 Δ))
= there (ext-front j)
swap-env-behind : {Γ Δ : Env} {T : Ty} → 0 ∶ T ∈ (T ∷ Γ) → 0 ∶ T ∈ (T ∷ Δ)
swap-env-behind {Γ} {Δ} {T} j = here
swap-type : {Δ ∇ Γ : Env} {T : Ty} → (length Δ) ∶ T ∈ (Δ ++ T ∷ ∇ ++ Γ) → (length Δ +ᴺ length ∇) ∶ T ∈ (Δ ++ ∇ ++ T ∷ Γ)
swap-type {Δ} {∇} {Γ} {T} j
with extract{Δ}{T ∷ ∇ ++ Γ} j
... | in-Δ x = contradiction (var-env-< {Δ} {T} x) (<-irrefl refl)
... | in-Γ le j'
with extract{T ∷ ∇}{Γ} j'
... | in-Δ j''
rewrite (n∸n≡0 (length Δ))
| (sym (length[A++B]≡length[A]+length[B]{lzero}{Ty}{Δ}{∇}))
| (sym (++-assoc{lzero}{Ty}{Δ}{∇}{T ∷ Γ}))
= ext-front{0}{T ∷ Γ}{Δ ++ ∇}{T} (swap-env-behind{∇}{Γ}{T} j'')
... | in-Γ le' j''
rewrite (length[A∷B]≡suc[length[B]]{lzero}{Ty}{T}{∇})
| (n∸n≡0 (length Δ))
= contradiction le' (<⇒≱ (s≤s z≤n))
env-pred : {Γ : Env} {S T : Ty} {y : ℕ} {gt : y ≢ 0} → y ∶ T ∈ (S ∷ Γ) → ∣ y ⊖ 1 ∣ ∶ T ∈ Γ
env-pred {Γ} {S} {.S} {.0} {gt} here = contradiction refl gt
env-pred {Γ} {S} {T} {.(ℕ.suc _)} {gt} (there j) = j
env-type-equiv-here : {Γ : Env} {S T : Ty} → 0 ∶ T ∈ (S ∷ Γ) → T ≡ S
env-type-equiv-here {Γ} {S} {.S} here = refl
env-type-equiv : {Δ ∇ : Env} {S T : Ty} → length Δ ∶ T ∈ (Δ ++ S ∷ ∇) → T ≡ S
env-type-equiv {Δ} {∇} {S} {T} j
with extract{Δ}{S ∷ ∇} j
... | in-Δ x = contradiction (var-env-< x) (≤⇒≯ ≤-refl)
... | in-Γ x j'
rewrite (n∸n≡0 (length Δ))
= env-type-equiv-here {∇} {S} {T} j'
env-type-equiv-j : {Γ : Env} {S T : Ty} {n : ℕ} → T ≡ S → n ∶ T ∈ Γ → n ∶ S ∈ Γ
env-type-equiv-j {Γ} {S} {T} {n} eq j
rewrite eq
= j
-- extension of environment
ext : {Γ Δ ∇ : Env} {S : Ty} {s : Exp} → (∇ ++ Γ) ⊢ s ∶ S → (∇ ++ Δ ++ Γ) ⊢ ↑ (ℤ.pos (length Δ)) , length ∇ [ s ] ∶ S
ext {Γ} {Δ} {∇} (TVar {n} x)
with extract{∇}{Γ} x
... | in-Δ x₁
with n <ᴺ? length ∇
... | yes p = TVar (ext-behind x₁)
... | no ¬p = contradiction (var-env-< x₁) ¬p
ext {Γ} {Δ} {∇} (TVar {n} x)
| in-Γ x₁ x₂
with n <ᴺ? length ∇
... | yes p = contradiction x₁ (<⇒≱ p)
... | no ¬p
with (ext-front{n ∸ length ∇}{Γ}{∇ ++ Δ} x₂)
... | w
rewrite (length[A++B]≡length[A]+length[B]{lzero}{Ty}{∇}{Δ})
| (sym (+-assoc (n ∸ length ∇) (length ∇) (length Δ)))
| (m∸n+n≡m{n}{length ∇} (≮⇒≥ ¬p))
| (++-assoc{lzero}{Ty}{∇}{Δ}{Γ})
= TVar w
ext {Γ} {Δ} {∇} {Fun T₁ T₂} {Abs e} (TAbs j) = TAbs (ext{Γ}{Δ}{T₁ ∷ ∇} j)
ext {Γ} {Δ} {∇} {S} {App s₁ s₂} (TApp{T₁ = T₁} j₁ j₂) = TApp (ext{Γ}{Δ}{∇}{Fun T₁ S} j₁) (ext{Γ}{Δ}{∇}{T₁} j₂)
---- progress and preservation
-- progress theorem, i.e. a well-typed closed expression is either a value
-- or can be reduced further
data Progress (e : Exp) {T : Ty} {j : [] ⊢ e ∶ T} : Set where
step : {e' : Exp} → e ⇒ e' → Progress e
value : Val e → Progress e
progress : (e : Exp) {T : Ty} {j : [] ⊢ e ∶ T} → Progress e {T} {j}
progress (Var x) {T} {TVar ()}
progress (Abs e) = value VFun
progress (App e e₁) {T} {TApp{T₁ = T₁}{T₂ = .T} j j₁} with progress e {Fun T₁ T} {j}
... | step x = step (ξ-App1 x)
... | value VFun with progress e₁ {T₁} {j₁}
... | step x₁ = step (ξ-App2 VFun x₁)
... | value x₁ = step (β-App x₁)
---
-- preservation under substitution
preserve-subst : {T S : Ty} {Γ Δ : Env} {e s : Exp} (j : (Δ ++ (S ∷ Γ)) ⊢ e ∶ T) (j' : Γ ⊢ s ∶ S) → (Δ ++ Γ) ⊢ ↑ -[1+ 0 ] , length Δ [ [ length Δ ↦ ↑ (ℤ.pos (ℕ.suc (length Δ))) , 0 [ s ] ] e ] ∶ T
preserve-subst {T} {S} {Γ} {Δ} {e} {s} (TVar{n} x) j'
with extract{Δ}{S ∷ Γ}{T}{n} x
... | in-Δ x₁
with n Data.Nat.≟ length Δ
... | yes p = contradiction p (<⇒≢ (var-env-< x₁))
... | no ¬p
with n <ᴺ? length Δ
... | yes q = TVar (ext-behind{Δ}{Γ} x₁)
... | no ¬q = contradiction (var-env-< x₁) ¬q
preserve-subst {T} {S} {Γ} {Δ} {e} {s} (TVar{n} x) j'
| in-Γ x₁ x₂
with n Data.Nat.≟ length Δ
... | yes p
rewrite (↑k,q[↑l,c[s]]≡↑l+k,c[s]{ -[1+ 0 ]}{+[1+ length Δ ]}{length Δ}{0}{s} (+≤+ n≤sucn) z≤n)
| p
| (env-type-equiv x)
= ext{Γ}{Δ}{[]} j'
... | no ¬p
with n <ᴺ? length Δ
... | yes q = contradiction x₁ (<⇒≱ q)
... | no ¬q rewrite (sym (minus-1{n}{length Δ}{≤∧≢⇒< x₁ (≢-sym ¬p)}))
= TVar (ext-front{Δ = Δ} (env-pred{Γ}{S}{T}{gt = ≢-sym (<⇒≢ (m>n⇒m∸n≥1 (≤∧≢⇒< x₁ (≢-sym ¬p))))} x₂))
preserve-subst {T} {S} {Γ} {Δ} {Abs e'} {s} (TAbs{T₁ = T₁}{T₂} j) j'
with preserve-subst{T₂}{S}{Γ}{T₁ ∷ Δ}{e'}{s} j j'
... | w
rewrite (↑ᵏ[↑ˡ[s]]≡↑ᵏ⁺ˡ[s]{+ 1}{+[1+ length Δ ]}{0}{s} (+≤+ z≤n))
| (length[A∷B]≡suc[length[B]]{lzero}{Ty}{T₁}{Δ})
| (n+1≡sucn{length Δ})
= TAbs w
preserve-subst (TApp j j₁) j' = TApp (preserve-subst j j') (preserve-subst j₁ j')
-- preservation theorem, i.e. a well-typed expression reduces to a well-typed expression
preserve : {T : Ty} {Γ : Env} (e e' : Exp) (j : Γ ⊢ e ∶ T) (r : e ⇒ e') → Γ ⊢ e' ∶ T
preserve (App s₁ s₂) .(App _ s₂) (TApp j j') (ξ-App1{e₁' = s₁'} r) = TApp (preserve s₁ s₁' j r) j' -- IH on inner reduction
preserve (App s₁ s₂) .(App s₁ _) (TApp j j') (ξ-App2{e' = s₂'} x r) = TApp j (preserve s₂ s₂' j' r)
preserve (App (Abs e) s') .(↑⁻¹[ [ 0 ↦ ↑¹[ s' ] ] e ]) (TApp (TAbs j) j') (β-App x) = preserve-subst{Δ = []} j j'
-----------------------------------------------------------------------------------------------------------
-- swap-subst lemma: swap the position of a type in an environment (did not need this after all)
-- aux. calculations
length-≡ : {Δ ∇ Γ : Env} {S : Ty} → ℕ.suc (length Δ +ᴺ (length Γ +ᴺ length ∇)) ≡ length (Δ ++ S ∷ ∇ ++ Γ)
length-≡ {Δ} {∇} {Γ} {S}
rewrite (length[A++B]≡length[A]+length[B]{lzero}{Ty}{Δ}{S ∷ ∇ ++ Γ})
| (length[A++B]≡length[A]+length[B]{lzero}{Ty}{∇}{Γ})
| (+-suc (length Δ) (length ∇ +ᴺ length Γ))
| (+-comm (length ∇) (length Γ))
= refl
length-≡' : {Δ ∇ : Env} {S : Ty} → length (Δ ++ ∇ ++ S ∷ []) ≡ length Δ +ᴺ ℕ.suc (length ∇)
length-≡' {Δ} {∇} {S}
rewrite (length[A++B]≡length[A]+length[B]{lzero}{Ty}{Δ}{∇ ++ S ∷ []})
| (length[A++B]≡length[A]+length[B]{lzero}{Ty}{∇}{S ∷ []})
| (+-suc (length ∇) (0))
| (+-identityʳ (length ∇))
= refl
length-≡'' : {Δ ∇ : Env} {S : Ty} → length (Δ ++ ∇ ++ S ∷ []) ≡ ℕ.suc (length ∇ +ᴺ length Δ)
length-≡'' {Δ} {∇} {S}
rewrite (cong (ℕ.suc) (+-comm (length ∇) (length Δ)))
| (sym (+-suc (length Δ) (length ∇)))
= length-≡'{Δ}{∇}{S}
--- why the "r"? we have to "remember" where S was
--- cannot substitute its position for zero, since ↑ would increase that
--- cannot do ↑ and then substitute its position for zero, since (position - 1) would be affected as well
--- fix: cache in unreachable variable r
swap-subst : {T S : Ty} {Γ Δ ∇ : Env} {e : Exp} {r : ℕ} {gt : r >ᴺ ℕ.suc (length Δ +ᴺ (length Γ +ᴺ length ∇))}
→ (Δ ++ (S ∷ ∇) ++ Γ) ⊢ e ∶ T
→ (Δ ++ ∇ ++ (S ∷ Γ)) ⊢ [ r ↦ Var (length Δ +ᴺ length ∇) ] ↑[ ℕ.suc (length Δ) , length Δ +ᴺ length ∇ ] -[1+ 0 ] [ [ length Δ ↦ Var r ] e ] ∶ T
swap-subst {(Fun T₁ T₂)} {S} {Γ} {Δ} {∇} {(Abs e)} {r} {gt} (TAbs j)
rewrite (↑¹-var{length Δ +ᴺ length ∇})
| (n+1≡sucn{r})
= TAbs (swap-subst{T₂}{S}{Γ}{T₁ ∷ Δ}{∇}{e}{ℕ.suc r}{s≤s gt} j)
swap-subst {T} {S} {Γ} {Δ} {∇} {(App e e₁)} {r} {gt} (TApp{T₁ = T₁}{T₂} j j₁) = TApp (swap-subst{Fun T₁ T₂}{S}{Γ}{Δ}{∇}{e}{r}{gt} j) (swap-subst{T₁}{S}{Γ}{Δ}{∇}{e₁}{r}{gt} j₁)
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j)
with extract{Δ} {(S ∷ ∇) ++ Γ} {T} {y} j
| y <ᴺ? (foldr (λ _ → ℕ.suc) 0 Δ)
| Data.Nat._≟_ y (length Δ)
-- y ∈ Δ
... | in-Δ x | yes p | yes q = contradiction q (<⇒≢ p)
... | in-Δ x | yes p | no ¬q
rewrite (↑[]-var-refl-<{ℕ.suc (length Δ)}{length Δ +ᴺ length ∇}{y}{ -[1+ 0 ] }{[k<x]⇒[k<sucx] p})
| (var-subst-refl{r}{y}{≢-sym (<⇒≢ (a<b≤c⇒a<c p (≤-trans (n≤m⇒n≤sucm (m≤m+n (length Δ) (length Γ +ᴺ length ∇))) (<-trans (s≤s ≤-refl) gt))) )}
{Var (foldr (λ _ → ℕ.suc) 0 Δ +ᴺ foldr (λ _ → ℕ.suc) 0 ∇)})
= TVar (ext-behind x)
... | in-Δ x | no ¬p | _ = contradiction (var-env-< x) ¬p
-- y ∈ (S ∷ ∇ ++ Γ)
... | in-Γ ge x | p | q
with extract{S ∷ ∇} {Γ} {T} {y ∸ length Δ} x
--- y ∈ (S ∷ ∇)
---- y @ S
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j) | in-Γ ge x | p | yes q' | in-Δ x'
rewrite (↑[]-var-refl->{ℕ.suc (length Δ)}{length Δ +ᴺ length ∇}{r}{ -[1+ 0 ] }{<-trans (s≤s (n+m≤n+q+m {length Δ} {length ∇} {length Γ})) gt})
| (subst-trivial{r} {Var (length Δ +ᴺ length ∇)})
| q'
| (env-type-equiv j)
= TVar (swap-type{Δ}{∇}{Γ}{S} (env-type-equiv-j (env-type-equiv j) j))
---- y ∈ ∇
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j) | in-Γ ge x | p | no ¬q' | in-Δ x'
rewrite (↑[]-var-shift{ℕ.suc (length Δ)}{length Δ +ᴺ length ∇}{y}{ -[1+ 0 ]} (≤∧≢⇒< ge (≢-sym ¬q')) (≤-trans (≤-pred (m≤n∧m≡q⇒q≤n (m≤n⇒m+o≤n+o{o = length Δ} (var-env-< x'))
(cong ℕ.suc (m∸n+n≡m ge)))) (≤-refl-+-comm{length ∇}{length Δ})))
| length-≡ {Δ} {∇} {Γ} {S}
| (var-subst-refl {r} {∣ y ⊖ 1 ∣} {≢-sym (<⇒≢ (<-trans (n>0⇒n>∣n⊖1∣ (≤-trans (s≤s z≤n) ((≤∧≢⇒< ge (≢-sym ¬q'))))) (<-trans (var-env-< j) gt)))} {Var (length Δ +ᴺ length ∇)})
| sym (minus-1{y}{length Δ}{≤∧≢⇒< ge (≢-sym ¬q')} )
= TVar (ext-front{Δ = Δ}(ext-behind{∇}{S ∷ Γ}{T} (env-pred{gt = ≢-sym (<⇒≢ (m>n⇒m∸n≥1{y}{length Δ} ((≤∧≢⇒< ge (≢-sym ¬q')))))} x')))
--- y ∈ Γ
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j) | in-Γ ge x | no ¬p | yes q | in-Γ ge' x'
= contradiction q (≢-sym (<⇒≢ (m∸n≢0⇒n<m{y}{length Δ} (≢-sym (<⇒≢ (a<b≤c⇒a<c (s≤s z≤n) (≤-trans (length[A∷B]≥1{lzero}{Ty}{S}{∇}) ge')))))))
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j) | in-Γ ge x | yes p | yes q | in-Γ ge' x' = contradiction q (<⇒≢ p)
swap-subst {T} {S} {Γ} {Δ} {∇} {Var y} {r} {gt} (TVar j) | in-Γ ge x | p | no ¬q' | in-Γ ge' x'
rewrite (↑[]-var-refl->{ℕ.suc (length Δ)}{length Δ +ᴺ length ∇}{y}{ -[1+ 0 ]}{a<b≤c⇒a<c (s≤s (≤-refl-+-comm{length Δ}{length ∇})) (m≤n∧n≡q⇒m≤q (m≤n⇒m+o≤n+o{o = length Δ} ge') (m∸n+n≡m ge))})
| (var-subst-refl{r}{y}{≢-sym (<⇒≢ (<-trans (var-env-< j) (m≤n∧m≡q⇒q≤n gt (cong ℕ.suc (length-≡{Δ}{∇}{Γ}{S})))))}{ Var (foldr (λ _ → ℕ.suc) 0 Δ +ᴺ foldr (λ _ → ℕ.suc) 0 ∇)})
with ext-front{((y ∸ (length Δ)) ∸ ℕ.suc (length ∇))}{Γ}{Δ ++ ∇ ++ (S ∷ [])}{T}
... | w rewrite (∸-+-assoc y (length Δ) (ℕ.suc (length ∇)))
| (sym (length-≡'{Δ}{∇}{S}))
| (m∸n+n≡m{y}{length (Δ ++ ∇ ++ S ∷ [])} (m≤n∧m≡q⇒q≤n (m≤n∧n≡q⇒m≤q (m≤n⇒m+o≤n+o{o = length Δ} ge') (m∸n+n≡m ge)) (sym (length-≡''{Δ}{∇}{S}))))
| (A++B++D∷[]++C≡A++B++D∷C{lzero}{Ty}{Δ}{∇}{Γ}{S})
= TVar (w x')
|
module Giggle
#PACKAGES USED
using Combinatorics
using DataFrames
using Distances
using Gurobi
using JuMP
using MathOptInterface
using Random
using UUIDs
using XLSX
using Query
#STRUCTS USED
struct vtx
#IDENTIFIERS
name::String
type::String
#PARAMS
x::Float64 #x coor
y::Float64 #y coor
MAX::Float64 #max inventory level
MIN::Float64 #min inventory level
START::Float64 #starting inventory level
#COSTS
h::Float64
end
struct veh
#IDENTIFIERS
name::String
type::String
#PARAMS
cover::Vector{Int64}
Q::Int64
start::Int64
freq::Int64
#COSTS
varq::Float64
vardq::Float64
vard::Float64
fix::Float64
end
struct col
#QUANTITY RELATED
q::JuMP.Containers.DenseAxisArray
u::JuMP.Containers.DenseAxisArray
v::JuMP.Containers.DenseAxisArray
#0-1 VARS
p::JuMP.Containers.DenseAxisArray
y::JuMP.Containers.DenseAxisArray
z::JuMP.Containers.DenseAxisArray
end
struct dval
λ::JuMP.Containers.DenseAxisArray
δ::JuMP.Containers.DenseAxisArray
end
struct stabilizer
slackCoeff::Float64
slackLim::JuMP.Containers.DenseAxisArray
surpCoeff::Float64
surpLim::JuMP.Containers.DenseAxisArray
end
struct bound
idx::NamedTuple
val::Int64
end
struct node
#IDENTIFIERS
parent::UUID
self::UUID
#BASE
base::NamedTuple
bounds::Vector{bound}
#DYNAMIC SETS
columns::Vector{col}
#SUPPORT STRUCTURE
stblzr::stabilizer
status::Vector{String}
end
#CONSTANTS USED THROUGHOUT
const M = 9999999
const rng = MersenneTwister(1234)
#function to generate base data
function base(path::String)
#READ WORKSHEET
xf = XLSX.readxlsx(path)
#DATAFRAME DICT
data = Dict{Symbol,DataFrame}()
#TURN SHEETS INTO DATAFRAME
for sheets in XLSX.sheetnames(xf)
#TRANSFORM TABLE INTO DATAFRAME
df = DataFrame(XLSX.gettable(xf[sheets])...)
#DEFINE THE NAME FROM THE WORKSHEET
data[Symbol(sheets)] = df
end
#PROCESS VERTICES
V = Dict{Int64,vtx}()
#iterate over each row of vertices
for v in eachrow(data[:vertices])
#direct input into struct vtx
V[v.id] = vtx(
v.name,v.type,
v.x,v.y,
v.MAX,v.MIN,v.START,
v.h
)
end
#PROCESS VEHICLES
K = Dict{Int64,veh}()
#INITIATE INDEXING FOR VEHICLES
idx = 0
#iterate over each row of vehicles
for v in eachrow(data[:vehicles])
#convert string into vector{Int64}
v.cover = parse.(Int64,split(v.cover))
v.loadp = parse.(Int64,split(v.loadp))
for f in v.loadp
idx += 1
K[idx] = veh(
v.name,v.type,
v.cover,v.Q,f,v.freq,
v.varq,v.vardq,v.vard,v.fix
)
end
end
#EXTRACT T SET
t_start = data[:periods].start[end] #STARTING MONTH
t_end = data[:periods].start[end] + data[:periods].T[end] - 1 #FINAL MONTH (CALCULATE BASED ON DURATION)
T = [i for i in t_start:t_end]
#EXTRACT DEMANDS
d = JuMP.Containers.DenseAxisArray{Float64}(undef,collect(keys(V)),T)
for i in keys(V)
row = @from x in data[:demands] begin
@where x.point == i
@select x
@collect DataFrame
end
for t in T
d[i,t] = row[2:end-3][t][end]
end
end
#GENERATE DISTANCE MATRIX (FROM V)
earth = 6378.137
dist = JuMP.Containers.DenseAxisArray{Float64}(undef,collect(keys(V)),collect(keys(V)))
for i in keys(V), j in keys(V)
if i != j
dist[i,j] = haversine([V[i].x,V[i].y],[V[j].x,V[j].y],earth)
else
dist[i,j] = M
end
end
#GENERATE G MATRIX
G = JuMP.Containers.DenseAxisArray{Float64}(undef,collect(keys(V)),collect(keys(K)))
G .= M
for k in keys(K)
seed = K[k].cover[findmin([dist[K[k].start,j] for j in K[k].cover])[2]]
for x in K[k].cover
if x != seed
G[x,k] = (min(dist[K[k].start,x]+dist[x,seed]+dist[seed,K[k].start] , dist[K[k].start,seed]+dist[seed,x]+dist[x,K[k].start]) - (dist[K[k].start,seed] + dist[seed,K[k].start]))
else
G[x,k] = 0
end
end
end
#GENERATE DELIVERY COST MATRIX
deli = JuMP.Containers.DenseAxisArray{Float64}(undef,collect(keys(V)),collect(keys(K)))
deli .= M
for k in keys(K)
for i in K[k].cover
deli[i,k] = K[k].varq + K[k].vardq * dist[K[k].start,i]
end
end
#DATA GENERATION STATUS
trucks = sort(collect(keys(filter(p -> last(p).type == "truck",K))))
ships = sort(collect(keys(filter(p -> last(p).type == "ship",K))))
trains = sort(collect(keys(filter(p -> last(p).type == "train",K))))
println("there are $(length(K)) vehicle data points with the composition:")
println("$(length(trucks)) truck(s) for index $(first(trucks)) to $(last(trucks))")
println("$(length(ships)) ship(s) for index $(first(ships)) to $(last(ships))")
println("$(length(trains)) train(s) for index $(first(trains)) to $(last(trains))")
println()
println("there are $(length(V)) vertex data points with the composition:")
println("$(length(collect(keys(filter(p -> last(p).type == "source",V))))) source(s)")
println("$(length(collect(keys(filter(p -> last(p).type == "point",V))))) point(s)")
return (K=K,V=V,T=T,d=d,dist=dist,G=G,deli=deli)
end
#function to initialize stabilizer
function initStab(dt::NamedTuple,vSlCoeff::Float64,vSurpCoeff::Float64)
slackCoeff = vSlCoeff
surpCoeff = vSurpCoeff
slackLim = JuMP.Containers.DenseAxisArray{Float64}(undef,keys(dt.V),dt.T)
surpLim = JuMP.Containers.DenseAxisArray{Float64}(undef,keys(dt.V),dt.T)
for i in keys(dt.V),t in dt.T
slackLim[i,t] = abs(dt.d[i,t])
surpLim[i,t] = abs(dt.d[i,t])
end
return stabilizer(slackCoeff,slackLim,surpCoeff,surpLim)
end
#function to create root node
function root(dt::NamedTuple;slCoeff::Float64,suCoeff::Float64)
id = uuid1(rng)
root = node(
id,id,
dt,Vector{bound}(),
Vector{col}(),
initStab(dt,slCoeff,suCoeff),
["UNVISITED"]
)
return root
end
#function to update stabilizer
function updateStab(stabilizers::stabilizer,param::Float64)
for i in first(stabilizers.slackLim.axes),t in last(stabilizers.slackLim.axes)
stabilizers.slackLim[i,t] = param * stabilizers.slackLim[i,t]
if stabilizers.slackLim[i,t] < 1
stabilizers.slackLim[i,t] = 0
end
end
for i in first(stabilizers.surpLim.axes),t in last(stabilizers.surpLim.axes)
stabilizers.surpLim[i,t] = param * stabilizers.surpLim[i,t]
if stabilizers.surpLim[i,t] < 1
stabilizers.surpLim[i,t] = 0
end
end
return stabilizers
end
#===============MASTER FUNCTION SET=====#
function master(n::node;silent::Bool,env::Gurobi.Env)
#build and bound
mp = buildMaster(n;silent=silent,env=env)
#solve the mp
optimize!(mp)
return mp
end
function buildMaster(n::node;silent::Bool,env::Gurobi.Env)
#COLUMN LABELING
R = Dict(1:length(n.columns) .=> n.columns)
#MODEL DECLARATION
mp = Model(optimizer_with_attributes(() -> Gurobi.Optimizer(env)))
if silent
set_silent(mp)
end
#VARIABLE DECLARATION
θ = @variable(mp, θ[keys(R),keys(n.base.K),n.base.T] >= 0) #LABEL 20
I = @variable(mp, I[keys(n.base.V),vcat(first(n.base.T)-1,n.base.T)]) #NO BOUNDS HERE (MIN/MAX enforced by constraints below)
#SLACK SURPLUS DECLARATION
@variable(mp, 0 <= slack_I[i=keys(n.base.V),t=n.base.T] <= n.stblzr.slackLim[i,t])
@variable(mp, 0 <= surp_I[i=keys(n.base.V),t=n.base.T] <= n.stblzr.surpLim[i,t])
#CONSTRAINT 17 & 18 + STARTING INVENTORY
@constraint(
mp, λ[i=keys(n.base.V),t=n.base.T],
I[i,t-1] + sum(R[r].q[i,k,t] * θ[r,k,t] for r in keys(R),k in keys(n.base.K)) + slack_I[i,t] - surp_I[i,t] == n.base.d[i,t] + I[i,t]
)
@constraint(mp, [i=keys(n.base.V),t=n.base.T], n.base.V[i].MIN <= I[i,t] <= n.base.V[i].MAX)
@constraint(mp, [i=keys(n.base.V)], I[i,first(n.base.T)-1] == n.base.V[i].START)
#CONVEXITY CONSTRAINT LABEL 19
@constraint(mp, δ[k=keys(n.base.K),t=n.base.T], sum(θ[r,k,t] for r in keys(R)) <= n.base.K[k].freq)
#BOUND GENERATOR
#empty
#OBJECTIVE FUNCTION
begin
@objective(mp, Min,
sum(
(
n.base.K[k].vard * g(R[r].y[:,k,t];n=n,k=k) +
sum(n.base.deli[i,k] * R[r].u[i,k,t] for i in n.base.K[k].cover) +
n.base.K[k].fix * R[r].z[n.base.K[k].start,k,t]
) * θ[r,k,t] for r in keys(R),k in keys(n.base.K),t in n.base.T
) +
sum(n.base.V[i].h * I[i,t] for i in keys(n.base.V),t in n.base.T) +
sum(n.stblzr.slackCoeff * slack_I[i,t] for i in keys(n.base.V),t in n.base.T) -
sum(n.stblzr.surpCoeff * surp_I[i,t] for i in keys(n.base.V),t in n.base.T)
)
end
return mp
end
function getDual(mp::Model)
return dval(dual.(mp.obj_dict[:λ]),dual.(mp.obj_dict[:δ]))
end
#===============MASTER FUNCTION SET=====#
#===============SUB FUNCTION SET=====#
function sub(n::node,duals::dval;silent::Bool,env::Gurobi.Env)
#build and bound
sp = buildSub(n,duals;silent=silent,env=env)
#solve the sp
optimize!(sp)
return sp
end
function buildSub(n::node,duals::dval;silent::Bool,env::Gurobi.Env)
#MODEL DECLARATION
sp = Model(optimizer_with_attributes(() -> Gurobi.Optimizer(env)))
if silent
set_silent(sp)
end
#VARIABLE DECLARATION
q = @variable(sp, q[keys(n.base.V),keys(n.base.K),n.base.T],Int)
u = @variable(sp, u[keys(n.base.V),keys(n.base.K),n.base.T] >= 0,Int)
v = @variable(sp, v[keys(n.base.V),keys(n.base.K),n.base.T] >= 0,Int)
y = @variable(sp, y[keys(n.base.V),keys(n.base.K),n.base.T], Bin)
z = @variable(sp, z[keys(n.base.V),keys(n.base.K),n.base.T], Bin)
p = @variable(sp, p[keys(n.base.V),keys(n.base.K),n.base.T], Bin)
#CONSTRAINTS
@constraint(sp, [k=keys(n.base.K),t=n.base.T], sum(q[i,k,t] for i in n.base.K[k].cover) == 0)
@constraint(sp, [k=keys(n.base.K),i=n.base.K[k].cover,t=n.base.T], q[i,k,t] == u[i,k,t] - v[i,k,t])
@constraint(sp, [k=keys(n.base.K),i=n.base.K[k].cover,t=n.base.T], u[i,k,t] <= n.base.K[k].Q * y[i,k,t])
@constraint(sp, [k=keys(n.base.K),i=n.base.K[k].cover,t=n.base.T], v[i,k,t] <= n.base.K[k].Q * z[i,k,t])
@constraint( #everything outside the possible starting point is zero
sp, [k=keys(n.base.K),t=n.base.T],
sum(z[i,k,t] for i in filter(x -> x != n.base.K[k].start,n.base.K[k].cover)) == 0
)
@constraint( #the starting point can be zero or one
sp, [k=keys(n.base.K),t=n.base.T],
z[n.base.K[k].start,k,t] <= 1
)
@constraint(sp, [k=keys(n.base.K),i=n.base.K[k].cover,t=n.base.T], p[i,k,t] == y[i,k,t] + z[i,k,t])
#BOUND GENERATOR SUBPROBLEM
#empty
#OBJECTIVE FUNCTION
begin
@objective(sp,Min,
sum(
sum(n.base.K[k].vard * n.base.G[i,k] * y[i,k,t] for i in n.base.K[k].cover) +
sum(n.base.deli[i,k] * u[i,k,t] for i in n.base.K[k].cover) +
n.base.K[k].fix * z[n.base.K[k].start,k,t] for k in keys(n.base.K),t in n.base.T
) -
sum(sum(q[i,k,t] * duals.λ[i,t] for i in n.base.K[k].cover) for k in keys(n.base.K),t in n.base.T) -
sum(duals.δ[k,t] for k in keys(n.base.K),t in n.base.T)
)
end
return sp
end
function getCol(sp::Model)
#EXTRACT VARIABLES
q = value.(sp.obj_dict[:q])
u = value.(sp.obj_dict[:u])
v = value.(sp.obj_dict[:v])
p = value.(sp.obj_dict[:p])
y = value.(sp.obj_dict[:y])
z = value.(sp.obj_dict[:z])
return col(q,u,v,p,y,z)
end
function realPrice(n::node,duals::dval;column::col)
#CALCULATE REAL PRICING
begin
price = (
sum(
n.base.K[k].vard * g(column.y[:,k,t];n=n,k=k) +
sum(n.base.deli[i,k] * column.u[i,k,t] for i in n.base.K[k].cover) +
n.base.K[k].fix * column.z[n.base.K[k].start,k,t] for k in keys(n.base.K),t in n.base.T
) -
sum(sum(column.q[i,k,t] * duals.λ[i,t] for i in n.base.K[k].cover) for k in keys(n.base.K),t in n.base.T) -
sum(duals.δ[k,t] for k in keys(n.base.K),t in n.base.T)
)
end
return price
end
#===============SUB FUNCTION SET=====#
#===============SUPPORT ESTIMATOR FOR TSP===#
function g(y::JuMP.Containers.DenseAxisArray;n::node,k::Int64)
x = twoOpt([i for i in first(y.axes) if y[i] == 1.0];n=n,k=k)
return x
end #CLEARED
function twoOpt(p::Vector{Int64};n::node,k::Int64) #INPUT IS A COLLECTION OF POINTS [15 22 23 24] ORDER IS ARBITRARY
if length(p) > 0
#DETERMINE START AND NODE
start = n.base.K[k].start #passing of k stops here
nodes = vcat(start,p)
#DO NEAREST NEIGHBORHOOD construction
nnPath = nn(start,nodes;n=n)
#RETURN ITS IMPROVEMENT
return trav(improve(nnPath;n=n);n=n)
else
return 0
end
end #CLEARED
function nn(start::Int64,nodes::Vector{Int64};n::node)
tour = [start]
unvisited = deepcopy(nodes)
current_index = 1
terminate = false
while !terminate
current_position = tour[current_index]
#REMOVE CURRENT NODES AFTER FIRST MOVE
to_remove = findfirst(x -> x==current_position,unvisited)
splice!(unvisited,to_remove)
#if no more unvisited, terminate
if length(unvisited) == 0
push!(tour,first(tour))
terminate = true
else
terpilih = selectNn(current_position,unvisited;n=n)
push!(tour,terpilih)
end
#UPDATE INDEX
current_index += 1
end
return tour
end #CLEARED
function selectNn(current::Int64,unvisited::Vector{Int64};n::node)
chosen = unvisited[1]
for j in unvisited
if n.base.dist[current,chosen] > n.base.dist[current,j]
chosen = j #swap if the distance is smaller
end
end
return chosen
end #CLEARED
function improve(p::Vector{Int64};n::node) #INPUT: NNPATH i.e. [15 27 28 29 15]
a = view(p,2:length(p)-1,1)
for i in 1:length(a) - 1
for k in i+1:length(a) - 1
best = trav(p;n=n)
reverse!(view(a,i:k,1))
if trav(p;n=n) >= best
reverse!(view(a,i:k,1))
end
end
end
return p
end #CLEARED
function trav(p::Vector{Int64};n::node) #INPUT: NNPATH i.e. [15 27 28 29 15]
return sum(n.base.dist[p[i],p[i+1]] for i in 1:length(p)-1)
end
#===============SUPPORT ESTIMATOR FOR TSP===#
function check(model::Model)
return value(sum(model.obj_dict[:slack_I])) + value(sum(model.obj_dict[:surp_I]))
end #CLEARED
function colGen(n::node,maxCG::Float64;silent::Bool,env::Gurobi.Env,track::Bool)
#INITIALIZE
terminate = false
iter = 0
while !terminate
#GENERATE MASTER PROBLEM
mp = master(n;silent=silent,env=env)
#NODE PROCESSING STATUS CHECK
if has_values(mp) && has_duals(mp)
if track #tracking status
println("obj: $(objective_value(mp))")
end
#EXTRACT DUAL
duals = getDual(mp)
#GENERATE SUBPROBLEM
sp = sub(n,duals;silent=silent,env=env)
#EXTRACT COLS & PRICE
cols = getCol(sp)
price = realPrice(n,duals;column=cols)
slacksurp = check(mp)
if track #tracking status
println(price)
println("nilai slack surp $slacksurp")
end
#NEGATIVE COLUMN CHECK
if isapprox(price, 0, atol = 1e-8) || price > 0
if isapprox(slacksurp, 0, atol = 1e-8)
terminate = true
push!(n.status,"EVALUATED")
if track
println("EVALUATED")
end
else
updateStab(n.stblzr,0.4)
push!(n.status,"STABILIZED")
if track
println("STABILIZED")
end
end
else
push!(n.columns,cols)
push!(n.status,"ADD_COLUMN")
if track
println("ADD_COLUMN")
end
end
#UPDATE ITER
iter += 1
#MAXIMUM ITER
if iter >= maxCG
terminate = true
pop!(n.columns)
push!(n.status,"EVALUATED-TIME OUT")
if track
println("EVALUATED-TIME OUT")
end
end
else
terminate = true
push!(n.status,"NO_SOLUTION")
if track
println("NO_SOLUTION")
end
end
end
if n.status[end] == "EVALUATED" || n.status[end] == "EVALUATED-TIME OUT"
println("NODE $(n.self) FINISHED.")
else
println("NODE $(n.self) FAILED.")
end
end
export base
export root
export master
export buildMaster
export getDual
export sub
export buildSub
export getCol
export realPrice
export colGen
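#ILLUSTRATIVE USAGE SKETCH -- the workbook name and parameter values below are
#assumptions for illustration only, not defaults shipped with this module:
#   dt  = base("data.xlsx")                                    #read vertices, vehicles, periods and demands
#   n   = root(dt; slCoeff = 1000.0, suCoeff = 1000.0)         #build the root node with stabilizer coefficients
#   env = Gurobi.Env()                                         #shared Gurobi environment for master and sub problems
#   colGen(n, 100.0; silent = true, env = env, track = false)  #run column generation, at most 100 iterations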
end
|
(******************************************************************************)
(* Submission: "The Interchange Law: A Principle of Concurrent Programming" *)
(* Authors: Tony Hoare, Bernard Möller, Georg Struth, and Frank Zeyda *)
(* File: Partiality.thy *)
(******************************************************************************)
(* LAST REVIEWED: 11 July 2017 *)
section {* Partiality *}
theory Partiality
imports Preliminaries ICL
begin
subsection {* Type Definition *}
text \<open>We define a datatype \<open>'a partial\<close> that adds a distinct \<open>\<bottom>\<close> and \<open>\<top>\<close> to a type \<open>'a\<close>.\<close>
datatype 'a partial =
Bot | Value "'a" | Top
text \<open>The notation \<open>\<bottom>\<close> is introduced for the constructor @{const Bot}.\<close>
adhoc_overloading global_bot Bot
text \<open>The notation \<open>\<top>\<close> is introduced for the constructor @{const Top}.\<close>
adhoc_overloading global_top Top
subsection {* Proof Support *}
text \<open>Attribute used to collect definitional laws for operators.\<close>
named_theorems partial_ops "definitional laws for operators on partial values"
text \<open>Tactic that facilitates proofs about @{type partial} values.\<close>
lemma split_partial_all:
"(\<forall>x::'a partial. P x) = (P Bot \<and> P Top \<and> (\<forall>x::'a. P (Value x)))"
apply (safe; simp?)
apply (case_tac x)
apply (simp_all)
done
lemma split_partial_ex:
"(\<exists>x::'a partial. P x) = (P Bot \<or> P Top \<or> (\<exists>x::'a. P (Value x)))"
apply (safe; simp?)
apply (case_tac x)
apply (simp_all) [3]
apply (auto)
done
lemmas split_partial =
split_partial_all
split_partial_ex
method partial_tac = (
(atomize (full))?,
(simp add: split_partial partial_ops),
(clarsimp; simp?)?)
subsection {* Monadic Constructors *}
text \<open>Note that we have to ensure strictness in both \<open>\<bottom>\<close> and \<open>\<top>\<close>.\<close>
primrec partial_bind ::
"'a partial \<Rightarrow> ('a \<Rightarrow> 'b partial) \<Rightarrow> 'b partial" where
"partial_bind Bot f = Bot" |
"partial_bind (Value x) f = f x" |
"partial_bind Top f = Top"
adhoc_overloading bind partial_bind
definition partial_return :: "'a \<Rightarrow> 'a partial" where
[simp]: "partial_return x = Value x"
adhoc_overloading return partial_return
subsection {* Generic Lifting *}
text \<open>We use the constant below for ad hoc overloading to avoid ambiguities.\<close>
consts lift_partial :: "'a \<Rightarrow> 'b" ("_\<up>\<^sub>p" [1000] 1000)
fun ulift_partial :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a partial \<Rightarrow> 'b partial)" where
"ulift_partial f Bot = Bot" |
"ulift_partial f (Value x) = Value (f x)" |
"ulift_partial f Top = Top"
fun blift_partial ::
"('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> ('a partial \<Rightarrow> 'b partial \<Rightarrow> 'c partial)" where
"blift_partial f Bot Bot = Bot" |
"blift_partial f Bot (Value y) = Bot" |
"blift_partial f Bot Top = Bot" | -- \<open>@{const Bot} dominates.\<close>
"blift_partial f (Value x) Bot = Bot" |
"blift_partial f (Value x) (Value y) = Value (f x y)" |
"blift_partial f (Value x) Top = Top" |
"blift_partial f Top Bot = Bot" | -- \<open>@{const Bot} dominates.\<close>
"blift_partial f Top (Value y) = Top" |
"blift_partial f Top Top = Top"
adhoc_overloading lift_partial ulift_partial
adhoc_overloading lift_partial blift_partial
subsection {* Lifted Operators *}
text \<open>What about relational operators? How do we lift those? [TODO]\<close>
paragraph {* Addition and Subtraction *}
definition plus_partial :: "'a::plus partial binop" (infixl "+\<^sub>p" 70) where
"(op +\<^sub>p) = (op +)\<up>\<^sub>p"
definition minus_partial :: "'a::minus partial binop" (infixl "-\<^sub>p" 70) where
"(op -\<^sub>p) = (op -)\<up>\<^sub>p"
paragraph {* Multiplication and Division *}
definition times_partial :: "'a::times partial binop" (infixl "*\<^sub>p" 70) where
"(op *\<^sub>p) = (op *)\<up>\<^sub>p"
definition divide_partial :: "'a::{divide, zero} partial binop" (infixl "'/\<^sub>p" 70) where
"x /\<^sub>p y = do {x' \<leftarrow> x; y' \<leftarrow> y; if y' \<noteq> 0 then return (x' div y') else \<bottom>}"
paragraph {* Union and Disjoint Union *}
definition union_partial :: "'a set partial binop" (infixl "\<union>\<^sub>p" 70) where
"(op \<union>\<^sub>p) = (op \<union>)\<up>\<^sub>p"
definition disjoint_union :: "'a set partial binop" (infixl "\<oplus>\<^sub>p" 70) where
"x \<oplus>\<^sub>p y = do {x' \<leftarrow> x; y' \<leftarrow> y; if x' \<inter> y' = {} then return (x' \<union> y') else \<bottom>}"
paragraph {* Proof Support *}
declare plus_partial_def [partial_ops]
declare minus_partial_def [partial_ops]
declare times_partial_def [partial_ops]
declare divide_partial_def [partial_ops]
declare union_partial_def [partial_ops]
declare disjoint_union_def [partial_ops]
subsection {* Ordering Relation *}
primrec partial_ord :: "'a partial \<Rightarrow> nat" where
"partial_ord Bot = 0" |
"partial_ord (Value x) = 1" |
"partial_ord Top = 2"
instantiation partial :: (ord) ord
begin
fun less_eq_partial :: "'a partial \<Rightarrow> 'a partial \<Rightarrow> bool" where
"(Value x) \<le> (Value y) \<longleftrightarrow> x \<le> y" |
"a \<le> b \<longleftrightarrow> (partial_ord a) \<le> (partial_ord b)"
fun less_partial :: "'a partial \<Rightarrow> 'a partial \<Rightarrow> bool" where
"(Value x) < (Value y) \<longleftrightarrow> x < y" |
"a < b \<longleftrightarrow> (partial_ord a) < (partial_ord b)"
instance ..
end
subsection {* Class Instantiations *}
subsubsection {* Preorder *}
instance partial :: (preorder) preorder
apply (intro_classes)
-- {* Subgoal 1 *}
apply (partial_tac)
apply (rule less_le_not_le)
-- {* Subgoal 2 *}
apply (partial_tac)
-- {* Subgoal 3 *}
apply (partial_tac)
apply (erule order_trans)
apply (assumption)
done
subsubsection {* Partial Order *}
instance partial :: (order) order
apply (intro_classes)
apply (partial_tac)
done
subsubsection {* Linear Order *}
instance partial :: (linorder) linorder
apply (intro_classes)
apply (partial_tac)
done
subsubsection {* Lattice *}
instantiation partial :: (type) bot
begin
definition bot_partial :: "'a partial" where
[partial_ops]: "bot_partial = Bot"
instance ..
end
instantiation partial :: (type) top
begin
definition top_partial :: "'a partial" where
[partial_ops]: "top_partial = Top"
instance ..
end
instantiation partial :: (lattice) lattice
begin
fun inf_partial :: "'a partial \<Rightarrow> 'a partial \<Rightarrow> 'a partial" where
"Bot \<sqinter> Bot = Bot" |
"Bot \<sqinter> (Value y) = Bot" |
"Bot \<sqinter> Top = Bot" |
"(Value x) \<sqinter> Bot = Bot" |
"(Value x) \<sqinter> (Value y) = Value (x \<sqinter> y)" |
"(Value x) \<sqinter> Top = (Value x)" |
"Top \<sqinter> Bot = Bot" |
"Top \<sqinter> Value y = Value y" |
"Top \<sqinter> Top = Top"
fun sup_partial :: "'a partial \<Rightarrow> 'a partial \<Rightarrow> 'a partial" where
"Bot \<squnion> Bot = Bot" |
"Bot \<squnion> (Value y) = (Value y)" |
"Bot \<squnion> Top = Top" |
"(Value x) \<squnion> Bot = (Value x)" |
"(Value x) \<squnion> (Value y) = Value (x \<squnion> y)" |
"(Value x) \<squnion> Top = Top" |
"Top \<squnion> Bot = Top" |
"Top \<squnion> (Value y) = Top" |
"Top \<squnion> Top = Top"
instance
apply (intro_classes)
-- {* Subgoal 1 *}
apply (partial_tac)
-- {* Subgoal 2 *}
apply (partial_tac)
-- {* Subgoal 3 *}
apply (partial_tac)
-- {* Subgoal 4 *}
apply (partial_tac)
-- {* Subgoal 5 *}
apply (partial_tac)
-- {* Subgoal 6 *}
apply (partial_tac)
done
end
text \<open>Validation of the definition of meet and join above.\<close>
lemma partial_ord_inf_lemma [simp]:
"\<forall>a b. partial_ord (a \<sqinter> b) = min (partial_ord a) (partial_ord b)"
apply (partial_tac)
done
lemma partial_ord_sup_lemma [simp]:
"\<forall>a b. partial_ord (a \<squnion> b) = max (partial_ord a) (partial_ord b)"
apply (partial_tac)
done
subsubsection {* Complete Lattice *}
instantiation partial :: (complete_lattice) complete_lattice
begin
definition Inf_partial :: "'a partial set \<Rightarrow> 'a partial" where
[partial_ops]:
"Inf_partial xs =
(if Bot \<in> xs then Bot else
let values = {x. Value x \<in> xs} in
if values = {} then Top else Value (Inf values))"
definition Sup_partial :: "'a partial set \<Rightarrow> 'a partial" where
[partial_ops]:
"Sup_partial xs =
(if Top \<in> xs then Top else
let values = {x. Value x \<in> xs} in
if values = {} then Bot else Value (Sup values))"
instance
apply (intro_classes)
-- {* Subgoal 1 *}
apply (partial_tac)
apply (simp add: Inf_lower)
-- {* Subgoal 2 *}
apply (partial_tac)
apply (metis Inf_greatest mem_Collect_eq)
-- {* Subgoal 3 *}
apply (partial_tac)
apply (simp add: Sup_upper)
-- {* Subgoal 4 *}
apply (partial_tac)
apply (metis Sup_least mem_Collect_eq)
-- {* Subgoal 5 *}
apply (partial_tac)
-- {* Subgoal 6 *}
apply (partial_tac)
done
end
subsection {* ICL Lifting Lemmas *}
lemma iclaw_eq_lift_partial [simp]:
"iclaw (op =) seq_op par_op \<Longrightarrow>
iclaw (op =) seq_op\<up>\<^sub>p par_op\<up>\<^sub>p"
apply (unfold iclaw_def iclaw_axioms_def)
apply (partial_tac)
done
lemma preorder_less_eq_lift_partial [simp]:
"preorder (op \<le>::'a::ord relop) \<Longrightarrow>
preorder (op \<le>::'a::ord partial relop)"
apply (unfold_locales)
apply (partial_tac)
apply (meson preorder.refl)
apply (partial_tac)
apply (meson preorder.trans)
done
lemma iclaw_less_eq_lift_partial [simp]:
"iclaw (op \<le>) seq_op par_op \<Longrightarrow>
iclaw (op \<le>) seq_op\<up>\<^sub>p par_op\<up>\<^sub>p"
apply (unfold iclaw_def iclaw_axioms_def)
apply (partial_tac)
done
end
|
[STATEMENT]
lemma ty_binding_existence:
assumes "X \<in> (tyvrs_of a)"
shows "\<exists>T.(TVarB X T=a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>T. TVarB X T = a
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
X \<in> tyvrs_of a
goal (1 subgoal):
1. \<exists>T. TVarB X T = a
[PROOF STEP]
by (nominal_induct a rule: binding.strong_induct) (auto) |
/**
* @file
* @copyright defined in eos/LICENSE.txt
*/
//
#include <stdlib.h>
#include <eosio/kafka_plugin/kafka_producer.hpp>
#include <eosio/kafka_plugin/kafka_plugin.hpp>
#include <eosio/chain/eosio_contract.hpp>
#include <eosio/chain/config.hpp>
#include <eosio/chain/exceptions.hpp>
#include <eosio/chain/transaction.hpp>
#include <eosio/chain/types.hpp>
#include <fc/io/json.hpp>
#include <fc/utf8.hpp>
#include <fc/variant.hpp>
#include <chrono>
#include <boost/signals2/connection.hpp>
#include <thread>
#include <mutex>
#include <boost/thread/condition_variable.hpp>
#include <prometheus/exposer.h>
#include <prometheus/registry.h>
#include <queue>
namespace fc { class variant; }
namespace eosio {
using chain::account_name;
using chain::action_name;
using chain::block_id_type;
using chain::permission_name;
using chain::transaction;
using chain::signed_transaction;
using chain::signed_block;
using chain::transaction_id_type;
using chain::packed_transaction;
static appbase::abstract_plugin& _kafka_plugin = app().register_plugin<kafka_plugin>();
using kafka_producer_ptr = std::shared_ptr<class kafka_producer>;
class PrometheusExposer {
private:
prometheus::Exposer exposer;
std::shared_ptr<prometheus::Registry> registry;
prometheus::Family<prometheus::Gauge>& gaugeFamily;
prometheus::Gauge& blockGauge;
prometheus::Gauge& abnormalityBlockGauge;
prometheus::Gauge& blockWithPreviousTimestampGauge;
prometheus::Gauge& blockWithPreviousActionIDGauge;
public:
PrometheusExposer(const std::string& hostPort)
:
exposer({hostPort}),
registry(std::make_shared<prometheus::Registry>()),
gaugeFamily(prometheus::BuildGauge()
.Name("block_number_reached")
.Help("The last block number being exported by the Kafka plugin")
.Register(*registry)),
blockGauge(gaugeFamily.Add(
{{"name", "blockCounter"}})),
abnormalityBlockGauge(gaugeFamily.Add(
{{"name", "abnormalityBlockCounter"}})),
blockWithPreviousTimestampGauge(gaugeFamily.Add(
{{"name", "blockWithPreviousTimestamp"}})),
blockWithPreviousActionIDGauge(gaugeFamily.Add(
{{"name", "blockWithPreviousActionID"}}))
{
exposer.RegisterCollectable(registry);
}
prometheus::Gauge& getBlockGauge() {
return blockGauge;
}
prometheus::Gauge& getAbnormalityBlockGauge() {
return abnormalityBlockGauge;
}
prometheus::Gauge& getBlockWithPreviousTimestampGauge() {
return blockWithPreviousTimestampGauge;
}
prometheus::Gauge& getBlockWithPreviousActionIDGauge() {
return blockWithPreviousActionIDGauge;
}
};
class kafka_plugin_impl {
public:
kafka_plugin_impl();
~kafka_plugin_impl();
fc::optional<boost::signals2::scoped_connection> accepted_block_connection;
fc::optional<boost::signals2::scoped_connection> irreversible_block_connection;
fc::optional<boost::signals2::scoped_connection> applied_transaction_connection;
chain_plugin *chain_plug;
struct trasaction_info_st {
uint64_t block_number;
fc::time_point block_time;
chain::transaction_trace_ptr trace;
fc::variant tracesVar;
};
void consume_blocks();
void applied_transaction(const chain::transaction_trace_ptr &);
void process_applied_transaction(const trasaction_info_st &);
void _process_applied_transaction(const trasaction_info_st &);
void init(const variables_map &options);
static void kafkaCallbackFunction(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque);
static void handle_kafka_exception();
bool configured{false};
uint32_t start_block_num = 0;
bool start_block_reached = false;
size_t queue_size = 10000;
std::deque<trasaction_info_st> transaction_trace_queue;
std::deque<trasaction_info_st> transaction_trace_process_queue;
std::mutex mtx;
std::condition_variable condition;
std::thread consume_thread;
std::atomic<bool> startup{true};
kafka_producer_ptr producer;
const uint64_t KAFKA_BLOCK_REACHED_LOG_INTERVAL = 1000;
static bool kafkaTriggeredQuit;
std::shared_ptr<PrometheusExposer> prometheusExposer;
uint64_t blockTimestampReached = 0;
uint64_t actionIDReached = 0;
};
bool kafka_plugin_impl::kafkaTriggeredQuit = false;
namespace {
template<typename Queue, typename Entry>
void queue(std::mutex &mtx, std::condition_variable &condition, Queue &queue, const Entry &e,
size_t queue_size) {
int sleep_time = 100;
size_t last_queue_size = 0;
std::unique_lock lock(mtx);
if (queue.size() > queue_size) {
lock.unlock();
condition.notify_one();
if (last_queue_size < queue.size()) {
sleep_time += 100;
} else {
sleep_time -= 100;
if (sleep_time < 0) sleep_time = 100;
}
last_queue_size = queue.size();
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time));
lock.lock();
}
queue.emplace_back(e);
lock.unlock();
condition.notify_one();
}
}
void filterSetcodeData(vector<chain::action_trace>& vecActions) {
for(auto& actTrace : vecActions) {
if("setcode" == actTrace.act.name.to_string() &&
"eosio" == actTrace.act.account.to_string()) {
chain::setcode sc = actTrace.act.data_as<chain::setcode>();
sc.code.clear();
actTrace.act.data = fc::raw::pack(sc);
dlog("'setcode' action is cleared of code data. Block number is: ${block_number}",
("block_number", actTrace.block_num));
}
}
}
void kafka_plugin_impl::applied_transaction(const chain::transaction_trace_ptr &t) {
try {
filterSetcodeData(t->action_traces);
trasaction_info_st transactioninfo = trasaction_info_st{
.block_number = t->block_num,
.block_time = t->block_time,
.trace =chain::transaction_trace_ptr(t),
.tracesVar = chain_plug->chain().to_variant_with_abi(*t, chain_plug->get_abi_serializer_max_time())
};
trasaction_info_st &info_t = transactioninfo;
queue(mtx, condition, transaction_trace_queue, info_t, queue_size);
} catch (fc::exception &e) {
elog("FC Exception while applied_transaction ${e}", ("e", e.to_string()));
} catch (std::exception &e) {
elog("STD Exception while applied_transaction ${e}", ("e", e.what()));
} catch (...) {
elog("Unknown exception while applied_transaction");
}
}
void kafka_plugin_impl::consume_blocks() {
try {
size_t transaction_trace_size = 1;
while (!kafkaTriggeredQuit ||
transaction_trace_size > 0) {
std::unique_lock lock(mtx);
while (transaction_trace_queue.empty() &&
!kafkaTriggeredQuit) {
condition.wait(lock);
}
// capture for processing
transaction_trace_size = transaction_trace_queue.size();
if (transaction_trace_size > 0) {
transaction_trace_process_queue = move(transaction_trace_queue);
transaction_trace_queue.clear();
}
lock.unlock();
while (!transaction_trace_process_queue.empty()) {
const auto &t = transaction_trace_process_queue.front();
process_applied_transaction(t);
transaction_trace_process_queue.pop_front();
}
}
ilog("kafka_plugin consume thread shutdown gracefully");
} catch (fc::exception &e) {
elog("FC Exception while consuming block ${e}", ("e", e.to_string()));
} catch (std::exception &e) {
elog("STD Exception while consuming block ${e}", ("e", e.what()));
} catch (...) {
elog("Unknown exception while consuming block");
}
}
void kafka_plugin_impl::process_applied_transaction(const trasaction_info_st &t) {
try {
if (!start_block_reached) {
if (t.block_number >= start_block_num) {
start_block_reached = true;
}
}
else {
_process_applied_transaction(t);
}
} catch (fc::exception &e) {
elog("FC Exception while processing applied transaction trace: ${e}", ("e", e.to_detail_string()));
handle_kafka_exception();
} catch (std::exception &e) {
elog("STD Exception while processing applied transaction trace: ${e}", ("e", e.what()));
handle_kafka_exception();
} catch (...) {
elog("Unknown exception while processing applied transaction trace");
handle_kafka_exception();
}
}
// This will return the largest sequence id of this batch of actions.
uint64_t getLargestActionID(const vector<chain::action_trace>& vecActions) {
uint64_t largestActionID = 0;
for( const auto& actionTrace : vecActions) {
if(largestActionID < actionTrace.receipt->global_sequence) {
largestActionID = actionTrace.receipt->global_sequence;
}
}
return largestActionID;
}
void kafka_plugin_impl::_process_applied_transaction(const trasaction_info_st &t) {
if(t.trace->action_traces.empty()) {
dlog("Apply transaction with id: ${id} is skipped. No actions inside. Block number is: ${block_number}",
("id", t.trace->id.str())
("block_number", t.block_number));
return;
}
if (t.trace->receipt->status != chain::transaction_receipt_header::executed) {
// Failed transactions are also reported. Ignore those.
return;
}
if( t.block_number < prometheusExposer->getBlockGauge().Value()) { // Late applied action for irreversible block.
prometheusExposer->getAbnormalityBlockGauge().Set(prometheusExposer->getBlockGauge().Value());
return;
}
else {
prometheusExposer->getBlockGauge().Set(t.block_number);
}
uint64_t blockTimeEpochMilliSeconds = t.block_time.time_since_epoch().count() / 1000;
// Correct the block timestamp if it is in the past
if(blockTimeEpochMilliSeconds < blockTimestampReached) {
blockTimeEpochMilliSeconds = blockTimestampReached;
prometheusExposer->getBlockWithPreviousTimestampGauge().Set(t.block_number);
}
else {
blockTimestampReached = blockTimeEpochMilliSeconds;
}
auto& actionTraces = t.trace->action_traces;
uint64_t actionID = getLargestActionID(actionTraces);
if(actionID <= actionIDReached) {
prometheusExposer->getBlockWithPreviousActionIDGauge().Set(t.block_number);
}
else {
actionIDReached = actionID;
}
std::stringstream sstream;
sstream << actionID;
// Store the block time at an upper layer. This allows us to easily correct if it varies for actions inside.
string transaction_metadata_json =
"{\"block_number\":" + std::to_string(t.block_number) + ",\"block_time\":" + std::to_string(blockTimeEpochMilliSeconds) +
",\"trace\":" + fc::json::to_string(t.tracesVar).c_str() + "}";
producer->trx_kafka_sendmsg(KAFKA_TRX_APPLIED,
(char*)transaction_metadata_json.c_str(),
sstream.str());
}
kafka_plugin_impl::kafka_plugin_impl()
:producer(new kafka_producer)
{
}
kafka_plugin_impl::~kafka_plugin_impl() {
if (!startup) {
try {
ilog( "kafka_db_plugin shutdown in process please be patient this can take a few minutes" );
condition.notify_one();
consume_thread.join();
producer->trx_kafka_destroy();
} catch( std::exception& e ) {
elog( "Exception on kafka_plugin shutdown of consume thread: ${e}", ("e", e.what()));
}
}
}
void kafka_plugin_impl::init(const variables_map &options) {
std::string prometheustHostPort = "0.0.0.0:8080";
if(options.count("prometheus-uri")) {
prometheustHostPort = options.at("prometheus-uri").as<std::string>();
}
ilog("Starting Prometheus exposer at port: " + prometheustHostPort);
prometheusExposer.reset(new PrometheusExposer(prometheustHostPort));
ilog("Starting kafka plugin thread");
consume_thread = std::thread([this] { consume_blocks(); });
startup = false;
}
void kafka_plugin_impl::handle_kafka_exception() {
// Trigger quit once only
if(kafkaTriggeredQuit) {
return;
}
// For the time being quit on all
elog( "Kafka plugin triggers Quit due to error.");
app().quit();
kafkaTriggeredQuit = true;
}
void kafka_plugin_impl::kafkaCallbackFunction(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
    if (nullptr == rkmessage) {
        // guard against dereferencing a null message below
        elog("Kafka message delivery failed: null message");
        handle_kafka_exception();
    } else if (0 != rkmessage->err) {
        elog("Kafka message delivery failed: ${e}", ("e", rd_kafka_err2str(rkmessage->err)));
        handle_kafka_exception();
    }
}
////////////
// kafka_plugin
////////////
kafka_plugin::kafka_plugin()
: my(new kafka_plugin_impl()) {
}
kafka_plugin::~kafka_plugin() {
ilog("kafka_plugin::~kafka_plugin()");
plugin_shutdown();
}
void kafka_plugin::set_program_options(options_description &cli, options_description &cfg) {
cfg.add_options()
("accept_trx_topic", bpo::value<std::string>(),
"The topic for accepted transaction.")
("applied_trx_topic", bpo::value<std::string>(),
"The topic for appiled transaction.")
("kafka-uri,k", bpo::value<std::string>(),
"the kafka brokers uri, as 192.168.31.225:9092")
("kafka-queue-size", bpo::value<uint32_t>()->default_value(256),
"The target queue size between nodeos and kafka plugin thread.")
("kafka-block-start", bpo::value<uint32_t>()->default_value(256),
"If specified then only abi data pushed to kafka until specified block is reached.")
("prometheus-uri", bpo::value<std::string>(),
"Host:port on which to open Prometheus Exposer.")
;
}
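/* A configuration sketch for reference; the topic name and numeric values below are
 * illustrative assumptions, not defaults of this plugin (only kafka-queue-size and
 * kafka-block-start default to 256, and the Prometheus exposer falls back to 0.0.0.0:8080):
 *   plugin = eosio::kafka_plugin
 *   kafka-uri = 192.168.31.225:9092
 *   applied_trx_topic = eos_applied_trx
 *   kafka-queue-size = 512
 *   kafka-block-start = 1000
 *   prometheus-uri = 0.0.0.0:8080
 */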
void kafka_plugin::plugin_initialize(const variables_map &options) {
char *accept_trx_topic = NULL;
char *applied_trx_topic = NULL;
char *brokers_str = NULL;
try {
if (options.count("kafka-uri")) {
brokers_str = (char *) (options.at("kafka-uri").as<std::string>().c_str());
if (options.count("accept_trx_topic") != 0) {
accept_trx_topic = (char *) (options.at("accept_trx_topic").as<std::string>().c_str());
}
if (options.count("applied_trx_topic") != 0) {
applied_trx_topic = (char *) (options.at("applied_trx_topic").as<std::string>().c_str());
}
ilog("brokers_str:${j}", ("j", brokers_str));
if(accept_trx_topic) {
ilog("accept_trx_topic:${j}", ("j", accept_trx_topic));
}
ilog("applied_trx_topic:${j}", ("j", applied_trx_topic));
if (0 != my->producer->trx_kafka_init(brokers_str, accept_trx_topic, applied_trx_topic, my->kafkaCallbackFunction)) {
elog("trx_kafka_init fail");
my->handle_kafka_exception();
} else{
ilog("trx_kafka_init ok");
}
}
if (options.count("kafka-uri")) {
ilog("initializing kafka_plugin");
my->configured = true;
if( options.count( "kafka-queue-size" )) {
my->queue_size = options.at( "kafka-queue-size" ).as<uint32_t>();
}
if( options.count( "kafka-block-start" )) {
my->start_block_num = options.at( "kafka-block-start" ).as<uint32_t>();
}
if( my->start_block_num == 0 ) {
my->start_block_reached = true;
}
// hook up to signals on controller
//chain_plugin* chain_plug = app().find_plugiin<chain_plugin>();
my->chain_plug = app().find_plugin<chain_plugin>();
EOS_ASSERT(my->chain_plug, chain::missing_chain_plugin_exception, "");
auto &chain = my->chain_plug->chain();
my->applied_transaction_connection.emplace(
chain.applied_transaction.connect([&](std::tuple<const chain::transaction_trace_ptr&, const signed_transaction&> tupleTrx) {
my->applied_transaction(std::get<0>(tupleTrx));
}));
my->init(options);
} else {
wlog( "eosio::kafka_plugin configured, but no --kafka-uri specified." );
wlog( "kafka_plugin disabled." );
}
}
FC_LOG_AND_RETHROW()
}
void kafka_plugin::plugin_startup() {
ilog("kafka_plugin::plugin_startup()");
}
void kafka_plugin::plugin_shutdown() {
if(my) {
ilog("kafka_plugin::plugin_shutdown()");
my->kafkaTriggeredQuit = true;
my->applied_transaction_connection.reset();
my.reset();
}
}
} // namespace eosio
|
/* fit/gsl_fit.h
*
* Copyright (C) 2000 Brian Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __GSL_FIT_H__
#define __GSL_FIT_H__
#include <stdlib.h>
#include <gsl/gsl_math.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
int gsl_fit_linear (const double * x, const size_t xstride,
const double * y, const size_t ystride,
const size_t n,
double * c0, double * c1,
double * cov00, double * cov01, double * cov11,
double * sumsq);
int gsl_fit_wlinear (const double * x, const size_t xstride,
const double * w, const size_t wstride,
const double * y, const size_t ystride,
const size_t n,
double * c0, double * c1,
double * cov00, double * cov01, double * cov11,
double * chisq);
int
gsl_fit_linear_est (const double x,
const double c0, const double c1,
const double c00, const double c01, const double c11,
double *y, double *y_err);
int gsl_fit_mul (const double * x, const size_t xstride,
const double * y, const size_t ystride,
const size_t n,
double * c1,
double * cov11,
double * sumsq);
int gsl_fit_wmul (const double * x, const size_t xstride,
const double * w, const size_t wstride,
const double * y, const size_t ystride,
const size_t n,
double * c1,
double * cov11,
double * sumsq);
int
gsl_fit_mul_est (const double x,
const double c1,
const double c11,
double *y, double *y_err);
/* choose better names!! */
int gsl_fit_poly (const double * x,
const double * w,
const double * y,
size_t n,
double * c, size_t m,
double * chisq);
int gsl_fit_fns (const double * A,
const double * w,
const double * y,
size_t n,
double * c, size_t m,
double * chisq);
int gsl_fit_linear_nd (double * m, double * y, double * w);
__END_DECLS
#endif /* __GSL_FIT_H__ */
|
State Before: α✝ : Type uu
β : Type vv
l₁ l₂ : List α✝
inst✝ : DecidableEq α✝
α : Type u_1
l : List α
a : α
x✝ : [a] <+~ l
s : List α
hla : s ~ [a]
h : s <+ l
⊢ a ∈ l
State After: no goals
Tactic: rwa [perm_singleton.mp hla, singleton_sublist] at h
If $T$ is a retract of $S$ and $f, g: U \to T$ are homotopic in $S$, then $f, g$ are homotopic in $T$.
By the end of the 1960s, Ornette Coleman had become one of the most influential musicians in jazz after pioneering its most controversial subgenre, free jazz, which jazz critics and musicians initially derided for its deviation from conventional structures of harmony and tonality. In the mid-1970s, he stopped recording free jazz, recruited electric instrumentalists, and pursued a new creative theory he called harmolodics. According to Coleman's theory, all the musicians are able to play individual melodies in any key, and still sound coherent as a group. He taught his young sidemen this new improvisational and ensemble approach, based on their individual tendencies, and prevented them from being influenced by conventional styles. Coleman likened this group ethic to a spirit of "collective consciousness" that stresses "human feelings" and "biological rhythms", and said that he wanted the music, rather than himself, to be successful. He also started to incorporate elements from other styles into his music, including rock influences such as the electric guitar and non-Western rhythms played by Moroccan and Nigerian musicians.
|
% Copyright (C) 2004 Josep Mones i Teixidor <[email protected]>
%
% This program is free software; you can redistribute it and/or modify it under
% the terms of the GNU General Public License as published by the Free Software
% Foundation; either version 3 of the License, or (at your option) any later
% version.
%
% This program is distributed in the hope that it will be useful, but WITHOUT
% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
% FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
% details.
%
% You should have received a copy of the GNU General Public License along with
% this program; if not, see <http://www.gnu.org/licenses/>.
% -*- texinfo -*-
% @deftypefn {Function File} {@var{BW} = } octave_poly2mask (@var{x},@var{y},@var{m},@var{n})
% Convert a polygon to a region mask.
%
% BW=octave_poly2mask(x,y,m,n) converts a polygon, specified by a list of
% vertices in @var{x} and @var{y}, and returns the filled polygon in an
% @var{m}-by-@var{n} logical mask @var{BW}. The region inside the polygon
% is set to 1, values outside the shape are set to 0.
%
% @var{x} and @var{y} should always represent a closed polygon, first
% and last points should be coincident. If they are not octave_poly2mask will
% close it for you. If @var{x} or @var{y} are fractional they are
% rounded to the nearest integer.
%
% If all the polygon or part of it falls outside the masking area
% (1:m,1:n), it is discarded or clipped.
%
% This function uses scan-line polygon filling algorithm as described
% in http://www.cs.rit.edu/~icss571/filling/ with some minor
% modifications: capability of clipping and scan order, which can
% affect the results of the algorithm (algorithm is described not to
% reach ymax, xmax border when filling to avoid enlarging shapes). In
% this function we scan the image backwards (we begin at ymax and end
% at ymin), and we don't reach ymin, xmin, which we believe should be
% compatible with MATLAB.
% @end deftypefn
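% A minimal usage sketch (values chosen for illustration only):
%   x = [2 8 5];
%   y = [2 2 9];
%   BW = octave_poly2mask (x, y, 10, 10);   % 10-by-10 logical mask with a filled triangle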
% TODO: check how to create a logical BW without any conversion
function BW = octave_poly2mask (x, y, m, n)
if (nargin ~= 4)
print_usage ();
end
% check x and y
x = round (x (:).');
y = round (y (:).');
if (length (x) < 3)
error ('octave_poly2mask: polygon must have at least 3 vertices.');
end
if (length (x) ~= length (y))
error ('octave_poly2mask: length of x doesn''t match length of y.');
end
% create output matrix
BW = false (m, n);
% close polygon if needed
if ((x (1) ~= x (length (x))) || (y (1) ~= y (length (y))))
x = horzcat (x, x (1));
y = horzcat (y, y (1));
end
% build global edge table
ex = [x(1:length (x) - 1); x(1, 2:length (x))]; % x values for each edge
ey = [y(1:length (y) - 1); y(1, 2:length (y))]; % y values for each edge
idx = (ey(1, :) ~= ey(2, :)); % eliminate horizontal edges
ex = ex (:, idx);
ey = ey (:, idx);
eminy = min (ey); % minimum y for each edge
emaxy = max (ey); % maximum y for each edge
t = (ey == [eminy; eminy]); % values associated to miny
exvec = ex(:);
exminy = exvec(t); % x values associated to min y
exmaxy = exvec(~t); % x values associated to max y
emaxy = emaxy.'; % we want them vertical now...
eminy = eminy.';
m_inv = (exmaxy - exminy)./(emaxy - eminy); % calculate inverse slope
ge = [emaxy, eminy, exmaxy, m_inv]; % build global edge table
ge = sortrows (ge, [1, 3]); % sort on eminy and exminy
% we add an extra dummy edge at the end just to avoid checking
% while indexing it
ge = [-Inf, -Inf, -Inf, -Inf; ge];
% initial parity is even (0)
parity = 0;
% init scan line set to bottom line
sl = ge (size (ge, 1), 1);
% init active edge table
% we use a loop because the table is sorted and edge list could be
% huge
ae = [];
gei = size (ge, 1);
while (sl == ge (gei, 1))
ae = [ge(gei, 2:4); ae];
gei = gei - 1;
end
% calc minimum y to draw
miny = min (y);
if (miny < 1)
miny = 1;
end
while (sl >= miny)
% check vert clipping
if (sl <= m)
% draw current scan line
% we have to round because 1/m is fractional
ie = round (reshape (ae (:, 2), 2, size (ae, 1)/2));
% this discards left border of image (this differs from version at
% http://www.cs.rit.edu/~icss571/filling/ which discards right
% border) but keeps an exception when the point is a vertex.
ie (1, :) = ie (1, :) + (ie (1, :) ~= ie (2, :));
% we'll clip too, just in case m,n is not big enough
ie (1, (ie (1, :) < 1)) = 1;
ie (2, (ie (2, :) > n)) = n;
% we eliminate segments outside window
ie = ie (:, (ie (1, :) <= n));
ie = ie (:, (ie (2, :) >= 1));
for i = 1:size(ie,2)
BW (sl, ie (1, i):ie (2, i)) = true;
end
end
% decrement scan line
sl = sl - 1;
% eliminate edges that eymax==sl
% this discards ymin border of image (this differs from version at
% http://www.cs.rit.edu/~icss571/filling/ which discards ymax).
ae = ae ((ae (:, 1) ~= sl), :);
% update x (x1=x0-1/m)
ae(:, 2) = ae(:, 2) - ae(:, 3);
% update ae with new values
while (sl == ge (gei, 1))
ae = vertcat (ae, ge (gei, 2:4));
gei = gei - 1;
end
% order the edges in ae by x value
if (size(ae,1) > 0)
ae = sortrows (ae, 2);
end
end
end
% This should create a filled octagon
%!demo
%! s = [0:pi/4:2*pi];
%! x = cos (s) * 90 + 101;
%! y = sin (s) * 90 + 101;
%! bw = octave_poly2mask(x, y, 200, 200);
%! imshow (bw);
% This should create a 5-vertex star
%!demo
%! s = [0:2*pi/5:pi*4];
%! s = s ([1, 3, 5, 2, 4, 6]);
%! x = cos (s) * 90 + 101;
%! y = sin (s) * 90 + 101;
%! bw = octave_poly2mask (x, y, 200, 200);
%! imshow (bw);
%!# Convex polygons
%!shared xs, ys, Rs, xt, yt, Rt
%! xs=[3,3,10,10];
%! ys=[4,12,12,4];
%! Rs=zeros(16,14);
%! Rs(5:12,4:10)=1;
%! Rs=logical(Rs);
%! xt=[1,4,7];
%! yt=[1,4,1];
%! Rt=[0,0,0,0,0,0,0;
%! 0,0,1,1,1,1,0;
%! 0,0,0,1,1,0,0;
%! 0,0,0,1,0,0,0;
%! 0,0,0,0,0,0,0];
%! Rt=logical(Rt);
%!assert(octave_poly2mask(xs,ys,16,14),Rs); # rectangle
%!assert(octave_poly2mask(xs,ys,8,7),Rs(1:8,1:7)); # clipped
%!assert(octave_poly2mask(xs-7,ys-8,8,7),Rs(9:16,8:14)); # more clipping
%!assert(octave_poly2mask(xt,yt,5,7),Rt); # triangle
%!assert(octave_poly2mask(xt,yt,3,3),Rt(1:3,1:3)); # clipped
%!# Concave polygons
%!test
%! x=[3,3,5,5,8,8,10,10];
%! y=[4,12,12,8,8,11,11,4];
%! R=zeros(16,14);
%! R(5:12,4:5)=1;
%! R(5:8,6:8)=1;
%! R(5:11,9:10)=1;
%! R=logical(R);
%! assert(octave_poly2mask(x,y,16,14), R);
%!# Complex polygons
%!test
%! x=[1,5,1,5];
%! y=[1,1,4,4];
%! R=[0,0,0,0,0,0;
%! 0,0,1,1,0,0;
%! 0,0,1,1,0,0;
%! 0,1,1,1,1,0;
%! 0,0,0,0,0,0];
%! R=logical(R);
%! assert(octave_poly2mask(x,y,5,6), R);
|
rm(list = ls())
D = 4 # neuron diameter
sigma = D/4 # profile
td = seq(-D/2, D/2,by=0.01)
w = exp(-(td*td) / (2*sigma*sigma))
#pdf(file="~/filter-cs.pdf", width = 5, height = 4)
par(mar=c(4.2, 4.2, 1.0, 1.0))
plot(td, w,
type="l",
ylim=range(w),
#axes=T,
#ann=T,
xlab="d [pixels]",
#ylab="weight",
las=1,
cex.axis=1.8,
cex.lab=2.5,
lwd=3
)
box()
grid()
#dev.off()
|
function [hm,HMpix] = euc2hmg(pix)
% EUC2HMG Euclidean to Homogeneous point transform.
% EUC2HMG(E) transforms the Euclidean point E into homogeneous space by
% appending a 1 as the last coordinate.
%
% [h,H_e] = EUC2HMG(E) returns the Jacobian of the transformation.
% Copyright 2008-2009 Joan Sola @ LAAS-CNRS.
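% A minimal usage sketch (values chosen for illustration only):
%   e = [2;3];            % Euclidean point
%   [h,H] = euc2hmg(e);   % h = [2;3;1], H = [1 0; 0 1; 0 0]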
hm = [pix;ones(1,size(pix,2))];
if nargout > 1 % Jac -- OK
if size(pix,2) == 1
HMpix = [eye(numel(pix));zeros(1,numel(pix))];
else
error('??? Jacobians not available for multiple points.')
end
end
return
% ========== End of function - Start GPL license ==========
% # START GPL LICENSE
%---------------------------------------------------------------------
%
% This file is part of SLAMTB, a SLAM toolbox for Matlab.
%
% SLAMTB is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% SLAMTB is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with SLAMTB. If not, see <http://www.gnu.org/licenses/>.
%
%---------------------------------------------------------------------
% SLAMTB is Copyright:
% Copyright (c) 2008-2010, Joan Sola @ LAAS-CNRS,
% Copyright (c) 2010-2013, Joan Sola,
% Copyright (c) 2014-2015, Joan Sola @ IRI-UPC-CSIC,
% SLAMTB is Copyright 2009
% by Joan Sola, Teresa Vidal-Calleja, David Marquez and Jean Marie Codol
% @ LAAS-CNRS.
% See on top of this file for its particular copyright.
% # END GPL LICENSE
|
------------------------------------------------------------------------
-- A definitional interpreter
------------------------------------------------------------------------
{-# OPTIONS --cubical --safe #-}
module Lambda.Simplified.Partiality-monad.Inductive.Interpreter where
open import Equality.Propositional.Cubical
open import Prelude hiding (⊥)
open import Bijection equality-with-J using (_↔_)
open import Function-universe equality-with-J hiding (id; _∘_)
open import Monad equality-with-J
open import Univalence-axiom equality-with-J
open import Vec.Function equality-with-J
open import Partiality-monad.Inductive
open import Partiality-monad.Inductive.Fixpoints
open import Partiality-monad.Inductive.Monad
open import Lambda.Simplified.Syntax
open Closure Tm
------------------------------------------------------------------------
-- One interpreter
module Interpreter₁ where
-- This interpreter is defined as the least upper bound of a
-- sequence of increasingly defined interpreters.
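  -- The extra ℕ argument acts as a step bound: an application is only
  -- unfolded while the bound is positive (see _∙_ below), and ⟦_⟧ takes
  -- the least upper bound of the results over all bounds.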
infix 10 _∙_
mutual
⟦_⟧′ : ∀ {n} → Tm n → Env n → ℕ → Value ⊥
⟦ var x ⟧′ ρ n = return (ρ x)
⟦ ƛ t ⟧′ ρ n = return (ƛ t ρ)
⟦ t₁ · t₂ ⟧′ ρ n = ⟦ t₁ ⟧′ ρ n >>= λ v₁ →
⟦ t₂ ⟧′ ρ n >>= λ v₂ →
(v₁ ∙ v₂) n
_∙_ : Value → Value → ℕ → Value ⊥
(ƛ t₁ ρ ∙ v₂) zero = never
(ƛ t₁ ρ ∙ v₂) (suc n) = ⟦ t₁ ⟧′ (cons v₂ ρ) n
mutual
⟦⟧′-increasing :
∀ {n} (t : Tm n) ρ n → ⟦ t ⟧′ ρ n ⊑ ⟦ t ⟧′ ρ (suc n)
⟦⟧′-increasing (var x) ρ n = return (ρ x) ■
⟦⟧′-increasing (ƛ t) ρ n = return (ƛ t ρ) ■
⟦⟧′-increasing (t₁ · t₂) ρ n =
⟦⟧′-increasing t₁ ρ n >>=-mono λ v₁ →
⟦⟧′-increasing t₂ ρ n >>=-mono λ v₂ →
∙-increasing v₁ v₂ n
∙-increasing : ∀ v₁ v₂ n → (v₁ ∙ v₂) n ⊑ (v₁ ∙ v₂) (suc n)
∙-increasing (ƛ t₁ ρ) v₂ (suc n) = ⟦⟧′-increasing t₁ (cons v₂ ρ) n
∙-increasing (ƛ t₁ ρ) v₂ zero =
never ⊑⟨ never⊑ _ ⟩■
⟦ t₁ ⟧′ (cons v₂ ρ) 0 ■
⟦_⟧ˢ : ∀ {n} → Tm n → Env n → Increasing-sequence Value
⟦ t ⟧ˢ ρ = ⟦ t ⟧′ ρ , ⟦⟧′-increasing t ρ
⟦_⟧ : ∀ {n} → Tm n → Env n → Value ⊥
⟦ t ⟧ ρ = ⨆ (⟦ t ⟧ˢ ρ)
------------------------------------------------------------------------
-- Another interpreter
module Interpreter₂ where
-- This interpreter is defined using a fixpoint combinator.
M : Type → Type₁
M = Partial (∃ λ n → Tm n × Env n) (λ _ → Value)
infix 10 _∙_
_∙_ : Value → Value → M Value
ƛ t₁ ρ ∙ v₂ = rec (_ , t₁ , cons v₂ ρ)
⟦_⟧′ : ∀ {n} → Tm n → Env n → M Value
⟦ var x ⟧′ ρ = return (ρ x)
⟦ ƛ t ⟧′ ρ = return (ƛ t ρ)
⟦ t₁ · t₂ ⟧′ ρ = ⟦ t₁ ⟧′ ρ >>= λ v₁ →
⟦ t₂ ⟧′ ρ >>= λ v₂ →
v₁ ∙ v₂
evalP : (∃ λ n → Tm n × Env n) → M Value
evalP (_ , t , ρ) = ⟦ t ⟧′ ρ
eval : Trans-⊑ (∃ λ n → Tm n × Env n) (λ _ → Value)
eval = transformer evalP
⟦_⟧ : ∀ {n} → Tm n → Env n → Value ⊥
⟦ t ⟧ ρ = fixP evalP (_ , t , ρ)
------------------------------------------------------------------------
-- The two interpreters are pointwise equal
-- Both interpreters' bodies have the form ⨆ s for some sequences s,
-- and element n in the first interpreter's sequence is equal to
-- element 1 + n in the second interpreter's sequence (when the
-- arguments are equal).
interpreters-equal :
∀ {n} (t : Tm n) ρ →
Interpreter₁.⟦ t ⟧ ρ ≡ Interpreter₂.⟦ t ⟧ ρ
interpreters-equal = λ t ρ →
$⟨ ⟦⟧-lemma t ρ ⟩
(∀ n → Interpreter₁.⟦ t ⟧′ ρ n ≡
app→ Interpreter₂.eval (suc n) (_ , t , ρ)) ↝⟨ cong ⨆ ∘ _↔_.to equality-characterisation-increasing ⟩
⨆ (Interpreter₁.⟦ t ⟧ˢ ρ) ≡
⨆ (tailˢ (at (fix→-sequence Interpreter₂.eval) (_ , t , ρ))) ↝⟨ flip trans (⨆tail≡⨆ _) ⟩
⨆ (Interpreter₁.⟦ t ⟧ˢ ρ) ≡
⨆ (at (fix→-sequence Interpreter₂.eval) (_ , t , ρ)) ↝⟨ id ⟩□
Interpreter₁.⟦ t ⟧ ρ ≡ Interpreter₂.⟦ t ⟧ ρ □
where
open Partial
open Trans-⊑
mutual
⟦⟧-lemma :
∀ {n} (t : Tm n) ρ n →
Interpreter₁.⟦ t ⟧′ ρ n ≡
function (Interpreter₂.⟦ t ⟧′ ρ)
(app→ Interpreter₂.eval n)
⟦⟧-lemma (var x) ρ n = refl
⟦⟧-lemma (ƛ t) ρ n = refl
⟦⟧-lemma (t₁ · t₂) ρ n =
cong₂ _>>=_ (⟦⟧-lemma t₁ ρ n) $ ⟨ext⟩ λ v₁ →
cong₂ _>>=_ (⟦⟧-lemma t₂ ρ n) $ ⟨ext⟩ λ v₂ →
∙-lemma v₁ v₂ n
∙-lemma :
∀ v₁ v₂ n →
(v₁ Interpreter₁.∙ v₂) n ≡
function (v₁ Interpreter₂.∙ v₂)
(app→ Interpreter₂.eval n)
∙-lemma (ƛ t₁ ρ) v₂ zero = refl
∙-lemma (ƛ t₁ ρ) v₂ (suc n) = ⟦⟧-lemma t₁ (cons v₂ ρ) n
------------------------------------------------------------------------
-- An example
-- The semantics of Ω is the non-terminating computation never.
-- A proof for Interpreter₁.
Ω-loops₁ : Interpreter₁.⟦ Ω ⟧ nil ≡ never
Ω-loops₁ =
antisymmetry (least-upper-bound _ _ lemma) (never⊑ _)
where
open Interpreter₁
ω-nil = ƛ (var fzero · var fzero) nil
reduce-twice :
∀ n → ⟦ Ω ⟧′ nil n ≡ (ω-nil ∙ ω-nil) n
reduce-twice n =
⟦ Ω ⟧′ nil n ≡⟨ now->>= ⟩
(⟦ ω ⟧′ nil n >>= λ v₂ → (ω-nil ∙ v₂) n) ≡⟨ now->>= ⟩∎
(ω-nil ∙ ω-nil) n ∎
lemma : ∀ n → ⟦ Ω ⟧′ nil n ⊑ never
lemma zero =
⟦ Ω ⟧′ nil zero ≡⟨ reduce-twice zero ⟩⊑
(ω-nil ∙ ω-nil) zero ⊑⟨⟩
never ■
lemma (suc n) =
⟦ Ω ⟧′ nil (suc n) ≡⟨ reduce-twice (suc n) ⟩⊑
⟦ Ω ⟧′ nil n ⊑⟨ lemma n ⟩■
never ■
-- A direct proof for Interpreter₂.
Ω-loops₂ : Interpreter₂.⟦ Ω ⟧ nil ≡ never
Ω-loops₂ = antisymmetry (least-upper-bound _ _ lemma) (never⊑ _)
module Ω-loops₂ where
open Interpreter₂
open Trans-⊑
open Partial
ω-nil = ƛ (var fzero · var fzero) nil
reduce-twice :
∀ f →
function eval f (_ , Ω , nil) ≡
f (_ , var fzero · var fzero , cons ω-nil nil)
reduce-twice f =
function eval f (_ , Ω , nil) ≡⟨⟩
function (⟦ Ω ⟧′ nil) f ≡⟨ cong {x = function (⟦ Ω ⟧′ nil)} (_$ f) (⟨ext⟩ λ _ → now->>=) ⟩
function (⟦ ω ⟧′ nil >>= λ v₂ → ω-nil ∙ v₂) f ≡⟨ cong {x = function (⟦ ω ⟧′ nil >>= ω-nil ∙_)} (_$ f) (⟨ext⟩ λ _ → now->>=) ⟩
function (ω-nil ∙ ω-nil) f ≡⟨⟩
f (_ , var fzero · var fzero , cons ω-nil nil) ∎
lemma : ∀ n → app→ eval n (_ , Ω , nil) ⊑ never
lemma zero = never⊑ never
lemma (suc zero) =
app→ eval 1 (_ , Ω , nil) ≡⟨ reduce-twice _ ⟩⊑
app→ eval 0 (_ , Ω , nil) ⊑⟨⟩
never ■
lemma (suc (suc n)) =
app→ eval (suc (suc n)) (_ , Ω , nil) ≡⟨ reduce-twice _ ⟩⊑
app→ eval (suc n) (_ , Ω , nil) ⊑⟨ lemma (suc n) ⟩■
never ■
-- Another proof for Interpreter₂. This proof uses Scott induction.
Ω-loops₂′ : Interpreter₂.⟦ Ω ⟧ nil ≡ never
Ω-loops₂′ = antisymmetry lemma (never⊑ _)
where
open Interpreter₂
open Trans-⊑
open Partial
lemma =
⟦ Ω ⟧ nil ⊑⟨⟩
fixP evalP (_ , Ω , nil) ≡⟨ cong (_$ (_ , Ω , nil)) $
fixP-is-fixpoint-combinator evalP ⟩⊑
function eval (fixP evalP) (_ , Ω , nil) ⊑⟨ fixP-induction₁
(λ f → function eval f (_ , Ω , nil) ⊑ never) (
function eval (const never) (_ , Ω , nil) ≡⟨ Ω-loops₂.reduce-twice _ ⟩⊑
never ■)
(λ s hyp →
function eval (⨆ ∘ s) (_ , Ω , nil) ≡⟨ Ω-loops₂.reduce-twice _ ⟩⊑
⨆ (s _) ⊑⟨ least-upper-bound _ _ (λ n →
s _ [ n ] ≡⟨ sym $ Ω-loops₂.reduce-twice _ ⟩⊑
function eval (λ x → s x [ n ]) (_ , Ω , nil) ⊑⟨ hyp n ⟩■
never ■) ⟩
never ■)
evalP
(λ g hyp →
function eval (function eval g) (_ , Ω , nil) ≡⟨ Ω-loops₂.reduce-twice _ ⟩⊑
function eval g (_ , Ω , nil) ⊑⟨ hyp ⟩■
never ■) ⟩■
never ■
------------------------------------------------------------------------
-- Setup
-- Let us use Interpreter₂ as the default interpreter.
open Interpreter₂ public
|
module Control.Monad.Syntax
%default total
infixr 1 =<<, <=<, >=>
||| Left-to-right Kleisli composition of monads.
public export
(>=>) : Monad m => (a -> m b) -> (b -> m c) -> (a -> m c)
(>=>) f g = \x => f x >>= g
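-- For illustration (the helper below is hypothetical, not part of this
-- module): with `half : Nat -> Maybe Nat` returning `Just (div n 2)` for
-- even `n` and `Nothing` otherwise, `half >=> half` halves twice and
-- propagates `Nothing` from either step: `(half >=> half) 8 == Just 2`.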
public export
||| Right-to-left Kleisli composition of monads, flipped version of `>=>`.
(<=<) : Monad m => (b -> m c) -> (a -> m b) -> (a -> m c)
(<=<) = flip (>=>)
public export
||| Right-to-left monadic bind, flipped version of `>>=`.
(=<<) : Monad m => (a -> m b) -> m a -> m b
(=<<) = flip (>>=)
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Eric Wieser
! This file was ported from Lean 3 source module ring_theory.matrix_algebra
! leanprover-community/mathlib commit 6c351a8fb9b06e5a542fdf427bfb9f46724f9453
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Matrix.Basis
import Mathbin.RingTheory.TensorProduct
/-!
We show `matrix n n A ≃ₐ[R] (A ⊗[R] matrix n n R)`.
-/
universe u v w
open TensorProduct
open BigOperators
open TensorProduct
open Algebra.TensorProduct
open Matrix
variable {R : Type u} [CommSemiring R]
variable {A : Type v} [Semiring A] [Algebra R A]
variable {n : Type w}
variable (R A n)
namespace matrixEquivTensor
/-- (Implementation detail).
The function underlying `(A ⊗[R] matrix n n R) →ₐ[R] matrix n n A`,
as an `R`-bilinear map.
-/
def toFunBilinear : A →ₗ[R] Matrix n n R →ₗ[R] Matrix n n A :=
(Algebra.lsmul R (Matrix n n A)).toLinearMap.compl₂ (Algebra.linearMap R A).mapMatrix
#align matrix_equiv_tensor.to_fun_bilinear MatrixEquivTensor.toFunBilinear
@[simp]
theorem toFunBilinear_apply (a : A) (m : Matrix n n R) :
toFunBilinear R A n a m = a • m.map (algebraMap R A) :=
rfl
#align matrix_equiv_tensor.to_fun_bilinear_apply MatrixEquivTensor.toFunBilinear_apply
/-- (Implementation detail).
The function underlying `(A ⊗[R] matrix n n R) →ₐ[R] matrix n n A`,
as an `R`-linear map.
-/
def toFunLinear : A ⊗[R] Matrix n n R →ₗ[R] Matrix n n A :=
TensorProduct.lift (toFunBilinear R A n)
#align matrix_equiv_tensor.to_fun_linear MatrixEquivTensor.toFunLinear
variable [DecidableEq n] [Fintype n]
/-- The function `(A ⊗[R] matrix n n R) →ₐ[R] matrix n n A`, as an algebra homomorphism.
-/
def toFunAlgHom : A ⊗[R] Matrix n n R →ₐ[R] Matrix n n A :=
algHomOfLinearMapTensorProduct (toFunLinear R A n)
(by
intros
simp_rw [to_fun_linear, lift.tmul, to_fun_bilinear_apply, mul_eq_mul, Matrix.map_mul]
ext
dsimp
simp_rw [Matrix.mul_apply, Pi.smul_apply, Matrix.map_apply, smul_eq_mul, Finset.mul_sum,
_root_.mul_assoc, Algebra.left_comm])
(by
intros
simp_rw [to_fun_linear, lift.tmul, to_fun_bilinear_apply,
Matrix.map_one (algebraMap R A) (map_zero _) (map_one _), algebraMap_smul,
Algebra.algebraMap_eq_smul_one])
#align matrix_equiv_tensor.to_fun_alg_hom MatrixEquivTensor.toFunAlgHom
@[simp]
theorem toFunAlgHom_apply (a : A) (m : Matrix n n R) :
toFunAlgHom R A n (a ⊗ₜ m) = a • m.map (algebraMap R A) := by
simp [to_fun_alg_hom, alg_hom_of_linear_map_tensor_product, to_fun_linear]
#align matrix_equiv_tensor.to_fun_alg_hom_apply MatrixEquivTensor.toFunAlgHom_apply
/-- (Implementation detail.)
The bare function `matrix n n A → A ⊗[R] matrix n n R`.
(We don't need to show that it's an algebra map, thankfully --- just that it's an inverse.)
-/
def invFun (M : Matrix n n A) : A ⊗[R] Matrix n n R :=
∑ p : n × n, M p.1 p.2 ⊗ₜ stdBasisMatrix p.1 p.2 1
#align matrix_equiv_tensor.inv_fun MatrixEquivTensor.invFun
@[simp]
theorem invFun_zero : invFun R A n 0 = 0 := by simp [inv_fun]
#align matrix_equiv_tensor.inv_fun_zero MatrixEquivTensor.invFun_zero
@[simp]
theorem invFun_add (M N : Matrix n n A) : invFun R A n (M + N) = invFun R A n M + invFun R A n N :=
by simp [inv_fun, add_tmul, Finset.sum_add_distrib]
#align matrix_equiv_tensor.inv_fun_add MatrixEquivTensor.invFun_add
@[simp]
theorem invFun_smul (a : A) (M : Matrix n n A) : invFun R A n (a • M) = a ⊗ₜ 1 * invFun R A n M :=
by simp [inv_fun, Finset.mul_sum]
#align matrix_equiv_tensor.inv_fun_smul MatrixEquivTensor.invFun_smul
@[simp]
theorem invFun_algebraMap (M : Matrix n n R) : invFun R A n (M.map (algebraMap R A)) = 1 ⊗ₜ M :=
by
dsimp [inv_fun]
simp only [Algebra.algebraMap_eq_smul_one, smul_tmul, ← tmul_sum, mul_boole]
congr
conv_rhs => rw [matrix_eq_sum_std_basis M]
convert Finset.sum_product; simp
#align matrix_equiv_tensor.inv_fun_algebra_map MatrixEquivTensor.invFun_algebraMap
theorem right_inv (M : Matrix n n A) : (toFunAlgHom R A n) (invFun R A n M) = M :=
by
simp only [inv_fun, AlgHom.map_sum, std_basis_matrix, apply_ite ⇑(algebraMap R A), smul_eq_mul,
mul_boole, to_fun_alg_hom_apply, RingHom.map_zero, RingHom.map_one, Matrix.map_apply,
Pi.smul_def]
convert Finset.sum_product; apply matrix_eq_sum_std_basis
#align matrix_equiv_tensor.right_inv MatrixEquivTensor.right_inv
theorem left_inv (M : A ⊗[R] Matrix n n R) : invFun R A n (toFunAlgHom R A n M) = M :=
by
induction' M using TensorProduct.induction_on with a m x y hx hy
· simp
· simp
· simp [AlgHom.map_sum, hx, hy]
#align matrix_equiv_tensor.left_inv MatrixEquivTensor.left_inv
/-- (Implementation detail)
The equivalence, ignoring the algebra structure, `(A ⊗[R] matrix n n R) ≃ matrix n n A`.
-/
def equiv : A ⊗[R] Matrix n n R ≃ Matrix n n A
where
toFun := toFunAlgHom R A n
invFun := invFun R A n
left_inv := left_inv R A n
right_inv := right_inv R A n
#align matrix_equiv_tensor.equiv MatrixEquivTensor.equiv
end matrixEquivTensor
variable [Fintype n] [DecidableEq n]
/-- The `R`-algebra isomorphism `matrix n n A ≃ₐ[R] (A ⊗[R] matrix n n R)`.
-/
def matrixEquivTensor : Matrix n n A ≃ₐ[R] A ⊗[R] Matrix n n R :=
AlgEquiv.symm { MatrixEquivTensor.toFunAlgHom R A n, MatrixEquivTensor.equiv R A n with }
#align matrix_equiv_tensor matrixEquivTensor
open matrixEquivTensor
@[simp]
theorem matrixEquivTensor_apply (M : Matrix n n A) :
matrixEquivTensor R A n M = ∑ p : n × n, M p.1 p.2 ⊗ₜ stdBasisMatrix p.1 p.2 1 :=
rfl
#align matrix_equiv_tensor_apply matrixEquivTensor_apply
@[simp]
theorem matrixEquivTensor_apply_std_basis (i j : n) (x : A) :
matrixEquivTensor R A n (stdBasisMatrix i j x) = x ⊗ₜ stdBasisMatrix i j 1 :=
by
have t : ∀ p : n × n, i = p.1 ∧ j = p.2 ↔ p = (i, j) := by tidy
simp [ite_tmul, t, std_basis_matrix]
#align matrix_equiv_tensor_apply_std_basis matrixEquivTensor_apply_std_basis
@[simp]
theorem matrixEquivTensor_apply_symm (a : A) (M : Matrix n n R) :
(matrixEquivTensor R A n).symm (a ⊗ₜ M) = M.map fun x => a * algebraMap R A x :=
by
simp [matrixEquivTensor, to_fun_alg_hom, alg_hom_of_linear_map_tensor_product, to_fun_linear]
rfl
#align matrix_equiv_tensor_apply_symm matrixEquivTensor_apply_symm
|
/-
Copyright (c) 2017 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.functor.hom
import category_theory.functor.currying
import category_theory.products.basic
/-!
# The Yoneda embedding
The Yoneda embedding as a functor `yoneda : C ⥤ (Cᵒᵖ ⥤ Type v₁)`,
along with an instance that it is `fully_faithful`.
Also the Yoneda lemma, `yoneda_lemma : (yoneda_pairing C) ≅ (yoneda_evaluation C)`.
## References
* [Stacks: Opposite Categories and the Yoneda Lemma](https://stacks.math.columbia.edu/tag/001L)
-/
namespace category_theory
open opposite
universes v₁ u₁ u₂ -- morphism levels before object levels. See note [category_theory universes].
variables {C : Type u₁} [category.{v₁} C]
/--
The Yoneda embedding, as a functor from `C` into presheaves on `C`.
See https://stacks.math.columbia.edu/tag/001O.
-/
@[simps]
def yoneda : C ⥤ (Cᵒᵖ ⥤ Type v₁) :=
{ obj := λ X,
{ obj := λ Y, unop Y ⟶ X,
map := λ Y Y' f g, f.unop ≫ g,
map_comp' := λ _ _ _ f g, begin ext, dsimp, erw [category.assoc] end,
map_id' := λ Y, begin ext, dsimp, erw [category.id_comp] end },
map := λ X X' f, { app := λ Y g, g ≫ f } }
/--
The co-Yoneda embedding, as a functor from `Cᵒᵖ` into co-presheaves on `C`.
-/
@[simps] def coyoneda : Cᵒᵖ ⥤ (C ⥤ Type v₁) :=
{ obj := λ X,
{ obj := λ Y, unop X ⟶ Y,
map := λ Y Y' f g, g ≫ f },
map := λ X X' f, { app := λ Y g, f.unop ≫ g } }
namespace yoneda
lemma obj_map_id {X Y : C} (f : op X ⟶ op Y) :
(yoneda.obj X).map f (𝟙 X) = (yoneda.map f.unop).app (op Y) (𝟙 Y) :=
by { dsimp, simp }
@[simp] lemma naturality {X Y : C} (α : yoneda.obj X ⟶ yoneda.obj Y)
{Z Z' : C} (f : Z ⟶ Z') (h : Z' ⟶ X) : f ≫ α.app (op Z') h = α.app (op Z) (f ≫ h) :=
(functor_to_types.naturality _ _ α f.op h).symm
/--
The Yoneda embedding is full.
See https://stacks.math.columbia.edu/tag/001P.
-/
instance yoneda_full : full (yoneda : C ⥤ Cᵒᵖ ⥤ Type v₁) :=
{ preimage := λ X Y f, f.app (op X) (𝟙 X) }
/--
The Yoneda embedding is faithful.
See https://stacks.math.columbia.edu/tag/001P.
-/
instance yoneda_faithful : faithful (yoneda : C ⥤ Cᵒᵖ ⥤ Type v₁) :=
{ map_injective' := λ X Y f g p, by convert (congr_fun (congr_app p (op X)) (𝟙 X)); dsimp; simp }
/-- Extensionality via Yoneda. The typical usage would be
```
-- Goal is `X ≅ Y`
apply yoneda.ext,
-- Goals are now functions `(Z ⟶ X) → (Z ⟶ Y)`, `(Z ⟶ Y) → (Z ⟶ X)`, and the fact that these
functions are inverses and natural in `Z`.
```
-/
def ext (X Y : C)
(p : Π {Z : C}, (Z ⟶ X) → (Z ⟶ Y)) (q : Π {Z : C}, (Z ⟶ Y) → (Z ⟶ X))
(h₁ : Π {Z : C} (f : Z ⟶ X), q (p f) = f) (h₂ : Π {Z : C} (f : Z ⟶ Y), p (q f) = f)
(n : Π {Z Z' : C} (f : Z' ⟶ Z) (g : Z ⟶ X), p (f ≫ g) = f ≫ p g) : X ≅ Y :=
@preimage_iso _ _ _ _ yoneda _ _ _ _
(nat_iso.of_components (λ Z, { hom := p, inv := q, }) (by tidy))
/--
If `yoneda.map f` is an isomorphism, so is `f`.
-/
lemma is_iso {X Y : C} (f : X ⟶ Y) [is_iso (yoneda.map f)] : is_iso f :=
is_iso_of_fully_faithful yoneda f
end yoneda
namespace coyoneda
@[simp] lemma naturality {X Y : Cᵒᵖ} (α : coyoneda.obj X ⟶ coyoneda.obj Y)
{Z Z' : C} (f : Z' ⟶ Z) (h : unop X ⟶ Z') : (α.app Z' h) ≫ f = α.app Z (h ≫ f) :=
(functor_to_types.naturality _ _ α f h).symm
instance coyoneda_full : full (coyoneda : Cᵒᵖ ⥤ C ⥤ Type v₁) :=
{ preimage := λ X Y f, (f.app _ (𝟙 X.unop)).op }
instance coyoneda_faithful : faithful (coyoneda : Cᵒᵖ ⥤ C ⥤ Type v₁) :=
{ map_injective' := λ X Y f g p,
begin
have t := congr_fun (congr_app p X.unop) (𝟙 _),
simpa using congr_arg quiver.hom.op t,
end }
/--
If `coyoneda.map f` is an isomorphism, so is `f`.
-/
lemma is_iso {X Y : Cᵒᵖ} (f : X ⟶ Y) [is_iso (coyoneda.map f)] : is_iso f :=
is_iso_of_fully_faithful coyoneda f
/-- The identity functor on `Type` is isomorphic to the coyoneda functor coming from `punit`. -/
def punit_iso : coyoneda.obj (opposite.op punit) ≅ 𝟭 (Type v₁) :=
nat_iso.of_components
(λ X, { hom := λ f, f ⟨⟩, inv := λ x _, x })
(by tidy)
end coyoneda
namespace functor
/--
A functor `F : Cᵒᵖ ⥤ Type v₁` is representable if there is an object `X` such that `F ≅ yoneda.obj X`.
See https://stacks.math.columbia.edu/tag/001Q.
-/
class representable (F : Cᵒᵖ ⥤ Type v₁) : Prop :=
(has_representation : ∃ X (f : yoneda.obj X ⟶ F), is_iso f)
instance {X : C} : representable (yoneda.obj X) :=
{ has_representation := ⟨X, 𝟙 _, infer_instance⟩ }
/--
A functor `F : C ⥤ Type v₁` is corepresentable if there is an object `X` such that `F ≅ coyoneda.obj X`.
See https://stacks.math.columbia.edu/tag/001Q.
-/
class corepresentable (F : C ⥤ Type v₁) : Prop :=
(has_corepresentation : ∃ X (f : coyoneda.obj X ⟶ F), is_iso f)
instance {X : Cᵒᵖ} : corepresentable (coyoneda.obj X) :=
{ has_corepresentation := ⟨X, 𝟙 _, infer_instance⟩ }
-- instance : corepresentable (𝟭 (Type v₁)) :=
-- corepresentable_of_nat_iso (op punit) coyoneda.punit_iso
section representable
variables (F : Cᵒᵖ ⥤ Type v₁)
variable [F.representable]
/-- The representing object for the representable functor `F`. -/
noncomputable def repr_X : C :=
(representable.has_representation : ∃ X (f : _ ⟶ F), _).some
/-- The (forward direction of the) isomorphism witnessing `F` is representable. -/
noncomputable def repr_f : yoneda.obj F.repr_X ⟶ F :=
representable.has_representation.some_spec.some
/--
The representing element for the representable functor `F`, sometimes called the universal
element of the functor.
-/
noncomputable def repr_x : F.obj (op F.repr_X) :=
F.repr_f.app (op F.repr_X) (𝟙 F.repr_X)
instance : is_iso F.repr_f :=
representable.has_representation.some_spec.some_spec
/--
An isomorphism between `F` and a functor of the form `C(-, F.repr_X)`. Note the components
`F.repr_w.app X` definitionally have type `(X.unop ⟶ F.repr_X) ≅ F.obj X`.
-/
noncomputable def repr_w : yoneda.obj F.repr_X ≅ F := as_iso F.repr_f
@[simp] lemma repr_w_hom : F.repr_w.hom = F.repr_f := rfl
lemma repr_w_app_hom (X : Cᵒᵖ) (f : unop X ⟶ F.repr_X) :
(F.repr_w.app X).hom f = F.map f.op F.repr_x :=
begin
change F.repr_f.app X f = (F.repr_f.app (op F.repr_X) ≫ F.map f.op) (𝟙 F.repr_X),
rw ←F.repr_f.naturality,
dsimp,
simp
end
end representable
section corepresentable
variables (F : C ⥤ Type v₁)
variable [F.corepresentable]
/-- The representing object for the corepresentable functor `F`. -/
noncomputable def corepr_X : C :=
(corepresentable.has_corepresentation : ∃ X (f : _ ⟶ F), _).some.unop
/-- The (forward direction of the) isomorphism witnessing `F` is corepresentable. -/
noncomputable def corepr_f : coyoneda.obj (op F.corepr_X) ⟶ F :=
corepresentable.has_corepresentation.some_spec.some
/--
The representing element for the corepresentable functor `F`, sometimes called the universal
element of the functor.
-/
noncomputable def corepr_x : F.obj F.corepr_X :=
F.corepr_f.app F.corepr_X (𝟙 F.corepr_X)
instance : is_iso F.corepr_f :=
corepresentable.has_corepresentation.some_spec.some_spec
/--
An isomorphism between `F` and a functor of the form `C(F.corepr_X, -)`. Note the components
`F.corepr_w.app X` definitionally have type `(F.corepr_X ⟶ X) ≅ F.obj X`.
-/
noncomputable def corepr_w : coyoneda.obj (op F.corepr_X) ≅ F := as_iso F.corepr_f
lemma corepr_w_app_hom (X : C) (f : F.corepr_X ⟶ X) :
(F.corepr_w.app X).hom f = F.map f F.corepr_x :=
begin
change F.corepr_f.app X f = (F.corepr_f.app F.corepr_X ≫ F.map f) (𝟙 F.corepr_X),
rw ←F.corepr_f.naturality,
dsimp,
simp
end
end corepresentable
end functor
lemma representable_of_nat_iso (F : Cᵒᵖ ⥤ Type v₁) {G} (i : F ≅ G) [F.representable] :
G.representable :=
{ has_representation := ⟨F.repr_X, F.repr_f ≫ i.hom, infer_instance⟩ }
lemma corepresentable_of_nat_iso (F : C ⥤ Type v₁) {G} (i : F ≅ G) [F.corepresentable] :
G.corepresentable :=
{ has_corepresentation := ⟨op F.corepr_X, F.corepr_f ≫ i.hom, infer_instance⟩ }
instance : functor.corepresentable (𝟭 (Type v₁)) :=
corepresentable_of_nat_iso (coyoneda.obj (op punit)) coyoneda.punit_iso
open opposite
variables (C)
-- We need to help typeclass inference with some awkward universe levels here.
instance prod_category_instance_1 : category ((Cᵒᵖ ⥤ Type v₁) × Cᵒᵖ) :=
category_theory.prod.{(max u₁ v₁) v₁} (Cᵒᵖ ⥤ Type v₁) Cᵒᵖ
instance prod_category_instance_2 : category (Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) :=
category_theory.prod.{v₁ (max u₁ v₁)} Cᵒᵖ (Cᵒᵖ ⥤ Type v₁)
open yoneda
/--
The "Yoneda evaluation" functor, which sends `X : Cᵒᵖ` and `F : Cᵒᵖ ⥤ Type`
to `F.obj X`, functorially in both `X` and `F`.
-/
def yoneda_evaluation : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁) ⥤ Type (max u₁ v₁) :=
evaluation_uncurried Cᵒᵖ (Type v₁) ⋙ ulift_functor.{u₁}
@[simp] lemma yoneda_evaluation_map_down
(P Q : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (x : (yoneda_evaluation C).obj P) :
((yoneda_evaluation C).map α x).down = α.2.app Q.1 (P.2.map α.1 x.down) := rfl
/--
The "Yoneda pairing" functor, which sends `X : Cᵒᵖ` and `F : Cᵒᵖ ⥤ Type`
to `yoneda.op.obj X ⟶ F`, functorially in both `X` and `F`.
-/
def yoneda_pairing : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁) ⥤ Type (max u₁ v₁) :=
functor.prod yoneda.op (𝟭 (Cᵒᵖ ⥤ Type v₁)) ⋙ functor.hom (Cᵒᵖ ⥤ Type v₁)
@[simp] lemma yoneda_pairing_map
(P Q : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (β : (yoneda_pairing C).obj P) :
(yoneda_pairing C).map α β = yoneda.map α.1.unop ≫ β ≫ α.2 := rfl
/--
The Yoneda lemma asserts that the Yoneda pairing
`(X : Cᵒᵖ, F : Cᵒᵖ ⥤ Type) ↦ (yoneda.obj (unop X) ⟶ F)`
is naturally isomorphic to the evaluation `(X, F) ↦ F.obj X`.
See https://stacks.math.columbia.edu/tag/001P.
-/
def yoneda_lemma : yoneda_pairing C ≅ yoneda_evaluation C :=
{ hom :=
{ app := λ F x, ulift.up ((x.app F.1) (𝟙 (unop F.1))),
naturality' :=
begin
intros X Y f, ext, dsimp,
erw [category.id_comp, ←functor_to_types.naturality],
simp only [category.comp_id, yoneda_obj_map],
end },
inv :=
{ app := λ F x,
{ app := λ X a, (F.2.map a.op) x.down,
naturality' :=
begin
intros X Y f, ext, dsimp,
rw [functor_to_types.map_comp_apply]
end },
naturality' :=
begin
intros X Y f, ext, dsimp,
rw [←functor_to_types.naturality, functor_to_types.map_comp_apply]
end },
hom_inv_id' :=
begin
ext, dsimp,
erw [←functor_to_types.naturality,
obj_map_id],
simp only [yoneda_map_app, quiver.hom.unop_op],
erw [category.id_comp],
end,
inv_hom_id' :=
begin
ext, dsimp,
rw [functor_to_types.map_id_apply]
end }.
variables {C}
/--
The isomorphism between `yoneda.obj X ⟶ F` and `F.obj (op X)`
(we need to insert a `ulift` to get the universes right!)
given by the Yoneda lemma.
-/
@[simps] def yoneda_sections (X : C) (F : Cᵒᵖ ⥤ Type v₁) :
(yoneda.obj X ⟶ F) ≅ ulift.{u₁} (F.obj (op X)) :=
(yoneda_lemma C).app (op X, F)
/--
We have a type-level equivalence between natural transformations from the yoneda embedding
and elements of `F.obj X`, without any universe switching.
-/
def yoneda_equiv {X : C} {F : Cᵒᵖ ⥤ Type v₁} : (yoneda.obj X ⟶ F) ≃ F.obj (op X) :=
(yoneda_sections X F).to_equiv.trans equiv.ulift
@[simp]
lemma yoneda_equiv_apply {X : C} {F : Cᵒᵖ ⥤ Type v₁} (f : yoneda.obj X ⟶ F) :
yoneda_equiv f = f.app (op X) (𝟙 X) :=
rfl
@[simp]
lemma yoneda_equiv_symm_app_apply {X : C} {F : Cᵒᵖ ⥤ Type v₁} (x : F.obj (op X))
(Y : Cᵒᵖ) (f : Y.unop ⟶ X) :
(yoneda_equiv.symm x).app Y f = F.map f.op x :=
rfl
lemma yoneda_equiv_naturality {X Y : C} {F : Cᵒᵖ ⥤ Type v₁} (f : yoneda.obj X ⟶ F) (g : Y ⟶ X) :
F.map g.op (yoneda_equiv f) = yoneda_equiv (yoneda.map g ≫ f) :=
begin
change (f.app (op X) ≫ F.map g.op) (𝟙 X) = f.app (op Y) (𝟙 Y ≫ g),
rw ←f.naturality,
dsimp,
simp,
end
/--
When `C` is a small category, we can restate the isomorphism from `yoneda_sections`
without having to change universes.
-/
def yoneda_sections_small {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) :
(yoneda.obj X ⟶ F) ≅ F.obj (op X) :=
yoneda_sections X F ≪≫ ulift_trivial _
@[simp]
lemma yoneda_sections_small_hom {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) (f : yoneda.obj X ⟶ F) :
(yoneda_sections_small X F).hom f = f.app _ (𝟙 _) :=
rfl
@[simp]
lemma yoneda_sections_small_inv_app_apply {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) (t : F.obj (op X)) (Y : Cᵒᵖ) (f : Y.unop ⟶ X) :
((yoneda_sections_small X F).inv t).app Y f = F.map f.op t :=
rfl
local attribute [ext] functor.ext
/-- The curried version of yoneda lemma when `C` is small. -/
def curried_yoneda_lemma {C : Type u₁} [small_category C] :
(yoneda.op ⋙ coyoneda : Cᵒᵖ ⥤ (Cᵒᵖ ⥤ Type u₁) ⥤ Type u₁) ≅ evaluation Cᵒᵖ (Type u₁) :=
eq_to_iso (by tidy) ≪≫ curry.map_iso (yoneda_lemma C ≪≫
iso_whisker_left (evaluation_uncurried Cᵒᵖ (Type u₁)) ulift_functor_trivial) ≪≫
eq_to_iso (by tidy)
/-- The curried version of yoneda lemma when `C` is small. -/
def curried_yoneda_lemma' {C : Type u₁} [small_category C] :
yoneda ⋙ (whiskering_left Cᵒᵖ (Cᵒᵖ ⥤ Type u₁)ᵒᵖ (Type u₁)).obj yoneda.op ≅ 𝟭 (Cᵒᵖ ⥤ Type u₁) :=
eq_to_iso (by tidy) ≪≫ curry.map_iso (iso_whisker_left (prod.swap _ _)
(yoneda_lemma C ≪≫ iso_whisker_left
(evaluation_uncurried Cᵒᵖ (Type u₁)) ulift_functor_trivial : _)) ≪≫ eq_to_iso (by tidy)
end category_theory
|
People looking for sales jobs in the West Midlands are in a region which is coping well during the economic downturn, according to one minister who praised the area's 'can do' spirit.
Energy and Climate Change minister Lord Hunt of Kings Heath said that the region is a good location for green industries and the sector is expected to grow by four per cent in the next year, which could be good news for sales jobs.
Lord Hunt said: "The West Midlands is already taking on the challenge of climate change. Its low carbon sector accounts for some £8.4 billion of the region's economy, comprising 4,179 companies with around 74,000 employees."
He added that as the UK advances as a powerful manufacturer in the green sector, the West Midlands will play a key role.
Electronic sales jobs were recently boosted by Nokia as the Finnish company presented its new Booklet 3G netbook, which could require new employees to produce it.
#' Habitat Blueprint Browser
#'
#' This packages contains the Habitat Blueprint Browser shiny app. Data used
#' by the app was generated using the \code{rremat} package.
#'
#' @name habitatblueprint-package
#' @docType package
#'
#' @importFrom scales date_trans
#' @importFrom RColorBrewer brewer.pal
#' @importFrom dplyr arrange
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr left_join
#' @importFrom dplyr summarize
#' @importFrom shiny runApp
#' @importFrom tidyr gather_
#' @importFrom tidyr spread_
#' @import ggplot2
NULL
#' CTD Transect Meta Data
#'
#' Meta data for CTD transects. Structured as:
#' \itemize{
#' \item \code{start} timestamp of transect commencement
#' \item \code{end} timestamp of transect completion
#' \item \code{note} additional note on e.g. mouth state
#' }
#' @details Transect meta data was collected from the Russian River Estuary
#' Circulation and Water Quality Data Reports (2011, 2012, 2013, 2014)
#' submitted to the Sonoma County Water Agency.
#' @docType data
#' @keywords datasets
#' @name ctdmeta
#' @usage data(ctdmeta)
#' @format A data frame with 3 variables
NULL
#' Interpolated CTD Grids
#'
#' Interpolated CTD grids constructed using natural neighbor interpolation in
#' Matlab. Structured as:
#' \itemize{
#' \item \code{sa} salinity (PSU)
#' \item \code{ta} temperature (degrees C)
#' \item \code{oa} dissolved oxygen (mg/L)
#' \item \code{dist} longitudinal distance from mouth (km)
#' \item \code{elev} elevation (m NAVD29)
#' \item \code{date} cast date
#' }
#' @docType data
#' @keywords datasets
#' @name grids
#' @usage data(grids)
#' @format A data frame with 6 variables
#' @seealso ctdmeta ctd
NULL
#' Habitat Data
#'
#' Habitat evaluation of interpolated CTD transects. Same structure and data
#' as \code{grids}, but with the following additional columns:
#'
#' @docType data
#' @keywords datasets
#' @name habgrids
#' @usage data(habgrids)
#' @format A data frame with 6 variables
#' @seealso grids
NULL
#' CTD Cast Data
#'
#' CTD cast data collected by the Largier Lab and processed using
#' \code{rremat}. Structured as:
#' \enumerate{
#' \item \code{date} cast date
#' \item \code{dist} longitudinal distance from mouth (km)
#' \item \code{surfelev} water surface elevation (m NAVD29)
#' \item \code{elev} elevation (m NAVD29)
#' \item \code{depth} cast depth (m)
#' \item \code{ta} temperature (degrees C)
#' \item \code{sa} salinity (PSU)
#' \item \code{da}
#' \item \code{oa} dissolved oxygen (mg/L)
#' \item \code{sat} dissolved oxygen percent saturation (%)
#'   \item \code{fl} fluorescence (ug/L)
#' \item \code{bt} beam transmission (%)
#' \item \code{par} photosynthetically-active radiation (umol^-1 m^-2)
#' \item \code{ph} pH
#' }
#' @docType data
#' @keywords datasets
#' @name ctd
#' @usage data(ctd)
#' @format A data frame with 14 variables
#' @seealso ctdmeta grids habgrids
NULL
#' Water Level Data
#'
#' Pressure gauge water Level data collected by the Largier Lab and processed
#' using \code{rremat}. Structured as:
#' \itemize{
#' \item \code{site} gauge location
#' \item \code{mtime} timestamp (UTC)
#' \item \code{depth} water depth (m)
#' }
#' @docType data
#' @keywords datasets
#' @name wll
#' @usage data(wll)
#' @format A data frame with 3 variables
NULL
#' River Flow Data
#'
#' River flow data managed by USGS. Structured as:
#' \itemize{
#' \item \code{site} gauge location. Austin Creek near Cazadero and Russian
#' River near Guerneville are included.
#' \item \code{datetime} timestamp (PST/PDT)
#' \item \code{flow} flow rate (cfs)
#' }
#' @docType data
#' @keywords datasets
#' @name inflows
#' @usage data(inflows)
#' @format A data frame with 3 variables
NULL
#' Tide Height Data
#'
#' Tide height data managed by NOAA. Structured as:
#' \itemize{
#' \item \code{datetime} timestamp (UTC)
#' \item \code{height} Tide height above MLLW (m)
#' \item \code{sigma}
#' }
#' @docType data
#' @keywords datasets
#' @name tides
#' @usage data(tides)
#' @format A data frame with 3 variables
NULL
#' Volume Lookup Table
#'
#' Volume lookup table constructed from RRE bathymetry. Structured as:
#' \itemize{
#' \item \code{dist} distance along the thalweg marking the edge of each zone
#' furthest from the mouth, e.g. \code{dist = 100} identifies the region
#' spanning 0 meters and 100 meters along the thalweg.
#' \item \code{elev} elevation marking the the top of the depth increment,
#' i.e. \code{elev = 0.1} identifies the region spanning 0 and 0.1 meters
#' elevation.
#' \item \code{count} the total number of cells contained within each zone
#' and vertical increment.
#' }
#' @details The volume lookup table was generated from the RRE bathymetry DEM
#' and zone delineations based on a manual trace of the river thaleweg. The
#' volume cells are defined as having a length of 1 meter, width of 1 meter,
#' and height of 0.1 meters (cell volume of 0.1 cubic meters). Zones are
#' delineated in 100-meter increments along the thalweg, and each zone is
#' divided in the vertical into 0.1-meter increments from a minimum elevation
#' of -15.8 meters to a maximum elevation of 2.6 meters. The lookup table
#' lists the count of volume elements in each vertical increment in each
#' zone, i.e. each row lists the number of elements between river distances
#' \code{d} and \code{d - 100} and between depths \code{z} and
#' \code{z - 0.1}.
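#' @examples
#' \dontrun{
#' ## Illustrative sketch: approximate water volume (m^3) below 0 m NAVD in
#' ## each 100-m zone, using the 0.1 m^3 cell volume described above.
#' data(volumes)
#' below <- subset(volumes, elev <= 0)
#' zone_volume <- aggregate(count ~ dist, data = below, FUN = sum)
#' zone_volume$volume_m3 <- 0.1 * zone_volume$count
#' }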
#' @docType data
#' @keywords datasets
#' @name volumes
#' @usage data(volumes)
#' @format A data frame with 3 variables
NULL
#' Estuary Closures
#'
#' Estuary closures identified from water level and photographic data.
#' Structured as:
#' \enumerate{
#' \item \code{id} (arbitary) closure id
#' \item \code{initiation} date of closure initiation
#' \item \code{breach} date of mouth breach
#' }
#' @docType data
#' @keywords datasets
#' @name closures
#' @usage data(closures)
#' @format A data frame with 3 variables
NULL
#' Habitat Blueprint Browser
#'
#' Start the Habitat Blueprint Browser.
#'
#' @examples
#' \dontrun{
#' HabitatBrowser()
#' }
#'
#' @importFrom shiny runApp
#' @export
HabitatBrowser = function(autoscale = FALSE, ...){
  # 'autoscale' is currently unused; the remaining arguments are passed on
  # to shiny::runApp().
  runApp(system.file("shiny/", package = "habitatblueprint", mustWork = TRUE),
    ...)
}
|
using Test
using MarketData
using TimeSeries
@testset "broadcast" begin
@testset "base element-wise operators on TimeArray values" begin
@testset "only values on intersecting Dates computed" begin
let
ta = cl[1:2] ./ op[2:3]
@test values(ta)[1] ≈ 0.94688222
@test length(ta) == 1
end
let
ta = cl[1:4] .+ op[4:7]
@test values(ta)[1] ≈ 201.12 atol=.01
@test length(ta) == 1
end
end
@testset "unary operation on TimeArray values" begin
@test values(+cl)[1] == values(cl)[1]
@test values(-cl)[1] == -values(cl)[1]
@test values(.!(cl .== op))[1] == true
@test values(log.(cl))[1] == log.(values(cl)[1])
@test values(sqrt.(cl))[1] == sqrt.(values(cl)[1])
@test values(+ohlc)[1, :] == values(ohlc)[1,:]
@test values(-ohlc)[1, :] == -(values(ohlc)[1,:])
@test values(.!(ohlc .== ohlc))[1, 1] == false
@test values(log.(ohlc))[1, :] == log.(values(ohlc)[1, :])
@test values(sqrt.(ohlc))[1, :] == sqrt.(values(ohlc)[1, :])
@test_throws DomainError sqrt.(-ohlc)
end
@testset "dot operation between TimeArray values and Int/Float and viceversa" begin
@test values(cl .- 100)[1] ≈ 11.94 atol=.01
@test values(cl .+ 100)[1] ≈ 211.94 atol=.01
@test values(cl .* 100)[1] ≈ 11194 atol=1
@test values(cl ./ 100)[1] ≈ 1.1194 atol=.0001
@test values(cl .^ 2)[1] ≈ 12530.5636 atol=.0001
@test values(cl .% 2)[1] == values(cl)[1] % 2
@test values(100 .- cl)[1] ≈ -11.94 atol=.01
@test values(100 .+ cl)[1] ≈ 211.94 atol=.01
@test values(100 .* cl)[1] ≈ 11194 atol=.01
@test values(100 ./ cl)[1] ≈ 0.8933357155619082
@test values(2 .^ cl)[1] == 4980784073277740581384811358191616
@test values(2 .% cl)[1] == 2
@test values(ohlc .- 100)[1,:] == values(ohlc)[1,:] .- 100
@test values(ohlc .+ 100)[1,:] == values(ohlc)[1,:] .+ 100
@test values(ohlc .* 100)[1,:] == values(ohlc)[1,:] .* 100
@test values(ohlc ./ 100)[1,:] == values(ohlc)[1,:] ./ 100
@test values(ohlc .^ 2)[1,:] == values(ohlc)[1,:] .^ 2
@test values(ohlc .% 2)[1,:] == values(ohlc)[1,:] .% 2
@test values(100 .- ohlc)[1,:] == 100 .- values(ohlc)[1,:]
@test values(100 .+ ohlc)[1,:] == 100 .+ values(ohlc)[1,:]
@test values(100 .* ohlc)[1,:] == 100 .* values(ohlc)[1,:]
@test values(100 ./ ohlc)[1,:] == 100 ./ values(ohlc)[1,:]
@test values(2 .^ ohlc)[1,:] == 2 .^ values(ohlc)[1,:]
@test values(2 .% ohlc)[1,:] == 2 .% values(ohlc)[1,:]
end
@testset "mathematical operations between two same-column-count TimeArrays" begin
@test values(cl .+ op)[1] ≈ 216.82 atol=.01
@test values(cl .- op)[1] ≈ 7.06 atol=.01
@test values(cl .* op)[1] ≈ 11740.2672 atol=0.0001
@test values(cl ./ op)[1] ≈ 1.067315 atol=0.001
@test values(cl .% op) == values(cl) .% values(op)
@test values(cl .^ op) == values(cl) .^ values(op)
@test values(cl .* (cl.> 200)) == values(cl) .* (values(cl) .> 200)
@test values(round.(cl) .* cl) == round.(Int, values(cl)) .* values(cl)
@test values(ohlc .+ ohlc) == values(ohlc) .+ values(ohlc)
@test values(ohlc .- ohlc) == values(ohlc) .- values(ohlc)
@test values(ohlc .* ohlc) == values(ohlc) .* values(ohlc)
@test values(ohlc ./ ohlc) == values(ohlc) ./ values(ohlc)
@test values(ohlc .% ohlc) == values(ohlc) .% values(ohlc)
@test values(ohlc .^ ohlc) == values(ohlc) .^ values(ohlc)
@test values(ohlc .* (ohlc .> 200)) == values(ohlc) .* (values(ohlc) .> 200)
@test values(round.(ohlc) .* ohlc) == round.(Int, values(ohlc)) .* values(ohlc)
end
@testset "broadcasted mathematical operations between different-column-count TimeArrays" begin
@test values(ohlc .+ cl) == values(ohlc) .+ values(cl)
@test values(ohlc .- cl) == values(ohlc) .- values(cl)
@test values(ohlc .* cl) == values(ohlc) .* values(cl)
@test values(ohlc ./ cl) == values(ohlc) ./ values(cl)
@test values(ohlc .% cl) == values(ohlc) .% values(cl)
@test values(ohlc .^ cl) == values(ohlc) .^ values(cl)
@test values(ohlc .* (cl.> 200)) == values(ohlc) .* (values(cl) .> 200)
@test values(round.(ohlc) .* cl) == round.(Int, values(ohlc)) .* values(cl)
@test values(cl .+ ohlc) == values(cl) .+ values(ohlc)
@test values(cl .- ohlc) == values(cl) .- values(ohlc)
@test values(cl .* ohlc) == values(cl) .* values(ohlc)
@test values(cl ./ ohlc) == values(cl) ./ values(ohlc)
@test values(cl .% ohlc) == values(cl) .% values(ohlc)
@test values(cl .^ ohlc) == values(cl) .^ values(ohlc)
@test values(cl .* (ohlc .> 200)) == values(cl) .* (values(ohlc) .> 200)
@test values(round.(cl) .* ohlc) == round.(Int, values(cl)) .* values(ohlc)
# One array must have a single column
@test_throws DimensionMismatch (ohlc[:Open, :Close] .+ ohlc)
end
@testset "comparison operations between TimeArray values and Int/Float (and viceversa)" begin
@test values(cl .> 111.94)[1] == false
@test values(cl .< 111.94)[1] == false
@test values(cl .>= 111.94)[1] == true
@test values(cl .<= 111.94)[1] == true
@test values(cl .== 111.94)[1] == true
@test values(cl .!= 111.94)[1] == false
@test values(111.94 .> cl)[1] == false
@test values(111.94 .< cl)[1] == false
@test values(111.94 .>= cl)[1] == true
@test values(111.94 .<= cl)[1] == true
@test values(111.94 .== cl)[1] == true
@test values(111.94 .!= cl)[1] == false
@test values(ohlc .> 111.94)[1,:] == [false, true, false, false]
@test values(ohlc .< 111.94)[1,:] == [true, false, true, false]
@test values(ohlc .>= 111.94)[1,:] == [false, true, false, true]
@test values(ohlc .<= 111.94)[1,:] == [true, false, true, true]
@test values(ohlc .== 111.94)[1,:] == [false, false, false, true]
@test values(ohlc .!= 111.94)[1,:] == [true, true, true, false]
@test values(111.94 .> ohlc)[1,:] == [true, false, true, false]
@test values(111.94 .< ohlc)[1,:] == [false, true, false, false]
@test values(111.94 .>= ohlc)[1,:] == [true, false, true, true]
@test values(111.94 .<= ohlc)[1,:] == [false, true, false, true]
@test values(111.94 .== ohlc)[1,:] == [false, false, false, true]
@test values(111.94 .!= ohlc)[1,:] == [true, true, true, false]
end
@testset "comparison operations between TimeArray values and Bool (and viceversa)" begin
@test values((cl .> 111.94) .== true)[1] == false
@test values((cl .> 111.94) .!= true)[1] == true
@test values(true .== (cl .> 111.94))[1] == false
@test values(true .!= (cl .> 111.94))[1] == true
@test values((ohlc .> 111.94).== true)[1,:] == [false, true, false, false]
@test values((ohlc .> 111.94).!= true)[1,:] == [true, false, true, true]
@test values(true .== (ohlc .> 111.94))[1,:] == [false, true, false, false]
@test values(true .!= (ohlc .> 111.94))[1,:] == [true, false, true, true]
end
@testset "comparison operations between same-column-count TimeArrays" begin
@test values(cl .> op)[1] == true
@test values(cl .< op)[1] == false
@test values(cl .<= op)[1] == false
@test values(cl .>= op)[1] == true
@test values(cl .== op)[1] == false
@test values(cl .!= op)[1] == true
@test values(ohlc .> ohlc)[1,:] == [false, false, false, false]
@test values(ohlc .< ohlc)[1,:] == [false, false, false, false]
@test values(ohlc .<= ohlc)[1,:] == [true, true, true, true]
@test values(ohlc .>= ohlc)[1,:] == [true, true, true, true]
@test values(ohlc .== ohlc)[1,:] == [true, true, true, true]
@test values(ohlc .!= ohlc)[1,:] == [false, false, false, false]
end
@testset "comparison operations between different-column-count TimeArrays" begin
@test values(ohlc .> cl) == (values(ohlc) .> values(cl))
@test values(ohlc .< cl) == (values(ohlc) .< values(cl))
@test values(ohlc .>= cl) == (values(ohlc) .>= values(cl))
@test values(ohlc .<= cl) == (values(ohlc) .<= values(cl))
@test values(ohlc .== cl) == (values(ohlc) .== values(cl))
@test values(ohlc .!= cl) == (values(ohlc) .!= values(cl))
@test values(cl .> ohlc) == (values(cl) .> values(ohlc))
@test values(cl .< ohlc) == (values(cl) .< values(ohlc))
@test values(cl .>= ohlc) == (values(cl) .>= values(ohlc))
@test values(cl .<= ohlc) == (values(cl) .<= values(ohlc))
@test values(cl .== ohlc) == (values(cl) .== values(ohlc))
@test values(cl .!= ohlc) == (values(cl) .!= values(ohlc))
# One array must have a single column
@test_throws DimensionMismatch (ohlc[:Open, :Close] .== ohlc)
end
@testset "bitwise elementwise operations between bool and TimeArrays' values" begin
@test values((cl .> 100) .& true)[1] == true
@test values((cl .> 100) .| true)[1] == true
@test values((cl .> 100) .⊻ true)[1] == false
@test values(false .& (cl .> 100))[1] == false
@test values(false .| (cl .> 100))[1] == true
@test values(false .⊻ (cl .> 100))[1] == true
@test values((ohlc .> 100) .& true)[4,:] == [true, true, false, false]
@test values((ohlc .> 100) .| true)[4,:] == [true, true, true, true]
@test values((ohlc .> 100) .⊻ true)[4,:] == [false, false, true, true]
@test values(false .& (ohlc .> 100))[4,:] == [false, false, false, false]
@test values(false .| (ohlc .> 100))[4,:] == [true, true, false, false]
@test values(false .⊻ (ohlc .> 100))[4,:] == [true, true, false, false]
end
@testset "bitwise elementwise operations between same-column-count TimeArrays' boolean values" begin
@test values((cl .> 100) .& (cl .< 120))[1] == true
@test values((cl .> 100) .| (cl .< 120))[1] == true
@test values((cl .> 100) .⊻ (cl .< 120))[1] == false
@test values((ohlc .> 100) .& (ohlc .< 120))[4,:] == [true, true, false, false]
@test values((ohlc .> 100) .| (ohlc .< 120))[4,:] == [true, true, true, true]
@test values((ohlc .> 100) .⊻ (ohlc .< 120))[4,:] == [false, false, true, true]
@test values((ohlc .> 100) .⊻ (cl .< 120))[4,:] == [false, false, true, true]
end
end
@testset "dot call auto-fusion" begin
@testset "single TimeArray" begin
let ta = sin.(log.(2, op))
@test colnames(ta) == [:Open]
@test timestamp(ta) == timestamp(op)
@test meta(ta) == meta(op)
@test values(ta)[1] == sin(log(2, values(op)[1]))
@test values(ta)[end] == sin(log(2, values(op)[end]))
end
f(x, c) = x + c
let ta = f.(cl, 42)
@test colnames(ta) == [:Close]
@test timestamp(ta) == timestamp(cl)
@test meta(ta) == meta(cl)
@test values(ta)[1] == values(cl)[1] + 42
@test values(ta)[end] == values(cl)[end] + 42
end
let ta = sin.(log.(2, ohlc))
@test colnames(ta) == [:Open, :High, :Low, :Close]
@test timestamp(ta) == timestamp(ohlc)
@test meta(ta) == meta(ohlc)
@test values(ta)[1, 1] == sin(log(2, values(ohlc)[1, 1]))
@test values(ta)[1, 2] == sin(log(2, values(ohlc)[1, 2]))
@test values(ta)[1, 3] == sin(log(2, values(ohlc)[1, 3]))
@test values(ta)[1, 4] == sin(log(2, values(ohlc)[1, 4]))
@test values(ta)[end, 1] == sin(log(2, values(ohlc)[end, 1]))
@test values(ta)[end, 2] == sin(log(2, values(ohlc)[end, 2]))
@test values(ta)[end, 3] == sin(log(2, values(ohlc)[end, 3]))
@test values(ta)[end, 4] == sin(log(2, values(ohlc)[end, 4]))
end
end
@testset "TimeArray and Array" begin
let ta = cl[1:4] .+ [1, 2, 3, 4]
@test colnames(ta) == [:Close]
@test timestamp(ta) == timestamp(cl)[1:4]
@test meta(ta) == meta(cl)
@test values(ta)[1] == values(cl)[1] + 1
@test values(ta)[2] == values(cl)[2] + 2
@test values(ta)[3] == values(cl)[3] + 3
@test values(ta)[4] == values(cl)[4] + 4
end
let ta = ohlc[1:4] .+ [1, 2, 3, 4]
@test colnames(ta) == [:Open, :High, :Low, :Close]
@test timestamp(ta) == timestamp(ohlc)[1:4]
@test meta(ta) == meta(ohlc)
@test values(ta)[1, 1] == values(ohlc)[1, 1] + 1
@test values(ta)[1, 2] == values(ohlc)[1, 2] + 1
@test values(ta)[1, 3] == values(ohlc)[1, 3] + 1
@test values(ta)[1, 4] == values(ohlc)[1, 4] + 1
end
let arr = [1, 2, 3, 4]
@test_throws DimensionMismatch cl .+ arr
end
end
@testset "custom function" begin
let f(x, y, c) = x - y + c, ta = f.(op, cl, 42)
@test colnames(ta) == [:Open_Close]
@test timestamp(ta) == timestamp(cl)
@test timestamp(ta) == timestamp(op)
@test meta(ta) == meta(op)
@test values(ta)[1] == values(op)[1] - values(cl)[1] + 42
@test values(ta)[end] == values(op)[end] - values(cl)[end] + 42
end
end
@testset "broadcast 2D TimeArray" begin
let A = reshape(values(cl), 500, 1) # 2D, dim -> 500×1
ta = TimeArray(timestamp(cl), A) .+ ohlc
@test length(colnames(ta)) == 4
@test timestamp(ta) == timestamp(cl)
end
end
end # @testset "dot call auto-fusion"
end # @testset "broadcast"
|
On November 21, 2011, Nathan agreed to terms on a two-year deal with the Texas Rangers worth $14.5 million guaranteed with an option for a third year at $9 million or a $500,000 buyout.
|
(*Here we give the denotational semantics for inductive
predicates and prove that they are the least fixpoint
that makes all the constructors true*)
Require Import Common.
Require Import Syntax.
Require Import Types.
Require Import Typing.
Require Import Substitution. (*for bnd_t - move? *)
Require Import IndTypes.
Require Import Semantics.
Require Import Denotational.
Require Import Alpha.
Require Import Hlist.
Require Import FunctionalExtensionality.
Require Import Coq.Logic.Eqdep_dec.
Set Bullet Behavior "Strict Subproofs".
Section IndPropRep.
Context {sigma: sig} {gamma: context}
(gamma_valid: valid_context sigma gamma)
(pd: pi_dom).
Variable all_unif: forall m,
mut_in_ctx m gamma ->
uniform m.
Section Def.
(*First, define interpretations*)
(*An interpretation where we substitute p with P*)
Definition interp_with_P (pi: pi_funpred gamma_valid pd) (p: predsym)
(P: forall srts,
arg_list (domain (dom_aux pd)) (sym_sigma_args p srts) -> bool) :
pi_funpred gamma_valid pd :=
{|
funs := funs gamma_valid pd pi;
preds :=
fun pr : predsym =>
match predsym_eq_dec p pr with
| left Heq =>
match Heq with
| eq_refl => P
end
| _ => preds gamma_valid pd pi pr
end;
constrs := constrs gamma_valid pd pi
|}.
(*For the list of predsyms, we need to search through the list
to apply the correct pred. The dependent types make this
complicated, so we use a separate function*)
Fixpoint find_apply_pred (pi: pi_funpred gamma_valid pd)
(ps: list predsym)
(*Our props are an hlist, where we have a Pi for each pi
of type (srts -> arg_list pi srts -> bool)*)
(Ps: hlist (fun (p: predsym) => forall srts,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool) ps) (p: predsym) :
(forall srts : list sort,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool) :=
(match ps as ps' return
(hlist (fun p : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool) ps') ->
forall srts : list sort,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool with
  (*Underneath the dependent types, this is quite
simple: iterate through the list, compare for equality
and if so, apply the correct Pi function*)
| nil => fun _ => (preds gamma_valid pd pi p)
| p' :: ptl => fun Hp =>
match (predsym_eq_dec p p') with
| left Heq => ltac:(rewrite Heq; exact (hlist_hd Hp))
| right _ => (find_apply_pred pi ptl (hlist_tl Hp) p)
end
end) Ps.
(*Do the same for a list of predsyms*)
Definition interp_with_Ps (pi: pi_funpred gamma_valid pd)
(ps: list predsym)
(*Our props are an hlist, where we have a Pi for each pi
of type (srts -> arg_list pi srts -> bool)*)
(Ps: hlist (fun (p: predsym) => forall srts,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool) ps) :
pi_funpred gamma_valid pd :=
{|
funs := funs gamma_valid pd pi;
preds := find_apply_pred pi ps Ps;
constrs := constrs gamma_valid pd pi
|}.
Lemma find_apply_pred_in (pf: pi_funpred gamma_valid pd)
(ps: list predsym)
(Ps: hlist
(fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
ps)
(p: predsym)
(Hinp: in_bool predsym_eq_dec p ps) :
find_apply_pred pf ps Ps p =
get_hlist_elt predsym_eq_dec Ps p Hinp.
Proof.
induction ps; simpl.
- inversion Hinp.
- revert Hinp. simpl.
destruct (predsym_eq_dec p a); subst; auto.
Qed.
Lemma find_apply_pred_notin (pf: pi_funpred gamma_valid pd)
(ps: list predsym)
(Ps: hlist
(fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
ps)
(p: predsym) :
~ In p ps ->
find_apply_pred pf ps Ps p = preds gamma_valid pd pf p.
Proof.
induction ps; simpl; auto.
intros Hnot. destruct (predsym_eq_dec p a); subst; auto.
exfalso. apply Hnot; auto.
Qed.
Lemma interp_with_Ps_single (pi: pi_funpred gamma_valid pd)
(p: predsym)
(Ps: hlist (fun (p:predsym) => forall srts,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool) [p]) :
interp_with_Ps pi [p] Ps =
interp_with_P pi p (hlist_hd Ps).
Proof.
unfold interp_with_Ps. simpl.
unfold interp_with_P. f_equal.
apply functional_extensionality_dep. intros p'.
destruct (predsym_eq_dec p' p).
- subst. destruct (predsym_eq_dec p p); simpl; [|contradiction].
assert (e = eq_refl) by (apply UIP_dec; apply predsym_eq_dec).
rewrite H. reflexivity.
- destruct (predsym_eq_dec p p'); subst; auto.
contradiction.
Qed.
Definition iter_and (l: list Prop) : Prop :=
fold_right and True l.
Lemma prove_iter_and (ps: list Prop):
(forall x, In x ps -> x) <-> iter_and ps.
Proof.
induction ps; simpl; split; intros; auto.
- destruct H0.
- split. apply H. left. reflexivity.
rewrite <- IHps. intros x Hinx. apply H. right. assumption.
- destruct H. destruct H0; subst; auto.
apply IHps; auto.
Qed.
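(*[dep_map f l Hall] maps the dependently-typed function [f] over [l],
  supplying each element with the corresponding proof extracted from the
  [Forall P l] witness [Hall]. It is used below to interpret each
  constructor formula together with its validity proof.*)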
Fixpoint dep_map {A B: Type} {P: A -> Prop} (f: forall x, P x -> B)
(l: list A) (Hall: Forall P l) : list B :=
match l as l' return Forall P l' -> list B with
| nil => fun _ => nil
| x :: tl => fun Hforall => f x (Forall_inv Hforall) ::
dep_map f tl (Forall_inv_tail Hforall)
end Hall.
Lemma dep_map_in {A B: Type} {P: A -> Prop} (f: forall x, P x -> B)
(l: list A) (Hall: Forall P l) (x: B):
In x (dep_map f l Hall) ->
exists y H, In y l /\ f y H = x.
Proof.
revert Hall. induction l; simpl; intros. destruct H.
inversion Hall; subst.
destruct H.
- subst. exists a. exists (Forall_inv Hall). split; auto.
- specialize (IHl _ H). destruct IHl as [y [Hy [Hiny Hxy]]].
exists y. exists Hy. split; auto.
Qed.
Lemma in_dep_map {A B: Type} {P: A -> Prop} (f: forall x, P x -> B)
(l: list A) (Hall: Forall P l) (x: A):
In x l ->
exists H,
In (f x H) (dep_map f l Hall).
Proof.
revert Hall. induction l; simpl; intros. destruct H.
inversion Hall; subst. destruct H; subst.
- exists (Forall_inv Hall). left. reflexivity.
- specialize (IHl (Forall_inv_tail Hall) H).
destruct IHl as [Hx Hinx]. exists Hx. right. assumption.
Qed.
Lemma dep_map_irrel {A B: Type} {P: A -> Prop} (f: forall x, P x -> B)
(l: list A) (Hall1 Hall2: Forall P l):
(forall x H1 H2, f x H1 = f x H2) ->
dep_map f l Hall1 = dep_map f l Hall2.
Proof.
intros Hirrel.
revert Hall1 Hall2.
induction l; intros; simpl; auto.
erewrite IHl. f_equal. apply Hirrel.
Qed.
(*Since inductive predicates can be mutually recursive, we need
a list of predsyms and formula lists. This makes the dependent
types tricky, since we need a (P: forall srts, arg_list srts -> bool)
for each such predsym*)
Definition indpred_rep (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(indpred : list (predsym * list formula))
(Hform: Forall (Forall (valid_formula sigma)) (map snd indpred))
(p: predsym)
(Hin: in_bool predsym_eq_dec p (map fst indpred))
(srts: list sort)
(a: arg_list (domain (dom_aux pd))
(sym_sigma_args p srts)) : bool :=
(*Our props are an hlist, where we have a Pi for each pi
of type (srts -> arg_list pi srts -> bool)*)
all_dec ((forall (Ps: hlist (fun (p': predsym) =>
(forall (srts: list sort),
arg_list (domain (dom_aux pd))
(sym_sigma_args p' srts) -> bool)) (map fst indpred)),
(*The precondition is the conjunction of all of the
inductive hypotheses from the list of formulas, with
each recursive instance using the appropriate Pi*)
((fix build_indpred (l: list (list formula))
(Hl: Forall (Forall (valid_formula sigma)) l) : Prop :=
match l as l' return
Forall (Forall (valid_formula sigma)) l' -> Prop
with
| nil => fun _ => True
| fs :: ftl => fun Hall =>
iter_and (map is_true (dep_map (@formula_rep _ _ gamma_valid pd all_unif
vt (interp_with_Ps pf _ Ps) vv)
fs (Forall_inv Hall))) /\
build_indpred ftl (Forall_inv_tail Hall)
end Hl) _ Hform)
->
(*All of this together implies Pi srts a, for the
i corresponding to p*)
(get_hlist_elt predsym_eq_dec Ps p Hin) srts a)).
(*The version for non-mutual-recursion is a lot simpler*)
Definition indpred_rep_single (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt) (p: predsym)
(fs: list formula) (Hform: Forall (valid_formula sigma) fs) (srts: list sort)
(a: arg_list (domain (dom_aux pd))
(sym_sigma_args p srts)) : bool :=
all_dec
(forall (P: forall (srts: list sort),
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) -> bool),
iter_and (map is_true (dep_map (@formula_rep _ _ gamma_valid
pd all_unif vt (interp_with_P pf p P) vv)
fs Hform)) -> P srts a).
(*We prove these equivalent in the single case
(it makes things easier when we don't need hlists)*)
Definition Forall_single {A: Type} {P: A -> Prop} {x: list A}
(Hform: Forall P x) :
Forall (Forall P) [x] :=
Forall_cons _ Hform (@Forall_nil (list A) (Forall P)).
Lemma in_triv {A} p (fs: list A): is_true (in_bool predsym_eq_dec p (map fst [(p, fs)])).
Proof.
simpl. destruct (predsym_eq_dec); auto.
Defined.
(*Prove equivalence*)
Lemma indpred_rep_single_equiv (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt) (p: predsym)
(fs: list formula) (Hform: Forall (valid_formula sigma) fs) (srts: list sort)
(a: arg_list (domain (dom_aux pd))
(sym_sigma_args p srts)) :
indpred_rep_single pf vt vv p fs Hform srts a =
indpred_rep pf vt vv [(p, fs)] (Forall_single Hform) p (in_triv p fs) srts a.
Proof.
unfold indpred_rep_single.
unfold indpred_rep. simpl.
apply all_dec_eq.
split; intros.
- generalize dependent (in_triv p fs). simpl.
destruct (predsym_eq_dec p p); simpl; auto.
intros _. unfold eq_rect_r, eq_rect.
assert (e = eq_refl) by (apply UIP_dec; apply predsym_eq_dec).
rewrite H1. simpl.
specialize (H (hlist_hd Ps)).
apply H. destruct H0 as [Hand _].
revert Hand.
rewrite (interp_with_Ps_single pf p Ps); intros Hand.
erewrite dep_map_irrel. apply Hand. intros.
apply fmla_rep_irrel.
- revert H. generalize dependent (in_triv p fs); simpl.
destruct (predsym_eq_dec p p); simpl; auto.
intros _ Hmult.
specialize (Hmult (HL_cons (fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
p nil P (@HL_nil _ _))).
revert Hmult. simpl. unfold eq_rect_r, eq_rect.
assert (e = eq_refl). apply UIP_dec. apply predsym_eq_dec.
rewrite H. simpl. intros Hmult.
apply Hmult; clear Hmult. split; auto.
rewrite (interp_with_Ps_single pf p _). simpl.
erewrite dep_map_irrel. apply H0.
intros. apply fmla_rep_irrel.
Qed.
End Def.
(*Now we prove that [indpred_rep] is the
least predicate that satifies the constructors. *)
(*We must show the following:
1. For all constructors, f, [[f]]_i holds, where i maps
each predicate symbol p in ps to [indpred_rep ps p].
2. Given any other Ps such that the constructors hold
under i, where i maps p in ps to (nth i Ps),
then (indpred_rep p ps x) -> (nth i Ps x)
The second part is very easy. The first is very hard.*)
(*First, some helpful lemmas*)
(*One of the complications is that the [build_indpred]
function is difficult to work with. This is a more
useful form *)
Lemma build_indpred_iff (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt) (ps: list predsym)
(Ps: hlist
(fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
ps)
(fs: list (list formula))
(Hform: Forall (Forall (valid_formula sigma)) fs):
((fix build_indpred
(l : list (list formula))
(Hl : Forall (Forall (valid_formula sigma)) l) {struct l} :
Prop :=
match
l as l'
return (Forall (Forall (valid_formula sigma)) l' -> Prop)
with
| [] =>
fun _ : Forall (Forall (valid_formula sigma)) [] => True
| fs :: ftl =>
fun
Hall : Forall (Forall (valid_formula sigma))
(fs :: ftl) =>
iter_and
(map is_true
(dep_map
(formula_rep gamma_valid pd all_unif vt
(interp_with_Ps pf ps Ps) vv)
fs (Forall_inv Hall))) /\
build_indpred ftl (Forall_inv_tail Hall)
end Hl) fs Hform) <->
(forall (f: list formula)
(Hallf: Forall (valid_formula sigma) f)
(Hinf: In f fs),
iter_and
(map is_true
(dep_map
(formula_rep gamma_valid pd all_unif vt
(interp_with_Ps pf ps Ps) vv) f Hallf))).
Proof.
revert Hform.
induction fs; simpl; intros; split; intros; auto.
- destruct Hinf.
- destruct H as [Hhd Htl].
destruct Hinf; subst.
+ erewrite dep_map_irrel. apply Hhd.
intros. apply fmla_rep_irrel.
+ eapply IHfs. apply Htl. auto.
- split.
+ erewrite dep_map_irrel. apply H. left; auto.
intros. apply fmla_rep_irrel.
+ eapply IHfs. intros. apply H. right; auto.
Unshelve.
inversion Hform; subst. auto.
Qed.
Scheme Minimality for valid_ind_form Sort Prop.
Section LeastPred.
(*We prove the second part (the "least predicate" part)
first, since it is easy*)
Theorem indpred_least_pred (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(ps: list (predsym * list formula))
(Hform: Forall (Forall (valid_formula sigma)) (map snd ps)):
forall (Ps: hlist
(fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
(map fst ps)),
(*If P holds of all of the constructors*)
(forall (fs : list formula) (Hform : Forall (valid_formula sigma) fs),
In fs (map snd ps) ->
iter_and (map is_true (dep_map
(formula_rep gamma_valid pd all_unif vt
(interp_with_Ps pf (map fst ps) Ps) vv) fs Hform))) ->
(*Then indpred_rep p fs x -> P x*)
forall (p: predsym) (Hinp: in_bool predsym_eq_dec p (map fst ps))
(srts: list sort)
(a: arg_list (domain (dom_aux pd))
(sym_sigma_args p srts)),
indpred_rep pf vt vv ps Hform p Hinp srts a ->
get_hlist_elt predsym_eq_dec Ps p Hinp srts a.
Proof.
intros Ps Hand p Hinp srts a.
unfold indpred_rep.
rewrite simpl_all_dec. intros.
apply H.
rewrite build_indpred_iff.
auto.
Qed.
(*On the other hand, the first part is hard (showing that [indpred_rep]
holds of all constructors). Here is an approach to show it:
1. Prove that any constructor is equivalent to one where we
have a bunch of forall quantifiers, followed by a bunch
of let statements, followed by a chain of implications
ending in indpred_rep p fs x for some x
2. From this, unfold the definition of indpred_rep,
and we eventually have to prove that, for each of the
"and" formulas in the implication, if [[f]] is true
when ps map to [indpred_rep ps], then [[f]] is true
when ps map to Ps for any Ps. This is true if f is
strictly positive, showing why this condition is crucial.
Step 1 requires a lot of steps
1. define variable substitution and prove correctness
2. define a function to substitute all bound variables
to new, unique values (alpha equivalence)
3. define a transformation into the (forall _, let _, and f_i -> f)
form, and prove that it preserves the semantics.
Then, prove that this both ends in P srts x for
[valid_ind_forms] and that the [f_i]'s are strictly
positive
4. Prove the crucial lemma that [[f]]_[ps->indpred_rep ps] ->
[[f]]_[ps->Ps] for any Ps if ps occur strictly
positively in f
5. Prove the main theorem*)
(*We did steps 1 and 2 in Denotational.v (TODO). We start with
step 3*)
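(*For intuition, step 3 targets constructors of the shape
forall x1 ... xj, let y1 := t1 in ... let yk := tk in
(f_1 /\ ... /\ f_m) -> P tys tms
where the f_i are the hypotheses. [indpred_decomp] below computes the four
components of this shape (quantified variables, let-bindings, hypotheses,
conclusion), [indpred_transform] reassembles them into a formula, and
[ind_form_decomp] shows that for [valid_ind_form] constructors the conclusion
is indeed an application of the predicate.*)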
Definition tup_1 {A B C D: Type} (x: A * B * C * D) :=
match x with
| (a, _, _, _) => a
end.
Definition tup_2 {A B C D: Type} (x: A * B * C * D) :=
match x with
| (_, b, _, _) => b
end.
Definition tup_3 {A B C D: Type} (x: A * B * C * D) :=
match x with
| (_, _, c, _) => c
end.
Definition tup_4 {A B C D: Type} (x: A * B * C * D) :=
match x with
| (_, _, _, d) => d
end.
(*The decomposition*)
Fixpoint indpred_decomp (f: formula) :
(list vsymbol * list (vsymbol * term) * list formula * formula) :=
match f with
| Fquant Tforall x f1 =>
let t := indpred_decomp f1 in
(x :: tup_1 t, tup_2 t, tup_3 t, tup_4 t)
| Fbinop Timplies f1 f2 =>
let t := indpred_decomp f2 in
(tup_1 t, tup_2 t, f1 :: tup_3 t, tup_4 t)
| Flet t1 v f1 =>
let t := indpred_decomp f1 in
(tup_1 t, (v, t1) :: tup_2 t, tup_3 t, tup_4 t)
| _ => (nil, nil, nil, f)
end.
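(*Example (informal, not used later): for
f = Fquant Tforall x (Fbinop Timplies g (Flet t v (Fpred p tys tms))),
the decomposition is
indpred_decomp f = ([x], [(v, t)], [g], Fpred p tys tms).*)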
(*Now we prove that for [valid_ind_form] formulas with
well-formed bound variables, [indpred_decomp] produces
an equivalent formula when interpreted.*)
Ltac split_all :=
repeat match goal with
| H: ?P /\ ?Q |- _ => destruct H
| |- ?P /\ ?Q => split
end.
(*A few results about [indpred_decomp]*)
(*First, validity results we need - this proof is very easy*)
Lemma indpred_decomp_valid (f: formula) (Hval: valid_formula sigma f) :
Forall (fun x : string * vty => valid_type sigma (snd x)) (tup_1 (indpred_decomp f)) /\
Forall (fun x : string * vty * term => term_has_type sigma (snd x) (snd (fst x)))
(tup_2 (indpred_decomp f)) /\
Forall (valid_formula sigma) (tup_3 (indpred_decomp f)) /\
valid_formula sigma (tup_4 (indpred_decomp f)).
Proof.
revert Hval.
apply (term_formula_ind) with(P1:=fun _ => True) (P2:= fun f =>
valid_formula sigma f ->
Forall (fun x : string * vty => valid_type sigma (snd x)) (tup_1 (indpred_decomp f)) /\
Forall (fun x : string * vty * term => term_has_type sigma (snd x) (snd (fst x)))
(tup_2 (indpred_decomp f)) /\
Forall (valid_formula sigma) (tup_3 (indpred_decomp f)) /\
valid_formula sigma (tup_4 (indpred_decomp f))); simpl; auto; intros.
- destruct q; simpl; auto.
inversion H0; subst. specialize (H H6).
split_all; auto.
- destruct b; simpl; auto.
inversion H1; subst. specialize (H H5).
specialize (H0 H7). split_all; auto.
- inversion H1; subst.
specialize (H0 H7). split_all; auto.
- apply (Tconst (ConstInt 0)).
Qed.
Ltac triv_fls :=
split_all; intros;
match goal with | H: False |- _ => destruct H end.
Lemma indpred_decomp_bound (f: formula) :
(forall x, In x (tup_1 (indpred_decomp f)) -> In x (bnd_f f)) /\
(forall x, In x (tup_2 (indpred_decomp f)) -> In (fst x) (bnd_f f)).
Proof.
apply (term_formula_ind) with(P1:=fun _ => True) (P2:= fun f =>
(forall x : vsymbol, In x (tup_1 (indpred_decomp f)) -> In x (bnd_f f)) /\
(forall x : vsymbol * term,
In x (tup_2 (indpred_decomp f)) -> In (fst x) (bnd_f f))); simpl; auto; intros;
try solve[triv_fls].
- destruct q; simpl;[|triv_fls].
split_all; intros.
+ destruct H1; subst. left; auto. right. apply H. auto.
+ apply H0 in H1. right; auto.
- destruct b; simpl; try triv_fls. split; intros; simpl;
apply in_or_app; right; apply H0; auto.
- split_all; intros. right. apply in_or_app. right. apply H0; auto.
destruct H2; subst. left; auto. right. apply in_or_app. right.
apply H1. auto.
- apply (Tconst (ConstInt 0)).
Qed.
Lemma indpred_decomp_wf (f: formula) (Hwf: fmla_wf f):
(forall x, ~ (In x (tup_1 (indpred_decomp f)) /\
In x (map fst (tup_2 (indpred_decomp f))))).
Proof.
revert Hwf.
apply (term_formula_ind) with(P1:=fun _ => True) (P2:= fun f =>
fmla_wf f ->
forall x : vsymbol,
~
(In x (tup_1 (indpred_decomp f)) /\ In x (map fst (tup_2 (indpred_decomp f)))));
auto; simpl; auto; intros; try solve[intro C; triv_fls].
- destruct q; simpl; [|intro C; triv_fls].
intro C. split_all.
destruct H1; subst.
+ specialize (H (wf_quant _ _ _ H0)).
unfold fmla_wf in H0.
simpl in H0. split_all. inversion H0; subst.
rewrite in_map_iff in H2. destruct H2 as [y [Hy Hiny]].
assert (In (fst y) (bnd_f f0)).
apply indpred_decomp_bound. auto. subst. contradiction.
+ apply (H (wf_quant _ _ _ H0) x); auto.
- destruct b; simpl; intro C; try triv_fls.
apply (H0 (proj2 (wf_binop _ _ _ H1)) x). auto.
- specialize (H0 (wf_let _ _ _ H1)).
intro C. split_all.
destruct H3; subst.
+ unfold fmla_wf in H1. simpl in H1. split_all. inversion H1; subst.
apply H6. apply in_or_app. right. apply indpred_decomp_bound; auto.
+ apply (H0 x); auto.
- apply (Tconst (ConstInt 0)).
Qed.
(*How we transform this decomposition into a formula*)
Definition indpred_transform (f: formula) : formula :=
(fforalls (tup_1 (indpred_decomp f))
(iter_flet (tup_2 (indpred_decomp f))
(Fbinop Timplies
(iter_fand (tup_3 (indpred_decomp f)))
(tup_4 (indpred_decomp f))))).
Lemma indpred_transform_valid (f: formula) (Hval: valid_formula sigma f) :
valid_formula sigma (indpred_transform f).
Proof.
unfold indpred_transform.
apply fforalls_valid;[|apply indpred_decomp_valid; auto].
apply iter_flet_valid; [| apply indpred_decomp_valid; auto].
constructor; [|apply indpred_decomp_valid; auto].
apply iter_fand_valid; auto.
apply indpred_decomp_valid; auto.
Qed.
(*Now, we prove that any formula which is valid and whose bound
variables are well-formed is equivalent to the one formed
by [indpred_decomp]*)
Lemma indpred_decomp_equiv (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: @val_vars sigma pd vt)
(f: formula) (Hval: valid_formula sigma f)
(Hwf: fmla_wf f) :
formula_rep gamma_valid pd all_unif vt pf vv f Hval =
formula_rep gamma_valid pd all_unif vt pf vv
(indpred_transform f) (indpred_transform_valid f Hval).
Proof.
revert vv.
generalize dependent (indpred_transform_valid f Hval).
(*TODO: we need a better way to do induction with formulas*)
revert Hval Hwf.
apply term_formula_ind with(P1:=fun _ => True)
(P2:= fun f => forall Hval : valid_formula sigma f,
fmla_wf f -> forall (v : valid_formula sigma (indpred_transform f))
(vv : val_vars pd vt),
formula_rep gamma_valid pd all_unif vt pf vv f Hval =
formula_rep gamma_valid pd all_unif vt pf vv (indpred_transform f) v);
unfold indpred_transform; simpl; auto; intros; try solve[apply true_impl].
- destruct q; simpl; auto; [|apply true_impl].
simpl in v0.
simpl_rep_full. apply all_dec_eq.
split; intros Hall d.
+ rewrite <- H with (Hval:=(valid_quant_inj Hval)).
apply (Hall d).
apply wf_quant in H0; auto.
+ erewrite H. apply (Hall d).
apply wf_quant in H0; auto.
- destruct b; try solve[apply true_impl].
simpl.
simpl in v.
(*We need to know that we can push a let and a quantifier
across an implication. This is why we need the wf assumption*)
simpl_rep_full.
rewrite bool_of_binop_impl.
assert (Hval1 : valid_formula sigma
(fforalls (tup_1 (indpred_decomp f2))
(iter_flet (tup_2 (indpred_decomp f2))
(Fbinop Timplies (iter_fand (tup_3 (indpred_decomp f2)))
(tup_4 (indpred_decomp f2)))))). {
apply fforalls_valid_inj in v. split_all.
apply fforalls_valid; auto.
apply iter_flet_valid_inj in H2. split_all.
apply iter_flet_valid; auto.
inversion H2; subst.
constructor; auto.
inversion H8; subst. auto.
}
rewrite H0 with(v:=Hval1); [| apply (wf_binop _ _ _ H1)].
assert (Hval2: valid_formula sigma
(fforalls (tup_1 (indpred_decomp f2))
(iter_flet (tup_2 (indpred_decomp f2))
(Fbinop Timplies f1 (Fbinop Timplies
(iter_fand (tup_3 (indpred_decomp f2)))
(tup_4 (indpred_decomp f2))))))). {
inversion Hval; subst.
apply fforalls_valid_inj in Hval1. split_all.
apply iter_flet_valid_inj in H2. split_all.
inversion H2; subst.
apply fforalls_valid; auto.
apply iter_flet_valid; auto.
constructor; auto.
}
rewrite and_impl_bound with(Hval2:=Hval2).
assert (Hval3: valid_formula sigma (Fbinop Timplies f1
(fforalls (tup_1 (indpred_decomp f2))
(iter_flet (tup_2 (indpred_decomp f2))
(Fbinop Timplies (iter_fand (tup_3 (indpred_decomp f2)))
(tup_4 (indpred_decomp f2))))))). {
apply fforalls_valid_inj in Hval2; split_all.
apply iter_flet_valid_inj in H2; split_all.
inversion H2; subst. constructor; auto.
}
rewrite (distr_impl_let_forall _ _ _ vt pf vv f1) with(Hval2:=Hval3).
+ simpl_rep_full. rewrite bool_of_binop_impl.
apply all_dec_eq. split; intros;
erewrite fmla_rep_irrel;
apply H2; erewrite fmla_rep_irrel; apply H3.
+ (*Now, prove that everything in tup_1 is a bound variable in formula*)
intros. intro C. split_all.
unfold fmla_wf in H1. split_all. apply (H4 x).
split_all; simpl; auto. apply union_elts. left; auto.
apply in_or_app. right. apply indpred_decomp_bound; auto.
+ intros x C. unfold fmla_wf in H1. split_all.
apply (H4 (fst x)). split_all.
simpl. apply union_elts. left; auto.
simpl. apply in_or_app. right. apply indpred_decomp_bound; auto.
- (*On to let case*)
simpl_rep_full.
assert (Hval1: valid_formula sigma
(fforalls (tup_1 (indpred_decomp f0))
(iter_flet (tup_2 (indpred_decomp f0))
(Fbinop Timplies (iter_fand (tup_3 (indpred_decomp f0)))
(tup_4 (indpred_decomp f0)))))). {
apply fforalls_valid_inj in v0; split_all.
inversion H2; subst.
apply fforalls_valid; auto.
}
rewrite H0 with(v:=Hval1); [| apply (wf_let _ _ _ H1)].
(*We showed that we can push a let through a fforalls as long
as v is not in any of those bound variables*)
assert (Hval2: valid_formula sigma (Flet tm v
(fforalls (tup_1 (indpred_decomp f0))
(iter_flet (tup_2 (indpred_decomp f0))
(Fbinop Timplies (iter_fand (tup_3 (indpred_decomp f0)))
(tup_4 (indpred_decomp f0))))))). {
apply fforalls_valid_inj in v0; split_all.
inversion H2; subst.
constructor; auto.
}
erewrite distr_let_foralls with(Hval2:=Hval2).
simpl_rep_full.
erewrite term_rep_irrel.
erewrite fmla_rep_irrel. reflexivity.
(*These contradict wf*)
intro C.
assert (In v (bnd_f f0)). {
apply indpred_decomp_bound; auto.
}
unfold fmla_wf in H1. split_all. simpl in H1. inversion H1; subst.
apply H6. apply in_or_app; right; auto.
intros y Hy C.
assert (In y (bnd_f f0)). {
apply indpred_decomp_bound; auto.
}
unfold fmla_wf in H1. split_all. simpl in H3.
apply (H3 y).
split_all; auto.
apply union_elts. left; auto.
right. apply in_or_app. right; auto.
- apply (Tconst (ConstInt 0)).
Qed.
(*Finally, we need to reason about the last part of the formula.
We show that, for [valid_ind_form]s, this is Fpred p tys tms, for
tys and tms given by the following function *)
Fixpoint get_indprop_args (f: formula) : (list vty * list term) :=
match f with
| Fpred p tys tms => (tys, tms)
| Fquant Tforall x f1 => get_indprop_args f1
| Flet t x f1 => get_indprop_args f1
| Fbinop Timplies f1 f2 => get_indprop_args f2
| _ => (nil ,nil)
end.
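(*Example (informal):
get_indprop_args (Fquant Tforall x (Fbinop Timplies g (Flet t v (Fpred p tys tms)))) = (tys, tms),
matching [ind_form_decomp] below.*)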
Lemma ind_form_decomp (p: predsym) (f: formula)
(Hval: valid_ind_form p f) :
(tup_4 (indpred_decomp f)) = Fpred p (fst (get_indprop_args f))
(snd (get_indprop_args f)).
Proof.
induction Hval; simpl; auto.
Qed.
(** Results based on Positivity/Strict Positivity *)
Lemma positive_fforalls (ps: list predsym) (q: list vsymbol) (f: formula):
ind_positive ps f <->
ind_positive ps (fforalls q f).
Proof.
split; intros.
- induction q; intros; simpl; auto. constructor; auto.
- induction q; simpl; auto; intros. inversion H; subst; auto.
Qed.
Lemma positive_iter_flet (ps: list predsym) (l: list (vsymbol * term))
(f: formula):
ind_positive ps (iter_flet l f) <->
(Forall (fun x => (forall p, In p ps -> negb (predsym_in_term p (snd x)))) l) /\
ind_positive ps f.
Proof.
split; intros.
- induction l; simpl; auto.
simpl in H. inversion H; subst.
specialize (IHl H4). split_all; auto.
- induction l; simpl; auto. apply H.
split_all. inversion H; subst.
constructor; auto.
Qed.
(*First, if p appears in f positively, then p
appears in [indpred_transform] positively *)
Lemma indpred_transform_positive (ps: list predsym) (f: formula)
(Hpos: ind_positive ps f):
ind_positive ps (indpred_transform f).
Proof.
unfold indpred_transform.
apply positive_fforalls.
(*lets are harder because we need to know that the predicates in ps do not appear in the let-bound term*)
induction Hpos; simpl; auto.
- constructor. constructor. auto. constructor; auto.
- constructor; auto.
- rewrite positive_iter_flet in IHHpos.
rewrite positive_iter_flet. split_all; auto.
clear H0.
inversion H1; subst.
constructor; auto.
apply ISP_and; auto.
Qed.
Lemma strict_pos_and (ps: list predsym) (f1 f2: formula):
ind_strictly_positive ps (Fbinop Tand f1 f2) ->
ind_strictly_positive ps f1 /\
ind_strictly_positive ps f2.
Proof.
intros. inversion H; subst.
- simpl in H0.
split; apply ISP_notin; intros p Hinp; specialize (H0 p Hinp);
rewrite negb_orb in H0; apply andb_true_iff in H0; apply H0.
- auto.
Qed.
Lemma iter_and_strict_pos (ps: list predsym) (fs: list formula):
ind_strictly_positive ps (iter_fand fs) ->
forall f, In f fs ->
ind_strictly_positive ps f.
Proof.
induction fs; simpl; auto.
- intros; triv_fls.
- intros.
apply strict_pos_and in H. split_all.
destruct H0; subst; auto.
Qed.
(*From this, we prove that p appears in the "and" part
strictly positively*)
Lemma indpred_decomp_and_strict_pos (ps: list predsym) (f: formula)
(Hpos: ind_positive ps f):
(forall f1, In f1 (tup_3 (indpred_decomp f)) -> ind_strictly_positive ps f1).
Proof.
intros.
apply indpred_transform_positive in Hpos.
unfold indpred_transform in Hpos.
apply positive_fforalls in Hpos.
apply positive_iter_flet in Hpos.
split_all.
inversion H1; subst.
apply (iter_and_strict_pos _ _ H4); auto.
Qed.
(*We also conclude that p appears in the last part
positively*)
Lemma indpred_decomp_last_pos (ps: list predsym) (f: formula)
(Hpos: ind_positive ps f):
ind_positive ps (tup_4 (indpred_decomp f)).
Proof.
apply indpred_transform_positive in Hpos.
unfold indpred_transform in Hpos.
apply positive_fforalls in Hpos.
apply positive_iter_flet in Hpos.
split_all. inversion H0; subst. auto.
Qed.
(*We also need the fact that everything in (tup_2) does not include
anything in ps*)
Lemma indpred_decomp_let_notin (ps: list predsym) (f: formula)
(Hpos: ind_positive ps f):
Forall (fun x =>
forall (p: predsym), In p ps ->
negb (predsym_in_term p (snd x))) (tup_2 (indpred_decomp f)).
Proof.
apply indpred_transform_positive in Hpos.
unfold indpred_transform in Hpos.
apply positive_fforalls in Hpos.
apply positive_iter_flet in Hpos.
split_all; auto.
Qed.
(*We need the following: since all of the constructor
formulas are closed, they are equivalent under any valuation;
accordingly, so is [indpred_rep]*)
Lemma constrs_val_eq (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (v1 v2: val_vars pd vt)
(fs: list formula)
(Hform: Forall (valid_formula sigma) fs)
(Hclosed: Forall closed_formula fs) :
iter_and (map is_true (dep_map
(formula_rep gamma_valid pd
all_unif vt pf v1) fs Hform)) =
iter_and (map is_true (dep_map
(formula_rep gamma_valid pd
all_unif vt pf v2) fs Hform)).
Proof.
f_equal. f_equal.
revert Hform.
induction fs; simpl; auto.
intros. inversion Hform; subst. inversion Hclosed; subst.
f_equal; auto.
apply fmla_closed_val; auto.
Qed.
Lemma indpred_rep_val_eq (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (v1 v2: val_vars pd vt)
(ps: list (predsym * list formula))
(Hform: Forall (Forall (valid_formula sigma)) (map snd ps))
(Hclosed: Forall (Forall closed_formula) (map snd ps))
(p: predsym) (Hinp: in_bool predsym_eq_dec p (map fst ps)):
indpred_rep pf vt v1 ps Hform p Hinp =
indpred_rep pf vt v2 ps Hform p Hinp.
Proof.
unfold indpred_rep.
repeat(apply functional_extensionality_dep; intros).
apply all_dec_eq.
split; intros Hand Ps; specialize (Hand Ps);
rewrite build_indpred_iff; intros Hallconstrs;
apply Hand; rewrite build_indpred_iff;
intros f Hallf Hinf; specialize (Hallconstrs f Hallf Hinf);
erewrite constrs_val_eq; try apply Hallconstrs;
rewrite Forall_forall in Hclosed; apply Hclosed; assumption.
Qed.
(*Now we prove our key intermediate lemma that we need:
suppose f is a formula in which p appears strictly positively,
then [[f]]_(p->indpred_rep p) implies [[f]]_(p->P) for any P*)
Lemma strict_pos_impred_implies_P' (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(ps: list (predsym * (list formula)))
(f: formula)
(Hvalf: valid_formula sigma f)
(Hpos: ind_strictly_positive (map fst ps) f)
(Hform: Forall (Forall (valid_formula sigma)) (map snd ps))
(Hclosed: Forall (Forall closed_formula) (map snd ps))
(Hindpred: forall (p : predsym)
(Hinp : in_bool predsym_eq_dec p (map fst ps)),
preds gamma_valid pd pf p =
indpred_rep pf vt vv ps Hform p Hinp)
:
forall (Ps: hlist
(fun p' : predsym =>
forall srts : list sort,
arg_list (domain (dom_aux pd)) (sym_sigma_args p' srts) -> bool)
(map fst ps)),
(*If P holds of all of the constructors*)
(forall (fs: list formula) (Hform: Forall (valid_formula sigma) fs),
In fs (map snd ps) ->
iter_and (map is_true (dep_map
(formula_rep gamma_valid pd all_unif vt
(interp_with_Ps pf (map fst ps) Ps) vv) fs Hform))) ->
(*Then [[f]]_(p->indpred_rep p) implies [[f]]_(p->P)*)
formula_rep gamma_valid pd all_unif vt pf vv f Hvalf ->
formula_rep gamma_valid pd all_unif vt (interp_with_Ps pf (map fst ps) Ps) vv f Hvalf.
Proof.
intros Ps HandPs.
generalize dependent vv.
induction Hpos; simpl; intros vv Hindpred HandP; auto;
simpl_rep_full.
- intros Hrep. erewrite fmla_predsym_agree. apply Hrep.
all: auto.
intros p' Hinp'.
unfold interp_with_Ps; simpl.
(*Show that p' not in (map fst ps)*)
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H _ i). rewrite Hinp' in H. inversion H.
- (*Show arg lists are the same: because P cannot appear
in list by strict positivity*)
assert ((pred_arg_list pd vt p vs ts
(term_rep gamma_valid pd all_unif vt pf vv) Hvalf) =
(pred_arg_list pd vt p vs ts
(term_rep gamma_valid pd all_unif vt
(interp_with_Ps pf (map fst ps) Ps) vv) Hvalf)). {
apply get_arg_list_eq.
rewrite Forall_forall. intros.
rewrite term_rep_irrel with(Hty2:=Hty2).
apply term_predsym_agree; simpl; auto.
intros p' Hinp'. symmetry.
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H0 _ _ H1 i). rewrite Hinp' in H0. inversion H0.
}
rewrite <- H1. rewrite Hindpred with(Hinp:=(In_in_bool predsym_eq_dec _ _ H)).
rewrite find_apply_pred_in with(Hinp:=(In_in_bool predsym_eq_dec _ _ H)).
apply indpred_least_pred. auto.
- rewrite !bool_of_binop_impl, !simpl_all_dec.
intros Hinpl Hval.
apply IHHpos; auto.
apply Hinpl.
(*Now we use the fact that P is not in f1*)
rewrite (fmla_predsym_agree) with(p2:=(interp_with_Ps pf (map fst ps) Ps)); auto.
intros p' Hinp'.
simpl. symmetry.
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H _ i). rewrite Hinp' in H. inversion H.
- destruct q;simpl_rep_full.
+ rewrite !simpl_all_dec; intros Hall d; specialize (Hall d).
apply IHHpos; auto.
(*Use closed fmla assumptions*)
* intros p Hinp.
erewrite indpred_rep_val_eq; auto.
* intros. erewrite constrs_val_eq; auto.
rewrite Forall_forall in Hclosed. apply Hclosed; auto.
+ rewrite !simpl_all_dec; intros [d Hex]; exists d.
apply IHHpos; auto.
* intros p Hinp.
erewrite indpred_rep_val_eq; auto.
* intros. erewrite constrs_val_eq; auto.
rewrite Forall_forall in Hclosed. apply Hclosed; auto.
- unfold is_true; rewrite !andb_true_iff;
intros [Hf1 Hf2].
split; [apply IHHpos1 | apply IHHpos2]; auto.
- unfold is_true; rewrite !orb_true_iff;
intros [Hf1 | Hf2];
[left; apply IHHpos1 | right; apply IHHpos2]; auto.
- intros Hf. apply IHHpos; auto.
+ intros p Hinp. erewrite indpred_rep_val_eq; auto.
+ intros. erewrite constrs_val_eq; auto.
rewrite Forall_forall in Hclosed. apply Hclosed; auto.
+ (*Need fact that p doesn't appear in let term*)
erewrite term_predsym_agree. apply Hf. all: auto.
intros p' Hinp'. simpl.
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H _ i). rewrite Hinp' in H. inversion H.
- (*First, know that [[f1]] eq in both cases because P cannot be
present*)
assert (Hf1: formula_rep gamma_valid pd all_unif vt pf vv f1
(proj1 (valid_if_inj Hvalf)) =
formula_rep gamma_valid pd all_unif vt (interp_with_Ps pf (map fst ps) Ps) vv f1
(proj1 (valid_if_inj Hvalf))). {
apply fmla_predsym_agree; auto; simpl; intros p' Hinp'.
symmetry.
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H _ i). rewrite Hinp' in H. inversion H.
}
rewrite <- Hf1.
destruct (formula_rep gamma_valid pd all_unif vt pf vv f1
(proj1 (valid_if_inj Hvalf)));
[apply IHHpos1 | apply IHHpos2]; auto.
- (*Hmm, this is the hardest one - need rewrite lemma for match*)
(*Here, we need a nested induction*)
iter_match_gen Hvalf Htms Hps Hvalf.
induction pats; simpl; auto.
intros. destruct a as [fh ph]. revert H2.
(*Show that [term_rep] is equal because P cannot appear*)
assert (Hteq:
(term_rep gamma_valid pd all_unif vt pf vv t ty Hvalf) =
(term_rep gamma_valid pd all_unif vt (interp_with_Ps pf (map fst ps) Ps) vv t ty
Hvalf)). {
apply term_predsym_agree; auto. intros p' Hinp'; simpl.
symmetry.
destruct (in_bool_spec predsym_eq_dec p' (map fst ps));
[|apply find_apply_pred_notin; auto].
specialize (H _ i). rewrite Hinp' in H. inversion H.
}
rewrite <- Hteq at 1.
destruct (match_val_single gamma_valid pd all_unif vt ty fh (Forall_inv Hps)
(term_rep gamma_valid pd all_unif vt pf vv t ty Hvalf)) eqn : Hm.
+ (*First case follows from original IH*)
apply H1; simpl; auto.
* intros p Hinp. erewrite indpred_rep_val_eq; auto.
* intros. erewrite constrs_val_eq; auto.
rewrite Forall_forall in Hclosed. apply Hclosed; auto.
+ (*From nested IH*)
apply IHpats; auto.
* intros h Hinf. apply H0. right; auto.
* intros. apply H1; auto. right; auto.
Qed.
(*TODO: move*)
(*If some pred P does not appear in any terms for [substi_multi_let],
then valuations are equal no matter what P is*)
Lemma substi_mult_notin_eq (pf1 pf2: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt) (l: list (vsymbol * term))
(ps: list predsym) Hall
(Hallnotin: Forall (fun x => (forall p, In p ps ->
negb (predsym_in_term p (snd x)))) l) :
(forall p, ~ In p ps -> (preds gamma_valid pd pf1 p) = (preds gamma_valid pd pf2 p)) ->
(forall f, funs gamma_valid pd pf1 f = funs gamma_valid pd pf2 f) ->
substi_multi_let gamma_valid pd all_unif vt pf1 vv l Hall =
substi_multi_let gamma_valid pd all_unif vt pf2 vv l Hall.
Proof.
revert Hall vv.
induction l; simpl; auto; intros.
inversion Hallnotin; subst.
destruct a.
assert (substi pd vt vv v
(term_rep gamma_valid pd all_unif vt pf1 vv t (snd v) (Forall_inv Hall)) =
(substi pd vt vv v
(term_rep gamma_valid pd all_unif vt pf2 vv t (snd v) (Forall_inv Hall)))). {
unfold substi. apply functional_extensionality_dep; intros; simpl.
destruct (vsymbol_eq_dec x v); subst; auto.
unfold eq_rec_r, eq_rec, eq_rect. simpl.
apply term_predsym_agree; auto.
intros p Hinp.
apply H. intro Hinp'.
simpl in H3. apply H3 in Hinp'.
rewrite Hinp in Hinp'. inversion Hinp'.
}
rewrite H1.
apply IHl; auto.
Qed.
(*Finally, we prove the main theorem*)
(*TODO: prove version with recursive instance of Ps
(need lemma that we can assign Ps anything and it doesn't
matter - not hard to show)*)
Theorem indpred_constrs_true
(pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(indpred: list (predsym * list formula))
(Hform: Forall (Forall (valid_formula sigma)) (map snd indpred))
(Hvalind: Forall (fun t => Forall (valid_ind_form (fst t)) (snd t))
indpred)
(Hpos: Forall (Forall (ind_positive (map fst indpred)))
(map snd indpred))
(Hclosed: Forall (Forall closed_formula) (map snd indpred))
(Hindpred: forall p
(Hinp: in_bool predsym_eq_dec p (map fst indpred)),
preds gamma_valid pd pf p =
indpred_rep pf vt vv indpred Hform p Hinp) :
(forall (f: formula) (Hvalf: valid_formula sigma f),
In f (concat (map snd indpred)) ->
formula_rep gamma_valid pd all_unif vt pf vv f Hvalf).
Proof.
intros f Hvalf Hinf.
rewrite in_concat in Hinf.
destruct Hinf as [fs [Hinfs Hinf]].
assert (Hinfs':=Hinfs).
rewrite in_map_iff in Hinfs.
destruct Hinfs as [[p fs'] [Hfst Hinfs]]; simpl in Hfst; subst.
(*Part 1: work with alpha conversion to get wf*)
rewrite a_convert_f_rep.
assert (Hvalindf: valid_ind_form p f). {
rewrite Forall_forall in Hvalind.
specialize (Hvalind (p, fs) Hinfs). simpl in Hvalind.
rewrite Forall_forall in Hvalind. apply Hvalind; auto.
}
assert (Hposf: ind_positive (map fst indpred) f). {
rewrite Forall_forall in Hpos.
specialize (Hpos fs Hinfs').
rewrite Forall_forall in Hpos.
apply Hpos; auto.
}
assert (Hvalinda:=(a_convert_f_valid_ind_form p f Hvalindf)).
assert (Hwfa:=(a_convert_f_wf f)).
assert (Hposa:=(a_convert_f_pos (map fst indpred) f Hposf)).
(*Part 2: Work with [indpred_transform] *)
rewrite indpred_decomp_equiv; auto.
assert (Hvaldec:=(indpred_transform_valid _ (a_convert_f_valid _ Hvalf))).
(*Then we can unfold manually*)
unfold indpred_transform in *.
assert (A:=Hvaldec).
apply fforalls_valid_inj in A.
destruct A as [Hval1 Halltup1].
rewrite fmla_rep_irrel with
(Hval2:= (fforalls_valid (tup_1 (indpred_decomp (a_convert_f f))) _ Hval1 Halltup1)).
rewrite fforalls_val. rewrite simpl_all_dec. intros h.
assert (A:=Hval1).
apply iter_flet_valid_inj in A.
destruct A as [Hval2 Halltup2].
rewrite (fmla_rep_irrel) with(Hval2:=(iter_flet_valid _ _ Hval2 Halltup2)).
rewrite iter_flet_val. simpl_rep_full.
rewrite bool_of_binop_impl, simpl_all_dec.
intros Hconstrs.
(*Might need lemma about equality of fmlas*)
assert (Hval3: valid_formula sigma (Fpred p (fst (get_indprop_args (a_convert_f f))) (snd (get_indprop_args (a_convert_f f))))). {
rewrite <- ind_form_decomp; auto.
inversion Hval2; subst; auto.
}
rewrite fmla_rewrite with(Hval2:=Hval3); [|apply ind_form_decomp; auto].
simpl_rep_full.
assert (Hinp: In p (map fst indpred)). {
rewrite in_map_iff. exists (p, fs); auto.
}
assert (Hinp': in_bool predsym_eq_dec p (map fst indpred)) by
(apply In_in_bool; auto).
rewrite Hindpred with(Hinp:=Hinp').
(*Now we can unfold the definition of [indpred_rep_single]*)
unfold indpred_rep.
rewrite simpl_all_dec; intros Ps Hallconstrs.
(*We need 2 things from this (unwieldy) definition:
that all constructors in fs are true under p->P interp,
and that f is true. Obviously the second follows*)
assert (Hallfs: Forall (valid_formula sigma) fs). {
clear -Hform Hinfs'.
rewrite Forall_forall in Hform; auto.
}
rewrite build_indpred_iff in Hallconstrs.
assert (Hconstrsfs :=(Hallconstrs fs Hallfs Hinfs')).
(*Now, we need to know that this constructor (f) is true
under p->P interp*)
assert (Hformf: formula_rep gamma_valid pd
all_unif vt (interp_with_Ps pf (map fst indpred) Ps) vv f Hvalf). {
rewrite <- prove_iter_and in Hconstrsfs.
apply Hconstrsfs.
rewrite in_map_iff. exists (formula_rep gamma_valid pd
all_unif vt (interp_with_Ps pf (map fst indpred) Ps) vv f Hvalf).
split; auto.
assert (Hex:=(in_dep_map
(formula_rep gamma_valid pd all_unif vt
(interp_with_Ps pf (map fst indpred) Ps) vv) _ Hallfs _ Hinf)).
destruct Hex as [Hval4 Hinf'].
erewrite fmla_rep_irrel. apply Hinf'.
}
(*Now we repeat the process again (alpha conversion, [indpred_transform], etc.)*)
revert Hformf.
rewrite a_convert_f_rep, indpred_decomp_equiv; auto.
unfold indpred_transform.
rewrite fmla_rep_irrel with
(Hval2:= (fforalls_valid _ _ Hval1 Halltup1)).
rewrite fforalls_val, simpl_all_dec.
intros. specialize (Hformf h).
revert Hformf.
rewrite (fmla_rep_irrel) with(Hval2:=(iter_flet_valid _ _ Hval2 Halltup2)).
rewrite iter_flet_val; simpl_rep_full.
rewrite bool_of_binop_impl, simpl_all_dec.
rewrite fmla_rewrite with(f1:=(tup_4 _))(Hval2:=Hval3); [|apply ind_form_decomp; auto].
simpl_rep_full.
(*Here we need to deal with [find_apply_pred] - need to show
it is equal to [get_hlist_elt]*)
rewrite find_apply_pred_in with(Hinp:=Hinp').
intros.
(*Need this in multiple places*)
assert ((substi_multi_let gamma_valid pd all_unif vt (interp_with_Ps pf (map fst indpred) Ps)
(substi_mult pd vt vv (tup_1 (indpred_decomp (a_convert_f f))) h)
(tup_2 (indpred_decomp (a_convert_f f))) Halltup2) =
(substi_multi_let gamma_valid pd all_unif vt pf
(substi_mult pd vt vv (tup_1 (indpred_decomp (a_convert_f f))) h)
(tup_2 (indpred_decomp (a_convert_f f))) Halltup2)). {
apply substi_mult_notin_eq with(ps:=map fst indpred); simpl; auto.
- apply indpred_decomp_let_notin with(ps:=map fst indpred); auto.
- apply find_apply_pred_notin.
}
(*Now, we need to show that the arguments to P are actually the same
because these terms cannot involve P*)
(*Ugly but oh well*)
match goal with | H: _ -> is_true (get_hlist_elt _ _ _ _ ?y ?z)
|- is_true (get_hlist_elt _ _ _ _ ?y ?a) => assert (z = a) end.
- apply get_arg_list_eq.
rewrite Forall_forall. intros x Hinx ty Hty1 Hty2.
rewrite H.
rewrite term_rep_irrel with(Hty2:=Hty2).
apply term_predsym_agree; auto.
intros p1 Hinp1.
unfold interp_with_Ps; simpl.
destruct (in_bool_spec predsym_eq_dec p1 (map fst indpred));
[|rewrite find_apply_pred_notin; auto].
(*Use fact that p1 not in x*)
assert (Hindt: ind_positive (map fst indpred) (tup_4 (indpred_decomp (a_convert_f f)))).
apply indpred_decomp_last_pos; auto.
rewrite ind_form_decomp with(p:=p) in Hindt; auto.
inversion Hindt; subst.
specialize (H4 p1 x Hinx i).
rewrite Hinp1 in H4. inversion H4.
- rewrite <- H0. apply Hformf.
clear H0 Hformf.
rewrite H. clear H.
remember (substi_multi_let gamma_valid pd all_unif vt pf
(substi_mult pd vt vv (tup_1 (indpred_decomp (a_convert_f f))) h)
(tup_2 (indpred_decomp (a_convert_f f))) Halltup2) as vv'.
clear Heqvv'.
(*Now, we just need to prove that the [iter_and] of all of
these constructors is true, when we interpret p with P
instead of [pf]. Here we will use the strict positivity
lemma *)
rewrite iter_fand_rep.
rewrite iter_fand_rep in Hconstrs.
intros f' Hvalf' Hinf'.
specialize (Hconstrs f' Hvalf' Hinf').
revert Hconstrs.
(*Nearly done, need full version of lemma*)
eapply strict_pos_impred_implies_P' with(ps:=indpred)(Hform:=Hform); auto.
+ apply (indpred_decomp_and_strict_pos _ _ Hposa); auto.
+ intros p1 Hinp1.
erewrite indpred_rep_val_eq; auto.
+ intros. erewrite constrs_val_eq; auto.
rewrite Forall_forall in Hclosed. apply Hclosed; auto.
Qed.
End LeastPred.
(*We prove simpler versions for the non-mutual case, since
working with hlists is awkward *)
Section Single.
Theorem indpred_constrs_true_single
(pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(p: predsym) (fs: list formula)
(Hform: Forall (valid_formula sigma) fs)
(Hvalind: Forall (fun f => valid_ind_form p f) fs)
(Hpos: Forall (fun f => ind_positive [p] f) fs)
(Hclosed: Forall closed_formula fs)
(Hindpred: (preds gamma_valid pd pf) p =
indpred_rep_single pf vt vv p fs Hform) :
(forall (f: formula) (Hvalf: valid_formula sigma f),
In f fs ->
formula_rep gamma_valid pd all_unif vt pf vv f Hvalf).
Proof.
intros.
apply (indpred_constrs_true) with(indpred:=[(p, fs)])(Hform:=(Forall_single Hform));
simpl; auto.
- intros p' Hinp'.
assert (p = p'). { destruct (predsym_eq_dec p' p); subst; auto.
inversion Hinp'. }
subst.
assert (Hinp' = (in_triv p' [(p', fs)])). {
apply UIP_dec. apply Bool.bool_dec.
}
rewrite H0.
repeat (apply functional_extensionality_dep; intros).
rewrite <- indpred_rep_single_equiv, Hindpred.
reflexivity.
- rewrite app_nil_r. auto.
Qed.
Theorem indpred_least_pred_single (pf: pi_funpred gamma_valid pd)
(vt: val_typevar) (vv: val_vars pd vt)
(p: predsym) (fs: list formula) (Hform: Forall (valid_formula sigma) fs):
forall (P:
forall srts : list sort,
arg_list (domain (dom_aux pd))
(sym_sigma_args p srts) ->
bool
),
(*If P holds of all of the constructors*)
iter_and
(map is_true
(dep_map
(formula_rep gamma_valid pd
all_unif vt (interp_with_P pf p P) vv) fs Hform)) ->
(*Then indpred_rep p fs x -> P x*)
forall (srts : list sort)
(a: arg_list (domain (dom_aux pd))
(sym_sigma_args p srts)),
indpred_rep_single pf vt vv p fs Hform srts a -> P srts a.
Proof.
intros P Hand srts a. unfold indpred_rep_single.
rewrite simpl_all_dec. intros.
apply H. apply Hand.
Qed.
End Single.
End IndPropRep. |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UniqueRandomizer for CrossBeam.
This differs from standard UniqueRandomizer in two ways:
* This supports adding new children to trie nodes.
* In order to support that, this implementation keeps track of different info
per trie node compared to the standard implementation.
But actually the ICLR'22 paper does not use this functionality. Instead, a fresh
UniqueRandomizer data structure is used each iteration of search.
"""
import numpy as np
import torch
EXPLORATION_FACTOR = 0.5
class _TrieNode(object):
"""A node for the UniqueRandomizer trie.
Attributes:
parent: The _TrieNode parent of this node, or None if this node is the root.
index_in_parent: The index of this node in the parent, or None if this node
is the root.
unnorm_probs: the original unnormalized probabilities of its children. In
the underlying probability distribution, the probability of going to
child i equals `unnorm_probs[i] / sum(unnorm_probs)`.
unnorm_unsampled_mass: the unnormalized unsampled probability mass of its
children. In the altered distribution after some leaves have been sampled,
the probability of going to child i equals
`unnorm_unsampled_mass[i] / sum(unnorm_unsampled_mass)`.
children: A list of _TrieNode children. A child may be None if it is not
expanded yet. The entire list will be None if this node has never sampled
a child yet. The list will be empty if this node is a leaf in the trie.
"""
def __init__(self, parent, index_in_parent):
self.parent = parent
self.index_in_parent = index_in_parent
self.children = None
self.cache = {}
self._sum_unnorm_unsampled_mass = None # Will compute later.
def sample_child(self, unnorm_probs):
"""Samples a child _TrieNode.
This will create the child _TrieNode if it does not already exist.
Args:
unnorm_probs: A 1-D numpy array containing the initial unnormalized
probability distribution that this node should use.
Returns:
A tuple of the child _TrieNode and the child's index.
"""
if unnorm_probs is None:
num_elements = len(self.children)
else:
if isinstance(unnorm_probs, torch.Tensor):
unnorm_probs = unnorm_probs.cpu().numpy()
else:
unnorm_probs = np.array(unnorm_probs)
num_elements = len(unnorm_probs)
if not self.children:
# This is the first sample. Set up children.
self.unnorm_probs = unnorm_probs
self._sum_unnorm_probs = np.sum(self.unnorm_probs)
self.unnorm_unsampled_mass = np.copy(self.unnorm_probs)
# _sum_unnorm_unsampled_mass is not needed now, compute later in the
# upward pass upon reaching a leaf.
self.children = [None] * num_elements
# Faster to choose from unnorm_probs when it's still accurate (i.e., on
# the first sample).
distribution = self.unnorm_probs / self._sum_unnorm_probs
elif num_elements > len(self.children):
# Adding more children.
old_num_elements = len(self.children)
new_unnorm_probs = np.array(unnorm_probs)
assert np.all(self.unnorm_probs == new_unnorm_probs[:old_num_elements])
# The handwavy part. Children now potentially have more mass to explore
# (they could have more children as well), so the unsampled mass is
# reduced. We don't know how much it's reduced until we go to the child
# and see how much mass it puts toward its new children. For now, we'll
# assume a constant proportion of new mass to the child, and this will be
# computed more accurately on the upward pass after visiting the child.
# Children that are sampled leaves should stay sampled.
# TODO(kshi): Have a better heuristic here.
child_leaf_mask = [bool(child) and child.children == [] # pylint: disable=g-explicit-bool-comparison
for child in self.children]
self.unnorm_unsampled_mass = np.where(
child_leaf_mask,
self.unnorm_unsampled_mass,
(self.unnorm_unsampled_mass + self.unnorm_probs * EXPLORATION_FACTOR)
/ (1 + EXPLORATION_FACTOR))
self.unnorm_unsampled_mass = np.append(
self.unnorm_unsampled_mass,
new_unnorm_probs[old_num_elements:])
self.unnorm_probs = new_unnorm_probs
self._sum_unnorm_probs = np.sum(self.unnorm_probs)
self._sum_unnorm_unsampled_mass = np.sum(self.unnorm_unsampled_mass)
self.children.extend([None] * (num_elements - old_num_elements))
distribution = (self.unnorm_unsampled_mass /
self._sum_unnorm_unsampled_mass)
else:
distribution = (self.unnorm_unsampled_mass /
self._sum_unnorm_unsampled_mass)
child_index = int(np.random.choice(np.arange(num_elements), p=distribution))
child = self.children[child_index]
if not child:
child = self.children[child_index] = _TrieNode(
parent=self, index_in_parent=child_index)
return child, child_index
def mark_leaf_sampled(self):
"""Marks this node as a leaf (sampled) and propagates updates upward."""
self.children = []
node = self
parent = node.parent
while parent is not None:
parent.unnorm_unsampled_mass[node.index_in_parent] = (
0 if not node.children else (
parent.unnorm_probs[node.index_in_parent] *
node._sum_unnorm_unsampled_mass / node._sum_unnorm_probs)) # pylint: disable=protected-access
parent._sum_unnorm_unsampled_mass = np.sum(parent.unnorm_unsampled_mass) # pylint: disable=protected-access
node = parent
parent = node.parent
def compute_sum_cache(self):
if self.children:
self._sum_unnorm_unsampled_mass = np.sum(self.unnorm_unsampled_mass)
def needs_probabilities(self):
"""Returns whether this node needs probabilities."""
return self.children is None
def exhausted(self):
"""Returns whether all of the mass at this node has been sampled."""
# Distinguish [] and None.
if self.children == []: # pylint: disable=g-explicit-bool-comparison
return True
if self._sum_unnorm_unsampled_mass is None:
return False # This node is not a leaf but has never been sampled from.
return self._sum_unnorm_unsampled_mass == 0
class UniqueRandomizer(object):
"""Samples unique sequences of discrete random choices.
When using a UniqueRandomizer object to provide randomness, the client
algorithm must be deterministic and behave identically when given a constant
sequence of choices.
When a sequence of choices is complete, the client algorithm must call
`mark_sequence_complete()`. This will update the internal data so that the
next sampled choices form a new sequence, which is guaranteed to be different
from previous complete sequences.
Choices returned by a UniqueRandomizer object respect the initial probability
distributions provided by the client algorithm, conditioned on the constraint
that a complete sequence of choices cannot be sampled more than once.
The `sample_*` methods all return an int in the range [0, num_choices).
Attributes:
current_node: The current node in the trie.
"""
def __init__(self) -> None:
"""Initializes a UniqueRandomizer object."""
self._root_node = _TrieNode(None, None)
self.current_node = self._root_node
def sample_distribution(self, unnorm_probs):
"""Samples from a given unnormalized probability distribution."""
self.current_node, choice_index = self.current_node.sample_child(
unnorm_probs)
return choice_index
def mark_sequence_complete(self):
"""Used to mark a complete sequence of choices."""
self.current_node.mark_leaf_sampled()
self.current_node = self._root_node
def clear_sequence(self):
"""Clear the current sequence, as if it were not sampled."""
node = self.current_node
while node is not None:
node.compute_sum_cache()
node = node.parent
self.current_node = self._root_node
def needs_probabilities(self):
"""Returns whether the current node requires probabilities."""
return self.current_node.needs_probabilities()
def exhausted(self):
return self.current_node.exhausted()
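

# A minimal usage sketch (illustrative only; the probability values and the
# two-step choice structure below are invented for this demo and are not part
# of CrossBeam). It enumerates every distinct two-step sequence exactly once.
if __name__ == '__main__':
  demo_randomizer = UniqueRandomizer()
  seen = set()
  for _ in range(6):  # there are exactly 2 * 3 = 6 distinct sequences
    if demo_randomizer.exhausted():
      break
    first = demo_randomizer.sample_distribution([0.7, 0.3])         # 2 choices
    second = demo_randomizer.sample_distribution([0.2, 0.5, 0.3])   # 3 choices
    demo_randomizer.mark_sequence_complete()
    assert (first, second) not in seen  # a completed sequence never repeats
    seen.add((first, second))
  print('Sampled %d unique sequences.' % len(seen))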
|
[STATEMENT]
lemma (in inj_comm_ring_hom) poly_div_hom:
"map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
[PROOF STEP]
have zero: "\<forall>x. hom x = 0 \<longrightarrow> x = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x. hom x = (0::'b) \<longrightarrow> x = (0::'a)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>x. hom x = (0::'b) \<longrightarrow> x = (0::'a)
goal (1 subgoal):
1. map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
[PROOF STEP]
interpret mh: map_poly_inj_comm_ring_hom
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_poly_inj_comm_ring_hom hom
[PROOF STEP]
..
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
[PROOF STEP]
unfolding poly_div_def mh.resultant_hom[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. resultant (map_poly (map_poly hom) (poly_x_mult_y p)) (map_poly (map_poly hom) (poly_lift q)) = resultant (poly_x_mult_y (map_poly hom p)) (poly_lift (map_poly hom q))
[PROOF STEP]
by (simp add: poly_x_mult_y_hom)
[PROOF STATE]
proof (state)
this:
map_poly hom (poly_div p q) = poly_div (map_poly hom p) (map_poly hom q)
goal:
No subgoals!
[PROOF STEP]
qed |
! { dg-do run }
!
! Tests the fix for PR68216
!
! Reported on clf: https://groups.google.com/forum/#!topic/comp.lang.fortran/eWQTKfqKLZc
!
PROGRAM hello
!
! This is based on the first testcase, from Francisco (Ayyy LMAO). Original
! lines are commented out. The second testcase from this thread is called
! at the end of the program.
!
IMPLICIT NONE
CHARACTER(LEN=:),DIMENSION(:),ALLOCATABLE :: array_lineas
CHARACTER(LEN=:),DIMENSION(:),ALLOCATABLE :: array_copia
character (3), dimension (2) :: array_fijo = ["abc","def"]
character (100) :: buffer
INTEGER :: largo , cant_lineas , i
write (buffer, "(2a3)") array_fijo
! WRITE(*,*) ' Escriba un numero para el largo de cada linea'
! READ(*,*) largo
largo = LEN (array_fijo)
! WRITE(*,*) ' Escriba la cantidad de lineas'
! READ(*,*) cant_lineas
cant_lineas = size (array_fijo, 1)
ALLOCATE(CHARACTER(LEN=largo) :: array_lineas(cant_lineas))
! WRITE(*,*) 'Escriba el array', len(array_lineas), size(array_lineas)
READ(buffer,"(2a3)") (array_lineas(i),i=1,cant_lineas)
! WRITE(*,*) 'Array guardado: '
! DO i=1,cant_lineas
! WRITE(*,*) array_lineas(i)
! ENDDO
if (any (array_lineas .ne. array_fijo)) STOP 1
! The following are additional tests beyond that of the original.
!
! Check that allocation with source = another deferred length is OK
allocate (array_copia, source = array_lineas)
if (any (array_copia .ne. array_fijo)) STOP 2
deallocate (array_lineas, array_copia)
! Check that allocation with source = a non-deferred length is OK
allocate (array_lineas, source = array_fijo)
if (any (array_lineas .ne. array_fijo)) STOP 3
deallocate (array_lineas)
! Check that allocation with MOLD = a non-deferred length is OK
allocate (array_copia, mold = [array_fijo(:)(1:2), array_fijo(:)(1:2)])
if (size (array_copia, 1) .ne. 4) STOP 4
if (LEN (array_copia, 1) .ne. 2) STOP 5
! Check that allocation with MOLD = another deferred length is OK
allocate (array_lineas, mold = array_copia)
if (size (array_copia, 1) .ne. 4) STOP 6
if (LEN (array_copia, 1) .ne. 2) STOP 7
deallocate (array_lineas, array_copia)
! READ(*,*)
call testdefchar
contains
subroutine testdefchar
!
! This is the testcase in the above thread from Blokbuster
!
implicit none
character(:), allocatable :: test(:)
allocate(character(3) :: test(2))
test(1) = 'abc'
test(2) = 'def'
if (any (test .ne. ['abc', 'def'])) STOP 8
test = ['aa','bb','cc']
if (any (test .ne. ['aa', 'bb', 'cc'])) STOP 9
end subroutine testdefchar
END PROGRAM
|
# precompile is now the default
VERSION < v"0.7.0-rc2" && __precompile__()
module LibSndFile
import SampledSignals
using SampledSignals: SampleSource, SampleSink, SampleBuf
using SampledSignals: PCM16Sample, PCM32Sample
using SampledSignals: nframes, nchannels, samplerate
using FileIO: File, Stream, filename, stream
using FileIO: add_format, add_loader, add_saver, @format_str
if VERSION >= v"0.7-"
using Printf: @printf
using LinearAlgebra: transpose!
else
using Compat: Cvoid, @cfunction, @warn
end
const supported_formats = (format"WAV", format"FLAC", format"OGG")
include("libsndfile_h.jl")
include("lengthIO.jl")
include("sourcesink.jl")
include("loadsave.jl")
include("readwrite.jl")
function __init__()
# ogg currently not in the registry
add_format(format"OGG", "OggS", [".ogg", ".oga"], [:LibSndFile])
for fmt in supported_formats
add_loader(fmt, :LibSndFile)
add_saver(fmt, :LibSndFile)
end
end
depsjl = joinpath(@__DIR__, "..", "deps", "deps.jl")
if isfile(depsjl)
include(depsjl)
else
error("LibSndFile not properly installed. Please run Pkg.build(\"LibSndFile\")")
end
end # module LibSndFile
|
#pragma once
#include "Hypo/Graphics/Exports.h"
#include "Hypo/System/DataTypes/ObjPtr.h"
#include "Hypo/Window/Context/GraphicsContext.h"
#include <gsl/span>
namespace Hypo
{
enum class TextureType
{
None,
Rgb,
Rgba,
sRgb,
sRgba,
Stencil,
Depth
};
HYPO_GRAPHICS_API uInt32 GetTexturePixelSize(TextureType type);
HYPO_GRAPHICS_API uInt32 GetTextureSize(uInt32 width, uInt32 height, TextureType type);
class TextureData
{
public:
TextureData(uInt32 width, uInt32 height, TextureType type);
TextureData(uInt32 width, uInt32 height, TextureType type, std::vector<Byte> pixels);
TextureData() = default;
TextureData(const TextureData&) = default;
uInt32 GetWidth() const { return m_Width; }
uInt32 GetHeight() const { return m_Height; }
TextureType GetType()const { return m_Type; }
uInt32 GetPixelsSize() const { return m_Pixels.size(); }
const std::vector<Byte>& GetPixels() const { return m_Pixels; }
private:
uInt32 m_Width = 0;
uInt32 m_Height = 0;
TextureType m_Type = TextureType::None;
std::vector<Byte> m_Pixels;
};
HYPO_GRAPHICS_API TextureData TextureFromFile(std::string path);
}
|
import numpy as np
from ...utils.jaggedarray import unravel
def links_at_patch(patches, sort=True, nodes_at_link=None):
"""Construct as links_at_patch array for a graph.
Parameters
----------
patches : tuple of ndarray of int
Links that define patches as `(links, offset_to_patch)`.
sort : bool, optional
Sort the links.
nodes_at_link : ndarray of int, shape `(n_links, 2)`
Nodes at link tail and head.
Examples
--------
"""
from ..sort.ext.remap_element import reorder_links_at_patch
from ..quantity.ext.of_link import calc_midpoint_of_link
links_at_patch, offset_to_patch = patches
sort = False
# print 'SORTING TURNED OFF'
if sort:
xy_of_link = np.empty((len(nodes_at_link), 2), dtype=float)
calc_midpoint_of_link(nodes_at_link, x_of_node, y_of_node, xy_of_link)
reorder_links_at_patch(links_at_patch, offset_to_patch, xy_of_link)
sort_patches(links_at_patch, offset_to_patch, xy_of_link)
return unravel(links_at_patch, offset_to_patch, pad=-1)
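
# A hedged usage sketch (illustrative only). The numbers are invented, and the
# sketch assumes `offset_to_patch` uses the usual leading-zero offsets
# convention, i.e. patch i owns links[offset[i]:offset[i + 1]]. Note that the
# function currently forces `sort = False` internally, so the result is just
# the jagged per-patch link lists padded with -1 into a rectangular array:
#
#     import numpy as np
#     links = np.array([0, 1, 2, 3, 4, 5, 6])
#     offset = np.array([0, 3, 7])
#     links_at_patch((links, offset))
#     # expected (under the assumptions above):
#     # [[ 0,  1,  2, -1],
#     #  [ 3,  4,  5,  6]]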
|
Formal statement is: lemma GPicard6: assumes "open M" "z \<in> M" "a \<noteq> 0" and holf: "f holomorphic_on (M - {z})" and f0a: "\<And>w. w \<in> M - {z} \<Longrightarrow> f w \<noteq> 0 \<and> f w \<noteq> a" obtains r where "0 < r" "ball z r \<subseteq> M" "bounded(f ` (ball z r - {z})) \<or> bounded((inverse \<circ> f) ` (ball z r - {z}))" Informal statement is: If $f$ is holomorphic on an open set $M$ and $f(w) \neq 0$ and $f(w) \neq a$ for all $w \in M - \{z\}$, then either $f$ or $1/f$ is bounded on a punctured neighborhood of $z$. |
Formal statement is: lemma has_vector_derivative_complex_iff: "(f has_vector_derivative x) F \<longleftrightarrow> ((\<lambda>x. Re (f x)) has_field_derivative (Re x)) F \<and> ((\<lambda>x. Im (f x)) has_field_derivative (Im x)) F" Informal statement is: A complex-valued function $f$ has a complex derivative $x$ at $a$ if and only if the real and imaginary parts of $f$ have real derivatives $Re(x)$ and $Im(x)$ at $a$, respectively. |
Formal statement is: lemma topological_space_generate_topology: "class.topological_space (generate_topology S)" Informal statement is: The topology generated by a set of subsets of a set is a topology. |
A Married Couple Is Driving Down the Interstate at 55 mph. But What the Wife Tells Her Husband Next Is Shocking.
Previous post: A Nice, Calm and Respectable Lady Went into the Pharmacy. What Happened Next Is Hilarious.
Next post: A Man Is on His Deathbed When He Asks His Wife If She Cheated on Him. But What His Wife Told Him Is Shocking.
[STATEMENT]
lemma pow_res_is_univ_semialgebraic:
assumes "x \<in> carrier Q\<^sub>p"
shows "is_univ_semialgebraic (pow_res n x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
proof(cases "n = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. n = 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
n = 0
goal (2 subgoals):
1. n = 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
have T0: "pow_res n x = {x}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pow_res n x = {x}
[PROOF STEP]
unfolding True
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pow_res 0 x = {x}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier Q\<^sub>p
goal (1 subgoal):
1. pow_res 0 x = {x}
[PROOF STEP]
by (simp add: assms zeroth_pow_res)
[PROOF STATE]
proof (state)
this:
pow_res n x = {x}
goal (2 subgoals):
1. n = 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
have "[x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
using assms Qp.to_R1_closed
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier Q\<^sub>p
?a \<in> carrier Q\<^sub>p \<Longrightarrow> [?a] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. [x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
[x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (2 subgoals):
1. n = 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
hence "is_semialgebraic 1 {[x]}"
[PROOF STATE]
proof (prove)
using this:
[x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. is_semialgebraic 1 {[x]}
[PROOF STEP]
using is_algebraic_imp_is_semialg singleton_is_algebraic
[PROOF STATE]
proof (prove)
using this:
[x] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
is_algebraic Q\<^sub>p ?n ?S \<Longrightarrow> is_semialgebraic ?n ?S
?as \<in> carrier (Q\<^sub>p\<^bsup>?n\<^esup>) \<Longrightarrow> is_algebraic Q\<^sub>p ?n {?as}
goal (1 subgoal):
1. is_semialgebraic 1 {[x]}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
is_semialgebraic 1 {[x]}
goal (2 subgoals):
1. n = 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
is_semialgebraic 1 {[x]}
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
unfolding T0
[PROOF STATE]
proof (prove)
using this:
is_semialgebraic 1 {[x]}
goal (1 subgoal):
1. is_univ_semialgebraic {x}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
is_semialgebraic 1 {[x]}
x \<in> carrier Q\<^sub>p
goal (1 subgoal):
1. is_univ_semialgebraic {x}
[PROOF STEP]
by (simp add: \<open>x \<in> carrier Q\<^sub>p\<close> finite_is_univ_semialgebraic)
[PROOF STATE]
proof (state)
this:
is_univ_semialgebraic (pow_res n x)
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
n \<noteq> 0
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
proof(cases "x = \<zero>")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x = \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. x \<noteq> \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
x = \<zero>
goal (2 subgoals):
1. x = \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
2. x \<noteq> \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x = \<zero>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x = \<zero>
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
using finite_is_univ_semialgebraic False pow_res_of_zero
[PROOF STATE]
proof (prove)
using this:
x = \<zero>
\<lbrakk>?A \<subseteq> carrier Q\<^sub>p; finite ?A\<rbrakk> \<Longrightarrow> is_univ_semialgebraic ?A
n \<noteq> 0
pow_res ?n \<zero> = {\<zero>}
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
by (metis Qp.zero_closed empty_subsetI finite.emptyI finite.insertI insert_subset)
[PROOF STATE]
proof (state)
this:
is_univ_semialgebraic (pow_res n x)
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
case F: False
[PROOF STATE]
proof (state)
this:
x \<noteq> \<zero>
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<noteq> \<zero>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<noteq> \<zero>
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
using False pow_res_semialg_def[of x n] diff_is_univ_semialgebraic[of _ "{\<zero>}"] finite_is_univ_semialgebraic[of "{\<zero>}"]
[PROOF STATE]
proof (prove)
using this:
x \<noteq> \<zero>
n \<noteq> 0
\<lbrakk>x \<in> nonzero Q\<^sub>p; 1 \<le> n\<rbrakk> \<Longrightarrow> \<exists>P\<in>carrier (UP Q\<^sub>p). pow_res n x = univ_basic_semialg_set n P - {\<zero>}
\<lbrakk>is_univ_semialgebraic ?A; is_univ_semialgebraic {\<zero>}\<rbrakk> \<Longrightarrow> is_univ_semialgebraic (?A - {\<zero>})
\<lbrakk>{\<zero>} \<subseteq> carrier Q\<^sub>p; finite {\<zero>}\<rbrakk> \<Longrightarrow> is_univ_semialgebraic {\<zero>}
goal (1 subgoal):
1. is_univ_semialgebraic (pow_res n x)
[PROOF STEP]
by (metis Qp.zero_closed assms empty_subsetI finite.emptyI finite.insertI insert_subset less_one less_or_eq_imp_le linorder_neqE_nat not_nonzero_Qp univ_basic_semialg_set_is_univ_semialgebraic)
[PROOF STATE]
proof (state)
this:
is_univ_semialgebraic (pow_res n x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
is_univ_semialgebraic (pow_res n x)
goal:
No subgoals!
[PROOF STEP]
qed |
function node_display ( v, bi, p )
%% NODE_DISPLAY displays nodes within a boundary.
%
% Parameters:
%
% Input, real V(V_NUM,2), the coordinates of vertices used to define the curves.
%
% Input, integer BI(BI_NUM), a sequence of indices into V. Each closed curve
% is defined by giving a sequence of indices, which is terminated by
% repeating the starting index. Thus, BI = { 3, 1, 5, 3, 4, 2, 9, 7, 4 }
% describes two curves: ( 3, 1, 5, 3 ) and (4, 2, 9, 7, 4 ).
%
% Input, real P(P_NUM,2), the coordinates of nodes.
%
bi_num = length ( bi );
clf
hold on;
next = 1;
s = bi(1);
t2 = s;
draw = 1;
while ( next < bi_num )
t1 = t2;
next = next + 1;
t2 = bi(next);
if ( draw )
line ( [ v(t1,1), v(t2,1) ], [ v(t1,2), v(t2,2) ], 'LineWidth', 2, 'Color', 'k' );
if ( t2 == s )
draw = 0;
end
else
s = t2;
draw = 1;
end
end
plot ( p(:,1), p(:,2), 'b.', 'MarkerSize', 15 );
plot ( v(:,1), v(:,2), 'r.', 'MarkerSize', 15 );
axis equal
grid on
hold off
return
end
|
PROGRAM WAVES_DIRECTIONS
C
C MAKES A PLOT OF WAVE DIRECTIONS OBTAINED FROM FIT_WAVE AND
C TDSEXEY
C
CHARACTER*120 JUNK
REAL ANGMAG,SUNCLOCK
CHARACTER*3 MONTH(12)
DATA MONTH /'JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG',
1 'SEP','OCT','NOV','DEC'/
C
COMMON /MONGOPAR/
1 X1,X2,Y1,Y2,GX1,GX2,GY1,GY2,LX1,LX2,LY1,LY2,
1 GX,GY,CX,CY,
1 EXPAND,ANGLE,LTYPE,LWEIGHT,
1 CHEIGHT,CWIDTH,CXDEF,CYDEF,PSDEF,PYDEF,COFF,
1 TERMOUT,XYSWAPPED,NUMDEV,
1 PI,USERVAR(10),AUTODOT
INTEGER*4 LX1,LX2,LY1,LY2,LTYPE,LWEIGHT,NUMDEV
C
C	THE 12 CONSECUTIVE 1997 NOV 4 EVENTS:
C DATA SUNCLOCK /0., 185.,98.,2042.,2485.,1452.,71.,2435.,3231.,
C 1 797.,4005.,2789.,6*0.,1942.,2*0.,1030.,2*1752.,
C 1 6*1853.,23*0.,2*190.,25*0./
C DATA ANGMAG /0., 108.,108.,108.,108.,102.,102.,105.,110.,
C 1 110.,110.,111.,6*0.,-20.0,2*0.,-17.8,2*-37.2,
C 1 6*-19.2,23*0.,2*109.1,25*0./
C DATA ITERM /3/
DATA ITERM /-1/
C
NDO = 100
C
CALL MGOINIT
CALL MGOSETUP(ITERM)
CALL MGOERASE
C
IF(ITERM.LT.0) THEN
CALL MGOSETLOC(500., 850., 2300., 2650.)
ELSE
CALL MGOSETLOC(200.,870.,80.,750.)
ENDIF
C
C CALL MGOSETEXPAND(.85)
C IF(ITERM.GT.0) THEN
C CALL MGOGRELOCATE(10.,0.) ! maxch, crt
C ELSE
C CALL MGOGRELOCATE(400.,50.) ! hardcopy
C ENDIF
C CALL MGOPUTLABEL(53,STR,9)
C
C ISUBR = 1 ! 1 IS 8 VARIABLE FIT, LONGITUDINAL WAVES
C 2 IS 12 VARIABLE FIT, WITH CIRCULAR POL.
C
READ(5,*,ERR=10) ISUBR
PRINT*,'READIN ISUBR=',ISUBR
GO TO 20
10 PRINT*,'ERROR IN WAVES_DIRECTIONS READIN'
ISUBR = 1 ! DEFAULT
C
20 OPEN(UNIT=44,FILE='FITWAVE.RESULTS',STATUS='OLD',READONLY)
OPEN(UNIT=45,FILE='FITWAVE12.RESULTS',STATUS='OLD',READONLY)
READ(44,22) JUNK
READ(44,22) JUNK
READ(45,22) JUNK
READ(45,22) JUNK
22 FORMAT(A)
N = 0
100 CONTINUE
N = N+1
PRINT*,'ISUBR,N,NDO',ISUBR,N,NDO
IF(ISUBR.EQ.1) THEN
c READ(44,144,END=200,ERR=300) NDATE,NEVENT,F1,DELF,EX1,EY1,
c 1 EX2,EY2,PH1,PH2,RMS,N1,N2
READ(44,*,END=150,ERR=200) NDATE,NEVENT,F1,DELF,EX1,EY1,
1 EX2,EY2,PH1,PH2,RMS,N1,N2,ANGMAG,SUNCLOCK
144 FORMAT(2I10,F7.3,F7.3,4F6.2,2F8.2,F9.4)
print*,'sunclock,angmag',sunclock,angmag
IF(N.NE.NDO) GO TO 100
150 print*,'after read',n,ndate,nevent,rms
EMAG1 = SQRT(EX1**2 + EY1**2)
EMAG2 = SQRT(EX2**2 + EY2**2)
EMAX = AMAX1(EMAG1,EMAG2)
EMAX = 1.2*EMAX
ELSE
READ(45,*,END=160,ERR=200) NDATE,NEVENT,F1,DELF,EX1,EY1,
1 EX2,EY2,TX1,TY1,TX2,TY2,RMS,N1,N2,ANGMAG,SUNCLOCK
IF(N.NE.NDO) GO TO 100
160 print*,'sunclock,angmag',sunclock,angmag
EMAX = AMAX1(ABS(EX1),ABS(EY1),ABS(EX2),ABS(EY2),ABS(TX1),
1 ABS(TX2),ABS(TY1),ABS(TY2))
145 FORMAT(2I10,F7.3,F7.3,4F6.2,2F8.2,F9.4)
EMAX = 1.5*EMAX
ENDIF
C
C START GRAPH
C
CALL MGOSETLIM(-EMAX,-EMAX,EMAX,EMAX)
ANG_EX = 360. - 360.*SUNCLOCK/4096. - 45.
C LEFT HANDED COORDINATE SYSTEM, Z IN DOWNWARD
C
C PLOT A HEAVY LINE IN DIRECTION OF B
C
BX = EMAX*COSD(ANGMAG)
BY = EMAX*SIND(ANGMAG)
CALL MGOSETLWEIGHT(3)
CALL MGORELOCATE(0.,0.)
CALL MGODRAW(BX,BY)
CALL MGOPUTLABEL(1,'B',6)
CALL MGOSETLWEIGHT(1)
C
C PLOT A DOTTED LINE TO SUN, WRITE EVENT NO. AND DO BOX
C
CALL MGOSETLTYPE(2)
CALL MGORELOCATE(0.,0.)
CALL MGODRAW(.7*EMAX,0.)
CALL MGOPUTLABEL(3,'SUN',6)
CALL MGOSETLTYPE(0)
CALL MGORELOCATE(-.8*EMAX,1.3*EMAX)
NYEAR = NDATE
NDAY = MOD(NDATE,100)
NYEAR = NYEAR/100
NM = MOD(NYEAR,100)
NYEAR = NYEAR/100
WRITE(JUNK,235) NYEAR,MONTH(NM),NDAY
235 FORMAT(I5,'-',A3,'-',I2)
CALL MGOPUTLABEL(12,JUNK,6)
WRITE(JUNK,234) NEVENT
234 FORMAT(I10)
CALL MGOPUTLABEL(11,' EVENT NO.',6)
CALL MGOPUTLABEL(10,JUNK,6)
CALL MGORELOCATE(-.69*EMAX,1.15*EMAX)
CALL MGOSETEXPAND(.8)
WRITE(JUNK,236) N1,N2
236 FORMAT('SAMPLES',I5,' TO',I5)
CALL MGOPUTLABEL(20,JUNK,6)
WRITE(JUNK,237) RMS
237 FORMAT(F6.2)
CALL MGOPUTLABEL(6,' RMS',6)
CALL MGOPUTLABEL(6,JUNK,6)
CALL MGOPUTLABEL(5,' mV/m',6)
CALL MGOSETEXPAND(1.)
CALL MGOXLABEL(5,' mV/m')
CALL MGOYLABEL(5,' mV/m')
CALL MGOBOX(1,2)
C
IF(ISUBR.EQ.1) THEN
EANG1 = ATAN2D(-EY1,EX1) + ANG_EX
EANG2 = ATAN2D(-EY2,EX2) + ANG_EX
CALL MGORELOCATE(0.,0.)
EXT1 = EMAG1*COSD(EANG1)
EYT1 = EMAG1*SIND(EANG1)
CALL MGODRAW(EXT1,EYT1)
IF(DELF.GT.0.) THEN
CALL MGOPUTLABEL(8,' E\dL\df',6)
ELSE
CALL MGOPUTLABEL(8,' E\dH\df',6)
ENDIF
CALL MGORELOCATE(0.,0.)
CALL MGODRAW(-EXT1,-EYT1)
EXT2 = EMAG2*COSD(EANG2)
EYT2 = EMAG2*SIND(EANG2)
CALL MGORELOCATE(0.,0.)
CALL MGODRAW(EXT2,EYT2)
CALL MGORELOCATE(0.,0.)
CALL MGODRAW(-EXT2,-EYT2)
IF(DELF.GT.0.) THEN
CALL MGOPUTLABEL(8,' E\dH\df',6)
ELSE
CALL MGOPUTLABEL(8,' E\dL\df',6)
ENDIF
ELSE
CALL ELLIPSE(ANG_EX,DELF,EX1,EY1,EX2,EY2,TX1,TY1,TX2,TY2)
ENDIF
C
C
200 CONTINUE
PRINT*,'IN READ',NEVENT,F1,DELF,EX1
PRINT*,'IN READ',EX2,EY1,EY2,RMS
CALL MGOSETEXPAND(.5)
CALL MGOPLOTID('WAVES_DIRECTIONS',' ')
CALL MGOSETEXPAND(1.)
IF(ITERM.LT.0) THEN
CALL MGOPRNTPLOT(NVEC)
PRINT*,' NO. VECTORS PLOTTED',NVEC
ELSE
CALL MGOTCLOSE
ENDIF
C
CLOSE(UNIT=44)
CLOSE(UNIT=45)
PAUSE
C
STOP
300 PRINT*,'READ ERROR AT N,NDATE,NEVENT =',N,NDATE,NEVENT
STOP
END
SUBROUTINE ELLIPSE(ANG_EX,DELF,EX1,EY1,EX2,EY2,TX1,TY1,TX2,TY2)
C
DATA TWOPI /6.2831853/
C
NUMBER = 100
DELT = TWOPI/NUMBER
C
C
C DO N = N1,N2
C T = (N-1024)*DELT
C EX = EX1*SIN(TWOPI*F1*T + PH1) + EX2*SIN(TWOPI*F2*T + PH2)
C 1 + TX1*COS(TWOPI*F1*T + PH1) + TX2*COS(TWOPI*F2*T + PH2)
C EY = EY1*SIN(TWOPI*F1*T + PH1) + EY2*SIN(TWOPI*F2*T + PH2)
C 1 + TY1*COS(TWOPI*F1*T + PH1) + TY2*COS(TWOPI*F2*T + PH2)
C SUMSQ = SUMSQ + (X4DATA(N,1)/XLEN - EX)**2
C 1 + (X4DATA(N,2)/YLEN - EY)**2
C ENDDO
C
C PLOT ELECTRIC FIELD ELLIPSE OF FIRST WAVE
C
COSAANG = COSD(ANG_EX)
SINAANG = SIND(ANG_EX)
EX = TX1
EY = TY1
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
CALL MGORELOCATE(EXGSE,EYGSE)
IF(DELF.GT.0.) THEN
CALL MGOPUTLABEL(8,' E\dL\df',6)
ELSE
CALL MGOPUTLABEL(8,' E\dH\df',6)
ENDIF
CALL MGORELOCATE(EXGSE,EYGSE)
C
DO N = 1,NUMBER
T = N*DELT
EX = EX1*SIN(T) + TX1*COS(T)
EY = EY1*SIN(T) + TY1*COS(T)
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
CALL MGODRAW(EXGSE,EYGSE)
ENDDO
C
C PUT ON ARROW
C
T = 0.
EX = EX1*SIN(T) + TX1*COS(T)
EY = EY1*SIN(T) + TY1*COS(T)
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
C CALL MGODRAW(EXGSE,EYGSE)
T = T + DELT
EX = EX1*SIN(T) + TX1*COS(T)
EY = EY1*SIN(T) + TY1*COS(T)
DDX = EX*COSAANG + EY*SINAANG - EXGSE
DDY = EX*SINAANG - EY*COSAANG - EYGSE
SIZE = .03
CALL ARROW(EXGSE,EYGSE,DDX,DDY,SIZE)
C
C PLOT ELECTRIC FIELD ELLIPSE OF SECOND WAVE
C
EX = TX2
EY = TY2
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
CALL MGORELOCATE(EXGSE,EYGSE)
C CALL MGOPUTLABEL(3,' E2',6)
IF(DELF.LE.0.) THEN
CALL MGOPUTLABEL(8,' E\dL\df',6)
ELSE
CALL MGOPUTLABEL(8,' E\dH\df',6)
ENDIF
CALL MGORELOCATE(EXGSE,EYGSE)
C
DO N = 1,NUMBER
T = N*DELT
EX = EX2*SIN(T) + TX2*COS(T)
EY = EY2*SIN(T) + TY2*COS(T)
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
CALL MGODRAW(EXGSE,EYGSE)
ENDDO
C
C PUT ON ARROW
C
T = 0.
EX = EX2*SIN(T) + TX2*COS(T)
EY = EY2*SIN(T) + TY2*COS(T)
EXGSE = EX*COSAANG + EY*SINAANG
EYGSE = EX*SINAANG - EY*COSAANG
C CALL MGODRAW(EXGSE,EYGSE)
T = T + DELT
EX = EX2*SIN(T) + TX2*COS(T)
EY = EY2*SIN(T) + TY2*COS(T)
DDX = EX*COSAANG + EY*SINAANG - EXGSE
DDY = EX*SINAANG - EY*COSAANG - EYGSE
CALL ARROW(EXGSE,EYGSE,DDX,DDY,SIZE)
C
RETURN
END
|
{-# OPTIONS -v tc.unquote:30 #-}
open import Common.Prelude
open import Common.Reflection
open import Agda.Builtin.Sigma
data Box : Bool → Set where
box : (b : Bool) → Box b
works : (b : Bool) → Box b → Bool
works b (box .b) = unquote (give (var 0 []))
works₂ : (b : Bool) → Box b → Bool
unquoteDef works₂ = defineFun works₂ (clause
( ("b" , vArg unknown) ∷ [])
( vArg (var 0)
∷ vArg (con (quote box)
(vArg (dot unknown) ∷ []))
∷ [])
(var 0 []) ∷ [])
works₃ : (b : Bool) → Box b → (x y : Bool) → Bool
unquoteDef works₃ = defineFun works₃ (clause
( ("y" , vArg unknown)
∷ ("x" , vArg unknown)
∷ ("b" , vArg unknown)
∷ [])
( vArg (var 2)
∷ vArg (con (quote box)
(vArg (dot unknown) ∷ []))
∷ vArg (var 1)
∷ vArg (var 0)
∷ [])
(var 2 []) ∷ [])
|
# Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests
### Authors: Patrick Nelson & Terrence Pierre Jacques
### Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb)
## This module compiles and runs code tests for all 1D initial data options available in GiRaFFE-NRPy+, evolving one-dimensional GRFFE waves.
### NRPy+ Source Code for this module:
* Main python module for all 1D initial data: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) __Options:__
1. [Fast Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-fast_wave.ipynb)
1. [Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_alfven_wave.ipynb)
1. [Degenerate Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-degen_Alfven_wave.ipynb)
1. [Three Alfven Waves](Tutorial-GiRaFFEfood_NRPy_1D_tests-three_waves.ipynb)
1. [FFE Breakdown](Tutorial-GiRaFFEfood_NRPy_1D_tests-FFE_breakdown.ipynb)
* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Generates code to reconstruct primitive variables on cell faces.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.
* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.
* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.
* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.
Here we use NRPy+ to generate the C source code necessary to set up initial data for each of the 1D tests listed above (for example, an Alfvén wave; see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#setup): Set up core functions and parameters for solving GRFFE equations
1. [Step 2](#grffe): Output C code for GRFFE evolution
1. [Step 2.a](#mol): Output macros for Method of Lines timestepping
1. [Step 3](#gf_id): Import `GiRaFFEfood_NRPy` initial data modules
1. [Step 4](#cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`
1. [Step 4.a](#bc_functs): Set up boundary condition functions for chosen singular, curvilinear coordinate system
1. [Step 5](#mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code
1. [Step 6](#compileexec): Compile and execute C codes
1. [Step 7](#plots): Data Visualization
1. [Step 8](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='setup'></a>
# Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](#toc)\]
$$\label{setup}$$
```python
import shutil, os, sys # Standard Python modules for multiplatform OS-level functions
# First, we'll add the parent directory to the list of directories Python will check for modules.
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
# Step P1: Import needed NRPy+ core modules:
from outputC import outCfunction, lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Step P2: Create C code output directory:
Ccodesdir = os.path.join("GiRaFFE_staggered_1D_Tests_standalone_Ccodes/")
# First remove C code output directory if it exists
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# !rm -r ScalarWaveCurvilinear_Playground_Ccodes
shutil.rmtree(Ccodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesdir)
# Step P3: Create executable output directory:
outdir = os.path.join(Ccodesdir,"output/")
cmd.mkdir(outdir)
# Step P5: Set timestepping algorithm (we adopt the Method of Lines)
REAL = "double" # Best to use double here.
default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower.
# Step P6: Set the finite differencing order to 2.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests"
TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100)
import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md
# par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False")
par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False")
```
<a id='grffe'></a>
# Step 2: Output C code for GRFFE evolution \[Back to [top](#toc)\]
$$\label{grffe}$$
We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in.
All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those.
```python
md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir)
```
Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h
Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h
Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h
Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h
Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h
Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h
<a id='mol'></a>
## Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](#toc)\]
$$\label{mol}$$
Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm.
```python
RK_method = "RK4"
# Step 3: Generate Runge-Kutta-based (RK-based) timestepping code.
# As described above the Table of Contents, this is a 3-step process:
# 3.A: Evaluate RHSs (RHS_string)
# 3.B: Apply boundary conditions (post_RHS_string, pt 1)
import MoLtimestepping.C_Code_Generation as MoL
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
RK_order = Butcher_dict[RK_method][1]
cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/"))
MoL.MoL_C_Code_Generation(RK_method,
RHS_string = """
GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""",
post_RHS_string = """
GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""",
outdir = os.path.join(Ccodesdir,"MoLtimestepping/"))
```
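To make the RK4 update concrete, the cell below is a minimal, self-contained Python sketch of a single classical RK4 step applied to a toy ODE. It is purely illustrative: the function `rk4_step` and the scalar right-hand side are made up for this example and are *not* the NRPy+-generated C code, which performs the analogous update on the full set of evolved gridfunctions.
```python
# Illustrative only: one classical RK4 step for a generic right-hand side.
import numpy as np

def rk4_step(rhs, y_n, t, dt):
    """Advance the state y_n from t to t+dt with the classical RK4 scheme."""
    k1 = rhs(t,          y_n)
    k2 = rhs(t + 0.5*dt, y_n + 0.5*dt*k1)
    k3 = rhs(t + 0.5*dt, y_n + 0.5*dt*k2)
    k4 = rhs(t + dt,     y_n + dt*k3)
    return y_n + (dt/6.0)*(k1 + 2.0*k2 + 2.0*k3 + k4)

# Toy check: dy/dt = -y has the exact solution exp(-t).
y, dt = np.array([1.0]), 0.01
for n in range(100):
    y = rk4_step(lambda t, y: -y, y, n*dt, dt)
print(y[0], np.exp(-1.0))  # the two values agree to roughly 1e-10
```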
<a id='gf_id'></a>
# Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](#toc)\]
$$\label{gf_id}$$
With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first, namely the Minkowski spacetime.
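For reference, the flat-space choices made in the next cell ($\alpha = 1$, $\beta^i = 0$, $\gamma_{ij} = \delta_{ij}$) reduce the ADM line element
$$
ds^2 = -\alpha^2 dt^2 + \gamma_{ij}\left(dx^i + \beta^i dt\right)\left(dx^j + \beta^j dt\right)
$$
to the Minkowski metric $ds^2 = -dt^2 + dx^2 + dy^2 + dz^2$ in Cartesian coordinates.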
```python
gammaDD = ixp.zerorank2(DIM=3)
for i in range(3):
for j in range(3):
if i==j:
gammaDD[i][j] = sp.sympify(1) # else: leave as zero
betaU = ixp.zerorank1() # All should be 0
alpha = sp.sympify(1)
# Description and options for this initial data
desc = "Generate a flat spacetime metric."
loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime.
# For testing: Also set inverse metric:
gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
name = "set_initial_spacetime_metric_data"
values_to_print = [
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha)
]
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs",
body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"),
loopopts = loopopts_id)
```
Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/set_initial_spacetime_metric_data.h
Now, we will write out the initial data functions for the GRFFE variables.
```python
initial_data_dir = os.path.join(Ccodesdir,"InitialData/")
cmd.mkdir(initial_data_dir)
ID_opts = ["AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"]
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gid
for initial_data in ID_opts:
if initial_data=="AlfvenWave":
gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True)
desc = "Generate Alfven wave 1D initial data for GiRaFFEfood_NRPy."
elif initial_data=="ThreeAlfvenWaves":
gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "ThreeWaves", stagger_enable = True)
desc = "Generate three Alfven wave 1D initial data for GiRaFFEfood_NRPy."
elif initial_data=="DegenAlfvenWave":
gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True)
desc = "Generate degenerate Alfven wave 1D initial data for GiRaFFEfood_NRPy."
elif initial_data=="FastWave":
gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True)
desc = "Generate fast wave 1D initial data for GiRaFFEfood_NRPy."
elif initial_data=="FFEBD":
gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "FFE_Breakdown", stagger_enable = True)
desc = "Generate FFE breakdown 1D initial data for GiRaFFEfood_NRPy."
name = initial_data
values_to_print = [
lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),
lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),
lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),
# lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),
# lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),
# lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),
lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))
]
outCfunction(
outfile = os.path.join(initial_data_dir,name+".c"), desc=desc, name=name,
params ="const paramstruct *params, REAL *xx[3], REAL *auxevol_gfs, REAL *out_gfs",
body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"),
rel_path_to_Cparams='../',
loopopts ="AllPoints,Read_xxs")
initial_data_body = """
const char *option1 = "AlfvenWave";
const char *option2 = "ThreeAlfvenWaves";
const char *option3 = "DegenAlfvenWave";
const char *option4 = "FastWave";
const char *option5 = "FFEBD";
if (strcmp(initial_data_option, option1) == 0) {
AlfvenWave(params, xx, auxevol_gfs, out_gfs);
}
else if (strcmp(initial_data_option, option2) == 0) {
ThreeAlfvenWaves(params, xx, auxevol_gfs, out_gfs);
}
else if (strcmp(initial_data_option, option3) == 0) {
DegenAlfvenWave(params, xx, auxevol_gfs, out_gfs);
}
else if (strcmp(initial_data_option, option4) == 0) {
FastWave(params, xx, auxevol_gfs, out_gfs);
}
else if (strcmp(initial_data_option, option5) == 0) {
FFEBD(params, xx, auxevol_gfs, out_gfs);
}
else {
printf("ERROR: Invalid choice of initial data.");
exit(1);
}
"""
name = "initial_data"
desc = "Main initial data function."
includes = ["AlfvenWave.c", "ThreeAlfvenWaves.c", "DegenAlfvenWave.c", "FastWave.c", "FFEBD.c"]
outCfunction(
outfile = os.path.join(initial_data_dir,name+".h"), desc=desc, name=name,
params ="const char *initial_data_option, const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict out_gfs",
body = initial_data_body,
includes = includes,
prefunc="#include <string.h>",
rel_path_to_Cparams='../',
loopopts ="")
```
Output C function AlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/AlfvenWave.c
Output C function ThreeAlfvenWaves() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/ThreeAlfvenWaves.c
Output C function DegenAlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/DegenAlfvenWave.c
Output C function FastWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FastWave.c
Output C function FFEBD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FFEBD.c
Output C function initial_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/initial_data.h
<a id='cparams'></a>
# Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\]
$$\label{cparams}$$
Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.
Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above
```python
# Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h
# Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
# Step 3.e.ii: Set free_parameters.h
with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file:
file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS.
params.Nxx0 = atoi(argv[1]);
params.Nxx1 = atoi(argv[2]);
params.Nxx2 = atoi(argv[3]);
params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
const REAL xxmin[3] = {-1.3255,-0.085,-0.085};
const REAL xxmax[3] = { 1.6745, 0.115, 0.115};
params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1);
params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1);
params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1);
printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2);
params.invdx0 = 1.0 / params.dxx0;
params.invdx1 = 1.0 / params.dxx1;
params.invdx2 = 1.0 / params.dxx2;
const int poison_grids = 0;
// Standard GRFFE parameters:
params.GAMMA_SPEED_LIMIT = 2000.0;
params.diss_strength = 0.1;
""")
```
<a id='bc_functs'></a>
## Step 4.a: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](#toc)\]
$$\label{bc_functs}$$
Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`.
```python
import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False)
```
Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h"
Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3,
psi6Phi:0 )
AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1,
B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3,
Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1,
Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1,
Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1,
ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2,
Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3,
Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1,
Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2,
Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3,
alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1,
beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0,
cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7,
gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5,
gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9,
psi6_temp:0, psi6center:0 )
Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h"
<a id='mainc'></a>
# Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](#toc)\]
$$\label{mainc}$$
```python
# Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER),
# and set the CFL_FACTOR (which can be overwritten at the command line)
with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file:
file.write("""
// Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
#define NGHOSTS """+str(3)+"""
#define NGHOSTS_A2B """+str(2)+"""
// Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point
// numbers are stored to at least ~16 significant digits
#define REAL """+REAL+"""
// Part P0.c: Set the CFL Factor. Can be overwritten at command line.
REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";")
```
```python
%%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c
// Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+.
#include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"
#include "declare_Cparameters_struct.h"
const int NSKIP_1D_OUTPUT = 1;
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "time.h"
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
// Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of
// data in a 1D array. In this case, consecutive values of "i"
// (all other indices held to a fixed value) are consecutive in memory, where
// consecutive values of "j" (fixing all other indices) are separated by
// Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of
// "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc.
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)
// Step P3: Set gridfunction macros
#include "boundary_conditions/gridfunction_defines.h"
// Step P4: Include the RHS, BC, and primitive recovery functions
#include "GiRaFFE_NRPy_Main_Driver.h"
// Step P5: Include the initial data functions
#include "set_initial_spacetime_metric_data.h"
#include "InitialData/initial_data.h"
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up scalar wave initial data
// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
// applying quadratic extrapolation outer boundary conditions.
// Step 3: Output relative error between numerical and exact solution.
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
paramstruct params;
#include "set_Cparameters_default.h"
// Step 0a: Read command-line input, error out if nonconformant
if(argc != 5 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) {
printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n");
printf("where Nx is the number of grid points in the x direction, and so forth.\n");
printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
exit(1);
}
// Step 0c: Set free parameters, overwriting Cparameters defaults
// by hand or with command-line input, as desired.
#include "free_parameters.h"
#include "set_Cparameters-nopointer.h"
// ... and then set up the numerical grid structure in time:
const REAL t_final = 2.0;
const REAL CFL_FACTOR = 0.5; // Set the CFL Factor
// Step 0c: Allocate memory for gridfunctions
const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
// Step 0k: Allocate memory for gridfunctions
#include "MoLtimestepping/RK_Allocate_Memory.h"
REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
// For debugging, it can be useful to set everything to NaN initially.
if(poison_grids) {
for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) {
y_n_gfs[ii] = 1.0/0.0;
y_nplus1_running_total_gfs[ii] = 1.0/0.0;
//k_odd_gfs[ii] = 1.0/0.0;
//k_even_gfs[ii] = 1.0/0.0;
diagnostic_output_gfs[ii] = 1.0/0.0;
evol_gfs_exact[ii] = 1.0/0.0;
}
for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) {
auxevol_gfs[ii] = 1.0/0.0;
auxevol_gfs_exact[ii] = 1.0/0.0;
}
}
// Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition
// This is probably already defined above, but just in case...
#ifndef MIN
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
#endif
REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition
int Nt = (int)(t_final / dt + 0.5); // The number of points in time.
//Add 0.5 to account for C rounding down integers.
// Step 0e: Set up cell-centered Cartesian coordinate grids
REAL *xx[3];
xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);
xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);
xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);
for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0;
for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1;
for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2;
// Step 1: Set up initial data to be exact solution at time=0:
REAL time = 0.0;
set_initial_spacetime_metric_data(&params, xx, auxevol_gfs);
const char *initial_data_option = argv[4];
initial_data(initial_data_option, &params, xx, auxevol_gfs, y_n_gfs);
// Fill in the remaining quantities
GiRaFFE_compute_B_and_Bstagger_from_A(&params,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */
y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF,
y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF,
y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF,
auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF);
//override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0);
GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs);
// Extra stack, useful for debugging:
GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs);
for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time.
//for(int n=0;n<=1;n++) { // Main loop to progress forward in time.
// Step 1a: Set current time to correct value & compute exact solution
time = ((REAL)n)*dt;
/* Step 2: Validation: Output relative error between numerical and exact solution, */
if(time == 0.0 || time == 0.5 || time == 1.0 || time == 2.0 || time == 0.02 || time == 0.56) {
// Step 2c: Output relative error between exact & numerical at center of grid.
const int i0mid=Nxx_plus_2NGHOSTS0/2;
const int i1mid=Nxx_plus_2NGHOSTS1/2;
const int i2mid=Nxx_plus_2NGHOSTS2/2;
char filename[100];
sprintf(filename,"out%d__%s-%08d.txt", Nxx0, initial_data_option, n);
FILE *out2D = fopen(filename, "w");
for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
const int idx = IDX3S(i0,i1mid,i2mid);
fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n",
xx[0][i0],
auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)],
y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)],
y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)],
auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)],
y_n_gfs[IDX4ptS(PSI6PHIGF,idx)], time);
}
fclose(out2D);
// For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution.
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) {
xx[0][i0] += -mu_AW*time;
//xx[0][i0] += -time;
}
set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact);
initial_data(initial_data_option, &params,xx,auxevol_gfs_exact,evol_gfs_exact);
// Fill in the remaining quantities
//driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact);
GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact);
// And now, we'll set the grid back to rights.
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) {
xx[0][i0] -= -mu_AW*time;
//xx[0][i0] -= -time;
}
sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n);
FILE *out2D_exact = fopen(filename, "w");
for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
const int idx = IDX3S(i0,i1mid,i2mid);
fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n",
xx[0][i0],
auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)],
evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)],
evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)],
auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)],
evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]);
}
fclose(out2D_exact);
}
// Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
// applying quadratic extrapolation outer boundary conditions.
// Step 3.b: Step forward one timestep (t -> t+dt) in time using
// chosen RK-like MoL timestepping algorithm
#include "MoLtimestepping/RK_MoL.h"
} // End main loop to progress forward in time.
// Step 4: Free all allocated memory
#include "MoLtimestepping/RK_Free_Memory.h"
free(auxevol_gfs);
free(auxevol_gfs_exact);
free(evol_gfs_exact);
for(int i=0;i<3;i++) free(xx[i]);
return 0;
}
```
Writing GiRaFFE_staggered_1D_Tests_standalone_Ccodes//GiRaFFE_NRPy_standalone.c
<a id='compileexec'></a>
# Step 6: Compile generated C codes & perform GRFFE simulations \[Back to [top](#toc)\]
$$\label{compileexec}$$
To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb).
```python
cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"),
os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="optimized")
# Change to output directory
os.chdir(outdir)
# Clean up existing output files
cmd.delete_existing_files("out*.txt")
cmd.delete_existing_files("out*.png")
# ID options are: "AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"
for opt in ID_opts:
cmd.Execute("GiRaFFE_NRPy_standalone", "299 4 4 "+opt, "out_298"+opt+".txt")
# cmd.Execute("GiRaFFE_NRPy_standalone", "1280 9 9 "+opt, "out_1280"+opt+".txt")
# cmd.Execute("GiRaFFE_NRPy_standalone", "1280 32 32 "+opt, "out_"+opt+".txt")
# cmd.Execute("GiRaFFE_NRPy_standalone", "149 9 9 AlfvenWave","out149.txt")
# Return to root directory
os.chdir(os.path.join("../../"))
```
Compiling executable...
(EXEC): Executing `gcc -std=gnu99 -Ofast -fopenmp -march=native -funroll-loops GiRaFFE_staggered_1D_Tests_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_1D_Tests_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`...
(BENCH): Finished executing in 3.0177741050720215 seconds.
Finished compilation.
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 AlfvenWave`...
(BENCH): Finished executing in 15.846468687057495 seconds.
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 ThreeAlfvenWaves`...
(BENCH): Finished executing in 15.84378170967102 seconds.
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 DegenAlfvenWave`...
(BENCH): Finished executing in 15.844474077224731 seconds.
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FastWave`...
(BENCH): Finished executing in 15.850006580352783 seconds.
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FFEBD`...
(BENCH): Finished executing in 16.047165870666504 seconds.
<a id='plots'></a>
# Step 7: Data Visualization \[Back to [top](#toc)\]
$$\label{plots}$$
Now we plot the data and recreate figure 1 from the [GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf). We reconstruct the electric field via
$$
E_i = -\epsilon_{ijk}v^j B^k
$$
in the `calc_E` function below. We also calculate the FFE condition $B^2 - E^2$ using the `calc_Bsquared_minus_Esquared` function.
```python
eDDD = ixp.LeviCivitaSymbol_dim3_rank3()
def calc_E(data):
VU0 = data[:, 10]
VU1 = data[:, 11]
VU2 = data[:, 12]
BU0 = data[:, 1]
BU1 = data[:, 2]
BU2 = data[:, 3]
VU = [VU0, VU1, VU2]
BU = [BU0, BU1, BU2]
ED = np.zeros((VU0.size, 3))
for i in range(3):
for j in range(3):
for k in range(3):
ED[:,i] = ED[:,i] - eDDD[i][j][k]*VU[j]*BU[k]
return ED
def calc_Bsquared_minus_Esquared(data):
EU = calc_E(data)
BU0 = data[:, 1]
BU1 = data[:, 2]
BU2 = data[:, 3]
return (BU0**2 + BU1**2 + BU2**2) - (EU[:,0]**2 + EU[:,1]**2 + EU[:,2]**2)
```
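As a quick, hypothetical sanity check of `calc_E` (not part of the original test suite; the synthetic row below is made up for this illustration), a data row with $B = (2, 0, 0)$ and $v = (0, 0, 0.5)$ should give $E = (0, -1, 0)$ from $E_i = -\epsilon_{ijk} v^j B^k$:
```python
# Hypothetical sanity check: one synthetic data row using the same column layout
# as the output files (x, B^i, A_i, Stilde_i, v^i, psi6Phi, ...).
import numpy as np
test_row = np.zeros((1, 15))
test_row[0, 1]  = 2.0   # BU0 = B^x
test_row[0, 12] = 0.5   # ValenciavU2 = v^z
print(calc_E(test_row)) # expect approximately [[ 0. -1.  0.]]
```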
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"]
fig = plt.figure(figsize=(6, 15))
# spec = mpl.gridspec.GridSpec(ncols=6, nrows=2,wspace=0.65, hspace=0.4) # 6 columns evenly divides both 2 & 3
# ax1 = fig.add_subplot(spec[0,0:2]) # row 0 with axes spanning 2 cols on evens
# ax2 = fig.add_subplot(spec[0,2:4])
# ax3 = fig.add_subplot(spec[0,4:])
# ax4 = fig.add_subplot(spec[1,1:3]) # row 0 with axes spanning 2 cols on odds
# ax5 = fig.add_subplot(spec[1,3:5])
gs = gridspec.GridSpec(nrows=5, ncols=1, hspace=0.5)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[2, 0])
ax4 = fig.add_subplot(gs[3, 0])
ax5 = fig.add_subplot(gs[4, 0])
Data_num_Fast_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000000.txt"))
Data_num_Fast_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000100.txt"))
E_Fast_A = calc_E(Data_num_Fast_A)
E_Fast_B = calc_E(Data_num_Fast_B)
ax1.scatter(Data_num_Fast_A[:,0], np.abs(E_Fast_A[:,2]), s=1,label = 't = 0')
ax1.plot(Data_num_Fast_B[:,0], np.abs(E_Fast_B[:,2]), 'k-', label = 't = 0.5')
ax1.set_xlim(-0.5, 1.5)
ax1.set_ylim(0.6)
ax1.text(0.95, 0.01, 'Fast Wave',
verticalalignment='bottom', horizontalalignment='right',
transform=ax1.transAxes,
color='black', fontsize=14)
ax1.set_xlabel('x')
ax1.set_ylabel(r'$|E^z|$')
ax1.legend()
Data_num_Alf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000000.txt"))
Data_num_Alf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000400.txt"))
ax2.scatter(Data_num_Alf_A[:,0], Data_num_Alf_A[:,3], s=1, label = 't = 0')
ax2.plot(Data_num_Alf_B[:,0], Data_num_Alf_B[:,3], 'k-', label = 't = 2.0')
ax2.set_xlim(-1.5, 1.5)
ax2.set_ylim(1.1)
ax2.text(0.95, 0.01, 'Alfven Wave',
verticalalignment='bottom', horizontalalignment='right',
transform=ax2.transAxes,
color='black', fontsize=14)
ax2.set_xlabel('x')
ax2.set_ylabel(r'$B^z$')
ax2.legend(loc='center right')
Data_num_DegenAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000000.txt"))
Data_num_DegenAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000200.txt"))
E_DegenAlf_A = calc_E(Data_num_DegenAlf_A)
E_DegenAlf_B = calc_E(Data_num_DegenAlf_B)
ax3.scatter(Data_num_DegenAlf_A[:,0], E_DegenAlf_A[:,1], s=1, label = 't = 0')
ax3.plot(Data_num_DegenAlf_B[:,0], E_DegenAlf_B[:,1], 'k-', label = 't = 1.0')
ax3.set_xlim(-1.5, 1.5)
ax3.set_ylim(-1.35)
ax3.text(0.95, 0.01, 'Deg. Alfven Wave',
verticalalignment='bottom', horizontalalignment='right',
transform=ax3.transAxes,
color='black', fontsize=14)
ax3.set_xlabel('x')
ax3.set_ylabel(r'$E^y$')
ax3.legend()
# Data_num_ThreeAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out149__ThreeAlfvenWaves-00000000.txt"))
Data_num_ThreeAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__ThreeAlfvenWaves-00000112.txt"))
# ax2.plot(Data_num_ThreeAlf_A[:,0], Data_num_ThreeAlf_A[:,2], 'k-')
ax4.scatter(Data_num_ThreeAlf_B[:,0], Data_num_ThreeAlf_B[:,2], s=1, label = 't = 0.56')
ax4.set_xlim(-1.0, 1.0)
# ax4.set_ylim()
ax4.text(0.95, 0.01, 'Three Waves',
verticalalignment='bottom', horizontalalignment='right',
transform=ax4.transAxes,
color='black', fontsize=14)
ax4.set_xlabel('x')
ax4.set_ylabel(r'$B^y$')
ax4.legend(loc='center')
Data_num_FFEBD_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000000.txt"))
Data_num_FFEBD_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000004.txt"))
B2mE2_A = calc_Bsquared_minus_Esquared(Data_num_FFEBD_A)
B2mE2_B = calc_Bsquared_minus_Esquared(Data_num_FFEBD_B)
ax5.scatter(Data_num_FFEBD_A[:,0], B2mE2_A, s=1, label = 't = 0')
ax5.plot(Data_num_FFEBD_B[:,0], B2mE2_B, 'k-', label = 't = 0.02')
ax5.set_xlim(-0.4, 0.6)
ax5.text(0.95, 0.01, 'FFE Breakdown',
verticalalignment='bottom', horizontalalignment='right',
transform=ax5.transAxes,
color='black', fontsize=14)
ax5.set_xlabel('x')
ax5.set_ylabel(r'$B^2 - E^2$')
ax5.legend()
plt.savefig(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE"), dpi=800, bbox_inches="tight")
plt.close(fig)
```
```python
img1 = plt.imread(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE.png"))
img2 = plt.imread(os.path.join("GiRaFFE_NRPy/example_par_files/figure1_GiRaFFE_paper.png"))
NUM_ROWS = 1
IMGs_IN_ROW = 2
f, ax = plt.subplots(NUM_ROWS, IMGs_IN_ROW, figsize=(28,18))
plt.subplots_adjust(wspace=0.05)
plt.axis('off')
ax[0].imshow(img1)
ax[1].imshow(img2)
ax[0].set_title('GiRaFFE_NRPy (this notebook)')
ax[1].set_title('original GiRaFFE paper, figure 1')
# title = 'side by side view of images'
# f.suptitle(title, fontsize=16)
plt.tight_layout()
# plt.xticks([])
# plt.yticks([])
plt.show()
```
<a id='latex_pdf_output'></a>
# Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf](Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered",location_of_template_file=os.path.join(".."))
```
Created Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.tex, and
compiled LaTeX file to PDF file Tutorial-Start_to_Finish-
GiRaFFE_NRPy-1D_tests-staggered.pdf
|
function [K,H] = calc_Laplacians(I,seg_map,M,eta,beta1,beta2)
% CALC_LAPLACIANS builds graph Laplacians from the input image I (rows x cols x B)
% and, optionally, a segmentation map:
%   K - Laplacian of the pixel similarity graph, shifted by -(beta2/beta1) times the identity
%   H - Laplacian of the complete graph on M nodes
% (A chain-graph Laplacian G over the B spectral bands is also computed below,
% but it is not returned by this function.)
[rows,cols,B] = size(I);
N = rows*cols;
% Build the pixel adjacency matrix W, either from the image itself or from
% the segmentation map when one is supplied.
if isempty(seg_map)
[W,Neighbors] = image2graph(I,eta,1e-9);
else
[W,Neighbors] = image2graph(double(seg_map),1e-3,1e-9);
end
D = diag(sum(W,2));           % degree matrix of the pixel graph
L = D - W;                    % combinatorial graph Laplacian over the N pixels
L = sparse(L);
K = L - beta2/beta1*speye(N);
% Laplacian of the complete graph on M nodes: M*eye(M) - ones(M,M).
W = ones(M,M);
D = diag(sum(W,2));
H = D - W;
% Laplacian of the chain (path) graph over the B spectral bands.
W = diag(ones(1,B-1),-1) + diag(ones(1,B-1),1);
D = diag(sum(W,2));
G = D - W;
G = sparse(G);
## Probability and Computing - Mitzenmacher, Upfal
```python
import numpy as np
from sympy import poly
from sympy.abc import x
```
Define two polynomials:
- $F$ is a product of monomials
- $G$ is in canonical form
```python
F = poly((x + 1) * (x - 2) * (x + 3) * (x - 4) * (x + 5) * (x - 6))
G = poly(x**6 - 7 * x**3 + 25)
```
Want to check if $F \equiv G$ without converting $F$ to canonical form.
```python
def polycheck(F: 'poly', G: 'poly', δ: int, k: int, replacement: bool) -> bool:
"""Randomized algorithm for verifying whether F and G are equivalent.
If F ≡ G, then the algo always computes the correct answer.
If F ≢ G, then the algo can compute the wrong answer by
finding r s.t. r is the root of F(x) - G(x) = 0,
which, by FTA, can happen at most in d / (δ * d) cases,
meaning that the prob. of error (in one iter) is <= 1/δ.
If sampling is performed WITH replacement, then iterations are independent,
therefore, the probability of error becomes <= (1/δ)**k
i.e. exponentially small in the number of trials.
If sampling is performed WITHOUT replacement, we get a tighter bound <= (1/δ)**k,
since the error now consists of the event "finding k distinct roots",
which is much stronger.
Args:
F, G: sympy polynomials.
δ: upper bound for the sample space.
k: number of iterations (trials).
replacement: whether to perform sampling with or without replacement.
Returns:
True if F,G are found to be equivalent, otherwise False.
"""
d = max(F.degree(), G.degree())
space = np.arange(1, δ * d + 1) # {1, ..., δ * d}
# choose values uniformly at random from the sample space
rs = np.random.choice(space, replace=replacement, size=k)
for r in rs:
if F(r) != G(r):
return False
return True
```
```python
polycheck(F, G, δ=100, k=10, replacement=False)
```
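As a quick illustrative check (the polynomials `F2`, `G2`, and the comparison polynomial below are made up for this example): two representations of the same polynomial are always accepted, while a genuinely different polynomial is accepted wrongly with probability at most $(1/\delta)^k$.
```python
# Illustrative example only: F2 and G2 are the same polynomial written two ways,
# so polycheck always returns True; a different polynomial is rejected.
F2 = poly((x - 1) * (x + 2) * (x - 3))
G2 = poly(x**3 - 2*x**2 - 5*x + 6)
print(polycheck(F2, G2, δ=100, k=10, replacement=False))             # True (F2 ≡ G2)
# Here k = 10 exceeds the degree d = 3, so without replacement the samples
# cannot all be roots of the difference: the answer below is guaranteed False.
print(polycheck(F2, poly(x**3 + 1), δ=100, k=10, replacement=False))
```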
|
/* interpolation/interp_poly.c
*
* Copyright (C) 2001 DAN, HO-JIN
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <config.h>
#include <math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_poly.h>
#include <gsl/gsl_interp.h>
typedef struct
{
double *d;
double *coeff;
double *work;
}
polynomial_state_t;
static void *
polynomial_alloc (size_t size)
{
polynomial_state_t *state =
(polynomial_state_t *) malloc (sizeof (polynomial_state_t));
if (state == 0)
{
GSL_ERROR_NULL ("failed to allocate space for polynomial state",
GSL_ENOMEM);
}
state->d = (double *) malloc (sizeof (double) * size);
if (state->d == 0)
{
free (state);
GSL_ERROR_NULL ("failed to allocate space for d", GSL_ENOMEM);
}
state->coeff = (double *) malloc (sizeof (double) * size);
if (state->coeff == 0)
{
free (state->d);
free (state);
GSL_ERROR_NULL ("failed to allocate space for d", GSL_ENOMEM);
}
state->work = (double *) malloc (sizeof (double) * size);
if (state->work == 0)
{
free (state->coeff);
free (state->d);
free (state);
GSL_ERROR_NULL ("failed to allocate space for d", GSL_ENOMEM);
}
return state;
}
static int
polynomial_init (void *vstate,
const double xa[], const double ya[], size_t size)
{
polynomial_state_t *state = (polynomial_state_t *) vstate;
int status = gsl_poly_dd_init (state->d, xa, ya, size);
return status;
}
static int
polynomial_eval (const void *vstate,
const double xa[], const double ya[], size_t size, double x,
gsl_interp_accel * acc, double *y)
{
const polynomial_state_t *state = (const polynomial_state_t *) vstate;
*y = gsl_poly_dd_eval (state->d, xa, size, x);
return GSL_SUCCESS;
}
static int
polynomial_deriv (const void *vstate,
const double xa[], const double ya[], size_t size, double x,
gsl_interp_accel * acc, double *y)
{
const polynomial_state_t *state = (const polynomial_state_t *) vstate;
gsl_poly_dd_taylor (state->coeff, x, state->d, xa, size, state->work);
*y = state->coeff[1];
return GSL_SUCCESS;
}
static int
polynomial_deriv2 (const void *vstate,
const double xa[], const double ya[], size_t size,
double x, gsl_interp_accel * acc, double *y)
{
const polynomial_state_t *state = (const polynomial_state_t *) vstate;
gsl_poly_dd_taylor (state->coeff, x, state->d, xa, size, state->work);
*y = 2.0 * state->coeff[2];
return GSL_SUCCESS;
}
static int
polynomial_integ (const void *vstate, const double xa[], const double ya[],
size_t size, gsl_interp_accel * acc, double a, double b,
double *result)
{
const polynomial_state_t *state = (const polynomial_state_t *) vstate;
size_t i;
double sum;
gsl_poly_dd_taylor (state->coeff, 0.0, state->d, xa, size, state->work);
sum = state->coeff[0] * (b - a);
for (i = 1; i < size; i++)
{
sum += state->coeff[i] * (pow (b, i + 1) - pow (a, i + 1)) / (i + 1.0);
}
*result = sum;
return GSL_SUCCESS;
}
static void
polynomial_free (void *vstate)
{
polynomial_state_t *state = (polynomial_state_t *) vstate;
free (state->d);
free (state->coeff);
free (state->work);
free (state);
}
static const gsl_interp_type polynomial_type = {
"polynomial",
3,
&polynomial_alloc,
&polynomial_init,
&polynomial_eval,
&polynomial_deriv,
&polynomial_deriv2,
&polynomial_integ,
&polynomial_free,
};
const gsl_interp_type *gsl_interp_polynomial = &polynomial_type;
|
Load LFindLoad.
From lfind Require Import LFind.
Unset Printing Notations.
Set Printing Implicit.
Inductive natural : Type := Zero : natural | Succ : natural -> natural .
Inductive lst : Type := Nil : lst | Cons : natural -> lst -> lst .
Inductive tree : Type := Node : natural -> tree -> tree -> tree | Leaf : tree.
Inductive Pair : Type := mkpair : natural -> natural -> Pair
with Zlst : Type := zcons : Pair -> Zlst -> Zlst | znil : Zlst.
Fixpoint plus (plus_arg0 : natural) (plus_arg1 : natural) : natural
:= match plus_arg0, plus_arg1 with
| Zero, n => n
| Succ n, m => Succ (plus n m)
end.
Fixpoint mult (mult_arg0 : natural) (mult_arg1 : natural) : natural
:= match mult_arg0, mult_arg1 with
| Zero, n => Zero
| Succ n, m => plus (mult n m) m
end.
Fixpoint qmult (qmult_arg0 : natural) (qmult_arg1 : natural) (qmult_arg2 : natural) : natural
:= match qmult_arg0, qmult_arg1, qmult_arg2 with
| Zero, n, m => m
| Succ n, m, p => qmult n m (plus p m)
end.
Lemma plus_succ : forall (x y : natural), plus x (Succ y) = Succ (plus x y).
Proof.
intros.
induction x.
- reflexivity.
- simpl. rewrite IHx. reflexivity.
Qed.
Lemma plus_assoc : forall (x y z : natural), plus (plus x y) z = plus x (plus y z).
Proof.
intros.
induction x.
- reflexivity.
- simpl. rewrite IHx. reflexivity.
Qed.
Lemma plus_zero : forall (x : natural), plus x Zero = x.
Proof.
intros.
induction x.
- reflexivity.
- simpl. rewrite IHx. reflexivity.
Qed.
Lemma plus_commut : forall (x y : natural), plus x y = plus y x.
Proof.
intros.
induction x.
- lfind. Admitted.
|
%kkbitorinv 'Output = NOT(Input 1) OR (Input 2 or Constant)'
% This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros kbitorinv.pane file
%
% Parameters:
% InputFile: i1 'Input 1', required: 'First input data object'
% OutputFile: o 'Output', required: 'Resulting output data object'
% InputFile: i2 'Input 2', optional: 'Second input data object'
% Integer: real 'Constant', default: 0: 'Constant value'
%
% Example: o = kkbitorinv({i1, i2}, {'i1','';'o','';'i2','';'real',0})
%
% Khoros helpfile follows below:
%
% PROGRAM
% kbitorinv - Output = NOT(Input 1) OR (Input 2 or Constant)
%
% DESCRIPTION
% The bitwise OR Inverted operator performs a bitwise NOT on "Input 1"
% and then a bitwise OR between the result of the NOT operation and
% either "Input 2" or the "Constant" value, whichever is specified by
% the user.
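% For illustration (only the low byte is shown; processing is actually done on
% unsigned longs, so higher-order bits are inverted as well): with Input 1 = 0x0F
% and Constant = 0x01, NOT(0x0F) = 0xF0, and 0xF0 OR 0x01 = 0xF1.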
%
% "Data Type - Single Input" 5
% .cI $DATAMANIP/repos/shared/man/sections/value_type_1input
%
% "Data Type - Two Input Objects" 5
% .cI $DATAMANIP/repos/shared/man/sections/value_type_2input
% The bitwise operators will not operate on float or complex data types.
%
% "Map Data" 5
% The bitwise operations have not been written to be fully polymorphic yet.
% They do not
% check for map data, and will therefore always operate on the value data,
% even if a map exists. This will most likely corrupt indexing into the map.
% In the case of a single input, it is recommended to use the "Copy to
% Value" (kcptoval) segment operator to temporarily move the map data into
% the value segment, run the bitwise operation on the data, then move it
% back to the map with the "Copy from Value" (kcpfromval) operator.
% When operating with two input objects, where at least one of the objects
% contains map data, data should be mapped prior to running the bitwise
% operator. The "Map Data" operator (kmapdata) can be used to perform
% the mapping operation.
%
% "Validity Mask - Single Input" 5
% .cI $DATAMANIP/repos/shared/man/sections/mask_1input
%
% "Validity Mask - Two Input Objects" 5
% Masking has not been implemented yet for the bitwise operators. Therefore,
% only the mask from the first input object will be transferred to the output.
%
% "Input Objects of Different Sizes" 5
% .cI $DATAMANIP/repos/shared/man/sections/resize_2input
% The value used to pad the data when the input files are not the same size is
% zero.
%
% "Explicit Location and Time Data - Single Input" 5
% .cI $DATAMANIP/repos/shared/man/sections/loc_and_time_1input
%
% "Explicit Location and Time Data - Two Input Objects" 5
% The bitwise operations have not been extended to understand location and
% time data. Therefore, only location and time data present in the first input
% object will be transferred to the output.
%
% Executing "OR Inverted" runs the program "kbitwise" with
% the -orinv flag.
%
%
%
% EXAMPLES
%
% "SEE ALSO"
% DATAMANIP::kbitwise
%
% RESTRICTIONS
%
% THIS ROUTINE is still under development, and has not been modified to fully
% support the polymorphic data model. See paragraphs above for discussions
% concerning the polymorphic data segments.
%
% All processing is currently performed as UNSIGNED long, so the operations
% may not work properly on negative values.
%
% The bitwise operators will not operate on float or complex data types.
%
% REFERENCES
%
% COPYRIGHT
% Copyright (C) 1993 - 1997, Khoral Research, Inc. ("KRI") All rights reserved.
%
function varargout = kkbitorinv(varargin)
if nargin ==0
Inputs={};arglist={'',''};
elseif nargin ==1
Inputs=varargin{1};arglist={'',''};
elseif nargin ==2
Inputs=varargin{1}; arglist=varargin{2};
else error('Usage: [out1,..] = kkbitorinv(Inputs,arglist).');
end
if size(arglist,2)~=2
error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}')
end
narglist={'i1', '__input';'o', '__output';'i2', '__input';'real', 0};
maxval={0,0,1,0};
minval={0,0,1,0};
istoggle=[0,0,1,1];
was_set=istoggle * 0;
paramtype={'InputFile','OutputFile','InputFile','Integer'};
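% Each row of narglist pairs a parameter tag with its current value; the
% '__input'/'__output' placeholders are resolved by the loops below to
% 'OK_in'/'OK_out' (or cleared when an optional argument is left unused)
% before the final call to callKhoros.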
% identify the input arrays and assign them to the arguments as stated by the user
if ~iscell(Inputs)
Inputs = {Inputs};
end
NumReqOutputs=1; nextinput=1; nextoutput=1;
for ii=1:size(arglist,1)
wasmatched=0;
for jj=1:size(narglist,1)
if strcmp(arglist{ii,1},narglist{jj,1}) % a given argument was matched to the possible arguments
wasmatched = 1;
was_set(jj) = 1;
if strcmp(narglist{jj,2}, '__input')
if (nextinput > length(Inputs))
error(['Input ' narglist{jj,1} ' has no corresponding input!']);
end
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
elseif strcmp(narglist{jj,2}, '__output')
if (nextoutput > nargout)
error(['Output nr. ' narglist{jj,1} ' is not present in the assignment list of outputs !']);
end
if (isempty(arglist{ii,2}))
narglist{jj,2} = 'OK_out';
else
narglist{jj,2} = arglist{ii,2};
end
nextoutput = nextoutput + 1;
if (minval{jj} == 0)
NumReqOutputs = NumReqOutputs - 1;
end
elseif isstr(arglist{ii,2})
narglist{jj,2} = arglist{ii,2};
else
if strcmp(paramtype{jj}, 'Integer') & (round(arglist{ii,2}) ~= arglist{ii,2})
error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' num2str(arglist{ii,2}) ' was supplied']);
end
if (minval{jj} ~= 0 | maxval{jj} ~= 0)
if (minval{jj} == 1 & maxval{jj} == 1 & arglist{ii,2} < 0)
error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']);
elseif (minval{jj} == -1 & maxval{jj} == -1 & arglist{ii,2} > 0)
error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']);
elseif (minval{jj} == 2 & maxval{jj} == 2 & arglist{ii,2} <= 0)
error(['Argument ' arglist{ii,1} ' must be bigger than zero!']);
elseif (minval{jj} == -2 & maxval{jj} == -2 & arglist{ii,2} >= 0)
error(['Argument ' arglist{ii,1} ' must be smaller than zero!']);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} < minval{jj})
error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} > maxval{jj})
error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]);
end
end
end
if ~strcmp(narglist{jj,2},'OK_out') & ~strcmp(narglist{jj,2},'OK_in')
narglist{jj,2} = arglist{ii,2};
end
end
end
if (wasmatched == 0 & ~strcmp(arglist{ii,1},''))
error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']);
end
end
% match the remaining inputs/outputs to the unused arguments and test for missing required inputs
for jj=1:size(narglist,1)
if strcmp(paramtype{jj}, 'Toggle')
if (narglist{jj,2} ==0)
narglist{jj,1} = '';
end;
narglist{jj,2} = '';
end;
if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~ was_set(jj)
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
if strcmp(narglist{jj,2}, '__input')
if (minval{jj} == 0) % meaning this input is required
if (nextinput > length(Inputs))
error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']);
else
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
end
else % this is an optional input
if (nextinput <= length(Inputs))
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end;
else
if strcmp(narglist{jj,2}, '__output')
if (minval{jj} == 0) % this is a required output
if (nextoutput > nargout & nargout > 1)
error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']);
else
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
NumReqOutputs = NumReqOutputs-1;
end
else % this is an optional output
if (nargout - nextoutput >= NumReqOutputs)
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end
end
end
end
if nargout
varargout = cell(1,nargout);
else
varargout = cell(1,1);
end
global KhorosRoot
if exist('KhorosRoot') && ~isempty(KhorosRoot)
w=['"' KhorosRoot];
else
if ispc
w='"C:\Program Files\dip\khorosBin\';
else
[s,w] = system('which cantata');
w=['"' w(1:end-8)];
end
end
[varargout{:}]=callKhoros([w 'kbitwise" -orinv'],Inputs,narglist);
|
[STATEMENT]
lemma word_upto_eq_upto: "s \<le> e \<Longrightarrow> e \<le> unat (max_word :: 'l word) \<Longrightarrow>
word_upto ((of_nat :: nat \<Rightarrow> ('l :: len) word) s) (of_nat e) = map of_nat (upt s (Suc e))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]
[PROOF STEP]
proof(induction e)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>s \<le> 0; 0 \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat 0) = map word_of_nat [s..<Suc 0]
2. \<And>e. \<lbrakk>\<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]; s \<le> Suc e; Suc e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
let ?mwon = "of_nat :: nat \<Rightarrow> 'l word"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>s \<le> 0; 0 \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat 0) = map word_of_nat [s..<Suc 0]
2. \<And>e. \<lbrakk>\<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]; s \<le> Suc e; Suc e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
let ?mmw = "max_word :: 'l word"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>s \<le> 0; 0 \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat 0) = map word_of_nat [s..<Suc 0]
2. \<And>e. \<lbrakk>\<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]; s \<le> Suc e; Suc e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
case (Suc e)
[PROOF STATE]
proof (state)
this:
\<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]
s \<le> Suc e
Suc e \<le> unat max_word
goal (2 subgoals):
1. \<lbrakk>s \<le> 0; 0 \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat 0) = map word_of_nat [s..<Suc 0]
2. \<And>e. \<lbrakk>\<lbrakk>s \<le> e; e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]; s \<le> Suc e; Suc e \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
proof(cases "?mwon s = ?mwon (Suc e)")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. word_of_nat s = word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
2. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
word_of_nat s = word_of_nat (Suc e)
goal (2 subgoals):
1. word_of_nat s = word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
2. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
have "s = Suc e"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s = Suc e
[PROOF STEP]
using le_unat_uoi Suc.prems True
[PROOF STATE]
proof (prove)
using this:
?y \<le> unat ?z \<Longrightarrow> unat (word_of_nat ?y) = ?y
s \<le> Suc e
Suc e \<le> unat max_word
word_of_nat s = word_of_nat (Suc e)
goal (1 subgoal):
1. s = Suc e
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
s = Suc e
goal (2 subgoals):
1. word_of_nat s = word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
2. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
word_of_nat s = word_of_nat (Suc e)
s = Suc e
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
word_of_nat s = word_of_nat (Suc e)
s = Suc e
goal (1 subgoal):
1. word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
by(subst word_upto.simps) (simp)
[PROOF STATE]
proof (state)
this:
word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
word_of_nat s \<noteq> word_of_nat (Suc e)
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
hence le: "s \<le> e"
[PROOF STATE]
proof (prove)
using this:
word_of_nat s \<noteq> word_of_nat (Suc e)
goal (1 subgoal):
1. s \<le> e
[PROOF STEP]
using le_SucE Suc.prems
[PROOF STATE]
proof (prove)
using this:
word_of_nat s \<noteq> word_of_nat (Suc e)
\<lbrakk>?m \<le> Suc ?n; ?m \<le> ?n \<Longrightarrow> ?R; ?m = Suc ?n \<Longrightarrow> ?R\<rbrakk> \<Longrightarrow> ?R
s \<le> Suc e
Suc e \<le> unat max_word
goal (1 subgoal):
1. s \<le> e
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
s \<le> e
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
have lm: "e \<le> unat ?mmw"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e \<le> unat max_word
[PROOF STEP]
using Suc.prems
[PROOF STATE]
proof (prove)
using this:
s \<le> Suc e
Suc e \<le> unat max_word
goal (1 subgoal):
1. e \<le> unat max_word
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
e \<le> unat max_word
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
have sucm: "(of_nat :: nat \<Rightarrow> ('l :: len) word) (Suc e) - 1 = of_nat e"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. word_of_nat (Suc e) - 1 = word_of_nat e
[PROOF STEP]
using Suc.prems(2)
[PROOF STATE]
proof (prove)
using this:
Suc e \<le> unat max_word
goal (1 subgoal):
1. word_of_nat (Suc e) - 1 = word_of_nat e
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
word_of_nat (Suc e) - 1 = word_of_nat e
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
note mIH = Suc.IH[OF le lm]
[PROOF STATE]
proof (state)
this:
word_upto (word_of_nat s) (word_of_nat e) = map word_of_nat [s..<Suc e]
goal (1 subgoal):
1. word_of_nat s \<noteq> word_of_nat (Suc e) \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
[PROOF STEP]
by(subst word_upto.simps) (simp add: False[simplified] Suc.prems mIH sucm)
[PROOF STATE]
proof (state)
this:
word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
word_upto (word_of_nat s) (word_of_nat (Suc e)) = map word_of_nat [s..<Suc (Suc e)]
goal (1 subgoal):
1. \<lbrakk>s \<le> 0; 0 \<le> unat max_word\<rbrakk> \<Longrightarrow> word_upto (word_of_nat s) (word_of_nat 0) = map word_of_nat [s..<Suc 0]
[PROOF STEP]
qed(simp add: word_upto.simps) |
[STATEMENT]
lemma expands_to_min_lt:
assumes "(f expands_to F) basis" "eventually (\<lambda>x. f x < g x) at_top"
shows "((\<lambda>x. min (f x) (g x)) expands_to F) basis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. min (f x) (g x)) expands_to F) basis
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
(f expands_to F) basis
goal (1 subgoal):
1. ((\<lambda>x. min (f x) (g x)) expands_to F) basis
[PROOF STEP]
by (rule expands_to_cong) (insert assms(2), auto simp: min_def elim: eventually_mono) |
(* Title: JinjaDCI/BV/ClassAdd.thy
Author: Susannah Mansky
2019-20, UIUC
*)
section \<open> Property preservation under @{text "class_add"} \<close>
theory ClassAdd
imports BVConform
begin
lemma err_mono: "A \<subseteq> B \<Longrightarrow> err A \<subseteq> err B"
by(unfold err_def) auto
lemma opt_mono: "A \<subseteq> B \<Longrightarrow> opt A \<subseteq> opt B"
by(unfold opt_def) auto
lemma list_mono:
assumes "A \<subseteq> B" shows "list n A \<subseteq> list n B"
proof(rule)
fix xs assume "xs \<in> list n A"
then obtain size: "size xs = n" and inA: "set xs \<subseteq> A" by simp
with assms have "set xs \<subseteq> B" by simp
with size show "xs \<in> list n B" by(clarsimp intro!: listI)
qed
(****************************************************************)
\<comment> \<open> adding a class in the simplest way \<close>
abbreviation class_add :: "jvm_prog \<Rightarrow> jvm_method cdecl \<Rightarrow> jvm_prog" where
"class_add P cd \<equiv> cd#P"
subsection "Fields"
lemma class_add_has_fields:
assumes fs: "P \<turnstile> D has_fields FDTs" and nc: "\<not>is_class P C"
shows "class_add P (C, cdec) \<turnstile> D has_fields FDTs"
using assms
proof(induct rule: Fields.induct)
case (has_fields_Object D fs ms FDTs)
from has_fields_is_class_Object[OF fs] nc have "C \<noteq> Object" by fast
with has_fields_Object show ?case
by(auto simp: class_def fun_upd_apply intro!: TypeRel.has_fields_Object)
next
case rec: (has_fields_rec C1 D fs ms FDTs FDTs')
with has_fields_is_class have [simp]: "D \<noteq> C" by auto
with rec have "C1 \<noteq> C" by(clarsimp simp: is_class_def)
with rec show ?case
by(auto simp: class_def fun_upd_apply intro: TypeRel.has_fields_rec)
qed
lemma class_add_has_fields_rev:
"\<lbrakk> class_add P (C, cdec) \<turnstile> D has_fields FDTs; \<not>P \<turnstile> D \<preceq>\<^sup>* C \<rbrakk>
\<Longrightarrow> P \<turnstile> D has_fields FDTs"
proof(induct rule: Fields.induct)
case (has_fields_Object D fs ms FDTs)
then show ?case by(auto simp: class_def fun_upd_apply intro!: TypeRel.has_fields_Object)
next
case rec: (has_fields_rec C1 D fs ms FDTs FDTs')
then have sub1: "P \<turnstile> C1 \<prec>\<^sup>1 D"
by(auto simp: class_def fun_upd_apply intro!: subcls1I split: if_split_asm)
with rec.prems have cls: "\<not> P \<turnstile> D \<preceq>\<^sup>* C" by (meson converse_rtrancl_into_rtrancl)
with cls rec show ?case
by(auto simp: class_def fun_upd_apply
intro: TypeRel.has_fields_rec split: if_split_asm)
qed
lemma class_add_has_field:
assumes "P \<turnstile> C\<^sub>0 has F,b:T in D" and "\<not> is_class P C"
shows "class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,b:T in D"
using assms by(auto simp: has_field_def dest!: class_add_has_fields[of P C\<^sub>0])
lemma class_add_has_field_rev:
assumes has: "class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,b:T in D"
and ncp: "\<And>D'. P \<turnstile> C\<^sub>0 \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "P \<turnstile> C\<^sub>0 has F,b:T in D"
using assms by(auto simp: has_field_def dest!: class_add_has_fields_rev)
lemma class_add_sees_field:
assumes "P \<turnstile> C\<^sub>0 sees F,b:T in D" and "\<not> is_class P C"
shows "class_add P (C, cdec) \<turnstile> C\<^sub>0 sees F,b:T in D"
using assms by(auto simp: sees_field_def dest!: class_add_has_fields[of P C\<^sub>0])
lemma class_add_sees_field_rev:
assumes has: "class_add P (C, cdec) \<turnstile> C\<^sub>0 sees F,b:T in D"
and ncp: "\<And>D'. P \<turnstile> C\<^sub>0 \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "P \<turnstile> C\<^sub>0 sees F,b:T in D"
using assms by(auto simp: sees_field_def dest!: class_add_has_fields_rev)
lemma class_add_field:
assumes fd: "P \<turnstile> C\<^sub>0 sees F,b:T in D" and "\<not> is_class P C"
shows "field P C\<^sub>0 F = field (class_add P (C, cdec)) C\<^sub>0 F"
using class_add_sees_field[OF assms, of cdec] fd by simp
subsection "Methods"
lemma class_add_sees_methods:
assumes ms: "P \<turnstile> D sees_methods Mm" and nc: "\<not>is_class P C"
shows "class_add P (C, cdec) \<turnstile> D sees_methods Mm"
using assms
proof(induct rule: Methods.induct)
case (sees_methods_Object D fs ms Mm)
from sees_methods_is_class_Object[OF ms] nc have "C \<noteq> Object" by fast
with sees_methods_Object show ?case
by(auto simp: class_def fun_upd_apply intro!: TypeRel.sees_methods_Object)
next
case rec: (sees_methods_rec C1 D fs ms Mm Mm')
with sees_methods_is_class have [simp]: "D \<noteq> C" by auto
with rec have "C1 \<noteq> C" by(clarsimp simp: is_class_def)
with rec show ?case
by(auto simp: class_def fun_upd_apply intro: TypeRel.sees_methods_rec)
qed
lemma class_add_sees_methods_rev:
"\<lbrakk> class_add P (C, cdec) \<turnstile> D sees_methods Mm;
\<And>D'. P \<turnstile> D \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C \<rbrakk>
\<Longrightarrow> P \<turnstile> D sees_methods Mm"
proof(induct rule: Methods.induct)
case (sees_methods_Object D fs ms Mm)
then show ?case
by(auto simp: class_def fun_upd_apply intro!: TypeRel.sees_methods_Object)
next
case rec: (sees_methods_rec C1 D fs ms Mm Mm')
then have sub1: "P \<turnstile> C1 \<prec>\<^sup>1 D"
by(auto simp: class_def fun_upd_apply intro!: subcls1I)
have cls: "\<And>D'. P \<turnstile> D \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
proof -
fix D' assume "P \<turnstile> D \<preceq>\<^sup>* D'"
with sub1 have "P \<turnstile> C1 \<preceq>\<^sup>* D'" by simp
with rec.prems show "D' \<noteq> C" by simp
qed
with cls rec show ?case
by(auto simp: class_def fun_upd_apply intro: TypeRel.sees_methods_rec)
qed
lemma class_add_sees_methods_Obj:
assumes "P \<turnstile> Object sees_methods Mm" and nObj: "C \<noteq> Object"
shows "class_add P (C, cdec) \<turnstile> Object sees_methods Mm"
proof -
from assms obtain C' fs ms where cls: "class P Object = Some(C',fs,ms)"
by(auto elim!: Methods.cases)
with nObj have cls': "class (class_add P (C, cdec)) Object = Some(C',fs,ms)"
by(simp add: class_def fun_upd_apply)
from assms cls have "Mm = map_option (\<lambda>m. (m, Object)) \<circ> map_of ms" by(auto elim!: Methods.cases)
with assms cls' show ?thesis
by(auto simp: is_class_def fun_upd_apply intro!: sees_methods_Object)
qed
lemma class_add_sees_methods_rev_Obj:
assumes "class_add P (C, cdec) \<turnstile> Object sees_methods Mm" and nObj: "C \<noteq> Object"
shows "P \<turnstile> Object sees_methods Mm"
proof -
from assms obtain C' fs ms where cls: "class (class_add P (C, cdec)) Object = Some(C',fs,ms)"
by(auto elim!: Methods.cases)
with nObj have cls': "class P Object = Some(C',fs,ms)"
by(simp add: class_def fun_upd_apply)
from assms cls have "Mm = map_option (\<lambda>m. (m, Object)) \<circ> map_of ms" by(auto elim!: Methods.cases)
with assms cls' show ?thesis
by(auto simp: is_class_def fun_upd_apply intro!: sees_methods_Object)
qed
lemma class_add_sees_method:
assumes "P \<turnstile> C\<^sub>0 sees M\<^sub>0, b : Ts\<rightarrow>T = m in D" and "\<not> is_class P C"
shows "class_add P (C, cdec) \<turnstile> C\<^sub>0 sees M\<^sub>0, b : Ts\<rightarrow>T = m in D"
using assms by(auto simp: Method_def dest!: class_add_sees_methods[of P C\<^sub>0])
lemma class_add_method:
assumes md: "P \<turnstile> C\<^sub>0 sees M\<^sub>0, b : Ts\<rightarrow>T = m in D" and "\<not> is_class P C"
shows "method P C\<^sub>0 M\<^sub>0 = method (class_add P (C, cdec)) C\<^sub>0 M\<^sub>0"
using class_add_sees_method[OF assms, of cdec] md by simp
lemma class_add_sees_method_rev:
"\<lbrakk> class_add P (C, cdec) \<turnstile> C\<^sub>0 sees M\<^sub>0, b : Ts\<rightarrow>T = m in D;
\<not> P \<turnstile> C\<^sub>0 \<preceq>\<^sup>* C \<rbrakk>
\<Longrightarrow> P \<turnstile> C\<^sub>0 sees M\<^sub>0, b : Ts\<rightarrow>T = m in D"
by(auto simp: Method_def dest!: class_add_sees_methods_rev)
lemma class_add_sees_method_Obj:
"\<lbrakk> P \<turnstile> Object sees M\<^sub>0, b : Ts\<rightarrow>T = m in D; C \<noteq> Object \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> Object sees M\<^sub>0, b : Ts\<rightarrow>T = m in D"
by(auto simp: Method_def dest!: class_add_sees_methods_Obj[where P=P])
lemma class_add_sees_method_rev_Obj:
"\<lbrakk> class_add P (C, cdec) \<turnstile> Object sees M\<^sub>0, b : Ts\<rightarrow>T = m in D; C \<noteq> Object \<rbrakk>
\<Longrightarrow> P \<turnstile> Object sees M\<^sub>0, b : Ts\<rightarrow>T = m in D"
by(auto simp: Method_def dest!: class_add_sees_methods_rev_Obj[where P=P])
subsection "Types and states"
lemma class_add_is_type:
"is_type P T \<Longrightarrow> is_type (class_add P (C, cdec)) T"
by(cases cdec, simp add: is_type_def is_class_def class_def fun_upd_apply split: ty.splits)
lemma class_add_types:
"types P \<subseteq> types (class_add P (C, cdec))"
using class_add_is_type by(cases cdec, clarsimp)
lemma class_add_states:
"states P mxs mxl \<subseteq> states (class_add P (C, cdec)) mxs mxl"
proof -
let ?A = "types P" and ?B = "types (class_add P (C, cdec))"
have ab: "?A \<subseteq> ?B" by(rule class_add_types)
moreover have "\<And>n. list n ?A \<subseteq> list n ?B" using ab by(rule list_mono)
moreover have "list mxl (err ?A) \<subseteq> list mxl (err ?B)" using err_mono[OF ab] by(rule list_mono)
ultimately show ?thesis by(auto simp: JVM_states_unfold intro!: err_mono opt_mono)
qed
lemma class_add_check_types:
"check_types P mxs mxl \<tau>s \<Longrightarrow> check_types (class_add P (C, cdec)) mxs mxl \<tau>s"
using class_add_states by(fastforce simp: check_types_def)
subsection "Subclasses and subtypes"
lemma class_add_subcls:
"\<lbrakk> P \<turnstile> D \<preceq>\<^sup>* D'; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> D \<preceq>\<^sup>* D'"
proof(induct rule: rtrancl.induct)
case (rtrancl_into_rtrancl a b c)
then have "b \<noteq> C" by(clarsimp simp: is_class_def dest!: subcls1D)
with rtrancl_into_rtrancl show ?case
by(fastforce dest!: subcls1D simp: class_def fun_upd_apply
intro!: rtrancl_trans[of a b] subcls1I)
qed(simp)
lemma class_add_subcls_rev:
"\<lbrakk> class_add P (C, cdec) \<turnstile> D \<preceq>\<^sup>* D'; \<not>P \<turnstile> D \<preceq>\<^sup>* C \<rbrakk>
\<Longrightarrow> P \<turnstile> D \<preceq>\<^sup>* D'"
proof(induct rule: rtrancl.induct)
case (rtrancl_into_rtrancl a b c)
then have "b \<noteq> C" by(clarsimp simp: is_class_def dest!: subcls1D)
with rtrancl_into_rtrancl show ?case
by(fastforce dest!: subcls1D simp: class_def fun_upd_apply
intro!: rtrancl_trans[of a b] subcls1I)
qed(simp)
lemma class_add_subtype:
"\<lbrakk> subtype P x y; \<not> is_class P C \<rbrakk>
\<Longrightarrow> subtype (class_add P (C, cdec)) x y"
proof(induct rule: widen.induct)
case (widen_subcls C D)
then show ?case using class_add_subcls by simp
qed(simp+)
lemma class_add_widens:
"\<lbrakk> P \<turnstile> Ts [\<le>] Ts'; \<not> is_class P C \<rbrakk>
\<Longrightarrow> (class_add P (C, cdec)) \<turnstile> Ts [\<le>] Ts'"
using class_add_subtype by (metis (no_types) list_all2_mono)
lemma class_add_sup_ty_opt:
"\<lbrakk> P \<turnstile> l1 \<le>\<^sub>\<top> l2; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> l1 \<le>\<^sub>\<top> l2"
using class_add_subtype by(auto simp: sup_ty_opt_def Err.le_def lesub_def split: err.splits)
lemma class_add_sup_loc:
"\<lbrakk> P \<turnstile> LT [\<le>\<^sub>\<top>] LT'; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> LT [\<le>\<^sub>\<top>] LT'"
using class_add_sup_ty_opt[where P=P and C=C] by (simp add: list.rel_mono_strong)
lemma class_add_sup_state:
"\<lbrakk> P \<turnstile> \<tau> \<le>\<^sub>i \<tau>'; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> \<tau> \<le>\<^sub>i \<tau>'"
using class_add_subtype class_add_sup_ty_opt
by(auto simp: sup_state_def Listn.le_def Product.le_def lesub_def class_add_widens
class_add_sup_ty_opt list_all2_mono)
lemma class_add_sup_state_opt:
"\<lbrakk> P \<turnstile> \<tau> \<le>' \<tau>'; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec) \<turnstile> \<tau> \<le>' \<tau>'"
by(auto simp: sup_state_opt_def Opt.le_def lesub_def class_add_widens
class_add_sup_ty_opt list_all2_mono)
subsection "Effect"
lemma class_add_is_relevant_class:
"\<lbrakk> is_relevant_class i P C\<^sub>0; \<not> is_class P C \<rbrakk>
\<Longrightarrow> is_relevant_class i (class_add P (C, cdec)) C\<^sub>0"
by(cases i, auto simp: class_add_subcls)
lemma class_add_is_relevant_class_rev:
assumes irc: "is_relevant_class i (class_add P (C, cdec)) C\<^sub>0"
and ncp: "\<And>cd D'. cd \<in> set P \<Longrightarrow> \<not>P \<turnstile> fst cd \<preceq>\<^sup>* C"
and wfxp: "wf_syscls P"
shows "is_relevant_class i P C\<^sub>0"
using assms
proof(cases i)
case (Getfield F D) with assms
show ?thesis by(fastforce simp: wf_syscls_def sys_xcpts_def dest!: class_add_subcls_rev)
next
case (Putfield F D) with assms
show ?thesis by(fastforce simp: wf_syscls_def sys_xcpts_def dest!: class_add_subcls_rev)
next
case (Checkcast D) with assms
show ?thesis by(fastforce simp: wf_syscls_def sys_xcpts_def dest!: class_add_subcls_rev)
qed(simp_all)
lemma class_add_is_relevant_entry:
"\<lbrakk> is_relevant_entry P i pc e; \<not> is_class P C \<rbrakk>
\<Longrightarrow> is_relevant_entry (class_add P (C, cdec)) i pc e"
by(clarsimp simp: is_relevant_entry_def class_add_is_relevant_class)
lemma class_add_is_relevant_entry_rev:
"\<lbrakk> is_relevant_entry (class_add P (C, cdec)) i pc e;
\<And>cd D'. cd \<in> set P \<Longrightarrow> \<not>P \<turnstile> fst cd \<preceq>\<^sup>* C;
wf_syscls P \<rbrakk>
\<Longrightarrow> is_relevant_entry P i pc e"
by(auto simp: is_relevant_entry_def dest!: class_add_is_relevant_class_rev)
lemma class_add_relevant_entries:
"\<not> is_class P C
\<Longrightarrow> set (relevant_entries P i pc xt) \<subseteq> set (relevant_entries (class_add P (C, cdec)) i pc xt)"
by(clarsimp simp: relevant_entries_def class_add_is_relevant_entry)
lemma class_add_relevant_entries_eq:
assumes wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "relevant_entries P i pc xt = relevant_entries (class_add P (C, cdec)) i pc xt"
proof -
have ncp: "\<And>cd D'. cd \<in> set P \<Longrightarrow> \<not>P \<turnstile> fst cd \<preceq>\<^sup>* C"
by(rule wf_subcls_nCls'[OF assms])
moreover from wf have wfsys: "wf_syscls P" by(simp add: wf_prog_def)
moreover
note class_add_is_relevant_entry[OF _ nclass, of i pc _ cdec]
class_add_is_relevant_entry_rev[OF _ ncp wfsys, of cdec i pc]
ultimately show ?thesis by (metis filter_cong relevant_entries_def)
qed
lemma class_add_norm_eff_pc:
assumes ne: "\<forall>(pc',\<tau>') \<in> set (norm_eff i P pc \<tau>). pc' < mpc"
shows "\<forall>(pc',\<tau>') \<in> set (norm_eff i (class_add P (C, cdec)) pc \<tau>). pc' < mpc"
using assms by(cases i, auto simp: norm_eff_def)
lemma class_add_norm_eff_sup_state_opt:
assumes ne: "\<forall>(pc',\<tau>') \<in> set (norm_eff i P pc \<tau>). P \<turnstile> \<tau>' \<le>' \<tau>s!pc'"
and nclass: "\<not> is_class P C" and app: "app\<^sub>i (i, P, pc, mxs, T, \<tau>)"
shows "\<forall>(pc',\<tau>') \<in> set (norm_eff i (class_add P (C, cdec)) pc \<tau>). (class_add P (C, cdec)) \<turnstile> \<tau>' \<le>' \<tau>s!pc'"
proof -
obtain ST LT where "\<tau> = (ST,LT)" by(cases \<tau>)
with assms show ?thesis proof(cases i)
qed(fastforce simp: norm_eff_def
dest!: class_add_field[where cdec=cdec] class_add_method[where cdec=cdec]
class_add_sup_loc[OF _ nclass] class_add_subtype[OF _ nclass]
class_add_widens[OF _ nclass] class_add_sup_state_opt[OF _ nclass])+
qed
lemma class_add_xcpt_eff_eq:
assumes wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "xcpt_eff i P pc \<tau> xt = xcpt_eff i (class_add P (C, cdec)) pc \<tau> xt"
using class_add_relevant_entries_eq[OF assms, of i pc xt cdec] by(cases \<tau>, simp add: xcpt_eff_def)
lemma class_add_eff_pc:
assumes eff: "\<forall>(pc',\<tau>') \<in> set (eff i P pc xt (Some \<tau>)). pc' < mpc"
and wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "\<forall>(pc',\<tau>') \<in> set (eff i (class_add P (C, cdec)) pc xt (Some \<tau>)). pc' < mpc"
using eff class_add_norm_eff_pc class_add_xcpt_eff_eq[OF wf nclass]
by(auto simp: norm_eff_def eff_def)
lemma class_add_eff_sup_state_opt:
assumes eff: "\<forall>(pc',\<tau>') \<in> set (eff i P pc xt (Some \<tau>)). P \<turnstile> \<tau>' \<le>' \<tau>s!pc'"
and wf: "wf_prog wf_md P"and nclass: "\<not> is_class P C"
and app: "app\<^sub>i (i, P, pc, mxs, T, \<tau>)"
shows "\<forall>(pc',\<tau>') \<in> set (eff i (class_add P (C, cdec)) pc xt (Some \<tau>)).
(class_add P (C, cdec)) \<turnstile> \<tau>' \<le>' \<tau>s!pc'"
proof -
from eff have ne: "\<forall>(pc', \<tau>')\<in>set (norm_eff i P pc \<tau>). P \<turnstile> \<tau>' \<le>' \<tau>s ! pc'"
by(simp add: norm_eff_def eff_def)
from eff have "\<forall>(pc', \<tau>')\<in>set (xcpt_eff i P pc \<tau> xt). P \<turnstile> \<tau>' \<le>' \<tau>s ! pc'"
by(simp add: xcpt_eff_def eff_def)
with class_add_norm_eff_sup_state_opt[OF ne nclass app]
class_add_xcpt_eff_eq[OF wf nclass]class_add_sup_state_opt[OF _ nclass]
show ?thesis by(cases cdec, auto simp: eff_def norm_eff_def xcpt_app_def)
qed
lemma class_add_app\<^sub>i:
assumes "app\<^sub>i (i, P, pc, mxs, T\<^sub>r, ST, LT)" and "\<not> is_class P C"
shows "app\<^sub>i (i, class_add P (C, cdec), pc, mxs, T\<^sub>r, ST, LT)"
using assms
proof(cases i)
case New then show ?thesis using assms by(fastforce simp: is_class_def class_def fun_upd_apply)
next
case Getfield then show ?thesis using assms
by(auto simp: class_add_subtype dest!: class_add_sees_field[where P=P])
next
case Getstatic then show ?thesis using assms by(auto dest!: class_add_sees_field[where P=P])
next
case Putfield then show ?thesis using assms
by(auto dest!: class_add_subtype[where P=P] class_add_sees_field[where P=P])
next
case Putstatic then show ?thesis using assms
by(auto dest!: class_add_subtype[where P=P] class_add_sees_field[where P=P])
next
case Checkcast then show ?thesis using assms
by(clarsimp simp: is_class_def class_def fun_upd_apply)
next
case Invoke then show ?thesis using assms
by(fastforce dest!: class_add_widens[where P=P] class_add_sees_method[where P=P])
next
case Invokestatic then show ?thesis using assms
by(fastforce dest!: class_add_widens[where P=P] class_add_sees_method[where P=P])
next
case Return then show ?thesis using assms by(clarsimp simp: class_add_subtype)
qed(simp+)
lemma class_add_xcpt_app:
assumes xa: "xcpt_app i P pc mxs xt \<tau>"
and wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "xcpt_app i (class_add P (C, cdec)) pc mxs xt \<tau>"
using xa class_add_relevant_entries_eq[OF wf nclass] nclass
by(auto simp: xcpt_app_def is_class_def class_def fun_upd_apply) auto
lemma class_add_app:
assumes app: "app i P mxs T pc mpc xt t"
and wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "app i (class_add P (C, cdec)) mxs T pc mpc xt t"
proof(cases t)
case (Some \<tau>)
let ?P = "class_add P (C, cdec)"
from assms Some have eff: "\<forall>(pc', \<tau>')\<in>set (eff i P pc xt \<lfloor>\<tau>\<rfloor>). pc' < mpc" by(simp add: app_def)
from assms Some have app\<^sub>i: "app\<^sub>i (i,P,pc,mxs,T,\<tau>)" by(simp add: app_def)
with class_add_app\<^sub>i[OF _ nclass] Some have "app\<^sub>i (i,?P,pc,mxs,T,\<tau>)" by(cases \<tau>,simp)
moreover
from app class_add_xcpt_app[OF _ wf nclass] Some
have "xcpt_app i ?P pc mxs xt \<tau>" by(simp add: app_def del: xcpt_app_def)
moreover
from app class_add_eff_pc[OF eff wf nclass] Some
have "\<forall>(pc',\<tau>') \<in> set (eff i ?P pc xt t). pc' < mpc" by auto
moreover note app Some
ultimately show ?thesis by(simp add: app_def)
qed(simp)
subsection "Well-formedness and well-typedness"
lemma class_add_wf_mdecl:
"\<lbrakk> wf_mdecl wf_md P C\<^sub>0 md;
\<And>C\<^sub>0 md. wf_md P C\<^sub>0 md \<Longrightarrow> wf_md (class_add P (C, cdec)) C\<^sub>0 md \<rbrakk>
\<Longrightarrow> wf_mdecl wf_md (class_add P (C, cdec)) C\<^sub>0 md"
by(clarsimp simp: wf_mdecl_def class_add_is_type)
lemma class_add_wf_mdecl':
assumes wfd: "wf_mdecl wf_md P C\<^sub>0 md"
and ms: "(C\<^sub>0,S,fs,ms) \<in> set P" and md: "md \<in> set ms"
and wf_md': "\<And>C\<^sub>0 S fs ms m.\<lbrakk>(C\<^sub>0,S,fs,ms) \<in> set P; m \<in> set ms\<rbrakk> \<Longrightarrow> wf_md' (class_add P (C, cdec)) C\<^sub>0 m"
shows "wf_mdecl wf_md' (class_add P (C, cdec)) C\<^sub>0 md"
using assms by(clarsimp simp: wf_mdecl_def class_add_is_type)
lemma class_add_wf_cdecl:
assumes wfcd: "wf_cdecl wf_md P cd" and cdP: "cd \<in> set P"
and ncp: "\<not> P \<turnstile> fst cd \<preceq>\<^sup>* C" and dist: "distinct_fst P"
and wfmd: "\<And>C\<^sub>0 md. wf_md P C\<^sub>0 md \<Longrightarrow> wf_md (class_add P (C, cdec)) C\<^sub>0 md"
and nclass: "\<not> is_class P C"
shows "wf_cdecl wf_md (class_add P (C, cdec)) cd"
proof -
let ?P = "class_add P (C, cdec)"
obtain C1 D fs ms where [simp]: "cd = (C1,(D,fs,ms))" by(cases cd)
from wfcd
have "\<forall>f\<in>set fs. wf_fdecl ?P f" by(auto simp: wf_cdecl_def wf_fdecl_def class_add_is_type)
moreover
from wfcd wfmd class_add_wf_mdecl
have "\<forall>m\<in>set ms. wf_mdecl wf_md ?P C1 m" by(auto simp: wf_cdecl_def)
moreover
have "C1 \<noteq> Object \<Longrightarrow> is_class ?P D \<and> \<not> ?P \<turnstile> D \<preceq>\<^sup>* C1
\<and> (\<forall>(M,b,Ts,T,m)\<in>set ms.
\<forall>D' b' Ts' T' m'. ?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D' \<longrightarrow>
b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T')"
proof -
assume nObj[simp]: "C1 \<noteq> Object"
with cdP dist have sub1: "P \<turnstile> C1 \<prec>\<^sup>1 D" by(auto simp: class_def intro!: subcls1I map_of_SomeI)
with ncp have ncp': "\<not> P \<turnstile> D \<preceq>\<^sup>* C" by(auto simp: converse_rtrancl_into_rtrancl)
with wfcd
have clsD: "is_class ?P D"
by(auto simp: wf_cdecl_def is_class_def class_def fun_upd_apply)
moreover
from wfcd sub1
have "\<not> ?P \<turnstile> D \<preceq>\<^sup>* C1" by(auto simp: wf_cdecl_def dest!: class_add_subcls_rev[OF _ ncp'])
moreover
have "\<And>M b Ts T m D' b' Ts' T' m'. (M,b,Ts,T,m) \<in> set ms
\<Longrightarrow> ?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'
\<Longrightarrow> b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T'"
proof -
fix M b Ts T m D' b' Ts' T' m'
assume ms: "(M,b,Ts,T,m) \<in> set ms" and meth': "?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'"
with sub1
have "P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'"
by(fastforce dest!: class_add_sees_method_rev[OF _ ncp'])
moreover
with wfcd ms meth'
have "b = b' \<and> P \<turnstile> Ts' [\<le>] Ts \<and> P \<turnstile> T \<le> T'"
by(cases m', fastforce simp: wf_cdecl_def elim!: ballE[where x="(M,b,Ts,T,m)"])
ultimately show "b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T'"
by(auto dest!: class_add_subtype[OF _ nclass] class_add_widens[OF _ nclass])
qed
ultimately show ?thesis by clarsimp
qed
moreover note wfcd
ultimately show ?thesis by(simp add: wf_cdecl_def)
qed
lemma class_add_wf_cdecl':
assumes wfcd: "wf_cdecl wf_md P cd" and cdP: "cd \<in> set P"
and ncp: "\<not>P \<turnstile> fst cd \<preceq>\<^sup>* C" and dist: "distinct_fst P"
and wfmd: "\<And>C\<^sub>0 S fs ms m.\<lbrakk>(C\<^sub>0,S,fs,ms) \<in> set P; m \<in> set ms\<rbrakk> \<Longrightarrow> wf_md' (class_add P (C, cdec)) C\<^sub>0 m"
and nclass: "\<not> is_class P C"
shows "wf_cdecl wf_md' (class_add P (C, cdec)) cd"
proof -
let ?P = "class_add P (C, cdec)"
obtain C1 D fs ms where [simp]: "cd = (C1,(D,fs,ms))" by(cases cd)
from wfcd
have "\<forall>f\<in>set fs. wf_fdecl ?P f" by(auto simp: wf_cdecl_def wf_fdecl_def class_add_is_type)
moreover
from cdP wfcd wfmd
have "\<forall>m\<in>set ms. wf_mdecl wf_md' ?P C1 m"
by(auto simp: wf_cdecl_def wf_mdecl_def class_add_is_type)
moreover
have "C1 \<noteq> Object \<Longrightarrow> is_class ?P D \<and> \<not> ?P \<turnstile> D \<preceq>\<^sup>* C1
\<and> (\<forall>(M,b,Ts,T,m)\<in>set ms.
\<forall>D' b' Ts' T' m'. ?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D' \<longrightarrow>
b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T')"
proof -
assume nObj[simp]: "C1 \<noteq> Object"
with cdP dist have sub1: "P \<turnstile> C1 \<prec>\<^sup>1 D" by(auto simp: class_def intro!: subcls1I map_of_SomeI)
with ncp have ncp': "\<not> P \<turnstile> D \<preceq>\<^sup>* C" by(auto simp: converse_rtrancl_into_rtrancl)
with wfcd
have clsD: "is_class ?P D"
by(auto simp: wf_cdecl_def is_class_def class_def fun_upd_apply)
moreover
from wfcd sub1
have "\<not> ?P \<turnstile> D \<preceq>\<^sup>* C1" by(auto simp: wf_cdecl_def dest!: class_add_subcls_rev[OF _ ncp'])
moreover
have "\<And>M b Ts T m D' b' Ts' T' m'. (M,b,Ts,T,m) \<in> set ms
\<Longrightarrow> ?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'
\<Longrightarrow> b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T'"
proof -
fix M b Ts T m D' b' Ts' T' m'
assume ms: "(M,b,Ts,T,m) \<in> set ms" and meth': "?P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'"
with sub1
have "P \<turnstile> D sees M,b':Ts' \<rightarrow> T' = m' in D'"
by(fastforce dest!: class_add_sees_method_rev[OF _ ncp'])
moreover
with wfcd ms meth'
have "b = b' \<and> P \<turnstile> Ts' [\<le>] Ts \<and> P \<turnstile> T \<le> T'"
by(cases m', fastforce simp: wf_cdecl_def elim!: ballE[where x="(M,b,Ts,T,m)"])
ultimately show "b = b' \<and> ?P \<turnstile> Ts' [\<le>] Ts \<and> ?P \<turnstile> T \<le> T'"
by(auto dest!: class_add_subtype[OF _ nclass] class_add_widens[OF _ nclass])
qed
ultimately show ?thesis by clarsimp
qed
moreover note wfcd
ultimately show ?thesis by(simp add: wf_cdecl_def)
qed
lemma class_add_wt_start:
"\<lbrakk> wt_start P C\<^sub>0 b Ts mxl \<tau>s; \<not> is_class P C \<rbrakk>
\<Longrightarrow> wt_start (class_add P (C, cdec)) C\<^sub>0 b Ts mxl \<tau>s"
using class_add_sup_state_opt by(clarsimp simp: wt_start_def split: staticb.splits)
lemma class_add_wt_instr:
assumes wti: "P,T,mxs,mpc,xt \<turnstile> i,pc :: \<tau>s"
and wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "class_add P (C, cdec),T,mxs,mpc,xt \<turnstile> i,pc :: \<tau>s"
proof -
let ?P = "class_add P (C, cdec)"
from wti have eff: "\<forall>(pc', \<tau>')\<in>set (eff i P pc xt (\<tau>s ! pc)). P \<turnstile> \<tau>' \<le>' \<tau>s ! pc'"
by(simp add: wt_instr_def)
from wti have app\<^sub>i: "\<tau>s!pc \<noteq> None \<Longrightarrow> app\<^sub>i (i,P,pc,mxs,T,the (\<tau>s!pc))"
by(simp add: wt_instr_def app_def)
from wti class_add_app[OF _ wf nclass]
have "app i ?P mxs T pc mpc xt (\<tau>s!pc)" by(simp add: wt_instr_def)
moreover
have "\<forall>(pc',\<tau>') \<in> set (eff i ?P pc xt (\<tau>s!pc)). ?P \<turnstile> \<tau>' \<le>' \<tau>s!pc'"
proof(cases "\<tau>s!pc")
case Some with eff class_add_eff_sup_state_opt[OF _ wf nclass app\<^sub>i] show ?thesis by auto
qed(simp add: eff_def)
moreover note wti
ultimately show ?thesis by(clarsimp simp: wt_instr_def)
qed
lemma class_add_wt_method:
assumes wtm: "wt_method P C\<^sub>0 b Ts T\<^sub>r mxs mxl\<^sub>0 is xt (\<Phi> C\<^sub>0 M\<^sub>0)"
and wf: "wf_prog wf_md P" and nclass: "\<not> is_class P C"
shows "wt_method (class_add P (C, cdec)) C\<^sub>0 b Ts T\<^sub>r mxs mxl\<^sub>0 is xt (\<Phi> C\<^sub>0 M\<^sub>0)"
proof -
let ?P = "class_add P (C, cdec)"
let ?\<tau>s = "\<Phi> C\<^sub>0 M\<^sub>0"
from wtm class_add_check_types
have "check_types ?P mxs ((case b of Static \<Rightarrow> 0 | NonStatic \<Rightarrow> 1)+size Ts+mxl\<^sub>0) (map OK ?\<tau>s)"
by(simp add: wt_method_def)
moreover
from wtm class_add_wt_start nclass
have "wt_start ?P C\<^sub>0 b Ts mxl\<^sub>0 ?\<tau>s" by(simp add: wt_method_def)
moreover
from wtm class_add_wt_instr[OF _ wf nclass]
have "\<forall>pc < size is. ?P,T\<^sub>r,mxs,size is,xt \<turnstile> is!pc,pc :: ?\<tau>s" by(clarsimp simp: wt_method_def)
moreover note wtm
ultimately
show ?thesis by(clarsimp simp: wt_method_def)
qed
lemma class_add_wt_method':
"\<lbrakk> (\<lambda>P C (M,b,Ts,T\<^sub>r,(mxs,mxl\<^sub>0,is,xt)). wt_method P C b Ts T\<^sub>r mxs mxl\<^sub>0 is xt (\<Phi> C M)) P C\<^sub>0 md;
wf_prog wf_md P; \<not> is_class P C \<rbrakk>
\<Longrightarrow> (\<lambda>P C (M,b,Ts,T\<^sub>r,(mxs,mxl\<^sub>0,is,xt)). wt_method P C b Ts T\<^sub>r mxs mxl\<^sub>0 is xt (\<Phi> C M))
(class_add P (C, cdec)) C\<^sub>0 md"
by(clarsimp simp: class_add_wt_method)
subsection \<open> @{text "distinct_fst"} \<close>
lemma class_add_distinct_fst:
"\<lbrakk> distinct_fst P; \<not> is_class P C \<rbrakk>
\<Longrightarrow> distinct_fst (class_add P (C, cdec))"
by(clarsimp simp: distinct_fst_def is_class_def class_def)
subsection "Conformance"
lemma class_add_conf:
"\<lbrakk> P,h \<turnstile> v :\<le> T; \<not> is_class P C \<rbrakk>
\<Longrightarrow> class_add P (C, cdec),h \<turnstile> v :\<le> T"
by(clarsimp simp: conf_def class_add_subtype)
lemma class_add_oconf:
fixes obj::obj
assumes oc: "P,h \<turnstile> obj \<surd>" and ns: "\<not> is_class P C"
and ncp: "\<And>D'. P \<turnstile> fst(obj) \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "(class_add P (C, cdec)),h \<turnstile> obj \<surd>"
proof -
obtain C\<^sub>0 fs where [simp]: "obj=(C\<^sub>0,fs)" by(cases obj)
from oc have
oc': "\<And>F D T. P \<turnstile> C\<^sub>0 has F,NonStatic:T in D \<Longrightarrow> (\<exists>v. fs (F, D) = \<lfloor>v\<rfloor> \<and> P,h \<turnstile> v :\<le> T)"
by(simp add: oconf_def)
have "\<And>F D T. class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,NonStatic:T in D
\<Longrightarrow> \<exists>v. fs(F,D) = Some v \<and> class_add P (C, cdec),h \<turnstile> v :\<le> T"
proof -
fix F D T assume "class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,NonStatic:T in D"
with class_add_has_field_rev[OF _ ncp] have meth: "P \<turnstile> C\<^sub>0 has F,NonStatic:T in D" by simp
then show "\<exists>v. fs(F,D) = Some v \<and> class_add P (C, cdec),h \<turnstile> v :\<le> T"
using oc'[OF meth] class_add_conf[OF _ ns] by(fastforce simp: oconf_def)
qed
then show ?thesis by(simp add: oconf_def)
qed
lemma class_add_soconf:
assumes soc: "P,h,C\<^sub>0 \<turnstile>\<^sub>s sfs \<surd>" and ns: "\<not> is_class P C"
and ncp: "\<And>D'. P \<turnstile> C\<^sub>0 \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "(class_add P (C, cdec)),h,C\<^sub>0 \<turnstile>\<^sub>s sfs \<surd>"
proof -
from soc have
oc': "\<And>F T. P \<turnstile> C\<^sub>0 has F,Static:T in C\<^sub>0 \<Longrightarrow> (\<exists>v. sfs F = \<lfloor>v\<rfloor> \<and> P,h \<turnstile> v :\<le> T)"
by(simp add: soconf_def)
have "\<And>F T. class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,Static:T in C\<^sub>0
\<Longrightarrow> \<exists>v. sfs F = Some v \<and> class_add P (C, cdec),h \<turnstile> v :\<le> T"
proof -
fix F T assume "class_add P (C, cdec) \<turnstile> C\<^sub>0 has F,Static:T in C\<^sub>0"
with class_add_has_field_rev[OF _ ncp] have meth: "P \<turnstile> C\<^sub>0 has F,Static:T in C\<^sub>0" by simp
then show "\<exists>v. sfs F = Some v \<and> class_add P (C, cdec),h \<turnstile> v :\<le> T"
using oc'[OF meth] class_add_conf[OF _ ns] by(fastforce simp: soconf_def)
qed
then show ?thesis by(simp add: soconf_def)
qed
lemma class_add_hconf:
assumes "P \<turnstile> h \<surd>" and "\<not> is_class P C"
and "\<And>a obj D'. h a = Some obj \<Longrightarrow> P \<turnstile> fst(obj) \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "class_add P (C, cdec) \<turnstile> h \<surd>"
using assms by(auto simp: hconf_def intro!: class_add_oconf)
lemma class_add_hconf_wf:
assumes wf: "wf_prog wf_md P" and "P \<turnstile> h \<surd>" and "\<not> is_class P C"
and "\<And>a obj. h a = Some obj \<Longrightarrow> fst(obj) \<noteq> C"
shows "class_add P (C, cdec) \<turnstile> h \<surd>"
using wf_subcls_nCls[OF wf] assms by(fastforce simp: hconf_def intro!: class_add_oconf)
lemma class_add_shconf:
assumes "P,h \<turnstile>\<^sub>s sh \<surd>" and ns: "\<not> is_class P C"
and "\<And>C sobj D'. sh C = Some sobj \<Longrightarrow> P \<turnstile> C \<preceq>\<^sup>* D' \<Longrightarrow> D' \<noteq> C"
shows "class_add P (C, cdec),h \<turnstile>\<^sub>s sh \<surd>"
using assms by(fastforce simp: shconf_def)
lemma class_add_shconf_wf:
assumes wf: "wf_prog wf_md P" and "P,h \<turnstile>\<^sub>s sh \<surd>" and "\<not> is_class P C"
and "\<And>C sobj. sh C = Some sobj \<Longrightarrow> C \<noteq> C"
shows "class_add P (C, cdec),h \<turnstile>\<^sub>s sh \<surd>"
using wf_subcls_nCls[OF wf] assms by(fastforce simp: shconf_def)
end |
theory Assertion_Semantics
imports Assertion_Lang Assertion_Misc
begin
section \<open>Semantics\<close>
text \<open>Defines the semantics of the assertion language formulae.\<close>
subsection \<open>Satisfaction predicate\<close>
text \<open>The satisfaction predicate describes the semantics of the assertion language.\<close>
fun eval :: "expr \<Rightarrow> stack \<Rightarrow> val" where
"eval (nil) s = Nilval" |
"eval (\<acute>x`) s = s x"
notation eval ("\<lbrakk>_\<rbrakk>_" [60, 61] 61)
text \<open>An ls segment is satisfied iff there exists a path of heap cells, each pointing
to the next, that forms a super list of the given segment.\<close>
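(* Intuitively, ls\<^sup>n(e1,e2) describes a chain of n heap cells: the first cell sits
   at the address denoted by e1, each cell stores the value reached next, and the
   final stored value equals the value of e2; the variables in xs serve as fresh
   names for these intermediate values. *)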
inductive ls_ind :: "state \<Rightarrow> nat \<Rightarrow> (expr \<times> expr) \<Rightarrow> bool" ("_\<Turnstile>ls\<^sup>__" 50) where
EmptyLs: "\<lbrakk>e1\<rbrakk>s = \<lbrakk>e2\<rbrakk>s \<Longrightarrow> dom h = {} \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>0(e1,e2)" |
ListSegment: "\<lbrakk>e1\<rbrakk>s = Val v' \<Longrightarrow> h1 = [v'\<mapsto>v] \<Longrightarrow> xs \<subseteq> -(fv e1 \<union> fv e2)
\<Longrightarrow> (\<forall>x \<in> xs. ((s(x:=v),h2)\<Turnstile>ls\<^sup>m(\<acute>x`,e2))) \<Longrightarrow> h1 \<bottom> h2 \<Longrightarrow> h = h1++h2 \<Longrightarrow> n = Suc m
\<Longrightarrow> \<lbrakk>e1\<rbrakk>s \<noteq> \<lbrakk>e2\<rbrakk>s \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
(* ListSegment: "\<lbrakk>e1\<rbrakk>s = Val v' \<Longrightarrow> h1 = [v'\<mapsto>v] \<Longrightarrow> x \<notin> fv e1 \<union> fv e2 \<Longrightarrow> (s(x:=v),h2)\<Turnstile>ls\<^sup>m(\<acute>x`,e2)
\<Longrightarrow> h1 \<bottom> h2 \<Longrightarrow> h = h1++h2 \<Longrightarrow> n = Suc m \<Longrightarrow> \<lbrakk>e1\<rbrakk>s \<noteq> \<lbrakk>e2\<rbrakk>s \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e1,e2)" *)
inductive satisfaction :: "state \<Rightarrow> formula \<Rightarrow> bool" (infix "\<Turnstile>" 50) where
EqSat: "\<lbrakk>e1\<rbrakk>s=\<lbrakk>e2\<rbrakk>s \<Longrightarrow> (s,h)\<Turnstile>Pure(e1=\<^sub>pe2)" |
NeqSat: "\<lbrakk>e1\<rbrakk>s\<noteq>\<lbrakk>e2\<rbrakk>s \<Longrightarrow> (s,h)\<Turnstile>Pure(e1\<noteq>\<^sub>pe2)" |
TrueSat: "(s,h)\<Turnstile>PureF []" |
ConjSat: "(s,h)\<Turnstile>Pure P \<Longrightarrow> (s,h)\<Turnstile>PureF \<Pi> \<Longrightarrow> (s,h)\<Turnstile>PureF(P \<and>\<^sub>p \<Pi>)" |
PointsToSat: "\<lbrakk>\<lbrakk>e1\<rbrakk>s = Val v; h = [v\<mapsto>\<lbrakk>e2\<rbrakk>s]\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>Spat(e1 \<longmapsto> e2)" |
EmpSat: "h = Map.empty \<Longrightarrow> (s,h)\<Turnstile>SpatF emp" |
SepConjSat: "h1 \<bottom> h2 \<Longrightarrow> h = h1++h2 \<Longrightarrow> (s,h1)\<Turnstile>Spat S \<Longrightarrow> (s,h2)\<Turnstile>SpatF \<Sigma>
\<Longrightarrow> (s,h)\<Turnstile>SpatF(S * \<Sigma>)" |
FormSat: "(s,h)\<Turnstile>PureF \<Pi> \<Longrightarrow> (s,h)\<Turnstile>SpatF \<Sigma> \<Longrightarrow> (s,h)\<Turnstile>(\<Pi> \<bar> \<Sigma>)" |
LsSat: "(s,h)\<Turnstile>ls\<^sup>n(e1,e2) \<Longrightarrow> (s,h)\<Turnstile>Spat(ls(e1,e2))"
declare ls_ind.intros[intro]
declare satisfaction.intros[intro]
lemmas ls_induct = ls_ind.induct[split_format(complete)]
lemmas sat_induct = satisfaction.induct[split_format(complete)]
inductive_cases [elim]: "(s,h)\<Turnstile>ls\<^sup>0(e1,e2)" "(s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
inductive_cases [elim]: "(s,h)\<Turnstile>Pure(e1=\<^sub>pe2)" "(s,h)\<Turnstile>Pure(e1\<noteq>\<^sub>pe2)" "(s,h)\<Turnstile>PureF []"
"(s,f)\<Turnstile>PureF(P \<and>\<^sub>p \<Pi>)" "(s,h)\<Turnstile>Spat(e1 \<longmapsto> e2)" "(s,h)\<Turnstile>SpatF emp" "(s,h)\<Turnstile>SpatF(S * \<Sigma>)"
"(s,h)\<Turnstile>(\<Pi> \<bar> \<Sigma>)" "(s,h)\<Turnstile>Spat(ls(e1,e2))"
subsection \<open>Satisfaction properties\<close>
text \<open>There are a number of helpful properties that follow from the satisfaction definition.\<close>
text \<open>Satisfaction is decidable, cf. Lemma 1 \cite{JoshBerdine.2004}.\<close>
corollary sat_decidable: "(s,h)\<Turnstile>F \<or> \<not> (s,h)\<Turnstile>F"
by simp
text \<open>Separating conjunction requires its conjuncts to hold on disjoint parts of the heap.\<close>
corollary sep_conj_ortho: "\<nexists>s h. (s,h) \<Turnstile> [\<acute>x`=\<^sub>p\<acute>y`] \<bar> [\<acute>x` \<longmapsto> xv, \<acute>y` \<longmapsto> yv]"
proof
assume "\<exists>s h. (s, h) \<Turnstile> [\<acute>x` =\<^sub>p \<acute>y`] \<bar> [\<acute>x` \<longmapsto> xv, \<acute>y` \<longmapsto> yv]"
then obtain s h where "(s,h) \<Turnstile> [\<acute>x`=\<^sub>p\<acute>y`] \<bar> [\<acute>x` \<longmapsto> xv, \<acute>y` \<longmapsto> yv]" by auto
hence "(s,h)\<Turnstile>PureF [\<acute>x`=\<^sub>p\<acute>y`]" and spatf: "(s,h) \<Turnstile> SpatF [\<acute>x` \<longmapsto> xv, \<acute>y` \<longmapsto> yv]" by auto
{
hence "(s,h)\<Turnstile>Pure(\<acute>x`=\<^sub>p\<acute>y`)" by auto
hence "\<lbrakk>\<acute>x`\<rbrakk>s = \<lbrakk>\<acute>y`\<rbrakk>s" by auto
}
from spatf obtain h1 h2 where "h1 \<bottom> h2" "h = h1++h2" "(s,h1)\<Turnstile>Spat (\<acute>x`\<longmapsto>xv)"
"(s,h2)\<Turnstile>SpatF [\<acute>y`\<longmapsto>yv]" by blast
from \<open>(s,h1)\<Turnstile>Spat (\<acute>x`\<longmapsto>xv)\<close> obtain v where "\<lbrakk>\<acute>x`\<rbrakk>s = Val v" "dom h1 = {v}" by auto
from \<open>(s,h2)\<Turnstile>SpatF [\<acute>y`\<longmapsto>yv]\<close> obtain h3 h4 where "h3 \<bottom> h4" "h2 = h3++h4"
"(s,h3)\<Turnstile>Spat(\<acute>y`\<longmapsto>yv)" by auto
then obtain v' where "\<lbrakk>\<acute>y`\<rbrakk>s = Val v'" "dom h3 = {v'}" by auto
with \<open>\<lbrakk>\<acute>x`\<rbrakk>s = Val v\<close> \<open>\<lbrakk>\<acute>x`\<rbrakk>s = \<lbrakk>\<acute>y`\<rbrakk>s\<close> have "v = v'" by simp
with \<open>dom h3 = {v'}\<close> \<open>dom h1 = {v}\<close> have "\<not> h3 \<bottom> h1" by simp
hence "\<not> (h3++h4) \<bottom> h1" using ortho_distr by auto
with \<open>h2 = h3++h4\<close> have "\<not> h1 \<bottom> h2" using ortho_commut by metis
with \<open>h1 \<bottom> h2\<close> show False by simp
qed
text \<open>Order in pure formulae does not matter.\<close>
corollary pure_commut: "(s,h)\<Turnstile>PureF(p1\<and>\<^sub>pp2\<and>\<^sub>p\<Pi>) \<longleftrightarrow> (s,h)\<Turnstile>PureF(p2\<and>\<^sub>pp1\<and>\<^sub>p\<Pi>)" by auto
corollary pure_commut_form: "(s,h)\<Turnstile>(p1\<and>\<^sub>pp2\<and>\<^sub>p\<Pi>)\<bar>\<Sigma> \<Longrightarrow> (s,h)\<Turnstile>(p2\<and>\<^sub>pp1\<and>\<^sub>p\<Pi>)\<bar>\<Sigma>"
using pure_commut by force
text \<open>A spatial formula consisting of a single points-to assertion is satisfied only by a singleton heap.\<close>
corollary sing_heap: "(s,h)\<Turnstile>SpatF[x\<longmapsto>y] \<longleftrightarrow> (s,h)\<Turnstile>Spat(x\<longmapsto>y) \<and> (\<exists> v v'. \<lbrakk>x\<rbrakk>s = Val v \<and>
\<lbrakk>y\<rbrakk>s = v' \<and> h = [v\<mapsto>v'])" (is "?lhs \<longleftrightarrow> ?rhs")
proof
assume "?lhs"
hence spat: "(s, h) \<Turnstile> Spat (x \<longmapsto> y)" by fastforce
moreover then obtain v v' where "\<lbrakk>x\<rbrakk>s = Val v" "\<lbrakk>y\<rbrakk>s= v'" by blast
moreover with spat have "h = [v\<mapsto>v']" by fastforce
ultimately show "?rhs" by simp
next
assume "?rhs"
moreover have "h \<bottom> Map.empty" by simp
ultimately have "(s,h++Map.empty)\<Turnstile>SpatF[x\<longmapsto>y]" by blast
thus "?lhs" by simp
qed
text \<open>Order in spatial formulae does not matter.\<close>
corollary spatial_commut: "(s,h)\<Turnstile>SpatF(s1*s2*\<Sigma>) \<longleftrightarrow> (s,h)\<Turnstile>SpatF(s2*s1*\<Sigma>)" (is "?P s1 s2 \<longleftrightarrow> ?p s2 s1")
proof
assume "?P s1 s2"
then obtain h1 h2 where h:"h1 \<bottom> h2 \<and> h = h1++h2" and s1:"(s,h1)\<Turnstile>Spat s1" and "(s,h2)\<Turnstile>SpatF (s2*\<Sigma>)"
by auto
then obtain h3 h4 where h2:"h3 \<bottom> h4 \<and> h2 = h3++h4" and s2: "(s,h3)\<Turnstile>Spat s2" and \<sigma>: "(s,h4)\<Turnstile>SpatF \<Sigma>"
by auto
from h h2 have "h4 \<bottom> h1" by auto
moreover then obtain h2' where h2': "h2' = h1++h4" by simp
ultimately have "(s,h2')\<Turnstile>SpatF(s1*\<Sigma>)" using s1 \<sigma> by auto
moreover from h h2 h2' have "h3 \<bottom> h2'" by auto
moreover with h h2 h2' have "h=h3++h2'" by (metis map_add_assoc map_add_comm)
ultimately show "?P s2 s1" using s2 by auto
next
assume "?P s2 s1"
then obtain h1 h2 where h:"h1 \<bottom> h2 \<and> h = h1++h2" and s2:"(s,h1)\<Turnstile>Spat s2" and "(s,h2)\<Turnstile>SpatF (s1*\<Sigma>)"
by auto
then obtain h3 h4 where h2:"h3 \<bottom> h4 \<and> h2 = h3++h4" and s1:"(s,h3)\<Turnstile>Spat s1" and \<sigma>: "(s,h4)\<Turnstile>SpatF \<Sigma>"
by auto
from h h2 have "h4 \<bottom> h1" by auto
moreover then obtain h2' where h2': "h2' = h1++h4" by simp
ultimately have "(s,h2')\<Turnstile>SpatF(s2*\<Sigma>)" using s2 \<sigma> by auto
moreover from h h2 h2' have "h3 \<bottom> h2'" by auto
moreover with h h2 h2' have "h=h3++h2'" by (metis map_add_assoc map_add_comm)
ultimately show "?P s1 s2" using s1 by auto
qed
corollary spatial_commut_form: "(s,h)\<Turnstile>\<Pi>\<bar>(s1*s2*\<Sigma>) \<Longrightarrow> (s,h)\<Turnstile>\<Pi>\<bar>(s2*s1*\<Sigma>)"
using spatial_commut by force
text \<open>An empty list segment is equivalent to an empty heap.\<close>
corollary empty_ls: "(s,h)\<Turnstile>SpatF emp \<longleftrightarrow> (s,h)\<Turnstile>Spat(ls(x,x))"
proof
assume "(s, h) \<Turnstile> SpatF emp"
hence "dom h = {}" by blast
hence "(s,h)\<Turnstile>ls\<^sup>0(x,x)" by blast
thus "(s, h) \<Turnstile> Spat (ls(x, x))" by blast
next
assume "(s, h) \<Turnstile> Spat (ls(x, x))"
then obtain n where "(s,h)\<Turnstile>ls\<^sup>n(x,x)" by auto
hence "n=0" "dom h = {}" by auto
thus "(s, h) \<Turnstile> SpatF emp" by auto
qed
text \<open>Due to this theorem, circular list segments can only be formulated as follows:\<close>
term "\<acute>x`\<longmapsto>\<acute>y` * ls(\<acute>y`,\<acute>y`) * emp"
text \<open>The heap has no influence on the satisfaction of a pure formula.\<close>
corollary heap_pure: "(s,h)\<Turnstile>Pure P \<Longrightarrow>\<forall>h'. (s,h')\<Turnstile>Pure P"
by (induction s h "Pure P" rule: sat_induct) auto
corollary heap_puref: "(s,h)\<Turnstile>PureF \<Pi> \<Longrightarrow>\<forall>h'. (s,h')\<Turnstile>PureF \<Pi>"
proof (induction s h "PureF \<Pi>" arbitrary: \<Pi> rule: sat_induct)
case (TrueSat s h)
then show ?case by fast
next
case (ConjSat s h P \<Pi>')
then show ?case using heap_pure by blast
qed
text \<open>Evaluation does not rely on unrelated variable values.\<close>
corollary eval_notin[simp]: "x \<notin> fv e \<Longrightarrow> \<lbrakk>e\<rbrakk>s=\<lbrakk>e\<rbrakk>s(x:=v)"
by (cases e) auto
text \<open>Satisfaction of a list segment depends on the stack only through the values of its two expressions.\<close>
corollary ls_stack_relation: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(e1,e2); \<lbrakk>e1\<rbrakk>s=\<lbrakk>e1\<rbrakk>t; \<lbrakk>e2\<rbrakk>s=\<lbrakk>e2\<rbrakk>t\<rbrakk> \<Longrightarrow> (t,h)\<Turnstile>ls\<^sup>n(e1,e2)"
proof (induction arbitrary: t rule: ls_induct)
case (EmptyLs e1 s e2 h)
then show ?case by auto
next
case (ListSegment e1 s v' h1 v xs e2 h2 m h n)
from ListSegment.hyps(1) ListSegment.prems(1) have e1: "\<lbrakk>e1\<rbrakk>t = Val v'" by simp
from ListSegment.hyps(7) ListSegment.prems have neq: "\<lbrakk>e1\<rbrakk>t \<noteq> \<lbrakk>e2\<rbrakk>t" by simp
have "\<forall>x \<in> xs. (t(x:=v),h2)\<Turnstile>ls\<^sup>m(\<acute>x`, e2)"
proof
fix x :: var
assume assm: "x \<in> xs"
with ListSegment.IH have aux:
"\<lbrakk>\<acute>x`\<rbrakk>s(x := v) = \<lbrakk>\<acute>x`\<rbrakk>xa \<Longrightarrow> \<lbrakk>e2\<rbrakk>s(x := v) = \<lbrakk>e2\<rbrakk>xa \<Longrightarrow> (xa, h2)\<Turnstile>ls\<^sup>m(\<acute>x`, e2)" for xa
by blast
have "\<lbrakk>\<acute>x`\<rbrakk>s(x:=v) = \<lbrakk>\<acute>x`\<rbrakk>t(x:=v)" "\<lbrakk>e2\<rbrakk>s(x:=v) = \<lbrakk>e2\<rbrakk>t(x:=v)" using assm ListSegment.prems
apply simp using assm ListSegment.prems ListSegment(3)
by (metis ComplD UnCI eval_notin subsetD)
from aux[OF this] show "(t(x:=v),h2)\<Turnstile>ls\<^sup>m(\<acute>x`, e2)" .
qed
from ls_ind.ListSegment[OF e1 ListSegment(2-3) this ListSegment(4-6) neq] show ?case .
qed
lemma ls_extend_lhs: "\<lbrakk>(s(x:=v),h)\<Turnstile>ls\<^sup>n(e1,e2); x \<notin> fv e1 \<union> fv e2\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
proof -
assume assm1: "(s(x:=v),h)\<Turnstile>ls\<^sup>n(e1,e2)"
assume assm2: "x \<notin> fv e1 \<union> fv e2"
hence "\<lbrakk>e1\<rbrakk>s(x:=v) = \<lbrakk>e1\<rbrakk>s" "\<lbrakk>e2\<rbrakk>s(x:=v) = \<lbrakk>e2\<rbrakk>s" using eval_notin by fastforce+
from ls_stack_relation[OF assm1 this] show ?thesis .
qed
lemma ls_extend_rhs: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(e1,e2); x \<notin> fv e1 \<union> fv e2\<rbrakk> \<Longrightarrow> (s(x:=v),h)\<Turnstile>ls\<^sup>n(e1,e2)"
proof -
assume assm1: "(s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
assume assm2: "x \<notin> fv e1 \<union> fv e2"
hence "\<lbrakk>e1\<rbrakk>s = \<lbrakk>e1\<rbrakk>s(x:=v) " "\<lbrakk>e2\<rbrakk>s = \<lbrakk>e2\<rbrakk>s(x:=v)" using eval_notin by fastforce+
from ls_stack_relation[OF assm1 this] show ?thesis .
qed
corollary ls_extend: "x \<notin> fv e1 \<union> fv e2 \<Longrightarrow> ((s,h)\<Turnstile>ls\<^sup>n(e1,e2)) = ((s(x:=v),h)\<Turnstile>ls\<^sup>n(e1,e2))"
using ls_extend_lhs ls_extend_rhs by metis
text \<open>The following lemmata are used to prove the substitution rule:\<close>
lemma subst_expr: "\<lbrakk>\<acute>x`\<rbrakk>s = \<lbrakk>E\<rbrakk>s \<Longrightarrow> \<lbrakk>subst x E e\<rbrakk>s = \<lbrakk>e\<rbrakk>s"
using subst_expr.elims by metis
lemma ls_change_fst: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(a,e); \<lbrakk>a\<rbrakk>s=\<lbrakk>b\<rbrakk>s\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(b,e)"
proof (induction rule: ls_induct)
case (EmptyLs e1 s e2 h)
then show ?case by auto
next
case (ListSegment a s v h1 v' xs e h2 h m n)
hence b: "\<lbrakk>b\<rbrakk>s = Val v" by metis
define xs' where xs': "xs' = xs - fv b"
with ListSegment(3) have "xs' \<subseteq> - (fv a \<union> fv e) - fv b" by auto
hence xs'_sub: "xs' \<subseteq> - (fv b \<union> fv e)" by auto
have ih: "\<forall>x\<in> xs'. (s(x := v'), h2)\<Turnstile>ls\<^sup>h(\<acute>x`, e)"
proof
fix x
assume "x \<in> xs'"
with xs' have "x \<in> xs" by simp
thus "(s(x := v'), h2)\<Turnstile>ls\<^sup>h(\<acute>x`, e)" using ListSegment.IH by simp
qed
from ListSegment.prems(1) ListSegment.hyps(7) have "\<lbrakk>b\<rbrakk>s \<noteq> \<lbrakk>e\<rbrakk>s" by simp
from ls_ind.ListSegment[OF b ListSegment.hyps(2) xs'_sub ih ListSegment.hyps(4-6) this] show ?case .
qed
lemma ls_change_snd: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(e,a); \<lbrakk>a\<rbrakk>s = \<lbrakk>b\<rbrakk>s\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e,b)"
proof (induction rule: ls_induct)
case (EmptyLs e1' s e2' h)
then show ?case by auto
next
case (ListSegment e s v h1 v' xs a h2 h m n)
define xs' where xs': "xs' = xs - fv b"
with ListSegment(3) have "xs' \<subseteq> - (fv e \<union> fv a) - fv b" by auto
hence xs'_sub: "xs' \<subseteq> - (fv e \<union> fv b)" by auto
have ih: "\<forall>x\<in> xs'. (s(x := v'), h2)\<Turnstile>ls\<^sup>h(\<acute>x`, b)"
proof
fix x
assume x: "x \<in> xs'"
with xs' have "x \<notin> fv b" by simp
moreover from x xs' ListSegment(3) have "x \<notin> fv a" by auto
ultimately have "\<lbrakk>b\<rbrakk>s(x := v') = \<lbrakk>b\<rbrakk>s" "\<lbrakk>a\<rbrakk>s(x := v') = \<lbrakk>a\<rbrakk>s" using eval_notin by metis+
with ListSegment.prems have "\<lbrakk>a\<rbrakk>s(x := v') = \<lbrakk>b\<rbrakk>s(x := v')" by simp
from x xs' have "x \<in> xs" by simp
hence "\<lbrakk>a\<rbrakk>s(x := v') = \<lbrakk>b\<rbrakk>s(x := v') \<Longrightarrow> (s(x := v'), h2)\<Turnstile>ls\<^sup>h(\<acute>x`, b)" using ListSegment.IH
by blast
from this[OF \<open>\<lbrakk>a\<rbrakk>s(x := v') = \<lbrakk>b\<rbrakk>s(x := v')\<close>] show "(s(x := v'), h2)\<Turnstile>ls\<^sup>h(\<acute>x`, b)" .
qed
from ListSegment.prems ListSegment.hyps(7) have "\<lbrakk>e\<rbrakk>s \<noteq> \<lbrakk>b\<rbrakk>s" by simp
from ls_ind.ListSegment[OF ListSegment(1-2) xs'_sub ih ListSegment(4-6) this] show ?case .
qed
lemma subst_sat_ls: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(e1',e2'); e1' = subst x E e1; e2' = subst x E e2; \<lbrakk>\<acute>x`\<rbrakk>s=\<lbrakk>E\<rbrakk>s\<rbrakk>
\<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
using ls_change_snd ls_change_fst subst_expr by metis
lemma subst_sat:"\<lbrakk>(s,h)\<Turnstile>F'; F'=subst x E F; \<lbrakk>\<acute>x`\<rbrakk>s=\<lbrakk>E\<rbrakk>s\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>F"
proof (induction arbitrary: F rule: sat_induct)
case (EqSat e1 s e2 h)
from EqSat.prems(1) obtain e3 e4 where F: "F = Pure (e3=\<^sub>pe4)"
using subst_distinct_pure1 subst_distinct_formula2
by (metis formula.inject(1) subst_formula.simps(1))
with EqSat.prems(1) have e1: "e1 = subst x E e3" and e2: "e2 = subst x E e4" by simp_all
then show ?case proof (cases "\<acute>x`=e3")
case True
with e1 have "e1 = E" by auto
then show ?thesis using EqSat by (metis F True e2 satisfaction.EqSat subst_not_eq_expr)
next
case False
then show ?thesis proof (cases "\<acute>x`=e4")
case True
with e2 have "e2 = E" by auto
then show ?thesis using EqSat F False True by auto
next
case False
with \<open>\<acute>x`\<noteq>e3\<close> F have "x \<notin> fv F" by (metis Un_iff empty_iff fv_expr.simps(1) fv_expr.simps(2)
fv_formula.simps(1) fv_pure.simps(1) insert_iff subst_expr.elims)
then show ?thesis using subst_not_free_formula EqSat.hyps F e1 e2 by auto
qed
qed
next
case (NeqSat e1 s e2 h)
from NeqSat.prems(1) obtain e3 e4 where F: "F = Pure (e3\<noteq>\<^sub>pe4)"
using subst_distinct_pure2 subst_distinct_formula2
by (metis formula.inject(1) subst_formula.simps(1))
with NeqSat.prems(1) have e1: "e1 = subst x E e3" and e2: "e2 = subst x E e4" by simp_all
then show ?case proof (cases "\<acute>x`=e3")
case True
with e1 have "e1 = E" by auto
then show ?thesis using NeqSat by (metis F True e1 e2 satisfaction.NeqSat subst_not_eq_expr)
next
case False
then show ?thesis proof (cases "\<acute>x`=e4")
case True
with e2 have "e2 = E" by auto
then show ?thesis using NeqSat F False True by auto
next
case False
with \<open>\<acute>x`\<noteq>e3\<close> F have "x \<notin> fv F" by (smt Un_iff fv_expr.simps(1) fv_expr.simps(2)
fv_formula.simps(1) fv_pure.simps(2) insert_absorb insert_iff insert_not_empty
subst_expr.elims)
then show ?thesis using subst_not_free_formula NeqSat.hyps F e1 e2 by auto
qed
qed
next
case (TrueSat s h)
then show ?case using subst_preserve_True satisfaction.TrueSat by metis
next
case (ConjSat s h P \<Pi>)
from ConjSat.prems(1) obtain P' \<Pi>' where F: "F = PureF (P'\<and>\<^sub>p\<Pi>')"
using subst_distinct_formula1 subst_distinct_puref
by (metis formula.inject(2) subst_formula.simps(2))
with ConjSat.prems(1) have "Pure P = subst x E (Pure P')" "PureF \<Pi> = subst x E (PureF \<Pi>')"
by simp_all
from ConjSat.IH(1)[OF this(1) ConjSat.prems(2)] ConjSat.IH(2)[OF this(2) ConjSat.prems(2)] F
show ?case by auto
next
case (PointsToSat e1 s v h e2)
then show ?case proof (cases "x \<in> fv F")
case True
then show ?thesis using PointsToSat
by (smt formula.inject(3) satisfaction.PointsToSat spatial.inject(1) subst_distinct_formula4
subst_distinct_spat1 subst_expr.simps(2) subst_formula.simps(3) subst_not_eq_expr
subst_spatial.simps(1))
next
case False
show ?thesis using subst_not_free_formula[OF False] PointsToSat by fastforce
qed
next
case (EmpSat h s)
then show ?case using satisfaction.EmpSat subst_preserve_emp by metis
next
case (SepConjSat h1 h2 h s S \<Sigma>)
from SepConjSat.prems(1) obtain S' \<Sigma>' where F: "F = SpatF (S'*\<Sigma>')"
using subst_distinct_spatf subst_distinct_formula3
by (metis formula.inject(4) subst_formula.simps(4))
with SepConjSat.prems(1) have "Spat S = subst x E (Spat S')" "SpatF \<Sigma> = subst x E (SpatF \<Sigma>')"
by simp_all
with SepConjSat.IH SepConjSat.prems(2) have "(s,h1)\<Turnstile>Spat S'" "(s,h2)\<Turnstile>SpatF \<Sigma>'" by simp_all
then show ?case using F SepConjSat.hyps by blast
next
case (FormSat s h \<Pi> \<Sigma>)
from FormSat.prems(1) obtain \<Pi>' \<Sigma>' where F:"F = \<Pi>'\<bar>\<Sigma>'" using subst_distinct_formula5 by metis
with FormSat.prems(1) have "substl x E \<Pi>' = \<Pi>" and "substl x E \<Sigma>' = \<Sigma>" by simp_all
with FormSat.IH FormSat.prems(2) have "(s,h)\<Turnstile>PureF \<Pi>'" and "(s,h)\<Turnstile>SpatF \<Sigma>'" by simp_all
with F show ?case by auto
next
case (LsSat s h n e1 e2)
from LsSat(2) obtain e1' e2' where F:"F = Spat (ls(e1', e2'))" using subst_distinct_formula4
by (metis formula.inject(3) subst_distinct_spat2 subst_formula.simps(3))
with LsSat(2) have "e1 = subst x E e1'" "e2 = subst x E e2'" by simp_all
from subst_sat_ls[OF LsSat(1) this LsSat(3)] show ?case using F by auto
qed
lemma subst_sat_ls_rev: "\<lbrakk>(s,h)\<Turnstile>ls\<^sup>n(e1',e2'); e1 = subst x E e1'; e2 = subst x E e2'; \<lbrakk>\<acute>x`\<rbrakk>s=\<lbrakk>E\<rbrakk>s\<rbrakk>
\<Longrightarrow> (s,h)\<Turnstile>ls\<^sup>n(e1,e2)"
using ls_change_snd ls_change_fst subst_expr by metis
lemma subst_sat_rev:"\<lbrakk>(s,h)\<Turnstile>F; \<lbrakk>\<acute>x`\<rbrakk>s=\<lbrakk>E\<rbrakk>s\<rbrakk> \<Longrightarrow> (s,h)\<Turnstile>subst x E F"
proof (induction rule: sat_induct)
case (EqSat e1 s e2 h)
then show ?case by (metis satisfaction.EqSat subst_expr.simps(2) subst_formula.simps(1)
subst_not_eq_expr subst_pure.simps(1))
next
case (NeqSat e1 s e2 h)
then show ?case by (metis satisfaction.NeqSat subst_expr.simps(2) subst_formula.simps(1)
subst_not_eq_expr subst_pure.simps(2))
next
case (PointsToSat e1 s v h e2)
then show ?case by (smt satisfaction.PointsToSat subst_expr.simps(2) subst_formula.simps(3)
subst_not_eq_expr subst_spatial.simps(1))
next
case (LsSat s h n e1 e2)
obtain e1' e2' where F: "subst x E (Spat (ls(e1, e2))) = Spat (ls(e1', e2'))" by simp
hence "e1' = subst x E e1" "e2' = subst x E e2" by simp_all
from subst_sat_ls_rev[OF LsSat(1) this LsSat(2)] show ?case using F by auto
qed auto
lemma subst_sat_eq: "\<lbrakk>F'=subst x E F; \<lbrakk>\<acute>x`\<rbrakk>s=\<lbrakk>E\<rbrakk>s\<rbrakk> \<Longrightarrow> ((s,h)\<Turnstile>F') = ((s,h)\<Turnstile>F)"
using subst_sat subst_sat_rev by fast
end
State Before:
α : Type u_1
m : MeasurableSpace α
f : α → α
s : Set α
μ : MeasureTheory.Measure α
hf : QuasiErgodic f
hs : MeasurableSet s
hs' : f ⁻¹' s =ᶠ[ae μ] s
⊢ s =ᶠ[ae μ] ∅ ∨ s =ᶠ[ae μ] univ

State After:
case intro.intro.intro
α : Type u_1
m : MeasurableSpace α
f : α → α
s : Set α
μ : MeasureTheory.Measure α
hf : QuasiErgodic f
hs : MeasurableSet s
hs' : f ⁻¹' s =ᶠ[ae μ] s
t : Set α
h₀ : MeasurableSet t
h₁ : t =ᶠ[ae μ] s
h₂ : f ⁻¹' t = t
⊢ s =ᶠ[ae μ] ∅ ∨ s =ᶠ[ae μ] univ

Tactic:
obtain ⟨t, h₀, h₁, h₂⟩ := hf.toQuasiMeasurePreserving.exists_preimage_eq_of_preimage_ae hs hs'

State Before:
case intro.intro.intro
α : Type u_1
m : MeasurableSpace α
f : α → α
s : Set α
μ : MeasureTheory.Measure α
hf : QuasiErgodic f
hs : MeasurableSet s
hs' : f ⁻¹' s =ᶠ[ae μ] s
t : Set α
h₀ : MeasurableSet t
h₁ : t =ᶠ[ae μ] s
h₂ : f ⁻¹' t = t
⊢ s =ᶠ[ae μ] ∅ ∨ s =ᶠ[ae μ] univ

State After:
no goals

Tactic:
rcases hf.ae_empty_or_univ h₀ h₂ with (h₃ | h₃) <;> [left; right] <;> exact ae_eq_trans h₁.symm h₃
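The two records above can be reassembled into a single Lean 4 theorem. The sketch below does so: the statement, the hypothesis names, and both tactics are copied from the trace, while the import path, the open commands, and the explicit measure argument in QuasiErgodic f μ (printed as QuasiErgodic f in the trace) are assumptions about the surrounding Mathlib context.

-- Hedged reassembly of the proof-state trace above into one standalone theorem.
-- Assumed: the import path, the `open` commands, and the explicit measure
-- argument of `QuasiErgodic`; the statement and tactics come from the trace.
import Mathlib.Dynamics.Ergodic.Ergodic

open MeasureTheory Filter Set

example {α : Type*} [MeasurableSpace α] {f : α → α} {s : Set α} {μ : Measure α}
    (hf : QuasiErgodic f μ) (hs : MeasurableSet s) (hs' : f ⁻¹' s =ᶠ[ae μ] s) :
    s =ᶠ[ae μ] (∅ : Set α) ∨ s =ᶠ[ae μ] univ := by
  -- First record: obtain a measurable, strictly invariant t with t =ᶠ[ae μ] s.
  obtain ⟨t, h₀, h₁, h₂⟩ :=
    hf.toQuasiMeasurePreserving.exists_preimage_eq_of_preimage_ae hs hs'
  -- Second record: the ergodic dichotomy for t, transported back to s along h₁.
  rcases hf.ae_empty_or_univ h₀ h₂ with (h₃ | h₃) <;> [left; right] <;>
    exact ae_eq_trans h₁.symm h₃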
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e321m9_7limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
open import Agda.Builtin.Equality
record IsGroup (G : Set) : Set where
field _∙_ : G → G → G
open IsGroup ⦃ ... ⦄
record Group : Set₁ where
field G : Set
⦃ IsG ⦄ : IsGroup G
open Group using () renaming (G to [_])
variable
G : Group
postulate
works : ∀ {G} → (x : [ G ]) → (x ∙ x) ≡ x
fails : (x : [ G ]) → (x ∙ x) ≡ x
-- WAS: No instance of type IsGroup [ G ]