Datasets: AI4M
[STATEMENT]
lemma f'_base: "x \<ge> x\<^sub>0 \<Longrightarrow> x \<le> x\<^sub>1 \<Longrightarrow> f' x \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> 0 \<le> f' x
[PROOF STEP]
apply (subst f'.simps(1), assumption)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> 0 \<le> f (nat \<lfloor>x\<rfloor>)
[PROOF STEP]
apply (rule f_base)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> x\<^sub>0 \<le> real (nat \<lfloor>x\<rfloor>)
2. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> real (nat \<lfloor>x\<rfloor>) \<le> x\<^sub>1
[PROOF STEP]
apply (rule order.trans[of _ "real (nat \<lfloor>x\<^sub>0\<rfloor>)"], simp add: x0_int)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> real (nat \<lfloor>x\<^sub>0\<rfloor>) \<le> real (nat \<lfloor>x\<rfloor>)
2. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> real (nat \<lfloor>x\<rfloor>) \<le> x\<^sub>1
[PROOF STEP]
apply (subst of_nat_le_iff, intro nat_mono floor_mono, assumption)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> real (nat \<lfloor>x\<rfloor>) \<le> x\<^sub>1
[PROOF STEP]
using x0_pos
[PROOF STATE]
proof (prove)
using this:
0 < x\<^sub>0
goal (1 subgoal):
1. \<lbrakk>x\<^sub>0 \<le> x; x \<le> x\<^sub>1\<rbrakk> \<Longrightarrow> real (nat \<lfloor>x\<rfloor>) \<le> x\<^sub>1
[PROOF STEP]
apply linarith
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
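The apply script above compresses a small arithmetic argument. A sketch in ordinary notation (here x0_int and x0_pos are context facts visible in the trace, apparently stating that x₀ is integer-valued and that 0 < x₀):

\[
x_0 \;=\; \mathrm{real}(\mathrm{nat}\,\lfloor x_0\rfloor) \;\le\; \mathrm{real}(\mathrm{nat}\,\lfloor x\rfloor)
\quad\text{(monotonicity of } \lfloor\cdot\rfloor \text{ and } \mathrm{nat}\text{, since } x_0 \le x\text{)},
\]
\[
\mathrm{real}(\mathrm{nat}\,\lfloor x\rfloor) \;=\; \lfloor x\rfloor \;\le\; x \;\le\; x_1
\quad\text{(since } 0 < x_0 \le x\text{)},
\]

so nat ⌊x⌋ lies in [x₀, x₁], and f_base yields 0 ≤ f (nat ⌊x⌋) = f' x.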
[SSW Rules] Do you give each project a project page (that you refer customers to)?
[GOAL] C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) ⊢ ∀ (j : Discrete WalkingPair), (fun s => lift I (BinaryFan.swap s)) s ≫ NatTrans.app (BinaryFan.swap t).π j = NatTrans.app s.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) ⊢ (fun s => lift I (BinaryFan.swap s)) s ≫ NatTrans.app (BinaryFan.swap t).π { as := WalkingPair.left } = NatTrans.app s.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) ⊢ (fun s => lift I (BinaryFan.swap s)) s ≫ NatTrans.app (BinaryFan.swap t).π { as := WalkingPair.right } = NatTrans.app s.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.swap t).π j = NatTrans.app s.π j ⊢ m = (fun s => lift I (BinaryFan.swap s)) s [PROOFSTEP] have h := I.uniq (BinaryFan.swap s) m [GOAL] C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.swap t).π j = NatTrans.app s.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) ⊢ m = (fun s => lift I (BinaryFan.swap s)) s [PROOFSTEP] rw [h] [GOAL] C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.swap t).π j = NatTrans.app s.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) ⊢ ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j [PROOFSTEP] rintro ⟨j⟩ [GOAL] case mk C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.swap t).π j = NatTrans.app s.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) j : WalkingPair ⊢ m ≫ NatTrans.app t.π { as := j } = NatTrans.app (BinaryFan.swap s).π { as := j } [PROOFSTEP] specialize w ⟨WalkingPair.swap j⟩ [GOAL] case mk C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) j : WalkingPair w : m ≫ NatTrans.app (BinaryFan.swap t).π { as := ↑WalkingPair.swap j } = NatTrans.app s.π { as := ↑WalkingPair.swap j } ⊢ m ≫ NatTrans.app t.π { as := j } = NatTrans.app (BinaryFan.swap s).π { as := j } [PROOFSTEP] cases j [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) w : m ≫ NatTrans.app (BinaryFan.swap t).π { as := ↑WalkingPair.swap WalkingPair.left } = NatTrans.app s.π { as := ↑WalkingPair.swap WalkingPair.left } ⊢ 
m ≫ NatTrans.app t.π { as := WalkingPair.left } = NatTrans.app (BinaryFan.swap s).π { as := WalkingPair.left } [PROOFSTEP] exact w [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y P Q : C t : BinaryFan P Q I : IsLimit t s : Cone (pair Q P) m : s.pt ⟶ (BinaryFan.swap t).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app t.π j = NatTrans.app (BinaryFan.swap s).π j) → m = lift I (BinaryFan.swap s) w : m ≫ NatTrans.app (BinaryFan.swap t).π { as := ↑WalkingPair.swap WalkingPair.right } = NatTrans.app s.π { as := ↑WalkingPair.swap WalkingPair.right } ⊢ m ≫ NatTrans.app t.π { as := WalkingPair.right } = NatTrans.app (BinaryFan.swap s).π { as := WalkingPair.right } [PROOFSTEP] exact w [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ ∀ (j : Discrete WalkingPair), (fun t => lift R (BinaryFan.assocInv P t)) t ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ (fun t => lift R (BinaryFan.assocInv P t)) t ≫ NatTrans.app (BinaryFan.assoc Q s).π { as := WalkingPair.left } = NatTrans.app t.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ (fun t => lift R (BinaryFan.assocInv P t)) t ≫ NatTrans.app (BinaryFan.assoc Q s).π { as := WalkingPair.right } = NatTrans.app t.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ lift R (BinaryFan.assocInv P t) ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s)) = BinaryFan.snd t [PROOFSTEP] apply Q.hom_ext [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ ∀ (j : Discrete WalkingPair), (lift R (BinaryFan.assocInv P t) ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s))) ≫ NatTrans.app sYZ.π j = BinaryFan.snd t ≫ NatTrans.app sYZ.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.right.mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ (lift R (BinaryFan.assocInv P t) ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s))) ≫ NatTrans.app sYZ.π { as := WalkingPair.left } = BinaryFan.snd t ≫ NatTrans.app sYZ.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) ⊢ (lift R (BinaryFan.assocInv P t) ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s))) ≫ NatTrans.app sYZ.π { as := WalkingPair.right } = BinaryFan.snd t ≫ NatTrans.app sYZ.π { as := WalkingPair.right } 
[PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j ⊢ m = (fun t => lift R (BinaryFan.assocInv P t)) t [PROOFSTEP] have h := R.uniq (BinaryFan.assocInv P t) m [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m = (fun t => lift R (BinaryFan.assocInv P t)) t [PROOFSTEP] rw [h] [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ NatTrans.app s.π { as := WalkingPair.left } = NatTrans.app (BinaryFan.assocInv P t).π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ NatTrans.app s.π { as := WalkingPair.right } = NatTrans.app (BinaryFan.assocInv P t).π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ BinaryFan.fst s = lift P (BinaryFan.mk (BinaryFan.fst t) (BinaryFan.snd t ≫ BinaryFan.fst sYZ)) case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : 
BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ BinaryFan.snd s = BinaryFan.snd t ≫ BinaryFan.snd sYZ [PROOFSTEP] apply P.hom_ext [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ ∀ (j : Discrete WalkingPair), (m ≫ BinaryFan.fst s) ≫ NatTrans.app sXY.π j = lift P (BinaryFan.mk (BinaryFan.fst t) (BinaryFan.snd t ≫ BinaryFan.fst sYZ)) ≫ NatTrans.app sXY.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left.mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ (m ≫ BinaryFan.fst s) ≫ NatTrans.app sXY.π { as := WalkingPair.left } = lift P (BinaryFan.mk (BinaryFan.fst t) (BinaryFan.snd t ≫ BinaryFan.fst sYZ)) ≫ NatTrans.app sXY.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.left.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ (m ≫ BinaryFan.fst s) ≫ NatTrans.app sXY.π { as := WalkingPair.right } = lift P (BinaryFan.mk (BinaryFan.fst t) (BinaryFan.snd t ≫ BinaryFan.fst sYZ)) ≫ NatTrans.app sXY.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] case mk.left.mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ BinaryFan.fst s ≫ BinaryFan.fst sXY = BinaryFan.fst t [PROOFSTEP] exact w ⟨WalkingPair.left⟩ [GOAL] case mk.left.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app 
(BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ BinaryFan.fst s ≫ BinaryFan.snd sXY = BinaryFan.snd t ≫ BinaryFan.fst sYZ [PROOFSTEP] specialize w ⟨WalkingPair.right⟩ [GOAL] case mk.left.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ NatTrans.app (BinaryFan.assoc Q s).π { as := WalkingPair.right } = NatTrans.app t.π { as := WalkingPair.right } ⊢ m ≫ BinaryFan.fst s ≫ BinaryFan.snd sXY = BinaryFan.snd t ≫ BinaryFan.fst sYZ [PROOFSTEP] simp at w [GOAL] case mk.left.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s)) = BinaryFan.snd t ⊢ m ≫ BinaryFan.fst s ≫ BinaryFan.snd sXY = BinaryFan.snd t ≫ BinaryFan.fst sYZ [PROOFSTEP] rw [← w] [GOAL] case mk.left.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s)) = BinaryFan.snd t ⊢ m ≫ BinaryFan.fst s ≫ BinaryFan.snd sXY = (m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s))) ≫ BinaryFan.fst sYZ [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt w : ∀ (j : Discrete WalkingPair), m ≫ NatTrans.app (BinaryFan.assoc Q s).π j = NatTrans.app t.π j h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) ⊢ m ≫ BinaryFan.snd s = BinaryFan.snd t ≫ BinaryFan.snd sYZ [PROOFSTEP] specialize w ⟨WalkingPair.right⟩ [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ NatTrans.app (BinaryFan.assoc Q s).π { as := WalkingPair.right } = NatTrans.app t.π { as := WalkingPair.right } ⊢ m ≫ BinaryFan.snd s = BinaryFan.snd t ≫ BinaryFan.snd sYZ [PROOFSTEP] simp at w [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = 
NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s)) = BinaryFan.snd t ⊢ m ≫ BinaryFan.snd s = BinaryFan.snd t ≫ BinaryFan.snd sYZ [PROOFSTEP] rw [← w] [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ X Y Z : C sXY : BinaryFan X Y P : IsLimit sXY sYZ : BinaryFan Y Z Q : IsLimit sYZ s : BinaryFan sXY.pt Z R : IsLimit s t : Cone (pair X sYZ.pt) m : t.pt ⟶ (BinaryFan.assoc Q s).pt h : (∀ (j : Discrete WalkingPair), m ≫ NatTrans.app s.π j = NatTrans.app (BinaryFan.assocInv P t).π j) → m = lift R (BinaryFan.assocInv P t) w : m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s)) = BinaryFan.snd t ⊢ m ≫ BinaryFan.snd s = (m ≫ lift Q (BinaryFan.mk (BinaryFan.fst s ≫ BinaryFan.snd sXY) (BinaryFan.snd s))) ≫ BinaryFan.snd sYZ [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan s.pt X Q : IsLimit t ⊢ snd t ≫ IsLimit.lift Q (mk (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }) (𝟙 X)) = 𝟙 t.pt [PROOFSTEP] apply Q.hom_ext [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan s.pt X Q : IsLimit t ⊢ ∀ (j : Discrete WalkingPair), (snd t ≫ IsLimit.lift Q (mk (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }) (𝟙 X))) ≫ NatTrans.app t.π j = 𝟙 t.pt ≫ NatTrans.app t.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan s.pt X Q : IsLimit t ⊢ (snd t ≫ IsLimit.lift Q (mk (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }) (𝟙 X))) ≫ NatTrans.app t.π { as := WalkingPair.left } = 𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.left } [PROOFSTEP] apply P.hom_ext [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan s.pt X Q : IsLimit t ⊢ ∀ (j : Discrete PEmpty), ((snd t ≫ IsLimit.lift Q (mk (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }) (𝟙 X))) ≫ NatTrans.app t.π { as := WalkingPair.left }) ≫ NatTrans.app s.π j = (𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.left }) ≫ NatTrans.app s.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan s.pt X Q : IsLimit t ⊢ (snd t ≫ IsLimit.lift Q (mk (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }) (𝟙 X))) ≫ NatTrans.app t.π { as := WalkingPair.right } = 𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : 
BinaryFan X s.pt Q : IsLimit t ⊢ fst t ≫ IsLimit.lift Q (mk (𝟙 X) (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x })) = 𝟙 t.pt [PROOFSTEP] apply Q.hom_ext [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan X s.pt Q : IsLimit t ⊢ ∀ (j : Discrete WalkingPair), (fst t ≫ IsLimit.lift Q (mk (𝟙 X) (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }))) ≫ NatTrans.app t.π j = 𝟙 t.pt ≫ NatTrans.app t.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan X s.pt Q : IsLimit t ⊢ (fst t ≫ IsLimit.lift Q (mk (𝟙 X) (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }))) ≫ NatTrans.app t.π { as := WalkingPair.left } = 𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan X s.pt Q : IsLimit t ⊢ (fst t ≫ IsLimit.lift Q (mk (𝟙 X) (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }))) ≫ NatTrans.app t.π { as := WalkingPair.right } = 𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.right } [PROOFSTEP] apply P.hom_ext [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y X : C s : Cone (Functor.empty C) P : IsLimit s t : BinaryFan X s.pt Q : IsLimit t ⊢ ∀ (j : Discrete PEmpty), ((fst t ≫ IsLimit.lift Q (mk (𝟙 X) (IsLimit.lift P { pt := X, π := NatTrans.mk fun x => Discrete.rec (fun x => PEmpty.rec (fun x => (x : PEmpty) → ((Functor.const (Discrete PEmpty)).obj X).obj { as := x } ⟶ (Functor.empty C).obj { as := x }) x x) x }))) ≫ NatTrans.app t.π { as := WalkingPair.right }) ≫ NatTrans.app s.π j = (𝟙 t.pt ≫ NatTrans.app t.π { as := WalkingPair.right }) ≫ NatTrans.app s.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ tensorHom ℬ (𝟙 X₁) (𝟙 X₂) = 𝟙 (tensorObj ℬ X₁ X₂) [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ ∀ (j : Discrete WalkingPair), tensorHom ℬ (𝟙 X₁) (𝟙 X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π j = 𝟙 (tensorObj ℬ X₁ X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ tensorHom ℬ (𝟙 X₁) (𝟙 X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π { as := WalkingPair.left } = 𝟙 (tensorObj ℬ X₁ X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π { as := WalkingPair.left } [PROOFSTEP] dsimp [tensorHom] [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ IsLimit.lift (ℬ X₁ X₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ 
X₂).cone ≫ 𝟙 X₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ 𝟙 X₂)) ≫ BinaryFan.fst (ℬ X₁ X₂).cone = 𝟙 (tensorObj ℬ X₁ X₂) ≫ BinaryFan.fst (ℬ X₁ X₂).cone [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ tensorHom ℬ (𝟙 X₁) (𝟙 X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π { as := WalkingPair.right } = 𝟙 (tensorObj ℬ X₁ X₂) ≫ NatTrans.app (ℬ X₁ X₂).cone.π { as := WalkingPair.right } [PROOFSTEP] dsimp [tensorHom] [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C ⊢ IsLimit.lift (ℬ X₁ X₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ 𝟙 X₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ 𝟙 X₂)) ≫ BinaryFan.snd (ℬ X₁ X₂).cone = 𝟙 (tensorObj ℬ X₁ X₂) ≫ BinaryFan.snd (ℬ X₁ X₂).cone [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ tensorHom ℬ (f₁ ≫ g₁) (f₂ ≫ g₂) = tensorHom ℬ f₁ f₂ ≫ tensorHom ℬ g₁ g₂ [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ ∀ (j : Discrete WalkingPair), tensorHom ℬ (f₁ ≫ g₁) (f₂ ≫ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π j = (tensorHom ℬ f₁ f₂ ≫ tensorHom ℬ g₁ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ tensorHom ℬ (f₁ ≫ g₁) (f₂ ≫ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π { as := WalkingPair.left } = (tensorHom ℬ f₁ f₂ ≫ tensorHom ℬ g₁ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π { as := WalkingPair.left } [PROOFSTEP] dsimp [tensorHom] [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ IsLimit.lift (ℬ Z₁ Z₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁ ≫ g₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂ ≫ g₂)) ≫ BinaryFan.fst (ℬ Z₁ Z₂).cone = (IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂)) ≫ IsLimit.lift (ℬ Z₁ Z₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ Y₁ Y₂).cone ≫ g₁) (BinaryFan.snd (ℬ Y₁ Y₂).cone ≫ g₂))) ≫ BinaryFan.fst (ℬ Z₁ Z₂).cone [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ tensorHom ℬ (f₁ ≫ g₁) (f₂ ≫ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π { as := WalkingPair.right } = (tensorHom ℬ f₁ f₂ ≫ tensorHom ℬ g₁ g₂) ≫ NatTrans.app (ℬ Z₁ Z₂).cone.π { as := WalkingPair.right } [PROOFSTEP] dsimp [tensorHom] [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ Y₁ Z₁ X₂ Y₂ Z₂ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ g₁ : Y₁ ⟶ Z₁ g₂ : Y₂ ⟶ Z₂ ⊢ IsLimit.lift (ℬ Z₁ Z₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁ ≫ g₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂ ≫ g₂)) ≫ BinaryFan.snd (ℬ Z₁ Z₂).cone = (IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ 
X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂)) ≫ IsLimit.lift (ℬ Z₁ Z₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ Y₁ Y₂).cone ≫ g₁) (BinaryFan.snd (ℬ Y₁ Y₂).cone ≫ g₂))) ≫ BinaryFan.snd (ℬ Z₁ Z₂).cone [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ tensorHom ℬ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom (𝟙 Z) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ tensorHom ℬ (𝟙 W) (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom = (BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom [PROOFSTEP] dsimp [tensorHom] [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom)) = (BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ ∀ (j : Discrete WalkingPair), (IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π j = ((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ (IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.left } = ((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := 
WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ (IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right } = ((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right } [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ ∀ (j : Discrete WalkingPair), ((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π j = (((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.right.mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ ((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.left } = (((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → 
LimitCone (pair X Y) W X Y Z : C ⊢ ((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right } = (((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right } [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] case mk.right.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ ∀ (j : Discrete WalkingPair), (((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π j = ((((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.right.mk.right.mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ (((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π { as := WalkingPair.left } = ((((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y 
Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right.mk.right.mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) W X Y Z : C ⊢ (((IsLimit.lift (ℬ (ℬ W (ℬ X Y).cone.pt).cone.pt Z).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ (BinaryFan.associatorOfLimitCone ℬ W X Y).hom) (BinaryFan.snd (ℬ (ℬ (ℬ W X).cone.pt Y).cone.pt Z).cone ≫ 𝟙 Z)) ≫ (BinaryFan.associatorOfLimitCone ℬ W (tensorObj ℬ X Y) Z).hom ≫ IsLimit.lift (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ 𝟙 W) (BinaryFan.snd (ℬ W (ℬ (tensorObj ℬ X Y) Z).cone.pt).cone ≫ (BinaryFan.associatorOfLimitCone ℬ X Y Z).hom))) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π { as := WalkingPair.right } = ((((BinaryFan.associatorOfLimitCone ℬ (tensorObj ℬ W X) Y Z).hom ≫ (BinaryFan.associatorOfLimitCone ℬ W X (tensorObj ℬ Y Z)).hom) ≫ NatTrans.app (ℬ W (ℬ X (ℬ Y Z).cone.pt).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ X (ℬ Y Z).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y Z).cone.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X Y : C ⊢ (BinaryFan.associatorOfLimitCone ℬ X 𝒯.cone.pt Y).hom ≫ tensorHom ℬ (𝟙 X) (BinaryFan.leftUnitor 𝒯.isLimit (ℬ 𝒯.cone.pt Y).isLimit).hom = tensorHom ℬ (BinaryFan.rightUnitor 𝒯.isLimit (ℬ X 𝒯.cone.pt).isLimit).hom (𝟙 Y) [PROOFSTEP] dsimp [tensorHom] [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X Y : C ⊢ (BinaryFan.associatorOfLimitCone ℬ X 𝒯.cone.pt Y).hom ≫ IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ 𝟙 X) (BinaryFan.snd (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ BinaryFan.snd (ℬ 𝒯.cone.pt Y).cone)) = IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ BinaryFan.fst (ℬ X 𝒯.cone.pt).cone) (BinaryFan.snd (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ 𝟙 Y)) [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X Y : C ⊢ ∀ (j : Discrete WalkingPair), ((BinaryFan.associatorOfLimitCone ℬ X 𝒯.cone.pt Y).hom ≫ IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ 𝟙 X) (BinaryFan.snd (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ BinaryFan.snd (ℬ 𝒯.cone.pt Y).cone))) ≫ NatTrans.app (ℬ X Y).cone.π j = IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ BinaryFan.fst (ℬ X 𝒯.cone.pt).cone) (BinaryFan.snd (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ 𝟙 Y)) ≫ NatTrans.app (ℬ X Y).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X Y : C ⊢ ((BinaryFan.associatorOfLimitCone ℬ X 𝒯.cone.pt Y).hom ≫ IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ 𝟙 X) (BinaryFan.snd (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ BinaryFan.snd (ℬ 𝒯.cone.pt Y).cone))) ≫ NatTrans.app (ℬ X Y).cone.π { as := 
WalkingPair.left } = IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ BinaryFan.fst (ℬ X 𝒯.cone.pt).cone) (BinaryFan.snd (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ 𝟙 Y)) ≫ NatTrans.app (ℬ X Y).cone.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X✝ Y✝ : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X Y : C ⊢ ((BinaryFan.associatorOfLimitCone ℬ X 𝒯.cone.pt Y).hom ≫ IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ 𝟙 X) (BinaryFan.snd (ℬ X (ℬ 𝒯.cone.pt Y).cone.pt).cone ≫ BinaryFan.snd (ℬ 𝒯.cone.pt Y).cone))) ≫ NatTrans.app (ℬ X Y).cone.π { as := WalkingPair.right } = IsLimit.lift (ℬ X Y).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ BinaryFan.fst (ℬ X 𝒯.cone.pt).cone) (BinaryFan.snd (ℬ (ℬ X 𝒯.cone.pt).cone.pt Y).cone ≫ 𝟙 Y)) ≫ NatTrans.app (ℬ X Y).cone.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C f : X₁ ⟶ X₂ ⊢ tensorHom ℬ (𝟙 𝒯.cone.pt) f ≫ (BinaryFan.leftUnitor 𝒯.isLimit (ℬ 𝒯.cone.pt X₂).isLimit).hom = (BinaryFan.leftUnitor 𝒯.isLimit (ℬ 𝒯.cone.pt X₁).isLimit).hom ≫ f [PROOFSTEP] dsimp [tensorHom] [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C f : X₁ ⟶ X₂ ⊢ IsLimit.lift (ℬ 𝒯.cone.pt X₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ 𝒯.cone.pt X₁).cone ≫ 𝟙 𝒯.cone.pt) (BinaryFan.snd (ℬ 𝒯.cone.pt X₁).cone ≫ f)) ≫ BinaryFan.snd (ℬ 𝒯.cone.pt X₂).cone = BinaryFan.snd (ℬ 𝒯.cone.pt X₁).cone ≫ f [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C f : X₁ ⟶ X₂ ⊢ tensorHom ℬ f (𝟙 𝒯.cone.pt) ≫ (BinaryFan.rightUnitor 𝒯.isLimit (ℬ X₂ 𝒯.cone.pt).isLimit).hom = (BinaryFan.rightUnitor 𝒯.isLimit (ℬ X₁ 𝒯.cone.pt).isLimit).hom ≫ f [PROOFSTEP] dsimp [tensorHom] [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ : C f : X₁ ⟶ X₂ ⊢ IsLimit.lift (ℬ X₂ 𝒯.cone.pt).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ 𝒯.cone.pt).cone ≫ f) (BinaryFan.snd (ℬ X₁ 𝒯.cone.pt).cone ≫ 𝟙 𝒯.cone.pt)) ≫ BinaryFan.fst (ℬ X₂ 𝒯.cone.pt).cone = BinaryFan.fst (ℬ X₁ 𝒯.cone.pt).cone ≫ f [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ tensorHom ℬ (tensorHom ℬ f₁ f₂) f₃ ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom = (BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ tensorHom ℬ f₁ (tensorHom ℬ f₂ f₃) [PROOFSTEP] dsimp [tensorHom] [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom = (BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ 
X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃)))) [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ ∀ (j : Discrete WalkingPair), (IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π j = ((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ (IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.left } = ((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ (IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right } = ((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right } [PROOFSTEP] apply IsLimit.hom_ext (ℬ _ _).isLimit [GOAL] case mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ ∀ (j : Discrete WalkingPair), ((IsLimit.lift (ℬ 
(tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π j = (((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π j [PROOFSTEP] rintro ⟨⟨⟩⟩ [GOAL] case mk.right.mk.left C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ ((IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π { as := WalkingPair.left } = (((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π { as := WalkingPair.left } [PROOFSTEP] simp [GOAL] case mk.right.mk.right C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) X₁ X₂ X₃ Y₁ Y₂ Y₃ : C f₁ : X₁ ⟶ Y₁ f₂ : X₂ ⟶ Y₂ f₃ : X₃ ⟶ Y₃ ⊢ ((IsLimit.lift (ℬ (tensorObj ℬ Y₁ Y₂) Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ IsLimit.lift (ℬ Y₁ Y₂).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ X₂).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ X₂).cone ≫ f₂))) (BinaryFan.snd (ℬ (tensorObj ℬ X₁ X₂) X₃).cone ≫ f₃)) ≫ (BinaryFan.associatorOfLimitCone ℬ Y₁ Y₂ Y₃).hom) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π { as := WalkingPair.right } = (((BinaryFan.associatorOfLimitCone ℬ X₁ X₂ X₃).hom ≫ IsLimit.lift (ℬ Y₁ (tensorObj ℬ Y₂ Y₃)).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ f₁) (BinaryFan.snd (ℬ X₁ (ℬ X₂ X₃).cone.pt).cone ≫ IsLimit.lift (ℬ Y₂ Y₃).isLimit (BinaryFan.mk (BinaryFan.fst (ℬ X₂ X₃).cone ≫ f₂) (BinaryFan.snd (ℬ X₂ X₃).cone ≫ f₃))))) ≫ NatTrans.app (ℬ Y₁ (ℬ Y₂ Y₃).cone.pt).cone.π { as := WalkingPair.right }) ≫ NatTrans.app (ℬ Y₂ Y₃).cone.π { as := WalkingPair.right } [PROOFSTEP] simp [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone (Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) ⊢ Category.{?u.82278, u} (MonoidalOfChosenFiniteProductsSynonym 𝒯 ℬ) [PROOFSTEP] dsimp [MonoidalOfChosenFiniteProductsSynonym] [GOAL] C : Type u inst✝ : Category.{v, u} C X Y : C 𝒯 : LimitCone 
(Functor.empty C) ℬ : (X Y : C) → LimitCone (pair X Y) ⊢ Category.{?u.82278, u} C [PROOFSTEP] infer_instance
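Every uniqueness obligation in these traces is discharged by the same extensionality principle for limit cones (the Q.hom_ext / P.hom_ext / IsLimit.hom_ext steps above): a morphism into the apex of a limit cone is determined by its composites with the projections. For a binary fan t with projections π_left and π_right, in ordinary notation:

\[
m = m' \iff \pi_{\mathrm{left}} \circ m = \pi_{\mathrm{left}} \circ m'
\;\text{ and }\;
\pi_{\mathrm{right}} \circ m = \pi_{\mathrm{right}} \circ m'.
\]

The forward direction is immediate; the reverse direction is IsLimit.uniq applied to both morphisms, which is exactly the have h := I.uniq … followed by rw [h] pattern in the first trace.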
/* # This file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see LICENSE */ #include <math.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include <gsl/gsl_matrix_double.h> #include <gsl/gsl_vector_double.h> #include "os-features.h" #include "sip-utils.h" #include "gslutils.h" #include "starutil.h" #include "mathutil.h" #include "errors.h" #include "log.h" double wcs_pixel_center_for_size(double size) { return 0.5 + 0.5 * size; } void tan_rotate(const tan_t* tanin, tan_t* tanout, double angle) { double s,c; double newcd[4]; memmove(tanout, tanin, sizeof(tan_t)); s = sin(deg2rad(angle)); c = cos(deg2rad(angle)); newcd[0] = c*tanin->cd[0][0] + s*tanin->cd[1][0]; newcd[1] = c*tanin->cd[0][1] + s*tanin->cd[1][1]; newcd[2] = -s*tanin->cd[0][0] + c*tanin->cd[1][0]; newcd[3] = -s*tanin->cd[0][1] + c*tanin->cd[1][1]; tanout->cd[0][0] = newcd[0]; tanout->cd[0][1] = newcd[1]; tanout->cd[1][0] = newcd[2]; tanout->cd[1][1] = newcd[3]; } int sip_ensure_inverse_polynomials(sip_t* sip) { if ((sip->a_order == 0 && sip->b_order == 0) || (sip->ap_order > 0 && sip->bp_order > 0)) { return 0; } sip->ap_order = sip->bp_order = MAX(sip->a_order, sip->b_order) + 1; return sip_compute_inverse_polynomials(sip, 0, 0, 0, 0, 0, 0); } int sip_compute_inverse_polynomials(sip_t* sip, int NX, int NY, double xlo, double xhi, double ylo, double yhi) { int inv_sip_order; int M, N; int i, j, p, q, gu, gv; double maxu, maxv, minu, minv; double u, v, U, V; gsl_matrix *mA; gsl_vector *b1, *b2, *x1, *x2; tan_t* tan; assert(sip->a_order == sip->b_order); assert(sip->ap_order == sip->bp_order); tan = &(sip->wcstan); logverb("sip_compute-inverse_polynomials: A %i, AP %i\n", sip->a_order, sip->ap_order); /* basic idea: lay down a grid in image, for each gridpoint, push through the polynomial to get yourself into warped image coordinate (but not yet lifted onto the sky). Then, using the set of warped gridpoints as inputs, fit back to their original grid locations as targets. */ inv_sip_order = sip->ap_order; // Number of grid points to use: if (NX == 0) NX = 10 * (inv_sip_order + 1); if (NY == 0) NY = 10 * (inv_sip_order + 1); if (xhi == 0) xhi = tan->imagew; if (yhi == 0) yhi = tan->imageh; logverb("NX,NY %i,%i, x range [%f, %f], y range [%f, %f]\n", NX,NY, xlo, xhi, ylo, yhi); // Number of coefficients to solve for: // We only compute the upper triangle polynomial terms N = (inv_sip_order + 1) * (inv_sip_order + 2) / 2; // Number of samples to fit. M = NX * NY; mA = gsl_matrix_alloc(M, N); b1 = gsl_vector_alloc(M); b2 = gsl_vector_alloc(M); assert(mA); assert(b1); assert(b2); /* * Rearranging formula (4), (5), and (6) from the SIP paper gives the * following equations: * * +----------------------- Linear pixel coordinates in PIXELS * | before SIP correction * | +--- Intermediate world coordinates in DEGREES * | | * v v * -1 * U = [CD11 CD12] * x * V [CD21 CD22] y * * +---------------- PIXEL distortion delta from telescope to * | linear coordinates * | +----------- Linear PIXEL coordinates before SIP correction * | | +--- Polynomial U,V terms in powers of PIXELS * v v v * * -f(u1,v1) = p11 p12 p13 p14 p15 ... * ap1 * -f(u2,v2) = p21 p22 p23 p24 p25 ... ap2 * ... * * -g(u1,v1) = p11 p12 p13 p14 p15 ... * bp1 * -g(u2,v2) = p21 p22 p23 p24 p25 ... bp2 * ... * * which recovers the A and B's. */ minu = xlo - tan->crpix[0]; maxu = xhi - tan->crpix[0]; minv = ylo - tan->crpix[1]; maxv = yhi - tan->crpix[1]; // Sample grid locations. 
i = 0; for (gu=0; gu<NX; gu++) { for (gv=0; gv<NY; gv++) { double fuv, guv; // Calculate grid position in original image pixels u = (gu * (maxu - minu) / (NX-1)) + minu; v = (gv * (maxv - minv) / (NY-1)) + minv; // compute U=u+f(u,v) and V=v+g(u,v) sip_calc_distortion(sip, u, v, &U, &V); fuv = U - u; guv = V - v; // Polynomial terms... j = 0; for (p = 0; p <= inv_sip_order; p++) for (q = 0; q <= inv_sip_order; q++) { if (p + q > inv_sip_order) continue; assert(j < N); gsl_matrix_set(mA, i, j, pow(U, (double)p) * pow(V, (double)q)); j++; } assert(j == N); gsl_vector_set(b1, i, -fuv); gsl_vector_set(b2, i, -guv); i++; } } assert(i == M); // Solve the linear equation. if (gslutils_solve_leastsquares_v(mA, 2, b1, &x1, NULL, b2, &x2, NULL)) { ERROR("Failed to solve SIP inverse matrix equation!"); return -1; } // Extract the coefficients j = 0; for (p = 0; p <= inv_sip_order; p++) for (q = 0; q <= inv_sip_order; q++) { if (p + q > inv_sip_order) continue; assert(j < N); sip->ap[p][q] = gsl_vector_get(x1, j); sip->bp[p][q] = gsl_vector_get(x2, j); j++; } assert(j == N); // Check that we found values that actually invert the polynomial. // The error should be particularly small at the grid points. if (log_get_level() > LOG_VERB) { // rms error accumulators: double sumdu = 0; double sumdv = 0; int Z; for (gu = 0; gu < NX; gu++) { for (gv = 0; gv < NY; gv++) { double newu, newv; // Calculate grid position in original image pixels u = (gu * (maxu - minu) / (NX-1)) + minu; v = (gv * (maxv - minv) / (NY-1)) + minv; sip_calc_distortion(sip, u, v, &U, &V); sip_calc_inv_distortion(sip, U, V, &newu, &newv); sumdu += square(u - newu); sumdv += square(v - newv); } } sumdu /= (NX*NY); sumdv /= (NX*NY); debug("RMS error of inverting a distortion (at the grid points, in pixels):\n"); debug(" du: %g\n", sqrt(sumdu)); debug(" dv: %g\n", sqrt(sumdv)); debug(" dist: %g\n", sqrt(sumdu + sumdv)); sumdu = 0; sumdv = 0; Z = 1000; for (i=0; i<Z; i++) { double newu, newv; u = uniform_sample(minu, maxu); v = uniform_sample(minv, maxv); sip_calc_distortion(sip, u, v, &U, &V); sip_calc_inv_distortion(sip, U, V, &newu, &newv); sumdu += square(u - newu); sumdv += square(v - newv); } sumdu /= Z; sumdv /= Z; debug("RMS error of inverting a distortion (at random points, in pixels):\n"); debug(" du: %g\n", sqrt(sumdu)); debug(" dv: %g\n", sqrt(sumdv)); debug(" dist: %g\n", sqrt(sumdu + sumdv)); } gsl_matrix_free(mA); gsl_vector_free(b1); gsl_vector_free(b2); gsl_vector_free(x1); gsl_vector_free(x2); return 0; } anbool tan_pixel_is_inside_image(const tan_t* wcs, double x, double y) { return (x >= 1 && x <= wcs->imagew && y >= 1 && y <= wcs->imageh); } anbool sip_pixel_is_inside_image(const sip_t* wcs, double x, double y) { return tan_pixel_is_inside_image(&(wcs->wcstan), x, y); } anbool sip_is_inside_image(const sip_t* wcs, double ra, double dec) { double x,y; if (!sip_radec2pixelxy(wcs, ra, dec, &x, &y)) return FALSE; return sip_pixel_is_inside_image(wcs, x, y); } anbool tan_is_inside_image(const tan_t* wcs, double ra, double dec) { double x,y; if (!tan_radec2pixelxy(wcs, ra, dec, &x, &y)) return FALSE; return tan_pixel_is_inside_image(wcs, x, y); } int* sip_filter_stars_in_field(const sip_t* sip, const tan_t* tan, const double* xyz, const double* radec, int N, double** p_xy, int* inds, int* p_Ngood) { int i, Ngood; int W, H; double* xy = NULL; anbool allocd = FALSE; assert(sip || tan); assert(xyz || radec); assert(p_Ngood); Ngood = 0; if (!inds) { inds = malloc(N * sizeof(int)); allocd = TRUE; } if (p_xy) xy = malloc(N * 2 *
sizeof(double)); if (sip) { W = sip->wcstan.imagew; H = sip->wcstan.imageh; } else { W = tan->imagew; H = tan->imageh; } for (i=0; i<N; i++) { double x, y; if (xyz) { if (sip) { if (!sip_xyzarr2pixelxy(sip, xyz + i*3, &x, &y)) continue; } else { if (!tan_xyzarr2pixelxy(tan, xyz + i*3, &x, &y)) continue; } } else { if (sip) { if (!sip_radec2pixelxy(sip, radec[i*2], radec[i*2+1], &x, &y)) continue; } else { if (!tan_radec2pixelxy(tan, radec[i*2], radec[i*2+1], &x, &y)) continue; } } // FIXME -- check half- and one-pixel FITS issues. if ((x < 0) || (y < 0) || (x >= W) || (y >= H)) continue; inds[Ngood] = i; if (xy) { xy[Ngood * 2 + 0] = x; xy[Ngood * 2 + 1] = y; } Ngood++; } if (allocd) inds = realloc(inds, Ngood * sizeof(int)); if (xy) xy = realloc(xy, Ngood * 2 * sizeof(double)); if (p_xy) *p_xy = xy; *p_Ngood = Ngood; return inds; } void sip_get_radec_center(const sip_t* wcs, double* p_ra, double* p_dec) { double px = wcs_pixel_center_for_size(wcs->wcstan.imagew); double py = wcs_pixel_center_for_size(wcs->wcstan.imageh); sip_pixelxy2radec(wcs, px, py, p_ra, p_dec); } void tan_get_radec_center(const tan_t* wcs, double* p_ra, double* p_dec) { double px = wcs_pixel_center_for_size(wcs->imagew); double py = wcs_pixel_center_for_size(wcs->imageh); tan_pixelxy2radec(wcs, px, py, p_ra, p_dec); } double sip_get_radius_deg(const sip_t* wcs) { return arcsec2deg(sip_pixel_scale(wcs) * hypot(wcs->wcstan.imagew, wcs->wcstan.imageh)/2.0); } double tan_get_radius_deg(const tan_t* wcs) { return arcsec2deg(tan_pixel_scale(wcs) * hypot(wcs->imagew, wcs->imageh)/2.0); } void sip_get_radec_center_hms(const sip_t* wcs, int* rah, int* ram, double* ras, int* decsign, int* decd, int* decm, double* decs) { double ra, dec; sip_get_radec_center(wcs, &ra, &dec); ra2hms(ra, rah, ram, ras); dec2dms(dec, decsign, decd, decm, decs); } void sip_get_radec_center_hms_string(const sip_t* wcs, char* rastr, char* decstr) { double ra, dec; sip_get_radec_center(wcs, &ra, &dec); ra2hmsstring(ra, rastr); dec2dmsstring(dec, decstr); } void sip_get_field_size(const sip_t* wcs, double* pw, double* ph, char** units) { double minx = 0.5; double maxx = (wcs->wcstan.imagew + 0.5); double midx = (minx + maxx) / 2.0; double miny = 0.5; double maxy = (wcs->wcstan.imageh + 0.5); double midy = (miny + maxy) / 2.0; double ra1, dec1, ra2, dec2, ra3, dec3; double w, h; // measure width through the middle sip_pixelxy2radec(wcs, minx, midy, &ra1, &dec1); sip_pixelxy2radec(wcs, midx, midy, &ra2, &dec2); sip_pixelxy2radec(wcs, maxx, midy, &ra3, &dec3); w = arcsec_between_radecdeg(ra1, dec1, ra2, dec2) + arcsec_between_radecdeg(ra2, dec2, ra3, dec3); // measure height through the middle sip_pixelxy2radec(wcs, midx, miny, &ra1, &dec1); sip_pixelxy2radec(wcs, midx, midy, &ra2, &dec2); sip_pixelxy2radec(wcs, midx, maxy, &ra3, &dec3); h = arcsec_between_radecdeg(ra1, dec1, ra2, dec2) + arcsec_between_radecdeg(ra2, dec2, ra3, dec3); if (MIN(w, h) < 60.0) { *units = "arcseconds"; *pw = w; *ph = h; } else if (MIN(w, h) < 3600.0) { *units = "arcminutes"; *pw = w / 60.0; *ph = h / 60.0; } else { *units = "degrees"; *pw = w / 3600.0; *ph = h / 3600.0; } } void sip_walk_image_boundary(const sip_t* wcs, double stepsize, void (*callback)(const sip_t* wcs, double x, double y, double ra, double dec, void* token), void* token) { int i, side; // Walk the perimeter of the image in steps of stepsize pixels double W = wcs->wcstan.imagew; double H = wcs->wcstan.imageh; { double Xmin = 0.5; double Xmax = W + 0.5; double Ymin = 0.5; double Ymax = H + 0.5; double offsetx[] 
= { Xmin, Xmax, Xmax, Xmin }; double offsety[] = { Ymin, Ymin, Ymax, Ymax }; double stepx[] = { +stepsize, 0, -stepsize, 0 }; double stepy[] = { 0, +stepsize, 0, -stepsize }; int Nsteps[] = { ceil(W/stepsize), ceil(H/stepsize), ceil(W/stepsize), ceil(H/stepsize) }; for (side=0; side<4; side++) { for (i=0; i<Nsteps[side]; i++) { double ra, dec; double x, y; x = MIN(Xmax, MAX(Xmin, offsetx[side] + i * stepx[side])); y = MIN(Ymax, MAX(Ymin, offsety[side] + i * stepy[side])); sip_pixelxy2radec(wcs, x, y, &ra, &dec); callback(wcs, x, y, ra, dec, token); } } } } struct radecbounds { double rac, decc; double ramin, ramax, decmin, decmax; }; static void radec_bounds_callback(const sip_t* wcs, double x, double y, double ra, double dec, void* token) { struct radecbounds* b = token; b->decmin = MIN(b->decmin, dec); b->decmax = MAX(b->decmax, dec); if (ra - b->rac > 180) // wrap-around: racenter < 180, ra has gone < 0 but been wrapped around to > 180 ra -= 360; if (b->rac - ra > 180) // wrap-around: racenter > 180, ra has gone > 360 but wrapped around to > 0. ra += 360; b->ramin = MIN(b->ramin, ra); b->ramax = MAX(b->ramax, ra); } void sip_get_radec_bounds(const sip_t* wcs, int stepsize, double* pramin, double* pramax, double* pdecmin, double* pdecmax) { struct radecbounds b; sip_get_radec_center(wcs, &(b.rac), &(b.decc)); b.ramin = b.ramax = b.rac; b.decmin = b.decmax = b.decc; sip_walk_image_boundary(wcs, stepsize, radec_bounds_callback, &b); // Check for poles... // north pole if (sip_is_inside_image(wcs, 0, 90)) { b.ramin = 0; b.ramax = 360; b.decmax = 90; } if (sip_is_inside_image(wcs, 0, -90)) { b.ramin = 0; b.ramax = 360; b.decmin = -90; } if (pramin) *pramin = b.ramin; if (pramax) *pramax = b.ramax; if (pdecmin) *pdecmin = b.decmin; if (pdecmax) *pdecmax = b.decmax; } void sip_shift(const sip_t* sipin, sip_t* sipout, double xlo, double xhi, double ylo, double yhi) { memmove(sipout, sipin, sizeof(sip_t)); tan_transform(&(sipin->wcstan), &(sipout->wcstan), xlo, xhi, ylo, yhi, 1.0); } void tan_transform(const tan_t* tanin, tan_t* tanout, double xlo, double xhi, double ylo, double yhi, double scale) { memmove(tanout, tanin, sizeof(tan_t)); tanout->imagew = (xhi - xlo + 1) * scale; tanout->imageh = (yhi - ylo + 1) * scale; tanout->crpix[0] = (tanout->crpix[0] - (xlo - 1)) * scale; tanout->crpix[1] = (tanout->crpix[1] - (ylo - 1)) * scale; tanout->cd[0][0] /= scale; tanout->cd[0][1] /= scale; tanout->cd[1][0] /= scale; tanout->cd[1][1] /= scale; } void tan_scale(const tan_t* tanin, tan_t* tanout, double scale) { memmove(tanout, tanin, sizeof(tan_t)); tanout->imagew *= scale; tanout->imageh *= scale; tanout->crpix[0] = 0.5 + scale * (tanin->crpix[0] - 0.5); tanout->crpix[1] = 0.5 + scale * (tanin->crpix[1] - 0.5); tanout->cd[0][0] /= scale; tanout->cd[0][1] /= scale; tanout->cd[1][0] /= scale; tanout->cd[1][1] /= scale; } void sip_scale(const sip_t* wcsin, sip_t* wcsout, double scale) { int i, j; memmove(wcsout, wcsin, sizeof(sip_t)); tan_scale(&(wcsin->wcstan), &(wcsout->wcstan), scale); for (i=0; i<=wcsin->a_order; i++) { for (j=0; j<=wcsin->a_order; j++) { if (i + j > wcsin->a_order) continue; wcsout->a[i][j] *= pow(scale, 1 - (i+j)); } } for (i=0; i<=wcsin->b_order; i++) { for (j=0; j<=wcsin->b_order; j++) { if (i + j > wcsin->b_order) continue; wcsout->b[i][j] *= pow(scale, 1 - (i+j)); } } for (i=0; i<=wcsin->ap_order; i++) { for (j=0; j<=wcsin->ap_order; j++) { if (i + j > wcsin->ap_order) continue; wcsout->ap[i][j] *= pow(scale, 1 - (i+j)); } } for (i=0; i<=wcsin->bp_order; i++) { for (j=0; 
j<=wcsin->bp_order; j++) { if (i + j > wcsin->bp_order) continue; wcsout->bp[i][j] *= pow(scale, 1 - (i+j)); } } }
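A minimal usage sketch for the inverse-fit entry points above (not from the original file): sip_ensure_inverse_polynomials, its zeros-mean-defaults convention, and the ap_order/bp_order fields are taken from the code above, while the "sip.h" include and the helper itself are assumptions.

#include <stdio.h>
#include "sip.h"        /* assumed header declaring sip_t */
#include "sip-utils.h"

/* Fit the inverse AP/BP polynomials of a SIP WCS whose forward A/B terms
 * are already set; zeros request the default grid and full image bounds. */
static int fit_inverse(sip_t* wcs) {
    if (sip_ensure_inverse_polynomials(wcs)) {
        fprintf(stderr, "inverse SIP fit failed\n");
        return -1;
    }
    printf("inverse terms fitted: ap_order=%i bp_order=%i\n",
           wcs->ap_order, wcs->bp_order);
    return 0;
}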
module TyTTP.Core.Routing import Control.Monad.Maybe import TyTTP.Core.Context export routes : Alternative m => List ( Context me u h1 s h2 a b -> m $ Context me' p' h1' s' h2' a' b' ) -> Context me u h1 s h2 a b -> m $ Context me' p' h1' s' h2' a' b' routes handlers ctx = choiceMap ($ ctx) handlers export routes' : Monad m => ( Context me u h1 s h2 a b -> m $ Context me' p' h1' s' h2' a' b' ) -> List ( Context me u h1 s h2 a b -> MaybeT m $ Context me' p' h1' s' h2' a' b' ) -> Context me u h1 s h2 a b -> m $ Context me' p' h1' s' h2' a' b' routes' def handlers ctx = do Just result <- runMaybeT $ routes handlers ctx | Nothing => def ctx pure result
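The routes combinator above is choiceMap specialised to lists of handlers; here is a tiny self-contained analogue over plain functions (illustrative only, independent of TyTTP's Context type — tryAll and the example handlers are invented for this sketch):

module RoutingSketch

-- Try handlers left to right; the first that succeeds in the
-- Alternative sense (e.g. Just for Maybe) wins, mirroring `routes`.
tryAll : Alternative m => List (a -> m b) -> a -> m b
tryAll hs x = choiceMap ($ x) hs

positive : Int -> Maybe String
positive n = if n > 0 then Just "positive" else Nothing

fallback : Int -> Maybe String
fallback _ = Just "other"

-- tryAll [positive, fallback] 5    ==> Just "positive"
-- tryAll [positive, fallback] (-1) ==> Just "other"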
{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE FunctionalDependencies #-} module VAD( vad ) where -- let framed = frameWithWinAndOverlap 256 hann 10 s -- sp = mapS (fft . U.map (:+ 0)) framed -- import Prelude hiding(splitAt,(:),foldr1,tail,(++)) import Internal import Signal import qualified Data.Vector.Unboxed as U import Data.Vector.Unboxed((!),Unbox(..)) import Data.List.Stream import Common import Windows import Data.Complex import Transform import qualified Trace as F import Fixed import GHC.TypeLits import SpecialInt import Data.Int ltseF :: Sample a => Int -> Signal (U.Vector a) -> Signal (U.Vector a) ltseF n (Signal s) = let (before,remaining) = splitAt n s _lste b (h:t) = let (future,tl) = splitAt n t valueMax = U.zipWith max (foldr1 (U.zipWith max) (h:future)) (foldl1' (U.zipWith max) b) in valueMax:(_lste (tail before ++ [h]) t) in Signal (_lste before remaining) lstdD :: Int -> U.Vector Double -> U.Vector Double -> Double lstdD winSize noiseEnergy lste = let d a b | b == 0 = fromDouble 1e12 | otherwise = a / b in 10 * log(U.sum (U.zipWith (d) lste noiseEnergy) / fromIntegral winSize) / log 10 theTotal :: (SingI r, SingI s, SingI n) => Fixed Int32 n s r -> U.Vector (Fixed Int32 n s r) -> U.Vector (Fixed Int32 n s r) -> Fixed Int40 n s r theTotal m lste noiseEnergy = let d a b | b == 0 = convert m | otherwise = convert a / convert b in U.sum (U.zipWith (d) lste noiseEnergy) {-# INLINE [0] theTotal #-} lstdF :: (SingI n, SingI s, SingI r) => Int -> U.Vector (Fixed Int32 n s r) -> U.Vector (Fixed Int32 n s r) -> (Fixed Int32 n s r) lstdF winSize noiseEnergy lste = 10 * log(convert $ theTotal maxBound lste noiseEnergy/ fromIntegral winSize) / log 10 getDecisionF :: (SingI n, SingI s, SingI r, SingI (n + n)) => Int -> U.Vector (Fixed Int32 (n + n) s r) -> [(U.Vector (Fixed Int32 (n + n) s r), U.Vector (Fixed Int32 (n + n) s r) )] -> [Fixed Int16 n s r] getDecisionF winSize energy ((c,currentE):r) = let tv = fromDouble 31.0 tn = fromDouble 27.0 l = lstdF winSize energy c result | l >= tv = 1 : getDecisionF winSize energy r | l <= tn = 0 : getDecisionF winSize currentE r | otherwise = 0 : getDecisionF winSize energy r in result getDecisionF winSize energy [] = 0:getDecisionF winSize energy [] getDecisionD :: Int -> U.Vector Double -> [(U.Vector Double, U.Vector Double)] -> [Double] getDecisionD winSize energy ((c,currentE):r) = let tv = fromDouble 31.0 tn = fromDouble 27.0 l = lstdD winSize energy c result | l >= tv = 1 : getDecisionD winSize energy r | l <= tn = 0 : getDecisionD winSize currentE r | otherwise = 0 : getDecisionD winSize energy r in result getDecisionD winSize energy [] = 0:getDecisionD winSize energy [] bandEnergy :: Complex Double -> Double bandEnergy (x :+ y) = x*x + y*y bandEnergyF :: (SingI n, SingI s, SingI r, SingI (n + n)) => Complex (Fixed Int16 n s r) -> Fixed Int32 (n + n) s r bandEnergyF (x :+ y) = amul x x + amul y y class VAD a where vad :: (Sample a, FFT a) => Sampled Time a -> Sampled Time a instance (SingI n, SingI s, SingI r, SingI (n + n)) => VAD (Fixed Int16 n s r) where vad s = let winSize = 256 overlap = 20 n = 2 framed = frameWithWinAndOverlap winSize overlap hann s energy = mapS (U.map bandEnergyF . fft . 
U.map (:+ 0)) (getSignal framed) noiseEnergy0 = U.generate winSize (const (fromDouble 0.00001)) lt = ltseF n energy in --mapS (lstd winSize noiseEnergy0) lt Sampled (period framed) (onSamples (getDecisionF winSize noiseEnergy0) (zipS lt (dropS n energy))) instance VAD Double where vad s = let winSize = 256 overlap = 20 n = 2 framed = frameWithWinAndOverlap winSize overlap hann s energy = mapS (U.map bandEnergy . fft . U.map (:+ 0)) (getSignal framed) noiseEnergy0 = U.generate winSize (const (fromDouble 0.00001)) lt = ltseF n energy in --mapS (lstd winSize noiseEnergy0) lt Sampled (period framed) (onSamples (getDecisionD winSize noiseEnergy0) (zipS lt (dropS n energy)))
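For reference, the decision rule implemented by lstdD/lstdF and the getDecision* loops can be stated compactly (a transcription of the code above, not an addition to it). With LTSE(k) the long-term spectral envelope over the n neighbouring frames computed by ltseF, and E_N(k) the current noise-energy estimate in bin k:

\[ \mathrm{LTSD} = 10 \log_{10}\!\left( \frac{1}{N_{\mathrm{win}}} \sum_{k=0}^{N_{\mathrm{win}}-1} \frac{\mathrm{LTSE}(k)}{E_N(k)} \right), \qquad N_{\mathrm{win}} = 256, \]

where a large constant stands in for bins with E_N(k) = 0. A frame is marked speech (1) when LTSD >= tv = 31; marked noise (0) and the noise estimate replaced by the current frame's energy when LTSD <= tn = 27; and marked 0 without updating the estimate otherwise.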
{-# OPTIONS --without-K #-} open import Base open import Algebra.Groups open import Integers open import Homotopy.Truncation open import Homotopy.Pointed open import Homotopy.PathTruncation open import Homotopy.Connected -- Definitions and properties of homotopy groups module Homotopy.HomotopyGroups {i} where -- Loop space Ω : (X : pType i) → pType i Ω X = ⋆[ ⋆ X ≡ ⋆ X , refl ] Ω-pregroup : (X : pType i) → pregroup i Ω-pregroup X = record { carrier = (⋆ X) ≡ (⋆ X) ; _∙_ = _∘_ ; e = refl ; _′ = ! ; assoc = concat-assoc ; right-unit = refl-right-unit ; left-unit = λ _ → refl ; right-inverse = opposite-right-inverse ; left-inverse = opposite-left-inverse } Ωⁿ-pregroup : (n : ℕ) ⦃ ≢0 : n ≢ O ⦄ (X : pType i) → pregroup i Ωⁿ-pregroup O ⦃ ≢0 ⦄ X = abort-nondep (≢0 refl) Ωⁿ-pregroup 1 X = Ω-pregroup X Ωⁿ-pregroup (S (S n)) X = Ωⁿ-pregroup (S n) (Ω X) -- Homotopy groups πⁿ-group : (n : ℕ) ⦃ >0 : n ≢ O ⦄ (X : pType i) → group i πⁿ-group n X = π₀-pregroup (Ωⁿ-pregroup n X) fundamental-group : (X : pType i) → group i fundamental-group X = πⁿ-group 1 ⦃ ℕ-S≢O 0 ⦄ X -- Homotopy groups of loop space πⁿ-group-from-πⁿΩ : (n : ℕ) ⦃ ≢0 : n ≢ 0 ⦄ (X : pType i) → πⁿ-group (S n) X ≡ πⁿ-group n (Ω X) πⁿ-group-from-πⁿΩ O ⦃ ≢0 ⦄ X = abort-nondep (≢0 refl) πⁿ-group-from-πⁿΩ 1 X = refl πⁿ-group-from-πⁿΩ (S (S n)) X = refl -- Homotopy groups of spaces of a given h-level abstract truncated-πⁿ-group : (n : ℕ) ⦃ ≢0 : n ≢ 0 ⦄ (X : pType i) (p : is-truncated (n -1) ∣ X ∣) → πⁿ-group n X ≡ unit-group truncated-πⁿ-group O ⦃ ≢0 ⦄ X p = abort-nondep (≢0 refl) truncated-πⁿ-group 1 X p = unit-group-unique _ (proj refl , π₀-extend ⦃ p = λ x → truncated-is-truncated-S _ (π₀-is-set _ _ _) ⦄ (λ x → ap proj (π₁ (p _ _ _ _)))) truncated-πⁿ-group (S (S n)) X p = truncated-πⁿ-group (S n) (Ω X) (λ x y → p _ _ x y) Ωⁿ : (n : ℕ) (X : pType i) → pType i Ωⁿ 0 X = X Ωⁿ (S n) X = Ω (Ωⁿ n X) πⁿ : (n : ℕ) (X : pType i) → pType i πⁿ n X = τ⋆ ⟨0⟩ (Ωⁿ n X) other-πⁿ : (n : ℕ) (X : pType i) → pType i other-πⁿ n X = Ωⁿ n (τ⋆ ⟨ n ⟩ X) ap-Ω-equiv : (X Y : pType i) (e : X ≃⋆ Y) → Ω X ≃⋆ Ω Y ap-Ω-equiv X Y e = transport (λ u → Ω X ≃⋆ Ω u) (pType-eq e) (id-equiv⋆ _) τ⋆Ω-is-Ωτ⋆S : (n : ℕ₋₂) (X : pType i) → τ⋆ n (Ω X) ≃⋆ Ω (τ⋆ (S n) X) τ⋆Ω-is-Ωτ⋆S n X = (τ-path-equiv-path-τ-S , refl) τ⋆kΩⁿ-is-Ωⁿτ⋆n+k : (k n : ℕ) (X : pType i) → τ⋆ ⟨ k ⟩ (Ωⁿ n X) ≃⋆ Ωⁿ n (τ⋆ ⟨ n + k ⟩ X) τ⋆kΩⁿ-is-Ωⁿτ⋆n+k k O X = id-equiv⋆ _ τ⋆kΩⁿ-is-Ωⁿτ⋆n+k k (S n) X = equiv-compose⋆ (τ⋆Ω-is-Ωτ⋆S _ _) (ap-Ω-equiv _ _ (equiv-compose⋆ (τ⋆kΩⁿ-is-Ωⁿτ⋆n+k (S k) n _) (transport (λ u → Ωⁿ n (τ⋆ ⟨ n + S k ⟩ X) ≃⋆ Ωⁿ n (τ⋆ ⟨ u ⟩ X)) (+S-is-S+ n k) (id-equiv⋆ _)))) πⁿ-is-other-πⁿ : (n : ℕ) (X : pType i) → πⁿ n X ≃⋆ other-πⁿ n X πⁿ-is-other-πⁿ n X = transport (λ u → πⁿ n X ≃⋆ Ωⁿ n (τ⋆ ⟨ u ⟩ X)) (+0-is-id n) (τ⋆kΩⁿ-is-Ωⁿτ⋆n+k 0 n X) contr-is-contr-Ω : (X : pType i) → (is-contr⋆ X → is-contr⋆ (Ω X)) contr-is-contr-Ω X p = ≡-is-truncated ⟨-2⟩ p contr-is-contr-Ωⁿ : (n : ℕ) (X : pType i) → (is-contr⋆ X) → is-contr⋆ (Ωⁿ n X) contr-is-contr-Ωⁿ O X p = p contr-is-contr-Ωⁿ (S n) X p = contr-is-contr-Ω _ (contr-is-contr-Ωⁿ n X p) connected-other-πⁿ : (k n : ℕ) (lt : k < S n) (X : pType i) → (is-connected⋆ ⟨ n ⟩ X → is-contr⋆ (other-πⁿ k X)) connected-other-πⁿ k n lt X p = contr-is-contr-Ωⁿ k _ (connected⋆-lt k n lt X p) connected-πⁿ : (k n : ℕ) (lt : k < S n) (X : pType i) → (is-connected⋆ ⟨ n ⟩ X → is-contr⋆ (πⁿ k X)) connected-πⁿ k n lt X p = equiv-types-truncated _ (π₁ (πⁿ-is-other-πⁿ k X) ⁻¹) (connected-other-πⁿ k n lt X p)
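In conventional notation, the definitions above read as follows (a restatement of πⁿ, πⁿ-group-from-πⁿΩ and πⁿ-is-other-πⁿ, writing \(\|\cdot\|_n\) for n-truncation):

\[ \pi_n(X) := \|\Omega^n X\|_0, \qquad \pi_{n+1}(X) = \pi_n(\Omega X), \qquad \pi_n(X) \simeq \Omega^n \|X\|_n, \]

and the lemmas in the abstract block say that \(\pi_n(X)\) is trivial when X is (n-1)-truncated (truncated-πⁿ-group), while connected-πⁿ says \(\pi_k(X)\) is trivial for k \(\le\) n when X is n-connected.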
module India.Union import public Data.List.AtIndex import public Data.OpenUnion import Text.Lexer export (t : _) => Show (elt t) => Show (Union elt [t]) where show = show . decomp0 export (t : _) => Show (elt t) => Show (Union elt ts) => Show (Union elt (t::ts)) where show x = case decomp x of Left y => show y Right y => show y export (t : _) => Eq (elt t) => Eq (Union elt [t]) where (==) = (==) `on` decomp0 (/=) = (/=) `on` decomp0 export (t : _) => Eq (elt t) => Eq (Union elt ts) => Eq (Union elt (t::ts)) where (==) = (==) `on` decomp (/=) = (/=) `on` decomp export (t : a) => TokenKind (elt t) => TokenKind (Union elt [t]) where TokType x = TokType $ decomp0 x tokValue x = tokValue $ decomp0 x export (t : a) => TokenKind (elt t) => TokenKind (Union elt ts) => TokenKind (Union elt (t::ts)) where TokType x with (decomp x) TokType x | (Left y) = TokType y TokType x | (Right y) = TokType y tokValue x str with (decomp x) tokValue x str | (Left y) = tokValue y str tokValue x str | (Right y) = tokValue y str
(* * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) section "Lemmas with Generic Word Length" theory Word_Lemmas imports Type_Syntax Signed_Division_Word Signed_Words More_Word Most_significant_bit Enumeration_Word Aligned Bit_Shifts_Infix_Syntax begin lemma ucast_le_ucast_eq: fixes x y :: "'a::len word" assumes x: "x < 2 ^ n" assumes y: "y < 2 ^ n" assumes n: "n = LENGTH('b::len)" shows "(UCAST('a \<rightarrow> 'b) x \<le> UCAST('a \<rightarrow> 'b) y) = (x \<le> y)" apply (rule iffI) apply (cases "LENGTH('b) < LENGTH('a)") apply (subst less_mask_eq[OF x, symmetric]) apply (subst less_mask_eq[OF y, symmetric]) apply (unfold n) apply (subst ucast_ucast_mask[symmetric])+ apply (simp add: ucast_le_ucast)+ apply (erule ucast_mono_le[OF _ y[unfolded n]]) done lemma ucast_zero_is_aligned: \<open>is_aligned w n\<close> if \<open>UCAST('a::len \<rightarrow> 'b::len) w = 0\<close> \<open>n \<le> LENGTH('b)\<close> proof (rule is_aligned_bitI) fix q assume \<open>q < n\<close> moreover have \<open>bit (UCAST('a::len \<rightarrow> 'b::len) w) q = bit 0 q\<close> using that by simp with \<open>q < n\<close> \<open>n \<le> LENGTH('b)\<close> show \<open>\<not> bit w q\<close> by (simp add: bit_simps) qed lemma unat_ucast_eq_unat_and_mask: "unat (UCAST('b::len \<rightarrow> 'a::len) w) = unat (w AND mask LENGTH('a))" apply (simp flip: take_bit_eq_mask) apply transfer apply (simp add: ac_simps) done lemma le_max_word_ucast_id: \<open>UCAST('b \<rightarrow> 'a) (UCAST('a \<rightarrow> 'b) x) = x\<close> if \<open>x \<le> UCAST('b::len \<rightarrow> 'a) (- 1)\<close> for x :: \<open>'a::len word\<close> proof - from that have a1: \<open>x \<le> word_of_int (uint (word_of_int (2 ^ LENGTH('b) - 1) :: 'b word))\<close> by simp have f2: "((\<exists>i ia. (0::int) \<le> i \<and> \<not> 0 \<le> i + - 1 * ia \<and> i mod ia \<noteq> i) \<or> \<not> (0::int) \<le> - 1 + 2 ^ LENGTH('b) \<or> (0::int) \<le> - 1 + 2 ^ LENGTH('b) + - 1 * 2 ^ LENGTH('b) \<or> (- (1::int) + 2 ^ LENGTH('b)) mod 2 ^ LENGTH('b) = - 1 + 2 ^ LENGTH('b)) = ((\<exists>i ia. (0::int) \<le> i \<and> \<not> 0 \<le> i + - 1 * ia \<and> i mod ia \<noteq> i) \<or> \<not> (1::int) \<le> 2 ^ LENGTH('b) \<or> 2 ^ LENGTH('b) + - (1::int) * ((- 1 + 2 ^ LENGTH('b)) mod 2 ^ LENGTH('b)) = 1)" by force have f3: "\<forall>i ia. \<not> (0::int) \<le> i \<or> 0 \<le> i + - 1 * ia \<or> i mod ia = i" using mod_pos_pos_trivial by force have "(1::int) \<le> 2 ^ LENGTH('b)" by simp then have "2 ^ LENGTH('b) + - (1::int) * ((- 1 + 2 ^ LENGTH('b)) mod 2 ^ len_of TYPE ('b)) = 1" using f3 f2 by blast then have f4: "- (1::int) + 2 ^ LENGTH('b) = (- 1 + 2 ^ LENGTH('b)) mod 2 ^ LENGTH('b)" by linarith have f5: "x \<le> word_of_int (uint (word_of_int (- 1 + 2 ^ LENGTH('b))::'b word))" using a1 by force have f6: "2 ^ LENGTH('b) + - (1::int) = - 1 + 2 ^ LENGTH('b)" by force have f7: "- (1::int) * 1 = - 1" by auto have "\<forall>x0 x1. 
(x1::int) - x0 = x1 + - 1 * x0" by force then have "x \<le> 2 ^ LENGTH('b) - 1" using f7 f6 f5 f4 by (metis uint_word_of_int wi_homs(2) word_arith_wis(8) word_of_int_2p) then have \<open>uint x \<le> uint (2 ^ LENGTH('b) - (1 :: 'a word))\<close> by (simp add: word_le_def) then have \<open>uint x \<le> 2 ^ LENGTH('b) - 1\<close> by (simp add: uint_word_ariths) (metis \<open>1 \<le> 2 ^ LENGTH('b)\<close> \<open>uint x \<le> uint (2 ^ LENGTH('b) - 1)\<close> linorder_not_less lt2p_lem uint_1 uint_minus_simple_alt uint_power_lower word_le_def zle_diff1_eq) then show ?thesis apply (simp add: unsigned_ucast_eq take_bit_word_eq_self_iff) apply (meson \<open>x \<le> 2 ^ LENGTH('b) - 1\<close> not_le word_less_sub_le) done qed lemma uint_shiftr_eq: \<open>uint (w >> n) = uint w div 2 ^ n\<close> by transfer (simp flip: drop_bit_eq_div add: drop_bit_take_bit min_def le_less less_diff_conv) lemma bit_shiftl_word_iff [bit_simps]: \<open>bit (w << m) n \<longleftrightarrow> m \<le> n \<and> n < LENGTH('a) \<and> bit w (n - m)\<close> for w :: \<open>'a::len word\<close> by (simp add: bit_push_bit_iff not_le) lemma shiftl_def: \<open>w << n = ((*) 2 ^^ n) w\<close> for w :: \<open>'a::len word\<close> proof - have \<open>push_bit n = (((*) 2 ^^ n) :: int \<Rightarrow> int)\<close> for n by (induction n) (simp_all add: fun_eq_iff funpow_swap1, simp add: ac_simps) then show ?thesis by transfer simp qed lemma shiftr_def: \<open>w >> n = ((\<lambda>w. w div 2) ^^ n) w\<close> for w :: \<open>'a::len word\<close> proof - have \<open>(\<lambda>w. w div 2) ^^ n = (drop_bit n :: 'a word \<Rightarrow> 'a word)\<close> by (induction n) (simp_all add: drop_bit_half drop_bit_Suc) then show ?thesis by simp qed lemma bit_shiftr_word_iff: \<open>bit (w >> m) n \<longleftrightarrow> bit w (m + n)\<close> for w :: \<open>'a::len word\<close> by (simp add: bit_simps) lemma sshiftr_eq_funpow_sshiftr1: \<open>w >>> n = (signed_drop_bit (Suc 0) ^^ n) w\<close> apply (rule sym) apply (induction n) apply simp_all done lemma uint_sshiftr_eq: \<open>uint (w >>> n) = take_bit LENGTH('a) (sint w div 2 ^ n)\<close> for w :: \<open>'a::len word\<close> by transfer (simp flip: drop_bit_eq_div) lemma sshiftr_0: "0 >>> n = 0" by (fact signed_drop_bit_of_0) lemma sshiftr_n1: "-1 >>> n = -1" by (fact signed_drop_bit_of_minus_1) lemma bit_sshiftr_word_iff: \<open>bit (w >>> m) n \<longleftrightarrow> bit w (if LENGTH('a) - m \<le> n \<and> n < LENGTH('a) then LENGTH('a) - 1 else (m + n))\<close> for w :: \<open>'a::len word\<close> by (fact bit_signed_drop_bit_iff) lemma nth_sshiftr : "bit (w >>> m) n = (n < size w \<and> (if n + m \<ge> size w then bit w (size w - 1) else bit w (n + m)))" apply (auto simp add: bit_simps word_size ac_simps not_less) apply (meson bit_imp_le_length bit_shiftr_word_iff leD) done lemma sshiftr_numeral: \<open>(numeral k >>> numeral n :: 'a::len word) = word_of_int (drop_bit (numeral n) (signed_take_bit (LENGTH('a) - 1) (numeral k)))\<close> by (fact signed_drop_bit_word_numeral) lemma sshiftr_div_2n: "sint (w >>> n) = sint w div 2 ^ n" using sint_signed_drop_bit_eq [of n w] by (simp add: drop_bit_eq_div) lemma mask_eq: \<open>mask n = (1 << n) - (1 :: 'a::len word)\<close> by (simp add: mask_eq_exp_minus_1 push_bit_of_1) lemma shiftl_0: "(0::'a::len word) << n = 0" by (fact push_bit_of_0) lemma shiftr_0: "(0::'a::len word) >> n = 0" by (fact drop_bit_of_0) lemma nth_shiftl': "bit (w << m) n \<longleftrightarrow> n < size w \<and> n >= m \<and> bit w (n - m)" for w :: "'a::len word" by transfer (auto simp 
add: bit_push_bit_iff) lemmas nth_shiftl = nth_shiftl' [unfolded word_size] lemma nth_shiftr: "bit (w >> m) n = bit w (n + m)" for w :: "'a::len word" by (simp add: bit_simps ac_simps) lemma shiftr_div_2n: "uint (shiftr w n) = uint w div 2 ^ n" by (fact uint_shiftr_eq) lemma shiftl_rev: "shiftl w n = word_reverse (shiftr (word_reverse w) n)" by (rule bit_word_eqI) (auto simp add: bit_simps) lemma rev_shiftl: "word_reverse w << n = word_reverse (w >> n)" by (simp add: shiftl_rev) lemma shiftr_rev: "w >> n = word_reverse (word_reverse w << n)" by (simp add: rev_shiftl) lemma rev_shiftr: "word_reverse w >> n = word_reverse (w << n)" by (simp add: shiftr_rev) lemmas ucast_up = rc1 [simplified rev_shiftr [symmetric] revcast_ucast [symmetric]] lemmas ucast_down = rc2 [simplified rev_shiftr revcast_ucast [symmetric]] lemma shiftl_zero_size: "size x \<le> n \<Longrightarrow> x << n = 0" for x :: "'a::len word" apply transfer apply (simp add: take_bit_push_bit) done lemma shiftl_t2n: "shiftl w n = 2 ^ n * w" for w :: "'a::len word" by (simp add: push_bit_eq_mult) lemma word_shift_by_2: "x * 4 = (x::'a::len word) << 2" by (simp add: shiftl_t2n) lemma slice_shiftr: "slice n w = ucast (w >> n)" apply (rule bit_word_eqI) apply (cases \<open>n \<le> LENGTH('b)\<close>) apply (auto simp add: bit_slice_iff bit_ucast_iff bit_shiftr_word_iff ac_simps dest: bit_imp_le_length) done lemma shiftr_zero_size: "size x \<le> n \<Longrightarrow> x >> n = 0" for x :: "'a :: len word" by (rule word_eqI) (auto simp add: nth_shiftr dest: test_bit_size) lemma shiftr_x_0: "x >> 0 = x" for x :: "'a::len word" by simp lemma shiftl_x_0: "x << 0 = x" for x :: "'a::len word" by simp lemma shiftl_1: "(1::'a::len word) << n = 2^n" by (fact push_bit_of_1) lemma shiftr_1: "(1::'a::len word) >> n = (if n = 0 then 1 else 0)" by simp lemma shiftl0: "x << 0 = (x :: 'a :: len word)" by (fact shiftl_x_0) lemma and_not_mask: "w AND NOT (mask n) = (w >> n) << n" for w :: \<open>'a::len word\<close> by (rule bit_word_eqI) (auto simp add: bit_simps) lemma and_mask: "w AND mask n = (w << (size w - n)) >> (size w - n)" for w :: \<open>'a::len word\<close> by (rule bit_word_eqI) (auto simp add: bit_simps word_size) lemma shiftr_div_2n_w: "n < size w \<Longrightarrow> w >> n = w div (2^n :: 'a :: len word)" apply (unfold word_div_def) apply (simp add: uint_2p_alt word_size) apply (metis uint_shiftr_eq word_of_int_uint) done lemma le_shiftr: "u \<le> v \<Longrightarrow> u >> (n :: nat) \<le> (v :: 'a :: len word) >> n" apply transfer apply (simp add: take_bit_drop_bit) apply (simp add: drop_bit_eq_div zdiv_mono1) done lemma le_shiftr': "\<lbrakk> u >> n \<le> v >> n ; u >> n \<noteq> v >> n \<rbrakk> \<Longrightarrow> (u::'a::len word) \<le> v" apply (metis le_cases le_shiftr verit_la_disequality) done lemma shiftr_mask_le: "n <= m \<Longrightarrow> mask n >> m = (0 :: 'a::len word)" by (rule bit_word_eqI) (auto simp add: bit_simps) lemma shiftr_mask [simp]: \<open>mask m >> m = (0::'a::len word)\<close> by (rule shiftr_mask_le) simp lemma le_mask_iff: "(w \<le> mask n) = (w >> n = 0)" for w :: \<open>'a::len word\<close> apply safe apply (rule word_le_0_iff [THEN iffD1]) apply (rule xtrans(3)) apply (erule_tac [2] le_shiftr) apply simp apply (rule word_leI) apply (rename_tac n') apply (drule_tac x = "n' - n" in word_eqD) apply (simp add : nth_shiftr word_size bit_simps) apply (case_tac "n <= n'") by auto lemma and_mask_eq_iff_shiftr_0: "(w AND mask n = w) = (w >> n = 0)" for w :: \<open>'a::len word\<close> apply (unfold test_bit_eq_iff [THEN 
sym]) apply (rule iffI) apply (rule ext) apply (rule_tac [2] ext) apply (auto simp add : word_ao_nth nth_shiftr) apply (drule arg_cong) apply (drule iffD2) apply assumption apply (simp add : word_ao_nth) prefer 2 apply (simp add : word_size test_bit_bin) apply transfer apply (auto simp add: fun_eq_iff bit_simps) apply (metis add_diff_inverse_nat) done lemma mask_shiftl_decompose: "mask m << n = mask (m + n) AND NOT (mask n :: 'a::len word)" by (rule bit_word_eqI) (auto simp add: bit_simps) lemma shiftl_over_and_dist: fixes a::"'a::len word" shows "(a AND b) << c = (a << c) AND (b << c)" by (fact push_bit_and) lemma shiftr_over_and_dist: fixes a::"'a::len word" shows "a AND b >> c = (a >> c) AND (b >> c)" by (fact drop_bit_and) lemma sshiftr_over_and_dist: fixes a::"'a::len word" shows "a AND b >>> c = (a >>> c) AND (b >>> c)" apply(rule word_eqI) apply(simp add:nth_sshiftr word_ao_nth word_size) done lemma shiftl_over_or_dist: fixes a::"'a::len word" shows "a OR b << c = (a << c) OR (b << c)" by (fact push_bit_or) lemma shiftr_over_or_dist: fixes a::"'a::len word" shows "a OR b >> c = (a >> c) OR (b >> c)" by (fact drop_bit_or) lemma sshiftr_over_or_dist: fixes a::"'a::len word" shows "a OR b >>> c = (a >>> c) OR (b >>> c)" apply(rule word_eqI) apply(simp add:nth_sshiftr word_ao_nth word_size) done lemmas shift_over_ao_dists = shiftl_over_or_dist shiftr_over_or_dist sshiftr_over_or_dist shiftl_over_and_dist shiftr_over_and_dist sshiftr_over_and_dist lemma shiftl_shiftl: fixes a::"'a::len word" shows "a << b << c = a << (b + c)" apply(rule word_eqI) apply(auto simp:word_size nth_shiftl add.commute add.left_commute) done lemma shiftr_shiftr: fixes a::"'a::len word" shows "a >> b >> c = a >> (b + c)" apply(rule word_eqI) apply(simp add:word_size nth_shiftr add.left_commute add.commute) done lemma shiftl_shiftr1: fixes a::"'a::len word" shows "c \<le> b \<Longrightarrow> a << b >> c = a AND (mask (size a - b)) << (b - c)" apply (rule word_eqI) apply (auto simp add: bit_simps not_le word_size ac_simps) done lemma shiftl_shiftr2: fixes a::"'a::len word" shows "b < c \<Longrightarrow> a << b >> c = (a >> (c - b)) AND (mask (size a - c))" apply(rule word_eqI) apply(auto simp:nth_shiftr nth_shiftl word_size word_ao_nth bit_simps) done lemma shiftr_shiftl1: fixes a::"'a::len word" shows "c \<le> b \<Longrightarrow> a >> b << c = (a >> (b - c)) AND (NOT (mask c))" by (rule bit_word_eqI) (auto simp add: bit_simps) lemma shiftr_shiftl2: fixes a::"'a::len word" shows "b < c \<Longrightarrow> a >> b << c = (a << (c - b)) AND (NOT (mask c))" apply (rule word_eqI) apply (auto simp add: bit_simps not_le word_size ac_simps) done lemmas multi_shift_simps = shiftl_shiftl shiftr_shiftr shiftl_shiftr1 shiftl_shiftr2 shiftr_shiftl1 shiftr_shiftl2 lemma shiftr_mask2: "n \<le> LENGTH('a) \<Longrightarrow> (mask n >> m :: ('a :: len) word) = mask (n - m)" by (rule bit_word_eqI) (auto simp add: bit_simps) lemma word_shiftl_add_distrib: fixes x :: "'a :: len word" shows "(x + y) << n = (x << n) + (y << n)" by (simp add: shiftl_t2n ring_distribs) lemma mask_shift: "(x AND NOT (mask y)) >> y = x >> y" for x :: \<open>'a::len word\<close> apply (rule bit_eqI) apply (simp add: bit_and_iff bit_not_iff bit_shiftr_word_iff bit_mask_iff not_le) using bit_imp_le_length apply auto done lemma shiftr_div_2n': "unat (w >> n) = unat w div 2 ^ n" apply (unfold unat_eq_nat_uint) apply (subst shiftr_div_2n) apply (subst nat_div_distrib) apply simp apply (simp add: nat_power_eq) done lemma shiftl_shiftr_id: assumes nv: "n < LENGTH('a)" 
and xv: "x < 2 ^ (LENGTH('a) - n)" shows "x << n >> n = (x::'a::len word)" apply (simp add: shiftl_t2n) apply (rule word_eq_unatI) apply (subst shiftr_div_2n') apply (cases n) apply simp apply (subst iffD1 [OF unat_mult_lem])+ apply (subst unat_power_lower[OF nv]) apply (rule nat_less_power_trans [OF _ order_less_imp_le [OF nv]]) apply (rule order_less_le_trans [OF unat_mono [OF xv] order_eq_refl]) apply (rule unat_power_lower) apply simp apply (subst unat_power_lower[OF nv]) apply simp done lemma ucast_shiftl_eq_0: fixes w :: "'a :: len word" shows "\<lbrakk> n \<ge> LENGTH('b) \<rbrakk> \<Longrightarrow> ucast (w << n) = (0 :: 'b :: len word)" by transfer (simp add: take_bit_push_bit) lemma word_shift_nonzero: "\<lbrakk> (x::'a::len word) \<le> 2 ^ m; m + n < LENGTH('a::len); x \<noteq> 0\<rbrakk> \<Longrightarrow> x << n \<noteq> 0" apply (simp only: word_neq_0_conv word_less_nat_alt shiftl_t2n mod_0 unat_word_ariths unat_power_lower word_le_nat_alt) apply (subst mod_less) apply (rule order_le_less_trans) apply (erule mult_le_mono2) apply (subst power_add[symmetric]) apply (rule power_strict_increasing) apply simp apply simp apply simp done lemma word_shiftr_lt: fixes w :: "'a::len word" shows "unat (w >> n) < (2 ^ (LENGTH('a) - n))" apply (subst shiftr_div_2n') apply transfer apply (simp flip: drop_bit_eq_div add: drop_bit_nat_eq drop_bit_take_bit) done lemma shiftr_less_t2n': "\<lbrakk> x AND mask (n + m) = x; m < LENGTH('a) \<rbrakk> \<Longrightarrow> x >> n < 2 ^ m" for x :: "'a :: len word" apply (simp add: word_size mask_eq_iff_w2p [symmetric] flip: take_bit_eq_mask) apply transfer apply (simp add: take_bit_drop_bit ac_simps) done lemma shiftr_less_t2n: "x < 2 ^ (n + m) \<Longrightarrow> x >> n < 2 ^ m" for x :: "'a :: len word" apply (rule shiftr_less_t2n') apply (erule less_mask_eq) apply (rule ccontr) apply (simp add: not_less) apply (subst (asm) p2_eq_0[symmetric]) apply (simp add: power_add) done lemma shiftr_eq_0: "n \<ge> LENGTH('a) \<Longrightarrow> ((w::'a::len word) >> n) = 0" apply (cut_tac shiftr_less_t2n'[of w n 0], simp) apply (simp add: mask_eq_iff) apply (simp add: lt2p_lem) apply simp done lemma shiftl_less_t2n: fixes x :: "'a :: len word" shows "\<lbrakk> x < (2 ^ (m - n)); m < LENGTH('a) \<rbrakk> \<Longrightarrow> (x << n) < 2 ^ m" apply (simp add: word_size mask_eq_iff_w2p [symmetric] flip: take_bit_eq_mask) apply transfer apply (simp add: take_bit_push_bit) done lemma shiftl_less_t2n': "(x::'a::len word) < 2 ^ m \<Longrightarrow> m+n < LENGTH('a) \<Longrightarrow> x << n < 2 ^ (m + n)" by (rule shiftl_less_t2n) simp_all lemma scast_bit_test [simp]: "scast ((1 :: 'a::len signed word) << n) = (1 :: 'a word) << n" by (rule bit_word_eqI) (simp add: bit_simps) lemma signed_shift_guard_to_word: \<open>unat x * 2 ^ y < 2 ^ n \<longleftrightarrow> x = 0 \<or> x < 1 << n >> y\<close> if \<open>n < LENGTH('a)\<close> \<open>0 < n\<close> for x :: \<open>'a::len word\<close> proof (cases \<open>x = 0\<close>) case True then show ?thesis by simp next case False then have \<open>unat x \<noteq> 0\<close> by (simp add: unat_eq_0) then have \<open>unat x \<ge> 1\<close> by simp show ?thesis proof (cases \<open>y < n\<close>) case False then have \<open>n \<le> y\<close> by simp then obtain q where \<open>y = n + q\<close> using le_Suc_ex by blast moreover have \<open>(2 :: nat) ^ n >> n + q \<le> 1\<close> by (simp add: drop_bit_eq_div power_add) ultimately show ?thesis using \<open>x \<noteq> 0\<close> \<open>unat x \<ge> 1\<close> \<open>n < LENGTH('a)\<close> by (simp 
add: power_add not_less word_le_nat_alt unat_drop_bit_eq push_bit_of_1) next case True with that have \<open>y < LENGTH('a)\<close> by simp show ?thesis proof (cases \<open>2 ^ n = unat x * 2 ^ y\<close>) case True moreover have \<open>unat x * 2 ^ y < 2 ^ LENGTH('a)\<close> using \<open>n < LENGTH('a)\<close> by (simp flip: True) moreover have \<open>(word_of_nat (2 ^ n) :: 'a word) = word_of_nat (unat x * 2 ^ y)\<close> using True by simp then have \<open>2 ^ n = x * 2 ^ y\<close> by simp ultimately show ?thesis using \<open>y < LENGTH('a)\<close> by (auto simp add: push_bit_of_1 drop_bit_eq_div word_less_nat_alt unat_div unat_word_ariths) next case False with \<open>y < n\<close> have *: \<open>unat x \<noteq> 2 ^ n div 2 ^ y\<close> by (auto simp flip: power_sub power_add) have \<open>unat x * 2 ^ y < 2 ^ n \<longleftrightarrow> unat x * 2 ^ y \<le> 2 ^ n\<close> using False by (simp add: less_le) also have \<open>\<dots> \<longleftrightarrow> unat x \<le> 2 ^ n div 2 ^ y\<close> by (simp add: less_eq_div_iff_mult_less_eq) also have \<open>\<dots> \<longleftrightarrow> unat x < 2 ^ n div 2 ^ y\<close> using * by (simp add: less_le) finally show ?thesis using that \<open>x \<noteq> 0\<close> by (simp flip: push_bit_eq_mult drop_bit_eq_div add: push_bit_of_1 unat_drop_bit_eq word_less_iff_unsigned [where ?'a = nat]) qed qed qed lemma shiftr_not_mask_0: "n+m \<ge> LENGTH('a :: len) \<Longrightarrow> ((w::'a::len word) >> n) AND NOT (mask m) = 0" by (rule bit_word_eqI) (auto simp add: bit_simps word_size dest: bit_imp_le_length) lemma shiftl_mask_is_0[simp]: "(x << n) AND mask n = 0" for x :: \<open>'a::len word\<close> by (simp flip: take_bit_eq_mask add: take_bit_push_bit) lemma rshift_sub_mask_eq: "(a >> (size a - b)) AND mask b = a >> (size a - b)" for a :: \<open>'a::len word\<close> using shiftl_shiftr2[where a=a and b=0 and c="size a - b"] apply (cases "b < size a") apply simp apply (simp add: linorder_not_less mask_eq_decr_exp word_size p2_eq_0[THEN iffD2]) done lemma shiftl_shiftr3: "b \<le> c \<Longrightarrow> a << b >> c = (a >> c - b) AND mask (size a - c)" for a :: \<open>'a::len word\<close> apply (cases "b = c") apply (simp add: shiftl_shiftr1) apply (simp add: shiftl_shiftr2) done lemma and_mask_shiftr_comm: "m \<le> size w \<Longrightarrow> (w AND mask m) >> n = (w >> n) AND mask (m-n)" for w :: \<open>'a::len word\<close> by (simp add: and_mask shiftr_shiftr) (simp add: word_size shiftl_shiftr3) lemma le_mask_shiftl_le_mask: "s = m + n \<Longrightarrow> x \<le> mask n \<Longrightarrow> x << m \<le> mask s" for x :: \<open>'a::len word\<close> by (simp add: le_mask_iff shiftl_shiftr3) lemma word_and_1_shiftl: "x AND (1 << n) = (if bit x n then (1 << n) else 0)" for x :: "'a :: len word" apply (rule bit_word_eqI; transfer) apply (auto simp add: bit_simps not_le ac_simps) done lemmas word_and_1_shiftls' = word_and_1_shiftl[where n=0] word_and_1_shiftl[where n=1] word_and_1_shiftl[where n=2] lemmas word_and_1_shiftls = word_and_1_shiftls' [simplified] lemma word_and_mask_shiftl: "x AND (mask n << m) = ((x >> m) AND mask n) << m" for x :: \<open>'a::len word\<close> apply (rule bit_word_eqI; transfer) apply (auto simp add: bit_simps not_le ac_simps) done lemma shift_times_fold: "(x :: 'a :: len word) * (2 ^ n) << m = x << (m + n)" by (simp add: shiftl_t2n ac_simps power_add) lemma of_bool_nth: "of_bool (bit x v) = (x >> v) AND 1" for x :: \<open>'a::len word\<close> by (simp add: bit_iff_odd_drop_bit word_and_1) lemma shiftr_mask_eq: "(x >> n) AND mask (size x - n) = x >> n" 
for x :: "'a :: len word" apply (simp flip: take_bit_eq_mask) apply transfer apply (simp add: take_bit_drop_bit) done lemma shiftr_mask_eq': "m = (size x - n) \<Longrightarrow> (x >> n) AND mask m = x >> n" for x :: "'a :: len word" by (simp add: shiftr_mask_eq) lemma and_eq_0_is_nth: fixes x :: "'a :: len word" shows "y = 1 << n \<Longrightarrow> ((x AND y) = 0) = (\<not> (bit x n))" by (simp add: and_exp_eq_0_iff_not_bit push_bit_of_1) lemma word_shift_zero: "\<lbrakk> x << n = 0; x \<le> 2^m; m + n < LENGTH('a)\<rbrakk> \<Longrightarrow> (x::'a::len word) = 0" apply (rule ccontr) apply (drule (2) word_shift_nonzero) apply simp done lemma mask_shift_and_negate[simp]:"(w AND mask n << m) AND NOT (mask n << m) = 0" for w :: \<open>'a::len word\<close> by (clarsimp simp add: mask_eq_decr_exp Parity.bit_eq_iff bit_and_iff bit_not_iff bit_push_bit_iff) (* The seL4 bitfield generator produces functions containing mask and shift operations, such that * invoking two of them consecutively can produce something like the following. *) lemma bitfield_op_twice: "(x AND NOT (mask n << m) OR ((y AND mask n) << m)) AND NOT (mask n << m) = x AND NOT (mask n << m)" for x :: \<open>'a::len word\<close> by (induct n arbitrary: m) (auto simp: word_ao_dist) lemma bitfield_op_twice'': "\<lbrakk>NOT a = b << c; \<exists>x. b = mask x\<rbrakk> \<Longrightarrow> (x AND a OR (y AND b << c)) AND a = x AND a" for a b :: \<open>'a::len word\<close> apply clarsimp apply (cut_tac n=xa and m=c and x=x and y=y in bitfield_op_twice) apply (clarsimp simp:mask_eq_decr_exp) apply (drule not_switch) apply clarsimp done lemma shiftr1_unfold: "x div 2 = x >> 1" by (simp add: drop_bit_eq_div) lemma shiftr1_is_div_2: "(x::('a::len) word) >> 1 = x div 2" by (simp add: drop_bit_eq_div) lemma shiftl1_is_mult: "(x << 1) = (x :: 'a::len word) * 2" by (metis One_nat_def mult_2 mult_2_right one_add_one power_0 power_Suc shiftl_t2n) lemma shiftr1_lt:"x \<noteq> 0 \<Longrightarrow> (x::('a::len) word) >> 1 < x" apply (subst shiftr1_is_div_2) apply (rule div_less_dividend_word) apply simp+ done lemma shiftr1_irrelevant_lsb: "bit (x::('a::len) word) 0 \<or> x >> 1 = (x + 1) >> 1" apply (cases \<open>LENGTH('a)\<close>; transfer) apply (simp_all add: take_bit_drop_bit) apply (simp add: drop_bit_take_bit drop_bit_Suc) done lemma shiftr1_0_imp_only_lsb:"((x::('a::len) word) + 1) >> 1 = 0 \<Longrightarrow> x = 0 \<or> x + 1 = 0" by (metis One_nat_def shiftr1_0_or_1 word_less_1 word_overflow) lemma shiftr1_irrelevant_lsb': "\<not> (bit (x::('a::len) word) 0) \<Longrightarrow> x >> 1 = (x + 1) >> 1" by (metis shiftr1_irrelevant_lsb) (* Perhaps this one should be a simp lemma, but it seems a little dangerous. 
*) lemma cast_chunk_assemble_id: "\<lbrakk>n = LENGTH('a::len); m = LENGTH('b::len); n * 2 = m\<rbrakk> \<Longrightarrow> (((ucast ((ucast (x::'b word))::'a word))::'b word) OR (((ucast ((ucast (x >> n))::'a word))::'b word) << n)) = x" apply (subgoal_tac "((ucast ((ucast (x >> n))::'a word))::'b word) = x >> n") apply clarsimp apply (subst and_not_mask[symmetric]) apply (subst ucast_ucast_mask) apply (subst word_ao_dist2[symmetric]) apply clarsimp apply (rule ucast_ucast_len) apply (rule shiftr_less_t2n') apply (subst and_mask_eq_iff_le_mask) apply (simp_all add: mask_eq_decr_exp flip: mult_2_right) apply (metis add_diff_cancel_left' len_gt_0 mult_2_right zero_less_diff) done lemma cast_chunk_scast_assemble_id: "\<lbrakk>n = LENGTH('a::len); m = LENGTH('b::len); n * 2 = m\<rbrakk> \<Longrightarrow> (((ucast ((scast (x::'b word))::'a word))::'b word) OR (((ucast ((scast (x >> n))::'a word))::'b word) << n)) = x" apply (subgoal_tac "((scast x)::'a word) = ((ucast x)::'a word)") apply (subgoal_tac "((scast (x >> n))::'a word) = ((ucast (x >> n))::'a word)") apply (simp add:cast_chunk_assemble_id) apply (subst down_cast_same[symmetric], subst is_down, arith, simp)+ done lemma unat_shiftr_less_t2n: fixes x :: "'a :: len word" shows "unat x < 2 ^ (n + m) \<Longrightarrow> unat (x >> n) < 2 ^ m" by (simp add: shiftr_div_2n' power_add mult.commute less_mult_imp_div_less) lemma ucast_less_shiftl_helper: "\<lbrakk> LENGTH('b) + 2 < LENGTH('a); 2 ^ (LENGTH('b) + 2) \<le> n\<rbrakk> \<Longrightarrow> (ucast (x :: 'b::len word) << 2) < (n :: 'a::len word)" apply (erule order_less_le_trans[rotated]) using ucast_less[where x=x and 'a='a] apply (simp only: shiftl_t2n field_simps) apply (rule word_less_power_trans2; simp) done (* negating a mask which has been shifted to the very left *) lemma NOT_mask_shifted_lenword: "NOT (mask len << (LENGTH('a) - len) ::'a::len word) = mask (LENGTH('a) - len)" by (rule bit_word_eqI) (auto simp add: word_size bit_not_iff bit_push_bit_iff bit_mask_iff) (* Comparisons between different word sizes. *) lemma shiftr_less: "(w::'a::len word) < k \<Longrightarrow> w >> n < k" by (metis div_le_dividend le_less_trans shiftr_div_2n' unat_arith_simps(2)) lemma word_and_notzeroD: "w AND w' \<noteq> 0 \<Longrightarrow> w \<noteq> 0 \<and> w' \<noteq> 0" by auto lemma shiftr_le_0: "unat (w::'a::len word) < 2 ^ n \<Longrightarrow> w >> n = (0::'a::len word)" apply (auto simp add: take_bit_word_eq_self_iff word_less_nat_alt simp flip: take_bit_eq_self_iff_drop_bit_eq_0) apply (rule ccontr) apply (simp add: not_le) done lemma of_nat_shiftl: "(of_nat x << n) = (of_nat (x * 2 ^ n) :: ('a::len) word)" proof - have "(of_nat x::'a word) << n = of_nat (2 ^ n) * of_nat x" using shiftl_t2n by (metis word_unat_power) thus ?thesis by simp qed lemma shiftl_1_not_0: "n < LENGTH('a) \<Longrightarrow> (1::'a::len word) << n \<noteq> 0" by (simp add: shiftl_t2n) (* continue sorting out from here *) (* usually: x,y = (len_of TYPE ('a)) *) lemma bitmagic_zeroLast_leq_or1Last: "(a::('a::len) word) AND (mask len << x - len) \<le> a OR mask (y - len)" by (meson le_word_or2 order_trans word_and_le2) lemma zero_base_lsb_imp_set_eq_as_bit_operation: fixes base ::"'a::len word" assumes valid_prefix: "mask (LENGTH('a) - len) AND base = 0" shows "(base = NOT (mask (LENGTH('a) - len)) AND a) \<longleftrightarrow> (a \<in> {base .. 
base OR mask (LENGTH('a) - len)})" proof have helper3: "x OR y = x OR y AND NOT x" for x y ::"'a::len word" by (simp add: word_oa_dist2) from assms show "base = NOT (mask (LENGTH('a) - len)) AND a \<Longrightarrow> a \<in> {base..base OR mask (LENGTH('a) - len)}" apply(simp add: word_and_le1) apply(metis helper3 le_word_or2 word_bw_comms(1) word_bw_comms(2)) done next assume "a \<in> {base..base OR mask (LENGTH('a) - len)}" hence a: "base \<le> a \<and> a \<le> base OR mask (LENGTH('a) - len)" by simp show "base = NOT (mask (LENGTH('a) - len)) AND a" proof - have f2: "\<forall>x\<^sub>0. base AND NOT (mask x\<^sub>0) \<le> a AND NOT (mask x\<^sub>0)" using a neg_mask_mono_le by blast have f3: "\<forall>x\<^sub>0. a AND NOT (mask x\<^sub>0) \<le> (base OR mask (LENGTH('a) - len)) AND NOT (mask x\<^sub>0)" using a neg_mask_mono_le by blast have f4: "base = base AND NOT (mask (LENGTH('a) - len))" using valid_prefix by (metis mask_eq_0_eq_x word_bw_comms(1)) hence f5: "\<forall>x\<^sub>6. (base OR x\<^sub>6) AND NOT (mask (LENGTH('a) - len)) = base OR x\<^sub>6 AND NOT (mask (LENGTH('a) - len))" using word_ao_dist by (metis) have f6: "\<forall>x\<^sub>2 x\<^sub>3. a AND NOT (mask x\<^sub>2) \<le> x\<^sub>3 \<or> \<not> (base OR mask (LENGTH('a) - len)) AND NOT (mask x\<^sub>2) \<le> x\<^sub>3" using f3 dual_order.trans by auto have "base = (base OR mask (LENGTH('a) - len)) AND NOT (mask (LENGTH('a) - len))" using f5 by auto hence "base = a AND NOT (mask (LENGTH('a) - len))" using f2 f4 f6 by (metis eq_iff) thus "base = NOT (mask (LENGTH('a) - len)) AND a" by (metis word_bw_comms(1)) qed qed lemma of_nat_eq_signed_scast: "(of_nat x = (y :: ('a::len) signed word)) = (of_nat x = (scast y :: 'a word))" by (metis scast_of_nat scast_scast_id(2)) lemma word_aligned_add_no_wrap_bounded: "\<lbrakk> w + 2^n \<le> x; w + 2^n \<noteq> 0; is_aligned w n \<rbrakk> \<Longrightarrow> (w::'a::len word) < x" by (blast dest: is_aligned_no_overflow le_less_trans word_leq_le_minus_one) lemma mask_Suc: "mask (Suc n) = (2 :: 'a::len word) ^ n + mask n" by (simp add: mask_eq_decr_exp) lemma mask_mono: "sz' \<le> sz \<Longrightarrow> mask sz' \<le> (mask sz :: 'a::len word)" by (simp add: le_mask_iff shiftr_mask_le) lemma aligned_mask_disjoint: "\<lbrakk> is_aligned (a :: 'a :: len word) n; b \<le> mask n \<rbrakk> \<Longrightarrow> a AND b = 0" by (metis and_zero_eq is_aligned_mask le_mask_imp_and_mask word_bw_lcs(1)) lemma word_and_or_mask_aligned: "\<lbrakk> is_aligned a n; b \<le> mask n \<rbrakk> \<Longrightarrow> a + b = a OR b" by (simp add: aligned_mask_disjoint word_plus_and_or_coroll) lemma word_and_or_mask_aligned2: \<open>is_aligned b n \<Longrightarrow> a \<le> mask n \<Longrightarrow> a + b = a OR b\<close> using word_and_or_mask_aligned [of b n a] by (simp add: ac_simps) lemma is_aligned_ucastI: "is_aligned w n \<Longrightarrow> is_aligned (ucast w) n" apply transfer apply (auto simp add: min_def) apply (metis bintrunc_bintrunc_ge bintrunc_n_0 nat_less_le not_le take_bit_eq_0_iff) done lemma ucast_le_maskI: "a \<le> mask n \<Longrightarrow> UCAST('a::len \<rightarrow> 'b::len) a \<le> mask n" by (metis and_mask_eq_iff_le_mask ucast_and_mask) lemma ucast_add_mask_aligned: "\<lbrakk> a \<le> mask n; is_aligned b n \<rbrakk> \<Longrightarrow> UCAST ('a::len \<rightarrow> 'b::len) (a + b) = ucast a + ucast b" by (metis add.commute is_aligned_ucastI ucast_le_maskI ucast_or_distrib word_and_or_mask_aligned) lemma ucast_shiftl: "LENGTH('b) \<le> LENGTH ('a) \<Longrightarrow> UCAST ('a::len \<rightarrow> 
'b::len) x << n = ucast (x << n)" by word_eqI_solve lemma ucast_leq_mask: "LENGTH('a) \<le> n \<Longrightarrow> ucast (x::'a::len word) \<le> mask n" apply (simp add: less_eq_mask_iff_take_bit_eq_self) apply transfer apply (simp add: ac_simps) done lemma shiftl_inj: "\<lbrakk> x << n = y << n; x \<le> mask (LENGTH('a)-n); y \<le> mask (LENGTH('a)-n) \<rbrakk> \<Longrightarrow> x = (y :: 'a :: len word)" apply word_eqI apply (rename_tac n') apply (case_tac "LENGTH('a) - n \<le> n'", simp) by (metis add.commute add.right_neutral diff_add_inverse le_diff_conv linorder_not_less zero_order(1)) lemma distinct_word_add_ucast_shift_inj: \<open>p' = p \<and> off' = off\<close> if *: \<open>p + (UCAST('a::len \<rightarrow> 'b::len) off << n) = p' + (ucast off' << n)\<close> and \<open>is_aligned p n'\<close> \<open>is_aligned p' n'\<close> \<open>n' = n + LENGTH('a)\<close> \<open>n' < LENGTH('b)\<close> proof - from \<open>n' = n + LENGTH('a)\<close> have [simp]: \<open>n' - n = LENGTH('a)\<close> \<open>n + LENGTH('a) = n'\<close> by simp_all from \<open>is_aligned p n'\<close> obtain q where p: \<open>p = push_bit n' (word_of_nat q)\<close> \<open>q < 2 ^ (LENGTH('b) - n')\<close> by (rule is_alignedE') from \<open>is_aligned p' n'\<close> obtain q' where p': \<open>p' = push_bit n' (word_of_nat q')\<close> \<open>q' < 2 ^ (LENGTH('b) - n')\<close> by (rule is_alignedE') define m :: nat where \<open>m = unat off\<close> then have off: \<open>off = word_of_nat m\<close> by simp define m' :: nat where \<open>m' = unat off'\<close> then have off': \<open>off' = word_of_nat m'\<close> by simp have \<open>push_bit n' q + take_bit n' (push_bit n m) < 2 ^ LENGTH('b)\<close> by (metis id_apply is_aligned_no_wrap''' of_nat_eq_id of_nat_push_bit p(1) p(2) take_bit_nat_eq_self_iff take_bit_nat_less_exp take_bit_push_bit that(2) that(5) unsigned_of_nat) moreover have \<open>push_bit n' q' + take_bit n' (push_bit n m') < 2 ^ LENGTH('b)\<close> by (metis \<open>n' - n = LENGTH('a)\<close> id_apply is_aligned_no_wrap''' m'_def of_nat_eq_id of_nat_push_bit off' p'(1) p'(2) take_bit_nat_eq_self_iff take_bit_push_bit that(3) that(5) unsigned_of_nat) ultimately have \<open>push_bit n' q + take_bit n' (push_bit n m) = push_bit n' q' + take_bit n' (push_bit n m')\<close> using * by (simp add: p p' off off' push_bit_of_nat push_bit_take_bit word_of_nat_inj flip: of_nat_add) then have \<open>int (push_bit n' q + take_bit n' (push_bit n m)) = int (push_bit n' q' + take_bit n' (push_bit n m'))\<close> by simp then have \<open>concat_bit n' (int (push_bit n m)) (int q) = concat_bit n' (int (push_bit n m')) (int q')\<close> by (simp add: of_nat_push_bit of_nat_take_bit bin_cat_eq_push_bit_add_take_bit) then show ?thesis by (simp add: bin_cat_inj p p' off off' take_bit_of_nat take_bit_push_bit word_of_nat_eq_iff) (simp add: push_bit_eq_mult) qed lemma word_upto_Nil: "y < x \<Longrightarrow> [x .e. y ::'a::len word] = []" by (simp add: upto_enum_red not_le word_less_nat_alt) lemma word_enum_decomp_elem: assumes "[x .e. (y ::'a::len word)] = as @ a # bs" shows "x \<le> a \<and> a \<le> y" proof - have "set as \<subseteq> set [x .e. y] \<and> a \<in> set [x .e. y]" using assms by (auto dest: arg_cong[where f=set]) then show ?thesis by auto qed lemma word_enum_prefix: "[x .e. (y ::'a::len word)] = as @ a # bs \<Longrightarrow> as = (if x < a then [x .e. 
a - 1] else [])" apply (induct as arbitrary: x; clarsimp) apply (case_tac "x < y") prefer 2 apply (case_tac "x = y", simp) apply (simp add: not_less) apply (drule (1) dual_order.not_eq_order_implies_strict) apply (simp add: word_upto_Nil) apply (simp add: word_upto_Cons_eq) apply (case_tac "x < y") prefer 2 apply (case_tac "x = y", simp) apply (simp add: not_less) apply (drule (1) dual_order.not_eq_order_implies_strict) apply (simp add: word_upto_Nil) apply (clarsimp simp: word_upto_Cons_eq) apply (frule word_enum_decomp_elem) apply clarsimp apply (rule conjI) prefer 2 apply (subst word_Suc_le[symmetric]; clarsimp) apply (drule meta_spec) apply (drule (1) meta_mp) apply clarsimp apply (rule conjI; clarsimp) apply (subst (2) word_upto_Cons_eq) apply unat_arith apply simp done lemma word_enum_decomp_set: "[x .e. (y ::'a::len word)] = as @ a # bs \<Longrightarrow> a \<notin> set as" by (metis distinct_append distinct_enum_upto' not_distinct_conv_prefix) lemma word_enum_decomp: assumes "[x .e. (y ::'a::len word)] = as @ a # bs" shows "x \<le> a \<and> a \<le> y \<and> a \<notin> set as \<and> (\<forall>z \<in> set as. x \<le> z \<and> z \<le> y)" proof - from assms have "set as \<subseteq> set [x .e. y] \<and> a \<in> set [x .e. y]" by (auto dest: arg_cong[where f=set]) with word_enum_decomp_set[OF assms] show ?thesis by auto qed lemma of_nat_unat_le_mask_ucast: "\<lbrakk>of_nat (unat t) = w; t \<le> mask LENGTH('a)\<rbrakk> \<Longrightarrow> t = UCAST('a::len \<rightarrow> 'b::len) w" by (clarsimp simp: ucast_nat_def ucast_ucast_mask simp flip: and_mask_eq_iff_le_mask) lemma less_diff_gt0: "a < b \<Longrightarrow> (0 :: 'a :: len word) < b - a" by unat_arith lemma unat_plus_gt: "unat ((a :: 'a :: len word) + b) \<le> unat a + unat b" by (clarsimp simp: unat_plus_if_size) lemma const_less: "\<lbrakk> (a :: 'a :: len word) - 1 < b; a \<noteq> b \<rbrakk> \<Longrightarrow> a < b" by (metis less_1_simp word_le_less_eq) lemma add_mult_aligned_neg_mask: \<open>(x + y * m) AND NOT(mask n) = (x AND NOT(mask n)) + y * m\<close> if \<open>m AND (2 ^ n - 1) = 0\<close> for x y m :: \<open>'a::len word\<close> by (metis (no_types, hide_lams) add.assoc add.commute add.right_neutral add_uminus_conv_diff mask_eq_decr_exp mask_eqs(2) mask_eqs(6) mult.commute mult_zero_left subtract_mask(1) that) lemma unat_of_nat_minus_1: "\<lbrakk> n < 2 ^ LENGTH('a); n \<noteq> 0 \<rbrakk> \<Longrightarrow> unat ((of_nat n:: 'a :: len word) - 1) = n - 1" by (simp add: of_nat_diff unat_eq_of_nat) lemma word_eq_zeroI: "a \<le> a - 1 \<Longrightarrow> a = 0" for a :: "'a :: len word" by (simp add: word_must_wrap) lemma word_add_format: "(-1 :: 'a :: len word) + b + c = b + (c - 1)" by simp lemma upto_enum_word_nth: "\<lbrakk> i \<le> j; k \<le> unat (j - i) \<rbrakk> \<Longrightarrow> [i .e. j] ! k = i + of_nat k" apply (clarsimp simp: upto_enum_def nth_append) apply (clarsimp simp: word_le_nat_alt[symmetric]) apply (rule conjI, clarsimp) apply (subst toEnum_of_nat, unat_arith) apply unat_arith apply (clarsimp simp: not_less unat_sub[symmetric]) apply unat_arith done lemma upto_enum_step_nth: "\<lbrakk> a \<le> c; n \<le> unat ((c - a) div (b - a)) \<rbrakk> \<Longrightarrow> [a, b .e. c] ! n = a + of_nat n * (b - a)" by (clarsimp simp: upto_enum_step_def not_less[symmetric] upto_enum_word_nth) lemma upto_enum_inc_1_len: "a < - 1 \<Longrightarrow> [(0 :: 'a :: len word) .e. 1 + a] = [0 .e. 
a] @ [1 + a]" apply (simp add: upto_enum_word) apply (subgoal_tac "unat (1+a) = 1 + unat a") apply simp apply (subst unat_plus_simple[THEN iffD1]) apply (metis add.commute no_plus_overflow_neg olen_add_eqv) apply unat_arith done lemma neg_mask_add: "y AND mask n = 0 \<Longrightarrow> x + y AND NOT(mask n) = (x AND NOT(mask n)) + y" for x y :: \<open>'a::len word\<close> by (clarsimp simp: mask_out_sub_mask mask_eqs(7)[symmetric] mask_twice) lemma shiftr_shiftl_shiftr[simp]: "(x :: 'a :: len word) >> a << a >> a = x >> a" by word_eqI_solve lemma add_right_shift: "\<lbrakk> x AND mask n = 0; y AND mask n = 0; x \<le> x + y \<rbrakk> \<Longrightarrow> (x + y :: ('a :: len) word) >> n = (x >> n) + (y >> n)" apply (simp add: no_olen_add_nat is_aligned_mask[symmetric]) apply (simp add: unat_arith_simps shiftr_div_2n' split del: if_split) apply (subst if_P) apply (erule order_le_less_trans[rotated]) apply (simp add: add_mono) apply (simp add: shiftr_div_2n' is_aligned_iff_dvd_nat) done lemma sub_right_shift: "\<lbrakk> x AND mask n = 0; y AND mask n = 0; y \<le> x \<rbrakk> \<Longrightarrow> (x - y) >> n = (x >> n :: 'a :: len word) - (y >> n)" using add_right_shift[where x="x - y" and y=y and n=n] by (simp add: aligned_sub_aligned is_aligned_mask[symmetric] word_sub_le) lemma and_and_mask_simple: "y AND mask n = mask n \<Longrightarrow> (x AND y) AND mask n = x AND mask n" by (simp add: ac_simps) lemma and_and_mask_simple_not: "y AND mask n = 0 \<Longrightarrow> (x AND y) AND mask n = 0" by (simp add: ac_simps) lemma word_and_le': "b \<le> c \<Longrightarrow> (a :: 'a :: len word) AND b \<le> c" by (metis word_and_le1 order_trans) lemma word_and_less': "b < c \<Longrightarrow> (a :: 'a :: len word) AND b < c" by transfer simp lemma shiftr_w2p: "x < LENGTH('a) \<Longrightarrow> 2 ^ x = (2 ^ (LENGTH('a) - 1) >> (LENGTH('a) - 1 - x) :: 'a :: len word)" by word_eqI_solve lemma t2p_shiftr: "\<lbrakk> b \<le> a; a < LENGTH('a) \<rbrakk> \<Longrightarrow> (2 :: 'a :: len word) ^ a >> b = 2 ^ (a - b)" by word_eqI_solve lemma scast_1[simp]: "scast (1 :: 'a :: len signed word) = (1 :: 'a word)" by simp lemma unsigned_uminus1 [simp]: \<open>(unsigned (-1::'b::len word)::'c::len word) = mask LENGTH('b)\<close> by (rule bit_word_eqI) (auto simp add: bit_simps) lemma ucast_ucast_mask_eq: "\<lbrakk> UCAST('a::len \<rightarrow> 'b::len) x = y; x AND mask LENGTH('b) = x \<rbrakk> \<Longrightarrow> x = ucast y" by (drule sym) (simp flip: take_bit_eq_mask add: unsigned_ucast_eq) lemma ucast_up_neq: "\<lbrakk> ucast x \<noteq> (ucast y::'b::len word); LENGTH('b) \<le> LENGTH ('a) \<rbrakk> \<Longrightarrow> ucast x \<noteq> (ucast y::'a::len word)" by (fastforce dest: ucast_up_eq) lemma mask_AND_less_0: "\<lbrakk> x AND mask n = 0; m \<le> n \<rbrakk> \<Longrightarrow> x AND mask m = 0" for x :: \<open>'a::len word\<close> by (metis mask_twice2 word_and_notzeroD) lemma mask_len_id [simp]: "(x :: 'a :: len word) AND mask LENGTH('a) = x" using uint_lt2p [of x] by (simp add: mask_eq_iff) lemma scast_ucast_down_same: "LENGTH('b) \<le> LENGTH('a) \<Longrightarrow> SCAST('a \<rightarrow> 'b) = UCAST('a::len \<rightarrow> 'b::len)" by (simp add: down_cast_same is_down) lemma word_aligned_0_sum: "\<lbrakk> a + b = 0; is_aligned (a :: 'a :: len word) n; b \<le> mask n; n < LENGTH('a) \<rbrakk> \<Longrightarrow> a = 0 \<and> b = 0" by (simp add: word_plus_and_or_coroll aligned_mask_disjoint word_or_zero) lemma mask_eq1_nochoice: "\<lbrakk> LENGTH('a) > 1; (x :: 'a :: len word) AND 1 = x \<rbrakk> \<Longrightarrow> x = 0 
\<or> x = 1" by (metis word_and_1) lemma shiftr_and_eq_shiftl: "(w >> n) AND x = y \<Longrightarrow> w AND (x << n) = (y << n)" for y :: "'a:: len word" by (metis (no_types, lifting) and_not_mask bit.conj_ac(1) bit.conj_ac(2) mask_eq_0_eq_x shiftl_mask_is_0 shiftl_over_and_dist) lemma add_mask_lower_bits': "\<lbrakk> len = LENGTH('a); is_aligned (x :: 'a :: len word) n; \<forall>n' \<ge> n. n' < len \<longrightarrow> \<not> bit p n' \<rbrakk> \<Longrightarrow> x + p AND NOT(mask n) = x" using add_mask_lower_bits by auto lemma leq_mask_shift: "(x :: 'a :: len word) \<le> mask (low_bits + high_bits) \<Longrightarrow> (x >> low_bits) \<le> mask high_bits" by (simp add: le_mask_iff shiftr_shiftr ac_simps) lemma ucast_ucast_eq_mask_shift: "(x :: 'a :: len word) \<le> mask (low_bits + LENGTH('b)) \<Longrightarrow> ucast((ucast (x >> low_bits)) :: 'b :: len word) = x >> low_bits" by (meson and_mask_eq_iff_le_mask eq_ucast_ucast_eq not_le_imp_less shiftr_less_t2n' ucast_ucast_len) lemma const_le_unat: "\<lbrakk> b < 2 ^ LENGTH('a); of_nat b \<le> a \<rbrakk> \<Longrightarrow> b \<le> unat (a :: 'a :: len word)" apply (simp add: word_le_def) apply (simp only: uint_nat zle_int) apply transfer apply (simp add: take_bit_nat_eq_self) done lemma upt_enum_offset_trivial: "\<lbrakk> x < 2 ^ LENGTH('a) - 1 ; n \<le> unat x \<rbrakk> \<Longrightarrow> ([(0 :: 'a :: len word) .e. x] ! n) = of_nat n" apply (induct x arbitrary: n) apply simp by (simp add: upto_enum_word_nth) lemma word_le_mask_out_plus_2sz: "x \<le> (x AND NOT(mask sz)) + 2 ^ sz - 1" for x :: \<open>'a::len word\<close> by (metis add_diff_eq word_neg_and_le) lemma ucast_add: "ucast (a + (b :: 'a :: len word)) = ucast a + (ucast b :: ('a signed word))" by transfer (simp add: take_bit_add) lemma ucast_minus: "ucast (a - (b :: 'a :: len word)) = ucast a - (ucast b :: ('a signed word))" apply (insert ucast_add[where a=a and b="-b"]) apply (metis (no_types, hide_lams) add_diff_eq diff_add_cancel ucast_add) done lemma scast_ucast_add_one [simp]: "scast (ucast (x :: 'a::len word) + (1 :: 'a signed word)) = x + 1" apply (subst ucast_1[symmetric]) apply (subst ucast_add[symmetric]) apply clarsimp done lemma word_and_le_plus_one: "a > 0 \<Longrightarrow> (x :: 'a :: len word) AND (a - 1) < a" by (simp add: gt0_iff_gem1 word_and_less') lemma unat_of_ucast_then_shift_eq_unat_of_shift[simp]: "LENGTH('b) \<ge> LENGTH('a) \<Longrightarrow> unat ((ucast (x :: 'a :: len word) :: 'b :: len word) >> n) = unat (x >> n)" by (simp add: shiftr_div_2n' unat_ucast_up_simp) lemma unat_of_ucast_then_mask_eq_unat_of_mask[simp]: "LENGTH('b) \<ge> LENGTH('a) \<Longrightarrow> unat ((ucast (x :: 'a :: len word) :: 'b :: len word) AND mask m) = unat (x AND mask m)" by (metis ucast_and_mask unat_ucast_up_simp) lemma shiftr_less_t2n3: "\<lbrakk> (2 :: 'a word) ^ (n + m) = 0; m < LENGTH('a) \<rbrakk> \<Longrightarrow> (x :: 'a :: len word) >> n < 2 ^ m" by (fastforce intro: shiftr_less_t2n' simp: mask_eq_decr_exp power_overflow) lemma unat_shiftr_le_bound: "\<lbrakk> 2 ^ (LENGTH('a :: len) - n) - 1 \<le> bnd; 0 < n \<rbrakk> \<Longrightarrow> unat ((x :: 'a word) >> n) \<le> bnd" apply transfer apply (simp add: take_bit_drop_bit) apply (simp add: drop_bit_take_bit) apply (rule order_trans) defer apply assumption apply (simp add: nat_le_iff of_nat_diff) done lemma shiftr_eqD: "\<lbrakk> x >> n = y >> n; is_aligned x n; is_aligned y n \<rbrakk> \<Longrightarrow> x = y" by (metis is_aligned_shiftr_shiftl) lemma word_shiftr_shiftl_shiftr_eq_shiftr: "a \<ge> b \<Longrightarrow> (x 
:: 'a :: len word) >> a << b >> b = x >> a" apply (rule bit_word_eqI) apply (auto simp add: bit_simps dest: bit_imp_le_length) done lemma of_int_uint_ucast: "of_int (uint (x :: 'a::len word)) = (ucast x :: 'b::len word)" by (fact Word.of_int_uint) lemma mod_mask_drop: "\<lbrakk> m = 2 ^ n; 0 < m; mask n AND msk = mask n \<rbrakk> \<Longrightarrow> (x mod m) AND msk = x mod m" for x :: \<open>'a::len word\<close> by (simp add: word_mod_2p_is_mask word_bw_assocs) lemma mask_eq_ucast_eq: "\<lbrakk> x AND mask LENGTH('a) = (x :: ('c :: len word)); LENGTH('a) \<le> LENGTH('b)\<rbrakk> \<Longrightarrow> ucast (ucast x :: ('a :: len word)) = (ucast x :: ('b :: len word))" by (metis ucast_and_mask ucast_id ucast_ucast_mask ucast_up_eq) lemma of_nat_less_t2n: "of_nat i < (2 :: ('a :: len) word) ^ n \<Longrightarrow> n < LENGTH('a) \<and> unat (of_nat i :: 'a word) < 2 ^ n" by (metis order_less_trans p2_gt_0 unat_less_power word_neq_0_conv) lemma word_sub_mono4: "\<lbrakk> y + x \<le> z + x; y \<le> y + x; z \<le> z + x \<rbrakk> \<Longrightarrow> y \<le> z" for y :: "'a :: len word" by (simp add: word_add_le_iff2) lemma eq_or_less_helperD: "\<lbrakk> n = unat (2 ^ m - 1 :: 'a :: len word) \<or> n < unat (2 ^ m - 1 :: 'a word); m < LENGTH('a) \<rbrakk> \<Longrightarrow> n < 2 ^ m" by (meson le_less_trans nat_less_le unat_less_power word_power_less_1) lemma mask_sub: "n \<le> m \<Longrightarrow> mask m - mask n = mask m AND NOT(mask n :: 'a::len word)" by (metis (full_types) and_mask_eq_iff_shiftr_0 mask_out_sub_mask shiftr_mask_le word_bw_comms(1)) lemma neg_mask_diff_bound: "sz'\<le> sz \<Longrightarrow> (ptr AND NOT(mask sz')) - (ptr AND NOT(mask sz)) \<le> 2 ^ sz - 2 ^ sz'" (is "_ \<Longrightarrow> ?lhs \<le> ?rhs") for ptr :: \<open>'a::len word\<close> proof - assume lt: "sz' \<le> sz" hence "?lhs = ptr AND (mask sz AND NOT(mask sz'))" by (metis add_diff_cancel_left' multiple_mask_trivia) also have "\<dots> \<le> ?rhs" using lt by (metis (mono_tags) add_diff_eq diff_eq_eq eq_iff mask_2pm1 mask_sub word_and_le') finally show ?thesis by simp qed lemma mask_out_eq_0: "\<lbrakk> idx < 2 ^ sz; sz < LENGTH('a) \<rbrakk> \<Longrightarrow> (of_nat idx :: 'a :: len word) AND NOT(mask sz) = 0" by (simp add: of_nat_power less_mask_eq mask_eq_0_eq_x) lemma is_aligned_neg_mask_eq': "is_aligned ptr sz = (ptr AND NOT(mask sz) = ptr)" using is_aligned_mask mask_eq_0_eq_x by blast lemma neg_mask_mask_unat: "sz < LENGTH('a) \<Longrightarrow> unat ((ptr :: 'a :: len word) AND NOT(mask sz)) + unat (ptr AND mask sz) = unat ptr" by (metis AND_NOT_mask_plus_AND_mask_eq unat_plus_simple word_and_le2) lemma unat_pow_le_intro: "LENGTH('a) \<le> n \<Longrightarrow> unat (x :: 'a :: len word) < 2 ^ n" by (metis lt2p_lem not_le of_nat_le_iff of_nat_numeral semiring_1_class.of_nat_power uint_nat) lemma unat_shiftl_less_t2n: "\<lbrakk> unat (x :: 'a :: len word) < 2 ^ (m - n); m < LENGTH('a) \<rbrakk> \<Longrightarrow> unat (x << n) < 2 ^ m" by (metis More_Word.of_nat_power nat_mult_power_less_eq numeral_2_eq_2 of_nat_push_bit push_bit_eq_mult unat_less_power unat_of_nat_len unsigned_less word_eq_unatI zero_less_Suc) lemma unat_is_aligned_add: "\<lbrakk> is_aligned p n; unat d < 2 ^ n \<rbrakk> \<Longrightarrow> unat (p + d AND mask n) = unat d \<and> unat (p + d AND NOT(mask n)) = unat p" by (metis add.right_neutral and_mask_eq_iff_le_mask and_not_mask le_mask_iff mask_add_aligned mask_out_add_aligned mult_zero_right shiftl_t2n shiftr_le_0) lemma unat_shiftr_shiftl_mask_zero: "\<lbrakk> c + a \<ge> LENGTH('a) + b ; c < 
LENGTH('a) \<rbrakk> \<Longrightarrow> unat (((q :: 'a :: len word) >> a << b) AND NOT(mask c)) = 0" by (fastforce intro: unat_is_aligned_add[where p=0 and n=c, simplified, THEN conjunct2] unat_shiftl_less_t2n unat_shiftr_less_t2n unat_pow_le_intro) lemmas of_nat_ucast = ucast_of_nat[symmetric] lemma leq_low_bits_iff_zero: "\<lbrakk> x \<le> mask (low_bits + high_bits); x >> low_bits = 0 \<rbrakk> \<Longrightarrow> (x AND mask low_bits = 0) = (x = 0)" for x :: \<open>'a::len word\<close> using and_mask_eq_iff_shiftr_0 by force lemma unat_less_iff: "\<lbrakk> unat (a :: 'a :: len word) = b; c < 2 ^ LENGTH('a) \<rbrakk> \<Longrightarrow> (a < of_nat c) = (b < c)" using unat_ucast_less_no_overflow_simp by blast lemma is_aligned_no_overflow3: "\<lbrakk> is_aligned (a :: 'a :: len word) n; n < LENGTH('a); b < 2 ^ n; c \<le> 2 ^ n; b < c \<rbrakk> \<Longrightarrow> a + b \<le> a + (c - 1)" by (meson is_aligned_no_wrap' le_m1_iff_lt not_le word_less_sub_1 word_plus_mono_right) lemma mask_add_aligned_right: "is_aligned p n \<Longrightarrow> (q + p) AND mask n = q AND mask n" by (simp add: mask_add_aligned add.commute) lemma leq_high_bits_shiftr_low_bits_leq_bits_mask: "x \<le> mask high_bits \<Longrightarrow> (x :: 'a :: len word) << low_bits \<le> mask (low_bits + high_bits)" by (metis le_mask_shiftl_le_mask) lemma word_two_power_neg_ineq: "2 ^ m \<noteq> (0 :: 'a word) \<Longrightarrow> 2 ^ n \<le> - (2 ^ m :: 'a :: len word)" apply (cases "n < LENGTH('a)"; simp add: power_overflow) apply (cases "m < LENGTH('a)"; simp add: power_overflow) apply (simp add: word_le_nat_alt unat_minus word_size) apply (cases "LENGTH('a)"; simp) apply (simp add: less_Suc_eq_le) apply (drule power_increasing[where a=2 and n=n] power_increasing[where a=2 and n=m], simp)+ apply (drule(1) add_le_mono) apply simp done lemma unat_shiftl_absorb: "\<lbrakk> x \<le> 2 ^ p; p + k < LENGTH('a) \<rbrakk> \<Longrightarrow> unat (x :: 'a :: len word) * 2 ^ k = unat (x * 2 ^ k)" by (smt add_diff_cancel_right' add_lessD1 le_add2 le_less_trans mult.commute nat_le_power_trans unat_lt2p unat_mult_lem unat_power_lower word_le_nat_alt) lemma word_plus_mono_right_split: "\<lbrakk> unat ((x :: 'a :: len word) AND mask sz) + unat z < 2 ^ sz; sz < LENGTH('a) \<rbrakk> \<Longrightarrow> x \<le> x + z" apply (subgoal_tac "(x AND NOT(mask sz)) + (x AND mask sz) \<le> (x AND NOT(mask sz)) + ((x AND mask sz) + z)") apply (simp add:word_plus_and_or_coroll2 field_simps) apply (rule word_plus_mono_right) apply (simp add: less_le_trans no_olen_add_nat) using of_nat_power is_aligned_no_wrap' by force lemma mul_not_mask_eq_neg_shiftl: "NOT(mask n :: 'a::len word) = -1 << n" by (simp add: NOT_mask shiftl_t2n) lemma shiftr_mul_not_mask_eq_and_not_mask: "(x >> n) * NOT(mask n) = - (x AND NOT(mask n))" for x :: \<open>'a::len word\<close> by (metis NOT_mask and_not_mask mult_minus_left semiring_normalization_rules(7) shiftl_t2n) lemma mask_eq_n1_shiftr: "n \<le> LENGTH('a) \<Longrightarrow> (mask n :: 'a :: len word) = -1 >> (LENGTH('a) - n)" by (metis diff_diff_cancel eq_refl mask_full shiftr_mask2) lemma is_aligned_mask_out_add_eq: "is_aligned p n \<Longrightarrow> (p + x) AND NOT(mask n) = p + (x AND NOT(mask n))" by (simp add: mask_out_sub_mask mask_add_aligned) lemmas is_aligned_mask_out_add_eq_sub = is_aligned_mask_out_add_eq[where x="a - b" for a b, simplified field_simps] lemma aligned_bump_down: "is_aligned x n \<Longrightarrow> (x - 1) AND NOT(mask n) = x - 2 ^ n" by (drule is_aligned_mask_out_add_eq[where x="-1"]) (simp add: NOT_mask) lemma
unat_2tp_if: "unat (2 ^ n :: ('a :: len) word) = (if n < LENGTH ('a) then 2 ^ n else 0)" by (split if_split, simp_all add: power_overflow) lemma mask_of_mask: "mask (n::nat) AND mask (m::nat) = (mask (min m n) :: 'a::len word)" by word_eqI_solve lemma unat_signed_ucast_less_ucast: "LENGTH('a) \<le> LENGTH('b) \<Longrightarrow> unat (ucast (x :: 'a :: len word) :: 'b :: len signed word) = unat x" by (simp add: unat_ucast_up_simp) lemmas unat_ucast_mask = unat_ucast_eq_unat_and_mask[where w=a for a] lemma t2n_mask_eq_if: "2 ^ n AND mask m = (if n < m then 2 ^ n else (0 :: 'a::len word))" by (rule word_eqI) (auto simp add: bit_simps) lemma unat_ucast_le: "unat (ucast (x :: 'a :: len word) :: 'b :: len word) \<le> unat x" by (simp add: ucast_nat_def word_unat_less_le) lemma ucast_le_up_down_iff: "\<lbrakk> LENGTH('a) \<le> LENGTH('b); (x :: 'b :: len word) \<le> ucast (- 1 :: 'a :: len word) \<rbrakk> \<Longrightarrow> (ucast x \<le> (y :: 'a word)) = (x \<le> ucast y)" using le_max_word_ucast_id ucast_le_ucast by metis lemma ucast_ucast_mask_shift: "a \<le> LENGTH('a) + b \<Longrightarrow> ucast (ucast (p AND mask a >> b) :: 'a :: len word) = p AND mask a >> b" by (metis add.commute le_mask_iff shiftr_mask_le ucast_ucast_eq_mask_shift word_and_le') lemma unat_ucast_mask_shift: "a \<le> LENGTH('a) + b \<Longrightarrow> unat (ucast (p AND mask a >> b) :: 'a :: len word) = unat (p AND mask a >> b)" by (metis linear ucast_ucast_mask_shift unat_ucast_up_simp) lemma mask_overlap_zero: "a \<le> b \<Longrightarrow> (p AND mask a) AND NOT(mask b) = 0" for p :: \<open>'a::len word\<close> by (metis NOT_mask_AND_mask mask_lower_twice2 max_def) lemma mask_shifl_overlap_zero: "a + c \<le> b \<Longrightarrow> (p AND mask a << c) AND NOT(mask b) = 0" for p :: \<open>'a::len word\<close> by (metis and_mask_0_iff_le_mask mask_mono mask_shiftl_decompose order_trans shiftl_over_and_dist word_and_le' word_and_le2) lemma mask_overlap_zero': "a \<ge> b \<Longrightarrow> (p AND NOT(mask a)) AND mask b = 0" for p :: \<open>'a::len word\<close> using mask_AND_NOT_mask mask_AND_less_0 by blast lemma mask_rshift_mult_eq_rshift_lshift: "((a :: 'a :: len word) >> b) * (1 << c) = (a >> b << c)" by (simp add: shiftl_t2n) lemma shift_alignment: "a \<ge> b \<Longrightarrow> is_aligned (p >> a << a) b" using is_aligned_shift is_aligned_weaken by blast lemma mask_split_sum_twice: "a \<ge> b \<Longrightarrow> (p AND NOT(mask a)) + ((p AND mask a) AND NOT(mask b)) + (p AND mask b) = p" for p :: \<open>'a::len word\<close> by (simp add: add.commute multiple_mask_trivia word_bw_comms(1) word_bw_lcs(1) word_plus_and_or_coroll2) lemma mask_shift_eq_mask_mask: "(p AND mask a >> b << b) = (p AND mask a) AND NOT(mask b)" for p :: \<open>'a::len word\<close> by (simp add: and_not_mask) lemma mask_shift_sum: "\<lbrakk> a \<ge> b; unat n = unat (p AND mask b) \<rbrakk> \<Longrightarrow> (p AND NOT(mask a)) + (p AND mask a >> b) * (1 << b) + n = (p :: 'a :: len word)" apply (simp add: push_bit_of_1 flip: push_bit_eq_mult) apply (subst disjunctive_add) apply (auto simp add: bit_simps) apply (smt (z3) AND_NOT_mask_plus_AND_mask_eq and.comm_neutral and.right_idem and_not_mask bit.conj_disj_distrib bit.disj_cancel_right mask_out_first_mask_some word_bw_assocs(1) word_bw_comms(1) word_bw_comms(2) word_eq_unatI) done lemma is_up_compose: "\<lbrakk> is_up uc; is_up uc' \<rbrakk> \<Longrightarrow> is_up (uc' \<circ> uc)" unfolding is_up_def by (simp add: Word.target_size Word.source_size) lemma of_int_sint_scast: "of_int (sint (x :: 'a :: len 
word)) = (scast x :: 'b :: len word)" by (fact Word.of_int_sint) lemma scast_of_nat_to_signed [simp]: "scast (of_nat x :: 'a :: len word) = (of_nat x :: 'a signed word)" by transfer simp lemma scast_of_nat_signed_to_unsigned_add: "scast (of_nat x + of_nat y :: 'a :: len signed word) = (of_nat x + of_nat y :: 'a :: len word)" by (metis of_nat_add scast_of_nat) lemma scast_of_nat_unsigned_to_signed_add: "(scast (of_nat x + of_nat y :: 'a :: len word)) = (of_nat x + of_nat y :: 'a :: len signed word)" by (metis Abs_fnat_hom_add scast_of_nat_to_signed) lemma and_mask_cases: fixes x :: "'a :: len word" assumes len: "n < LENGTH('a)" shows "x AND mask n \<in> of_nat ` set [0 ..< 2 ^ n]" apply (simp flip: take_bit_eq_mask) apply (rule image_eqI [of _ _ \<open>unat (take_bit n x)\<close>]) using len apply simp_all apply transfer apply simp done lemma sint_eq_uint_2pl: "\<lbrakk> (a :: 'a :: len word) < 2 ^ (LENGTH('a) - 1) \<rbrakk> \<Longrightarrow> sint a = uint a" by (simp add: not_msb_from_less sint_eq_uint word_2p_lem word_size) lemma pow_sub_less: "\<lbrakk> a + b \<le> LENGTH('a); unat (x :: 'a :: len word) = 2 ^ a \<rbrakk> \<Longrightarrow> unat (x * 2 ^ b - 1) < 2 ^ (a + b)" by (smt (z3) eq_or_less_helperD le_add2 le_eq_less_or_eq le_trans power_add unat_mult_lem unat_pow_le_intro unat_power_lower word_eq_unatI) lemma sle_le_2pl: "\<lbrakk> (b :: 'a :: len word) < 2 ^ (LENGTH('a) - 1); a \<le> b \<rbrakk> \<Longrightarrow> a <=s b" by (simp add: not_msb_from_less word_sle_msb_le) lemma sless_less_2pl: "\<lbrakk> (b :: 'a :: len word) < 2 ^ (LENGTH('a) - 1); a < b \<rbrakk> \<Longrightarrow> a <s b" using not_msb_from_less word_sless_msb_less by blast lemma and_mask2: "w << n >> n = w AND mask (size w - n)" for w :: \<open>'a::len word\<close> by (cases "n \<le> size w"; clarsimp simp: word_and_le2 and_mask shiftl_zero_size) lemma aligned_sub_aligned_simple: "\<lbrakk> is_aligned a n; is_aligned b n \<rbrakk> \<Longrightarrow> is_aligned (a - b) n" by (simp add: aligned_sub_aligned) lemma minus_one_shift: "- (1 << n) = (-1 << n :: 'a::len word)" by (simp flip: mul_not_mask_eq_neg_shiftl minus_exp_eq_not_mask add: push_bit_of_1) lemma ucast_eq_mask: "(UCAST('a::len \<rightarrow> 'b::len) x = UCAST('a \<rightarrow> 'b) y) = (x AND mask LENGTH('b) = y AND mask LENGTH('b))" by transfer (simp flip: take_bit_eq_mask add: ac_simps) context fixes w :: "'a::len word" begin private lemma sbintrunc_uint_ucast: assumes "Suc n = LENGTH('b::len)" shows "signed_take_bit n (uint (ucast w :: 'b word)) = signed_take_bit n (uint w)" by (rule bit_eqI) (use assms in \<open>simp add: bit_simps\<close>) private lemma test_bit_sbintrunc: assumes "i < LENGTH('a)" shows "bit (word_of_int (signed_take_bit n (uint w)) :: 'a word) i = (if n < i then bit w n else bit w i)" using assms by (simp add: bit_simps) private lemma test_bit_sbintrunc_ucast: assumes len_a: "i < LENGTH('a)" shows "bit (word_of_int (signed_take_bit (LENGTH('b) - 1) (uint (ucast w :: 'b word))) :: 'a word) i = (if LENGTH('b::len) \<le> i then bit w (LENGTH('b) - 1) else bit w i)" using len_a by (auto simp add: sbintrunc_uint_ucast bit_simps) lemma scast_ucast_high_bits: \<open>scast (ucast w :: 'b::len word) = w \<longleftrightarrow> (\<forall> i \<in> {LENGTH('b) ..< size w}. 
bit w i = bit w (LENGTH('b) - 1))\<close> proof (cases \<open>LENGTH('a) \<le> LENGTH('b)\<close>) case True moreover define m where \<open>m = LENGTH('b) - LENGTH('a)\<close> ultimately have \<open>LENGTH('b) = m + LENGTH('a)\<close> by simp then show ?thesis apply (simp_all add: signed_ucast_eq word_size) apply (rule bit_word_eqI) apply (simp add: bit_signed_take_bit_iff) done next case False define q where \<open>q = LENGTH('b) - 1\<close> then have \<open>LENGTH('b) = Suc q\<close> by simp moreover define m where \<open>m = Suc LENGTH('a) - LENGTH('b)\<close> with False \<open>LENGTH('b) = Suc q\<close> have \<open>LENGTH('a) = m + q\<close> by (simp add: not_le) ultimately show ?thesis apply (simp_all add: signed_ucast_eq word_size) apply (transfer fixing: m q) apply (simp add: signed_take_bit_take_bit) apply rule apply (subst bit_eq_iff) apply (simp add: bit_take_bit_iff bit_signed_take_bit_iff min_def) apply (auto simp add: Suc_le_eq) using less_imp_le_nat apply blast using less_imp_le_nat apply blast done qed lemma scast_ucast_mask_compare: "scast (ucast w :: 'b::len word) = w \<longleftrightarrow> (w \<le> mask (LENGTH('b) - 1) \<or> NOT(mask (LENGTH('b) - 1)) \<le> w)" apply (clarsimp simp: le_mask_high_bits neg_mask_le_high_bits scast_ucast_high_bits word_size) apply (rule iffI; clarsimp) apply (rename_tac i j; case_tac "i = LENGTH('b) - 1"; case_tac "j = LENGTH('b) - 1") by auto lemma ucast_less_shiftl_helper': "\<lbrakk> LENGTH('b) + (a::nat) < LENGTH('a); 2 ^ (LENGTH('b) + a) \<le> n\<rbrakk> \<Longrightarrow> (ucast (x :: 'b::len word) << a) < (n :: 'a::len word)" apply (erule order_less_le_trans[rotated]) using ucast_less[where x=x and 'a='a] apply (simp only: shiftl_t2n field_simps) apply (rule word_less_power_trans2; simp) done end lemma ucast_ucast_mask2: "is_down (UCAST ('a \<rightarrow> 'b)) \<Longrightarrow> UCAST ('b::len \<rightarrow> 'c::len) (UCAST ('a::len \<rightarrow> 'b::len) x) = UCAST ('a \<rightarrow> 'c) (x AND mask LENGTH('b))" apply (simp flip: take_bit_eq_mask) apply transfer apply simp done lemma ucast_NOT: "ucast (NOT x) = NOT(ucast x) AND mask (LENGTH('a))" for x::"'a::len word" by word_eqI lemma ucast_NOT_down: "is_down UCAST('a::len \<rightarrow> 'b::len) \<Longrightarrow> UCAST('a \<rightarrow> 'b) (NOT x) = NOT(UCAST('a \<rightarrow> 'b) x)" by word_eqI lemma upto_enum_step_shift: "\<lbrakk> is_aligned p n \<rbrakk> \<Longrightarrow> ([p , p + 2 ^ m .e. p + 2 ^ n - 1]) = map ((+) p) [0, 2 ^ m .e. 2 ^ n - 1]" apply (erule is_aligned_get_word_bits) prefer 2 apply (simp add: map_idI) apply (clarsimp simp: upto_enum_step_def) apply (frule is_aligned_no_overflow) apply (simp add: linorder_not_le [symmetric]) done lemma upto_enum_step_shift_red: "\<lbrakk> is_aligned p sz; sz < LENGTH('a); us \<le> sz \<rbrakk> \<Longrightarrow> [p :: 'a :: len word, p + 2 ^ us .e. p + 2 ^ sz - 1] = map (\<lambda>x. p + of_nat x * 2 ^ us) [0 ..< 2 ^ (sz - us)]" apply (subst upto_enum_step_shift, assumption) apply (simp add: upto_enum_step_red) done lemma upto_enum_step_subset: "set [x, y .e. z] \<subseteq> {x .. 
z}" apply (clarsimp simp: upto_enum_step_def linorder_not_less) apply (drule div_to_mult_word_lt) apply (rule conjI) apply (erule word_random[rotated]) apply simp apply (rule order_trans) apply (erule word_plus_mono_right) apply simp apply simp done lemma ucast_distrib: fixes M :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" fixes M' :: "'b::len word \<Rightarrow> 'b::len word \<Rightarrow> 'b::len word" fixes L :: "int \<Rightarrow> int \<Rightarrow> int" assumes lift_M: "\<And>x y. uint (M x y) = L (uint x) (uint y) mod 2 ^ LENGTH('a)" assumes lift_M': "\<And>x y. uint (M' x y) = L (uint x) (uint y) mod 2 ^ LENGTH('b)" assumes distrib: "\<And>x y. (L (x mod (2 ^ LENGTH('b))) (y mod (2 ^ LENGTH('b)))) mod (2 ^ LENGTH('b)) = (L x y) mod (2 ^ LENGTH('b))" assumes is_down: "is_down (ucast :: 'a word \<Rightarrow> 'b word)" shows "ucast (M a b) = M' (ucast a) (ucast b)" apply (simp only: ucast_eq) apply (subst lift_M) apply (subst of_int_uint [symmetric], subst lift_M') apply (metis local.distrib local.is_down take_bit_eq_mod ucast_down_wi uint_word_of_int_eq word_of_int_uint) done lemma ucast_down_add: "is_down (ucast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> ucast ((a :: 'a::len word) + b) = (ucast a + ucast b :: 'b::len word)" by (rule ucast_distrib [where L="(+)"], (clarsimp simp: uint_word_ariths)+, presburger, simp) lemma ucast_down_minus: "is_down (ucast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> ucast ((a :: 'a::len word) - b) = (ucast a - ucast b :: 'b::len word)" apply (rule ucast_distrib [where L="(-)"], (clarsimp simp: uint_word_ariths)+) apply (metis mod_diff_left_eq mod_diff_right_eq) apply simp done lemma ucast_down_mult: "is_down (ucast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> ucast ((a :: 'a::len word) * b) = (ucast a * ucast b :: 'b::len word)" apply (rule ucast_distrib [where L="(*)"], (clarsimp simp: uint_word_ariths)+) apply (metis mod_mult_eq) apply simp done lemma scast_distrib: fixes M :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" fixes M' :: "'b::len word \<Rightarrow> 'b::len word \<Rightarrow> 'b::len word" fixes L :: "int \<Rightarrow> int \<Rightarrow> int" assumes lift_M: "\<And>x y. uint (M x y) = L (uint x) (uint y) mod 2 ^ LENGTH('a)" assumes lift_M': "\<And>x y. uint (M' x y) = L (uint x) (uint y) mod 2 ^ LENGTH('b)" assumes distrib: "\<And>x y. 
(L (x mod (2 ^ LENGTH('b))) (y mod (2 ^ LENGTH('b)))) mod (2 ^ LENGTH('b)) = (L x y) mod (2 ^ LENGTH('b))" assumes is_down: "is_down (scast :: 'a word \<Rightarrow> 'b word)" shows "scast (M a b) = M' (scast a) (scast b)" apply (subst (1 2 3) down_cast_same [symmetric]) apply (insert is_down) apply (clarsimp simp: is_down_def target_size source_size is_down) apply (rule ucast_distrib [where L=L, OF lift_M lift_M' distrib]) apply (insert is_down) apply (clarsimp simp: is_down_def target_size source_size is_down) done lemma scast_down_add: "is_down (scast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> scast ((a :: 'a::len word) + b) = (scast a + scast b :: 'b::len word)" by (rule scast_distrib [where L="(+)"], (clarsimp simp: uint_word_ariths)+, presburger, simp) lemma scast_down_minus: "is_down (scast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> scast ((a :: 'a::len word) - b) = (scast a - scast b :: 'b::len word)" apply (rule scast_distrib [where L="(-)"], (clarsimp simp: uint_word_ariths)+) apply (metis mod_diff_left_eq mod_diff_right_eq) apply simp done lemma scast_down_mult: "is_down (scast:: 'a word \<Rightarrow> 'b word) \<Longrightarrow> scast ((a :: 'a::len word) * b) = (scast a * scast b :: 'b::len word)" apply (rule scast_distrib [where L="(*)"], (clarsimp simp: uint_word_ariths)+) apply (metis mod_mult_eq) apply simp done lemma scast_ucast_3: "\<lbrakk> is_down (ucast :: 'a word \<Rightarrow> 'c word); is_down (ucast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (scast (ucast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = ucast a" by (metis down_cast_same ucast_eq ucast_down_wi) lemma scast_ucast_4: "\<lbrakk> is_up (ucast :: 'a word \<Rightarrow> 'b word); is_down (ucast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (scast (ucast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = ucast a" by (metis down_cast_same ucast_eq ucast_down_wi) lemma scast_scast_b: "\<lbrakk> is_up (scast :: 'a word \<Rightarrow> 'b word) \<rbrakk> \<Longrightarrow> (scast (scast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = scast a" by (metis scast_eq sint_up_scast) lemma ucast_scast_1: "\<lbrakk> is_down (scast :: 'a word \<Rightarrow> 'b word); is_down (ucast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (ucast (scast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = scast a" by (metis scast_eq ucast_down_wi) lemma ucast_scast_4: "\<lbrakk> is_up (scast :: 'a word \<Rightarrow> 'b word); is_down (ucast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (ucast (scast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = scast a" by (metis down_cast_same scast_eq sint_up_scast) lemma ucast_ucast_a: "\<lbrakk> is_down (ucast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (ucast (ucast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = ucast a" by (metis down_cast_same ucast_eq ucast_down_wi) lemma ucast_ucast_b: "\<lbrakk> is_up (ucast :: 'a word \<Rightarrow> 'b word) \<rbrakk> \<Longrightarrow> (ucast (ucast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = ucast a" by (metis ucast_up_ucast) lemma scast_scast_a: "\<lbrakk> is_down (scast :: 'b word \<Rightarrow> 'c word) \<rbrakk> \<Longrightarrow> (scast (scast (a :: 'a::len word) :: 'b::len word) :: 'c::len word) = scast a" apply (simp only: scast_eq) apply (metis down_cast_same is_up_down scast_eq ucast_down_wi) done lemma scast_down_wi [OF refl]: "uc = scast \<Longrightarrow> is_down uc \<Longrightarrow> uc (word_of_int x) 
= word_of_int x" by (metis down_cast_same is_up_down ucast_down_wi) lemmas cast_simps = is_down is_up scast_down_add scast_down_minus scast_down_mult ucast_down_add ucast_down_minus ucast_down_mult scast_ucast_1 scast_ucast_3 scast_ucast_4 ucast_scast_1 ucast_scast_3 ucast_scast_4 ucast_ucast_a ucast_ucast_b scast_scast_a scast_scast_b ucast_down_wi scast_down_wi ucast_of_nat scast_of_nat uint_up_ucast sint_up_scast up_scast_surj up_ucast_surj lemma sdiv_word_max: "(sint (a :: ('a::len) word) sdiv sint (b :: ('a::len) word) < (2 ^ (size a - 1))) = ((a \<noteq> - (2 ^ (size a - 1)) \<or> (b \<noteq> -1)))" (is "?lhs = (\<not> ?a_int_min \<or> \<not> ?b_minus1)") proof (rule classical) assume not_thesis: "\<not> ?thesis" have not_zero: "b \<noteq> 0" using not_thesis by (clarsimp) let ?range = \<open>{- (2 ^ (size a - 1))..<2 ^ (size a - 1)} :: int set\<close> have result_range: "sint a sdiv sint b \<in> ?range \<union> {2 ^ (size a - 1)}" apply (cut_tac sdiv_int_range [where a="sint a" and b="sint b"]) apply (erule rev_subsetD) using sint_range' [where x=a] sint_range' [where x=b] apply (auto simp: max_def abs_if word_size) done have result_range_overflow: "(sint a sdiv sint b = 2 ^ (size a - 1)) = (?a_int_min \<and> ?b_minus1)" apply (rule iffI [rotated]) apply (clarsimp simp: signed_divide_int_def sgn_if word_size sint_int_min) apply (rule classical) apply (case_tac "?a_int_min") apply (clarsimp simp: word_size sint_int_min) apply (metis diff_0_right int_sdiv_negated_is_minus1 minus_diff_eq minus_int_code(2) power_eq_0_iff sint_minus1 zero_neq_numeral) apply (subgoal_tac "abs (sint a) < 2 ^ (size a - 1)") apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] apply (clarsimp simp: word_size) apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] apply auto apply (cases \<open>size a\<close>) apply simp_all apply (smt (z3) One_nat_def diff_Suc_1 signed_word_eqI sint_int_min sint_range' wsst_TYs(3)) done have result_range_simple: "(sint a sdiv sint b \<in> ?range) \<Longrightarrow> ?thesis" apply (insert sdiv_int_range [where a="sint a" and b="sint b"]) apply (clarsimp simp: word_size sint_int_min) done show ?thesis apply (rule UnE [OF result_range result_range_simple]) apply simp apply (clarsimp simp: word_size) using result_range_overflow apply (clarsimp simp: word_size) done qed lemmas sdiv_word_min' = sdiv_word_min [simplified word_size, simplified] lemmas sdiv_word_max' = sdiv_word_max [simplified word_size, simplified] lemma signed_arith_ineq_checks_to_eq: "((- (2 ^ (size a - 1)) \<le> (sint a + sint b)) \<and> (sint a + sint b \<le> (2 ^ (size a - 1) - 1))) = (sint a + sint b = sint (a + b ))" "((- (2 ^ (size a - 1)) \<le> (sint a - sint b)) \<and> (sint a - sint b \<le> (2 ^ (size a - 1) - 1))) = (sint a - sint b = sint (a - b))" "((- (2 ^ (size a - 1)) \<le> (- sint a)) \<and> (- sint a) \<le> (2 ^ (size a - 1) - 1)) = ((- sint a) = sint (- a))" "((- (2 ^ (size a - 1)) \<le> (sint a * sint b)) \<and> (sint a * sint b \<le> (2 ^ (size a - 1) - 1))) = (sint a * sint b = sint (a * b))" "((- (2 ^ (size a - 1)) \<le> (sint a sdiv sint b)) \<and> (sint a sdiv sint b \<le> (2 ^ (size a - 1) - 1))) = (sint a sdiv sint b = sint (a sdiv b))" "((- (2 ^ (size a - 1)) \<le> (sint a smod sint b)) \<and> (sint a smod sint b \<le> (2 ^ (size a - 1) - 1))) = (sint a smod sint b = sint (a smod b))" by (auto simp: sint_word_ariths word_size signed_div_arith signed_mod_arith sbintrunc_eq_in_range range_sbintrunc) lemma signed_arith_sint: "((- (2 ^ (size a - 1)) \<le> (sint a + 
sint b)) \<and> (sint a + sint b \<le> (2 ^ (size a - 1) - 1))) \<Longrightarrow> sint (a + b) = (sint a + sint b)" "((- (2 ^ (size a - 1)) \<le> (sint a - sint b)) \<and> (sint a - sint b \<le> (2 ^ (size a - 1) - 1))) \<Longrightarrow> sint (a - b) = (sint a - sint b)" "((- (2 ^ (size a - 1)) \<le> (- sint a)) \<and> (- sint a) \<le> (2 ^ (size a - 1) - 1)) \<Longrightarrow> sint (- a) = (- sint a)" "((- (2 ^ (size a - 1)) \<le> (sint a * sint b)) \<and> (sint a * sint b \<le> (2 ^ (size a - 1) - 1))) \<Longrightarrow> sint (a * b) = (sint a * sint b)" "((- (2 ^ (size a - 1)) \<le> (sint a sdiv sint b)) \<and> (sint a sdiv sint b \<le> (2 ^ (size a - 1) - 1))) \<Longrightarrow> sint (a sdiv b) = (sint a sdiv sint b)" "((- (2 ^ (size a - 1)) \<le> (sint a smod sint b)) \<and> (sint a smod sint b \<le> (2 ^ (size a - 1) - 1))) \<Longrightarrow> sint (a smod b) = (sint a smod sint b)" by (subst (asm) signed_arith_ineq_checks_to_eq; simp)+ end
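As a quick cross-check outside the proof assistant, here are two of the identities above (`t2n_mask_eq_if` and `mask_sub`) exhaustively verified on 8-bit words in Python, with `mask n = 2^n - 1`; this is only a sanity sketch, not part of the formal development:

```python
W = 8                              # word length, standing in for LENGTH('a)
mask = lambda n: (1 << n) - 1
wrap = lambda x: x & mask(W)       # interpret Python ints as W-bit words

# t2n_mask_eq_if: 2^n AND mask m = (2^n if n < m else 0)
for n in range(W):
    for m in range(W + 1):
        assert wrap(1 << n) & mask(m) == ((1 << n) if n < m else 0)

# mask_sub: n <= m ==> mask m - mask n = mask m AND NOT (mask n)
for n in range(W + 1):
    for m in range(n, W + 1):
        assert wrap(mask(m) - mask(n)) == mask(m) & wrap(~mask(n))
```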
module Language.LSP.Message.TextDocument import Language.JSON import Language.LSP.Message.Derive import Language.LSP.Message.Location import Language.LSP.Message.URI import Language.LSP.Message.Utils import Language.Reflection %language ElabReflection %default total ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocumentIdentifier public export record TextDocumentIdentifier where constructor MkTextDocumentIdentifier uri : DocumentURI %runElab deriveJSON defaultOpts `{{TextDocumentIdentifier}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#versionedTextDocumentIdentifier public export record VersionedTextDocumentIdentifier where constructor MkVersionedTextDocumentIdentifier uri : DocumentURI version : Int %runElab deriveJSON defaultOpts `{{VersionedTextDocumentIdentifier}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#versionedTextDocumentIdentifier public export record OptionalVersionedTextDocumentIdentifier where constructor MkOptionalVersionedTextDocumentIdentifier uri : DocumentURI version : Maybe Int %runElab deriveJSON defaultOpts `{{OptionalVersionedTextDocumentIdentifier}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocumentItem public export record TextDocumentItem where constructor MkTextDocumentItem uri : DocumentURI languageId : String version : Int text : String %runElab deriveJSON defaultOpts `{{TextDocumentItem}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocumentPositionParams public export record TextDocumentPositionParams where constructor MkTextDocumentPositionParams textDocument : TextDocumentIdentifier position : Position %runElab deriveJSON defaultOpts `{{TextDocumentPositionParams}} namespace TextDocumentSyncKind ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_synchronization public export data TextDocumentSyncKind = None | Full | Incremental export ToJSON TextDocumentSyncKind where toJSON None = JNumber 0 toJSON Full = JNumber 1 toJSON Incremental = JNumber 2 export FromJSON TextDocumentSyncKind where fromJSON (JNumber 0) = pure None fromJSON (JNumber 1) = pure Full fromJSON (JNumber 2) = pure Incremental fromJSON _ = Nothing ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didOpen public export record DidOpenTextDocumentParams where constructor MkDidOpenTextDocumentParams textDocument : TextDocumentItem %runElab deriveJSON defaultOpts `{{DidOpenTextDocumentParams}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#documentFilter public export record DocumentFilter where constructor MkDocumentFilter language : Maybe String scheme : Maybe String pattern : Maybe String %runElab deriveJSON defaultOpts `{{DocumentFilter}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#documentFilter public export DocumentSelector : Type DocumentSelector = List DocumentFilter public export record TextDocumentRegistrationOptions where constructor MkTextDocumentRegistrationOptions documentSelector : OneOf [DocumentSelector, Null] %runElab deriveJSON defaultOpts `{{TextDocumentRegistrationOptions}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocumentRegistrationOptions public export record TextDocumentChangeRegistrationOptions where constructor 
MkTextDocumentChangeRegistrationOptions syncKind : TextDocumentSyncKind %runElab deriveJSON defaultOpts `{{TextDocumentChangeRegistrationOptions}} namespace DidChangeTextDocumentParams ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didChange public export record TextDocumentContentChangeEvent where constructor MkTextDocumentContentChangeEvent text : String %runElab deriveJSON defaultOpts `{{TextDocumentContentChangeEvent}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didChange public export record TextDocumentContentChangeEventWithRange where constructor MkTextDocumentContentChangeEventWithRange range : Range rangeLength : Maybe Int text : String %runElab deriveJSON defaultOpts `{{TextDocumentContentChangeEventWithRange}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didChange public export record DidChangeTextDocumentParams where constructor MkDidChangeTextDocumentParams textDocument : VersionedTextDocumentIdentifier contentChanges : List (OneOf [TextDocumentContentChangeEvent, TextDocumentContentChangeEventWithRange]) %runElab deriveJSON defaultOpts `{{DidChangeTextDocumentParams}} namespace TextDocumentSaveReason ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_willSave public export data TextDocumentSaveReason = Manual | AfterDelay | FocusOut export ToJSON TextDocumentSaveReason where toJSON Manual = JNumber 1 toJSON AfterDelay = JNumber 2 toJSON FocusOut = JNumber 3 export FromJSON TextDocumentSaveReason where fromJSON (JNumber 1) = pure Manual fromJSON (JNumber 2) = pure AfterDelay fromJSON (JNumber 3) = pure FocusOut fromJSON _ = neutral ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_willSave public export record WillSaveTextDocumentParams where constructor MkWillSaveTextDocumentParams textDocument : TextDocumentIdentifier reason : TextDocumentSaveReason %runElab deriveJSON defaultOpts `{{WillSaveTextDocumentParams}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didSave public export record SaveOptions where constructor MkSaveOptions includeText : Maybe Bool %runElab deriveJSON defaultOpts `{{SaveOptions}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didSave public export record TextDocumentSaveRegistrationOptions where constructor MkTextDocumentSaveRegistrationOptions documentSelector : OneOf [DocumentSelector, Null] includeText : Maybe Bool %runElab deriveJSON defaultOpts `{{TextDocumentSaveRegistrationOptions}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didSave public export record DidSaveTextDocumentParams where constructor MkDidSaveTextDocumentParams textDocument : TextDocumentIdentifier text : Maybe String %runElab deriveJSON defaultOpts `{{DidSaveTextDocumentParams}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didClose public export record DidCloseTextDocumentParams where constructor MkDidCloseTextDocumentParams textDocument : TextDocumentIdentifier %runElab deriveJSON defaultOpts `{{DidCloseTextDocumentParams}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didClose public export record TextDocumentSyncClientCapabilities where constructor 
MkTextDocumentSyncClientCapabilities dynamicRegistration : Maybe Bool willSave : Maybe Bool willSaveWaitUntil : Maybe Bool didSave : Maybe Bool %runElab deriveJSON defaultOpts `{{TextDocumentSyncClientCapabilities}} ||| Refer to https://microsoft.github.io/language-server-protocol/specification.html#textDocument_didClose public export record TextDocumentSyncOptions where constructor MkTextDocumentSyncOptions openClose : Maybe Bool change : Maybe TextDocumentSyncKind willSave : Maybe Bool willSaveWaitUntil : Maybe Bool save : Maybe (OneOf [Bool, SaveOptions]) %runElab deriveJSON defaultOpts `{{TextDocumentSyncOptions}}
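For orientation, the JSON these records are derived to (de)serialize follows the LSP specification; a `DidChangeTextDocumentParams` payload might look like the sketch below (field names per the spec; the URI, version, and positions are made-up examples):

```python
did_change_params = {
    "textDocument": {"uri": "file:///home/user/Main.idr", "version": 2},
    "contentChanges": [
        {   # with-range variant (TextDocumentContentChangeEventWithRange)
            "range": {
                "start": {"line": 3, "character": 0},
                "end":   {"line": 3, "character": 5},
            },
            "text": "hello",
        },
        {"text": "full new contents"},  # whole-document variant
    ],
}
```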
[STATEMENT] lemma (in vars) foo: "s<n := 2>\<cdot>b = s\<cdot>b" [PROOF STATE] proof (prove) goal (1 subgoal): 1. s\<langle>n := 2\<rangle>\<cdot>b = s\<cdot>b [PROOF STEP] by simp
import numpy as np
import pywt
from skimage.transform import resize
from skimage.measure import label

from filters import adaptive_thresh


def enhance_puncta(img, level=7):
    """Remove low-frequency wavelet signals to enhance puncta.

    Depending on image size, try level 6~8.
    """
    if level == 0:
        return img
    wp = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='sym')
    back = resize(np.array(wp['d'*level].data), img.shape, order=3, mode='reflect')/(2**level)
    cimg = img - back
    cimg[cimg < 0] = 0
    return cimg


def detect_puncta(img, level=7, PERC=50, FILSIZE=1):
    pimg = enhance_puncta(img.astype(np.uint16), level)
    limg = label(adaptive_thresh(pimg, R=PERC, FILTERINGSIZE=FILSIZE), connectivity=1)
    return limg
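A minimal usage sketch (illustrative only: it assumes the local `filters` module providing `adaptive_thresh` is importable, and it runs on a synthetic image rather than real microscopy data):

```python
import numpy as np

# Synthetic 256x256 image: Poisson background plus two bright puncta.
rng = np.random.default_rng(0)
img = rng.poisson(10, size=(256, 256)).astype(np.uint16)
img[64, 64] += 200
img[128, 200] += 200

labels = detect_puncta(img, level=6, PERC=50, FILSIZE=1)
print(labels.max(), "puncta detected")
```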
# Solving equations by addition

Let's solve a linear equation using addition.

**Note**: Lesson inspired by https://www.basic-mathematics.com/solving-equations-using-addition.html

Here we import a few functions from SymPy. Each of these functions will be explained.

```python
from sympy import symbols, Eq, simplify, solve
```

## Overview

Given we have an equation

$$x + -b = c$$

we want to solve for the value of $x$. We need to "move" the $b$ value over to the other side so we can have $x$ by itself. Notice the negative or minus sign in front of $b$. We can add a positive version of the negative $b$ to move it over to the other side.

$$x + -b + b = c + b$$

Once we do this, the negative and positive $b$ values will cancel out to zero.

$$x + 0 = c + b$$

This will simplify to having $x$ on one side like we want it to.

$$x = c + b$$

## Setup

The above uses variables instead of numbers. Let's play around with this ourselves with Python and SymPy using real numbers. Let's say we want to solve for $x$ in the following equation.

$$x + (-2) = 8$$

First we need to create our $x$ variable for our equation using the `symbols()` function.

```python
x = symbols('x')
```

We can write Python code very similar to our equation above. Let us define our equation. This can be done using the `Eq()` function. The first and second arguments for this function are the left and right sides of the equals sign, respectively.

```python
eq1 = Eq(x + (-2), 8)
eq1
```

$\displaystyle x - 2 = 8$

You may notice the left side of our equation looks a bit different from above, which was $x + (-2)$. These two expressions are equal and Python can check this for us.

```python
x + (-2) == x - 2
```

True

## Using Python and SymPy to Help Us

Another great thing about Python and SymPy is that we can solve our equation using the `solve()` function. Why show this? Because it gives us an end point against which we can immediately check our math work. The `solve()` function takes two parameters: the first is the equation and the second is the variable symbol you want to solve for.

```python
solve(eq1, x)
```

[10]

This tells us that solving for $x$ in our equation $x + (-2) = 8$ requires $x$ to be $10$, or $x = 10$. Let's use math to double-check this.

## Adding on both sides

Based on our Overview above, we want to add the positive version of our values on both sides of the equation. We can access the left-hand side and the right-hand side using the methods `.lhs` and `.rhs` on our equation object, respectively.

```python
# Left side of the equals
eq1.lhs
```

$\displaystyle x - 2$

```python
# Right side of the equals
eq1.rhs
```

$\displaystyle 8$

Now that we have the left and right sides, let's add the positive version of 2 because we are subtracting from it.

```python
eq1 = Eq(eq1.lhs + 2, eq1.rhs + 2)
eq1
```

$\displaystyle x = 10$

## Exercise

Solve the following equation using addition.

$$x + (-6) = 5$$

**Hint**: Start by translating the math equation into a SymPy equation using the `Eq()` function.

```python

```
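For reference, here is the whole add-to-both-sides pattern from this lesson collected into one sketch. The helper name `solve_by_addition` is made up for illustration; only `symbols`, `Eq`, and `solve` come from SymPy.

```python
from sympy import symbols, Eq, solve

x = symbols('x')

def solve_by_addition(eq, value):
    # Hypothetical helper: add the same value to both sides of eq.
    return Eq(eq.lhs + value, eq.rhs + value)

# The exercise equation: x + (-6) = 5
eq2 = Eq(x + (-6), 5)
eq2 = solve_by_addition(eq2, 6)            # Eq(x, 11)
assert solve(Eq(x + (-6), 5), x) == [11]   # cross-check with solve()
print(eq2)
```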
Require Import Events. Require Import TraceModel. Require Import Properties. Require Import CommonST. Require Import Robustdef. Require Import Criteria. Require Import ClassicalExtras. Require Import FunctionalExtensionality. Require Import Coq.Logic.ClassicalFacts. (** This file proves the collapses that happen in the presence of reflection in the source language *) (* RrHP --- \ | ----- RrSCP --- / / \ RHP / RrTP --- (<-> RFrSCP) / / \ | /---------- / RrSP / / RSCP (<-> RFrTP) / / / / | / / / / RTP --------- -- / \ / / \ RHSP (<-> RFrSP) / RDP RSP ------- *) Variable code_intro : forall {P1 P2 : par src} (h : P1 <> P2) (Cs1 Cs2 : ctx src), ctx src. Axiom beh_intro1 : forall P1 P2 (h : P1 <> P2) Cs1 Cs2, forall t, sem src ((code_intro h Cs1 Cs2) [P1]) t <-> sem src (Cs1 [P1]) t. Axiom beh_intro2 : forall P1 P2 (h : P1 <> P2) Cs1 Cs2, forall t, sem src ((code_intro h Cs1 Cs2) [P2]) t = sem src (Cs2 [P2]) t. (* R2HSP -> R2rSP and a similar argument for k >= 2 *) Lemma R2HSP_R2rSP : R2HSP -> R2rSP. Proof. rewrite <- R2HSC_R2HSP, <- R2rSC_R2rSP. intros H2rsc Ct P1 P2 m1 m2 H1 H2. destruct H1 as [t1 [Hpref1 H1]]. destruct H2 as [t2 [Hpref2 H2]]. destruct (classic (P1 = P2)) as [Heq | Hneq]. + rewrite <- Heq in *. destruct (H2rsc P1 Ct m1 m2) as [Cs Hspref]. ++ intros x [Hx | Hx]; [exists t1 | exists t2]; subst; auto. ++ exists Cs. split. +++ destruct (Hspref m1) as [tt1 Hpref11]; simpl; auto. now exists tt1. +++ destruct (Hspref m2) as [tt2 Hpref22]; simpl; auto. now exists tt2. + apply R2HSC_RSC in H2rsc. destruct (H2rsc P1 Ct t1 H1 m1 Hpref1) as [Cs1 [t1' [H' H'']]]. destruct (H2rsc P2 Ct t2 H2 m2 Hpref2) as [Cs2 [t2' [H2' H2'']]]. exists (code_intro Hneq Cs1 Cs2). split; [exists t1' | exists t2']; split; auto; [ now rewrite (beh_intro1 P1 P2 Hneq Cs1 Cs2) | now rewrite (beh_intro2 P1 P2 Hneq Cs1 Cs2)]. Qed. (* RHP -> R2rHP *) (* as usual we need prop_extensionality *) Hypothesis prop_ext : prop_extensionality. Lemma RHP_R2rHP : RHP -> R2rHP. Proof. rewrite <- RHC_RHP, <- R2rHC_R2rHP. intros hrc P1 P2 Ct. destruct (hrc P1 Ct) as [Cs1 H1]. destruct (hrc P2 Ct) as [Cs2 H2]. destruct (classic (P1 = P2)) as [Heq | Hneq]. + rewrite Heq in *. exists Cs1. split; apply functional_extensionality; intros t; apply prop_ext; now auto. + exists (code_intro Hneq Cs1 Cs2). split; apply functional_extensionality; intros t; apply prop_ext; [ now rewrite beh_intro1 | now rewrite beh_intro2]. Qed. (* RSCP -> R2rSCP *) Lemma RSCP_R2rSCP : RSCHP -> R2rSCHP. Proof. rewrite <- R2rSCHC_R2rSCHP, <- RSCHC_RSCHP. intros sscr P1 P2 Ct. destruct (classic (P1 = P2)) as [Heq | Hneq]. + rewrite <- Heq in *. destruct (sscr P1 Ct) as [Cs H]. now exists Cs. + destruct (sscr P1 Ct) as [Cs1 H1]. destruct (sscr P2 Ct) as [Cs2 H2]. exists (code_intro Hneq Cs1 Cs2). split; intros t H; [rewrite beh_intro1; now apply H1 | rewrite beh_intro2; now apply H2]. Qed. Lemma R2rSCP_R2rTP : R2rSCHP -> R2rTP. Proof. rewrite <- R2rSCHC_R2rSCHP, <- R2rTC_R2rTP. intros rp Ct P1 P2 t1 t2 H1 H2. destruct (rp P1 P2 Ct) as [Cs [HH1 HH2]]. exists Cs; split; [now apply HH1 | now apply HH2]. Qed. Theorem RSCHP_R2rTP : RSCHP -> R2rTP. Proof. intros H. now apply R2rSCP_R2rTP; apply RSCP_R2rSCP. Qed.
program test_update_until

  use bmif_2_0, only: BMI_FAILURE, BMI_SUCCESS
  use bmiheatf
  use fixtures, only: config_file, status, tolerance

  implicit none

  type (bmi_heat) :: m
  integer :: retcode

  retcode = test_more_than_dt()
  if (retcode.ne.BMI_SUCCESS) then
     stop BMI_FAILURE
  end if

  retcode = test_less_than_dt()
  if (retcode.ne.BMI_SUCCESS) then
     stop BMI_FAILURE
  end if

  retcode = test_multiple_of_dt()
  if (retcode.ne.BMI_SUCCESS) then
     stop BMI_FAILURE
  end if

contains

  function test_more_than_dt() result(code)
    double precision, parameter :: expected_time = 10.5d0
    double precision :: time
    integer :: code

    status = m%initialize(config_file)
    status = m%update_until(expected_time)
    status = m%get_current_time(time)
    status = m%finalize()

    write(*,*) "Test time more than dt"
    print *, time
    print *, expected_time

    code = BMI_SUCCESS
    if (abs(time - expected_time) > tolerance) then
       code = BMI_FAILURE
    end if
  end function test_more_than_dt

  function test_less_than_dt() result(code)
    double precision, parameter :: expected_time = 0.1d0
    double precision :: time
    integer :: code

    status = m%initialize(config_file)
    status = m%update_until(expected_time)
    status = m%get_current_time(time)
    status = m%finalize()

    write(*,*) "Test time less than dt"
    print *, time
    print *, expected_time

    code = BMI_SUCCESS
    if (abs(time - expected_time) > tolerance) then
       code = BMI_FAILURE
    end if
  end function test_less_than_dt

  function test_multiple_of_dt() result(code)
    double precision :: time, dt, expected_time
    integer :: code

    status = m%initialize(config_file)
    status = m%get_time_step(dt)
    expected_time = 3 * dt
    status = m%update_until(expected_time)
    status = m%get_current_time(time)
    status = m%finalize()

    write(*,*) "Test time multiple of dt"
    print *, time
    print *, expected_time

    code = BMI_SUCCESS
    if (abs(time - expected_time) > tolerance) then
       code = BMI_FAILURE
    end if
  end function test_multiple_of_dt

end program test_update_until
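The intent of the three checks, restated as a toy Python sketch (the `ToyModel` class is a stand-in for the heat model, not part of the BMI bindings; `update_until` here just steps a clock):

```python
BMI_SUCCESS, BMI_FAILURE = 0, 1
TOL = 1e-12

class ToyModel:
    # Stand-in model: fixed dt; update_until takes whole steps, then a
    # fractional final step so the clock lands exactly on the target.
    def __init__(self, dt=1.0):
        self.dt, self.time = dt, 0.0
    def update_until(self, target):
        while self.time + self.dt <= target:
            self.time += self.dt
        if self.time < target:
            self.time = target
        return BMI_SUCCESS

def test_until(expected):
    m = ToyModel()
    m.update_until(expected)
    return BMI_SUCCESS if abs(m.time - expected) <= TOL else BMI_FAILURE

assert test_until(10.5) == BMI_SUCCESS       # more than dt
assert test_until(0.1) == BMI_SUCCESS        # less than dt
assert test_until(3 * 1.0) == BMI_SUCCESS    # multiple of dt
```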
/- Copyright 2019 (c) Hans-Dieter Hiep. All rights reserved. Released under MIT license as described in the file LICENSE. -/ import data.finmap data.bool data.vector data.list data.multiset import data.finsupp open nat option finset list universes u v variables {α : Type u} {β : Type v} /- Indices of a list -/ @[derive decidable_eq] inductive pointer: list α → Type u | here (x : α) (xs : list α): pointer (x :: xs) | tail {xs : list α} (y : α): pointer xs → pointer (y :: xs) /- List membership with concrete witness (position) -/ @[derive decidable_eq] inductive list_at: α → list α → Type u | here (x : α) (xs : list α): list_at x (x :: xs) | tail {x : α} {l : list α} (y : α): list_at x l → list_at x (y :: l) lemma list_at_mem {x : α} {l : list α} : list_at x l → x ∈ l := begin intro H, induction l, cases H, cases H, constructor, refl, have: x ∈ l_tl, apply l_ih, assumption, right, assumption end /- A FIFO queue is a list of elements. Adding an element appends it to the back. Removing an element takes it from the front. -/ @[derive decidable_eq] structure queue (α : Type u) := (l : list α) @[reducible] def queue.add (q : queue α) (x : α) : queue α := ⟨q.l ++ [x]⟩ @[reducible] def queue.empty : queue α → bool | ⟨[]⟩ := tt | ⟨(x :: l)⟩ := ff def queue.full : queue α → Prop := λq, q ≠ ⟨[]⟩ @[reducible] def queue.remove : Π q : queue α, queue.full q → α × queue α | ⟨[]⟩ H := begin exfalso, apply H, simp end | ⟨(x :: l)⟩ _ := ⟨x, ⟨l⟩⟩ @[reducible] def queue.first (q : queue α) (H : queue.full q) : α := (queue.remove q H).fst @[reducible] def queue.unshift (q : queue α) (H : queue.full q) : queue α := (queue.remove q H).snd @[reducible] def queue.poll : queue α → option (α × queue α) | ⟨[]⟩ := none | ⟨(x :: l)⟩ := some ⟨x, ⟨l⟩⟩ instance queue.has_zero : has_zero (queue α) := ⟨⟨[]⟩⟩ /- A function with finite support can be updated. This either adds a new value, or overwrites the value previously mapped.
-/ namespace finsupp variables [decidable_eq α] [decidable_eq β] [has_zero β] def update (f : α →₀ β) (a : α) (b : β) : α →₀ β := ⟨if b = 0 then f.support.erase a else f.support ∪ {a}, (λa', if a = a' then b else f a'), λa', begin by_cases H : (a = a'); by_cases G : (b = 0); simp [G,H], { split, {intro, cases a_1, assumption}, {intro, have : ¬a' = a, intro, apply H, apply eq.symm, assumption, exact ⟨this, a_1⟩ } }, { split, {intro, cases a_1, exfalso, apply H, apply eq.symm, assumption, assumption}, { intro, right, assumption } } end⟩ @[simp] theorem update.to_fun (f : α →₀ β) (a : α) (b : β) : (update f a b).to_fun = (λa', if a = a' then b else f a') := rfl @[simp] theorem update.app_new_eq (f : α →₀ β) (a : α) (b : β) : (update f a b) a = b := begin simp [coe_fn], unfold has_coe_to_fun.coe, simp end theorem update.app_old_eq (f : α →₀ β) (a : α) (b : β) (c : α) (H : a ≠ c) : (update f a b) c = f c := begin simp [coe_fn], unfold has_coe_to_fun.coe, simp [H, coe_fn], unfold has_coe_to_fun.coe end end finsupp /- Elimination and matching with equality (thanks to Rob Lewis) -/ def option.elim {β : Sort v} (t : option α) (f : t = none → β) (g : Π(a : α), t = some a → β) : β := match t, rfl : (∀ b, t = b → β) with | none, h := f h | (some a), h := g a h end /- Lift list of options -/ lemma head_lift_nil {a : α} : head (lift (@nil α)) ≠ some a := begin intro, simp [lift, has_lift.lift, default, inhabited.default] at a_1, assumption end lemma tail_lift_some {hd a : α} {tl : list α} : head (lift (list.cons hd tl)) = some a → hd = a := begin intro, simp [lift, has_lift.lift, coe] at a_1, simp [lift_t, has_lift_t.lift, coe_t, has_coe_t.coe] at a_1, assumption end /- Decomposition of a finite set: a singleton and a remainder set, such that they are disjoint. A decomposition can be coerced to the union of the two. -/ inductive decomp_finset [decidable_eq α] (rem: finset α) (elem: α) : Type | mk: elem ∉ rem → decomp_finset instance decomp_finset.coe_finset [decidable_eq α] (rem: finset α) (elem: α) : has_coe (decomp_finset rem elem) (finset α) := ⟨λ_, rem ∪ {elem}⟩ notation Γ `;;` x := decomp_finset Γ x /- Existence of a proof of a proposition implies that proposition. -/ lemma exists_proof_prop (P Q : Prop) : (∃x : P, Q) → P := begin intro, cases a, assumption end
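The `finsupp.update` above keeps the support invariant honest: updating a point to `0` erases it from the support, anything else inserts it. A dict-based Python analogue (illustrative only; the dict's keys play the role of the support):

```python
def update(f: dict, a, b, zero=0):
    # Mirror of finsupp.update: the keys stay exactly the set of points
    # with nonzero value.
    g = dict(f)
    if b == zero:
        g.pop(a, None)   # erase a from the support
    else:
        g[a] = b         # add/overwrite a in the support
    return g

f = {1: 5}
assert update(f, 2, 7) == {1: 5, 2: 7}   # new point joins the support
assert update(f, 1, 0) == {}             # mapping to zero erases the point
```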
%sim_COF.m
%Dana Goerzen and Jamie Near, 2021.
%
% USAGE:
% d_out=sim_COF(H,d_in,order)
%
% DESCRIPTION:
% This function nulls signal from any undesired coherences in a spin system.
% Desired coherences are determined through extended phase graph analysis
% and pulse sequence design.
%
% INPUTS:
% H = Hamiltonian operator structure.
% d_in = input density matrix structure
% order = desired coherence order that you wish to keep signal from
%
% OUTPUTS:
% d_out = output density matrix structure

function d_out=sim_COF(H,d_in,order)
%initialize mask as permitting all coherences through, then iterate through coherence matrix in Hamiltonian to
%set any values that don't correspond to the desired coherence order to 0.
for n=1:length(H) %JN - Looping through the parts of the spin system:
    mask1=H(n).coherenceOrder==order;
    %zero any undesired coherences
    d_temp=mask1.*d_in{n};
    d_in{n}=d_temp;
end
d_out=d_in;
end
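For intuition, a rough NumPy sketch of the same masking idea (the array layout here is a stand-in for illustration, not the FID-A Hamiltonian/density-matrix structures):

```python
import numpy as np

def coherence_filter(coherence_order, d_in, order):
    # Zero every density-matrix entry whose coherence order differs from
    # `order`, mirroring mask1.*d_in{n} in the MATLAB loop above.
    mask = (coherence_order == order)
    return [mask * d for d in d_in]

co = np.array([[0, -1],
               [1,  0]])                      # toy coherence-order matrix
d = [np.ones((2, 2)), 2 * np.ones((2, 2))]    # toy density matrices
print(coherence_filter(co, d, order=-1))
```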
subroutine read_unformatted_double(m, n, k, a, filename)
  implicit none
  integer :: m, n, k
  double precision :: a(m,n,k)
  character*4096 :: filename

  open(10, file=trim(filename), form='unformatted')
  read(10) a
  close(10)
end subroutine

subroutine read_unformatted_int(m, n, k, a, filename)
  implicit none
  integer :: m, n, k
  integer :: a(m,n,k)
  character*4096 :: filename

  open(10, file=trim(filename), form='unformatted')
  read(10) a
  close(10)
end subroutine

subroutine read_unformatted_mixed(m, n, k, a, b, filename)
  implicit none
  integer :: m, n, k
  double precision :: a(m,n)
  integer :: b(k)
  character*4096 :: filename

  open(10, file=trim(filename), form='unformatted')
  read(10) a, b
  close(10)
end subroutine
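From Python, unformatted sequential files like these can usually be read with `scipy.io.FortranFile`; a sketch follows (the file names and shapes are made up, and the record layout is compiler-dependent, so treat this as an assumption-laden example):

```python
import numpy as np
from scipy.io import FortranFile

m, n, k = 2, 3, 4

f = FortranFile('data_double.bin', 'r')
# One record holding an (m, n, k) array, written in Fortran (column-major) order.
a = f.read_reals(dtype=np.float64).reshape((m, n, k), order='F')
f.close()

f = FortranFile('data_int.bin', 'r')
# Default Fortran integer is usually 4 bytes.
b = f.read_ints(dtype=np.int32).reshape((m, n, k), order='F')
f.close()
```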
/-
Copyright (c) 2023 Mantas Bakšys, Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mantas Bakšys, Yaël Dillies

! This file was ported from Lean 3 source module algebra.order.chebyshev
! leanprover-community/mathlib commit b7399344324326918d65d0c74e9571e3a8cb9199
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.BigOperators.Order
import Mathbin.Algebra.Order.Rearrangement
import Mathbin.GroupTheory.Perm.Cycle.Basic

/-!
# Chebyshev's sum inequality

This file proves the Chebyshev sum inequality.

Chebyshev's inequality states `(∑ i in s, f i) * (∑ i in s, g i) ≤ s.card * ∑ i in s, f i * g i`
when `f g : ι → α` monovary, and the reverse inequality when `f` and `g` antivary.

## Main declarations

* `monovary_on.sum_mul_sum_le_card_mul_sum`: Chebyshev's inequality.
* `antivary_on.card_mul_sum_le_sum_mul_sum`: Chebyshev's inequality, dual version.
* `sq_sum_le_card_mul_sum_sq`: Special case of Chebyshev's inequality when `f = g`.

## Implementation notes

In fact, we don't need much compatibility between the addition and multiplication of `α`, so we
can actually decouple them by replacing multiplication with scalar multiplication and making `f`
and `g` land in different types. As a bonus, this makes the dual statement trivial. The
multiplication versions are provided for convenience.

The case for `monotone`/`antitone` pairs of functions over a `linear_order` is not deduced in
this file because it is easily deducible from the `monovary` API.
-/


open Equiv Equiv.Perm Finset Function OrderDual

open BigOperators

variable {ι α β : Type _}

/-! ### Scalar multiplication versions -/


section Smul

variable [LinearOrderedRing α] [LinearOrderedAddCommGroup β] [Module α β] [OrderedSMul α β]
  {s : Finset ι} {σ : Perm ι} {f : ι → α} {g : ι → β}

/-- **Chebyshev's Sum Inequality**: When `f` and `g` monovary together (eg they are both
monotone/antitone), the scalar product of their sum is less than the size of the set times their
scalar product. -/
theorem MonovaryOn.sum_smul_sum_le_card_smul_sum (hfg : MonovaryOn f g s) :
    ((∑ i in s, f i) • ∑ i in s, g i) ≤ s.card • ∑ i in s, f i • g i :=
  by
  classical
  obtain ⟨σ, hσ, hs⟩ := s.countable_to_set.exists_cycle_on
  rw [← card_range s.card, sum_smul_sum_eq_sum_perm hσ]
  exact
    sum_le_card_nsmul _ _ _ fun n _ =>
      hfg.sum_smul_comp_perm_le_sum_smul fun x hx => hs fun h => hx <| is_fixed_pt.perm_pow h _
#align monovary_on.sum_smul_sum_le_card_smul_sum MonovaryOn.sum_smul_sum_le_card_smul_sum

/-- **Chebyshev's Sum Inequality**: When `f` and `g` antivary together (eg one is monotone, the
other is antitone), the scalar product of their sum is less than the size of the set times their
scalar product. -/
theorem AntivaryOn.card_smul_sum_le_sum_smul_sum (hfg : AntivaryOn f g s) :
    (s.card • ∑ i in s, f i • g i) ≤ (∑ i in s, f i) • ∑ i in s, g i := by
  convert hfg.dual_right.sum_smul_sum_le_card_smul_sum
#align antivary_on.card_smul_sum_le_sum_smul_sum AntivaryOn.card_smul_sum_le_sum_smul_sum

variable [Fintype ι]

/-- **Chebyshev's Sum Inequality**: When `f` and `g` monovary together (eg they are both
monotone/antitone), the scalar product of their sum is less than the size of the set times their
scalar product. -/
theorem Monovary.sum_smul_sum_le_card_smul_sum (hfg : Monovary f g) :
    ((∑ i, f i) • ∑ i, g i) ≤ Fintype.card ι • ∑ i, f i • g i :=
  (hfg.MonovaryOn _).sum_smul_sum_le_card_smul_sum
#align monovary.sum_smul_sum_le_card_smul_sum Monovary.sum_smul_sum_le_card_smul_sum

/-- **Chebyshev's Sum Inequality**: When `f` and `g` antivary together (eg one is monotone, the
other is antitone), the scalar product of their sum is less than the size of the set times their
scalar product. -/
theorem Antivary.card_smul_sum_le_sum_smul_sum (hfg : Antivary f g) :
    (Fintype.card ι • ∑ i, f i • g i) ≤ (∑ i, f i) • ∑ i, g i := by
  convert(hfg.dual_right.monovary_on _).sum_smul_sum_le_card_smul_sum
#align antivary.card_smul_sum_le_sum_smul_sum Antivary.card_smul_sum_le_sum_smul_sum

end Smul

/-! ### Multiplication versions

Special cases of the above when scalar multiplication is actually multiplication.
-/


section Mul

variable [LinearOrderedRing α] {s : Finset ι} {σ : Perm ι} {f g : ι → α}

/-- **Chebyshev's Sum Inequality**: When `f` and `g` monovary together (eg they are both
monotone/antitone), the product of their sum is less than the size of the set times their scalar
product. -/
theorem MonovaryOn.sum_mul_sum_le_card_mul_sum (hfg : MonovaryOn f g s) :
    ((∑ i in s, f i) * ∑ i in s, g i) ≤ s.card * ∑ i in s, f i * g i :=
  by
  rw [← nsmul_eq_mul]
  exact hfg.sum_smul_sum_le_card_smul_sum
#align monovary_on.sum_mul_sum_le_card_mul_sum MonovaryOn.sum_mul_sum_le_card_mul_sum

/-- **Chebyshev's Sum Inequality**: When `f` and `g` antivary together (eg one is monotone, the
other is antitone), the product of their sum is greater than the size of the set times their
scalar product. -/
theorem AntivaryOn.card_mul_sum_le_sum_mul_sum (hfg : AntivaryOn f g s) :
    ((s.card : α) * ∑ i in s, f i * g i) ≤ (∑ i in s, f i) * ∑ i in s, g i :=
  by
  rw [← nsmul_eq_mul]
  exact hfg.card_smul_sum_le_sum_smul_sum
#align antivary_on.card_mul_sum_le_sum_mul_sum AntivaryOn.card_mul_sum_le_sum_mul_sum

/-- Special case of **Chebyshev's Sum Inequality** or the **Cauchy-Schwarz Inequality**: The
square of the sum is less than the size of the set times the sum of the squares. -/
theorem sq_sum_le_card_mul_sum_sq : (∑ i in s, f i) ^ 2 ≤ s.card * ∑ i in s, f i ^ 2 :=
  by
  simp_rw [sq]
  exact (monovaryOn_self _ _).sum_mul_sum_le_card_mul_sum
#align sq_sum_le_card_mul_sum_sq sq_sum_le_card_mul_sum_sq

variable [Fintype ι]

/-- **Chebyshev's Sum Inequality**: When `f` and `g` monovary together (eg they are both
monotone/antitone), the product of their sum is less than the size of the set times their scalar
product. -/
theorem Monovary.sum_mul_sum_le_card_mul_sum (hfg : Monovary f g) :
    ((∑ i, f i) * ∑ i, g i) ≤ Fintype.card ι * ∑ i, f i * g i :=
  (hfg.MonovaryOn _).sum_mul_sum_le_card_mul_sum
#align monovary.sum_mul_sum_le_card_mul_sum Monovary.sum_mul_sum_le_card_mul_sum

/-- **Chebyshev's Sum Inequality**: When `f` and `g` antivary together (eg one is monotone, the
other is antitone), the product of their sum is less than the size of the set times their scalar
product. -/
theorem Antivary.card_mul_sum_le_sum_mul_sum (hfg : Antivary f g) :
    ((Fintype.card ι : α) * ∑ i, f i * g i) ≤ (∑ i, f i) * ∑ i, g i :=
  (hfg.AntivaryOn _).card_mul_sum_le_sum_mul_sum
#align antivary.card_mul_sum_le_sum_mul_sum Antivary.card_mul_sum_le_sum_mul_sum

end Mul

variable [LinearOrderedField α] {s : Finset ι} {f : ι → α}

theorem sum_div_card_sq_le_sum_sq_div_card :
    ((∑ i in s, f i) / s.card) ^ 2 ≤ (∑ i in s, f i ^ 2) / s.card :=
  by
  obtain rfl | hs := s.eq_empty_or_nonempty
  · simp
  rw [← card_pos, ← @Nat.cast_pos α] at hs
  rw [div_pow, div_le_div_iff (sq_pos_of_ne_zero _ hs.ne') hs, sq (s.card : α), mul_left_comm, ←
    mul_assoc]
  exact mul_le_mul_of_nonneg_right sq_sum_le_card_mul_sum_sq hs.le
#align sum_div_card_sq_le_sum_sq_div_card sum_div_card_sq_le_sum_sq_div_card
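For reference, the inequalities formalized above read as follows in conventional notation (a LaTeX restatement of the docstrings, nothing new):

\[
  \Bigl(\sum_{i \in s} f_i\Bigr)\Bigl(\sum_{i \in s} g_i\Bigr)
    \;\le\; |s| \sum_{i \in s} f_i\, g_i
  \qquad \text{when $f$ and $g$ monovary on $s$,}
\]
with the reversed inequality when $f$ and $g$ antivary, and, taking $f = g$, the special cases
\[
  \Bigl(\sum_{i \in s} f_i\Bigr)^{2} \le |s| \sum_{i \in s} f_i^{2},
  \qquad
  \Bigl(\tfrac{1}{|s|}\sum_{i \in s} f_i\Bigr)^{2}
    \le \tfrac{1}{|s|}\sum_{i \in s} f_i^{2}.
\]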
/**
 * Copyright (c) 2020 MIT License by Helen Xu, Sean Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 **/

#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <cmath>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <numeric>  // for inclusive_scan
#include <random>
#include <type_traits>
#include <vector>

#include "fasttime.h"
#include "fmm_2d.h"
#include "fmm_3d.h"
#include "gtest/gtest.h"
#include "tensor.h"

typedef boost::multiprecision::cpp_bin_float_100 mp_100;

// OUTDATED marks pre-C++17 compilers, where std::inclusive_scan is not
// available. (The previous `#define OUTDATED __cplusplus <= 201402L;` made
// the `#ifdef OUTDATED` check below true unconditionally.)
#if __cplusplus <= 201402L
#define OUTDATED
#endif

// #define DEBUG
#define CHECK_RESULT

namespace excluded_sum {
namespace {

template <typename T>
void omp_scan(T* a, const int N) {
  T scan_a = 0;
#pragma omp simd reduction(inscan, + : scan_a)
  for (int i = 0; i < N; i++) {
    scan_a += a[i];
#pragma omp scan inclusive(scan_a)
    a[i] = scan_a;
  }
}

int roundUp(int numToRound, int multiple) {
  if (multiple == 0) return numToRound;

  int remainder = numToRound % multiple;
  if (remainder == 0) return numToRound;

  return numToRound + multiple - remainder;
}

template <typename T>
void cpp_inclusive_scan(T* arr, const int n) {
#ifdef OUTDATED
  omp_scan(arr, n);
#else
  std::inclusive_scan(arr, arr + n, arr);
#endif
}

template <typename T>
void cpp_naive_suffix_scan(T* arr, const int n) {
  std::reverse(arr, arr + n);
  cpp_inclusive_scan(arr, n);
  std::reverse(arr, arr + n);
}

// Uses GTest framework to set up a variety of tests for arrays of INC/EXSUM
// that fit in RAM
// Testing space partitions for prefix sum are:
// - Array size
// - Array values
// - Array types
// - TODO PADDING
// - TODO later (operation)
class AccuracyTest : public ::testing::Test {
 protected:
  AccuracyTest() {
    // You can do set-up work for each test here.
  }

  ~AccuracyTest() override {
    // You can do clean-up work that doesn't throw exceptions here.
  }

  // If the constructor and destructor are not enough for setting up
  // and cleaning up each test, you can define the following methods:

  void SetUp() override {
    // Code here will be called immediately after the constructor (right
    // before each test).
  }

  void TearDown() override {
    // Code here will be called immediately after each test (right
    // before the destructor).
  }

  // Class members declared here can be used by all tests in the test suite
  // for Foo.

  // For floating-point types, gets relative error between x and y
  // with reference to the minimum of the two.
template <typename T> const T RelativeError(T x, T y) { T xAbs = abs(x); T yAbs = abs(y); T diff = abs(xAbs - yAbs); return diff / std::min(xAbs, yAbs); } template<typename T, typename T1> const T RMSE(T* ref, T1* arr, size_t N) { T rms_rel_err(0); /* mp_100 max_rel_err(0); mp_100 min_rel_err(1); mp_100 avg_rel_err(0); mp_100 rms_rel_err(0); */ for (size_t i = 1; i < N; ++i) { //EXPECT_NEAR(tensor.data()[i], static_cast<float>(array_ref[i]), 1e3); T err = RelativeError(ref[i], T(arr[i])); /* if (err > max_rel_err) { max_rel_err = err; } if (err < min_rel_err) { min_rel_err = err; } avg_rel_err += err; */ rms_rel_err += err * err; } // avg_rel_err = avg_rel_err / N; rms_rel_err = sqrt(rms_rel_err / N); // printf("avg %f, rmse %f, min %f, max %f\n", avg_rel_err, rms_rel_err, min_rel_err, max_rel_err); return rms_rel_err; } template <size_t N> void prefix_sum_mp_100(mp_100* arr, const size_t increment) { mp_100 sum(0); for (size_t i = 0; i < N; i += increment) { sum += mp_100(arr[i]); arr[i] = mp_100(sum); } } template <typename T1, typename T2, size_t N> std::vector<T1> copy_array(T2* arr) { std::vector<T1> copy; for (size_t i = 0; i < N; ++i) { copy.emplace_back(T1(arr[i])); } return copy; } template <typename T> void print_vector(std::vector<T> const& a) { for (int i = 0; i < a.size(); i++) { std::cout << a.at(i) << " "; } std::cout << std::endl; } // Kahan Summation algorithm for floats or doubles. Follows same prefix // sum API as rest of the implementations. // Optimization by the compiler is turned off to ensure the numerical // stability and accuracy of the algorithm. Otherwise some parts would be // optimized out and it would lose its compensated summation. #pragma clang optimize off template <typename T, size_t N> void KahanSummation(T* arr, const size_t increment) { T sum = 0; T c = 0; for (size_t i = 0; i < N; i += increment) { T y = arr[i] - c; T t = sum + y; c = (t - sum) - y; sum = t; arr[i] = sum; } } #pragma clang optimize on template <typename T, size_t N> void naive_serial(T* arr, const size_t increment) { T sum = 0; for (size_t i = 0; i < N; i += increment) { sum += arr[i]; arr[i] = sum; } } }; TEST_F(AccuracyTest, Float) { // initialize type and size typedef double dtype; typedef vector<size_t> index_t; std::ofstream f("ND-exsum-double-accuracy.csv"); int increment = 4; int lower = 1 << 20; int upper = (1 << 20) + increment; /* int lower = 1 << 26; int upper = (1 << 26) + increment; */ // size_t box_len = 2; size_t box_len = 8; // lines separate the different algorithms // columns are the CSV values if (f.is_open()) { f << "Algorithm Num"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << ",runtime(ms) for N = "; f << N; } f << std::endl; f << "X-data"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << "," << N; } f << std::endl; } f.precision(10); // dim = 1 { printf("1d\n"); f << 1; constexpr size_t dim = 1; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 4; // 4 ^ 1 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<mp_100, dim> tensor_naive(side_lens); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; printf("BDBS\n"); // time incsum + subtraction tensor_ref = tensor_copy; tensor_ref.ExsumIncsumSubtraction(box_lens); printf("BOXCOMP\n"); // time box complement tensor_test = tensor_copy; tensor_test.BoxComplementExsum(box_lens); // time summed area table + subtraction tensor_test_2 = tensor_copy; tensor_test_2.SummedAreaTable(box_lens); printf("naive\n"); // time naive mp_100* data = tensor_naive.data(); // copy and convert to extended precision for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { data[i] = mp_100(tensor.data()[i]); } tensor_naive.NaiveExsum(box_lens); printf("compute rmse\n"); // const T RMSE(T* ref, T1* arr, size_t N) { mp_100 rmse_bdbs = RMSE(tensor_naive.data(), tensor_ref.data(), N); mp_100 rmse_boxcomp = RMSE(tensor_naive.data(), tensor_test.data(), N); mp_100 rmse_sat = RMSE(tensor_naive.data(), tensor_test_2.data(), N); if (f.is_open()) { f << N << ","; f << rmse_sat << ","; f << rmse_bdbs << ","; f << rmse_boxcomp; } } f << std::endl; } // dim = 2 { printf("2d\n"); f << 2; constexpr size_t dim = 2; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 16; // 4 ^ 2 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<mp_100, dim> tensor_naive(side_lens); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; printf("BDBS\n"); // time incsum + subtraction tensor_ref = tensor_copy; tensor_ref.ExsumIncsumSubtraction(box_lens); printf("BOXCOMP\n"); // time box complement tensor_test = tensor_copy; tensor_test.BoxComplementExsum(box_lens); // time summed area table + subtraction tensor_test_2 = tensor_copy; tensor_test_2.SummedAreaTable(box_lens); printf("naive\n"); // time naive mp_100* data = tensor_naive.data(); // copy and convert to extended precision for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { data[i] = mp_100(tensor.data()[i]); } tensor_naive.NaiveExsum(box_lens); printf("compute rmse\n"); // const T RMSE(T* ref, T1* arr, size_t N) { mp_100 rmse_bdbs = RMSE(tensor_naive.data(), tensor_ref.data(), N); mp_100 rmse_boxcomp = RMSE(tensor_naive.data(), tensor_test.data(), N); mp_100 rmse_sat = RMSE(tensor_naive.data(), tensor_test_2.data(), N); if (f.is_open()) { f << N << ","; f << rmse_sat << ","; f << rmse_bdbs << ","; f << rmse_boxcomp; } } f << std::endl; } // dim = 3 { printf("3d\n"); f << 3; constexpr size_t dim = 3; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 64; #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<mp_100, dim> tensor_naive(side_lens); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; printf("BDBS\n"); // time incsum + subtraction tensor_ref = tensor_copy; tensor_ref.ExsumIncsumSubtraction(box_lens); printf("BOXCOMP\n"); // time box complement tensor_test = tensor_copy; tensor_test.BoxComplementExsum(box_lens); // time summed area table + subtraction tensor_test_2 = tensor_copy; tensor_test_2.SummedAreaTable(box_lens); printf("naive\n"); // time naive mp_100* data = tensor_naive.data(); // copy and convert to extended precision for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { data[i] = mp_100(tensor.data()[i]); } tensor_naive.NaiveExsum(box_lens); printf("compute rmse\n"); // const T RMSE(T* ref, T1* arr, size_t N) { mp_100 rmse_bdbs = RMSE(tensor_naive.data(), tensor_ref.data(), N); mp_100 rmse_boxcomp = RMSE(tensor_naive.data(), tensor_test.data(), N); mp_100 rmse_sat = RMSE(tensor_naive.data(), tensor_test_2.data(), N); if (f.is_open()) { f << N << ","; f << rmse_sat << ","; f << rmse_bdbs << ","; f << rmse_boxcomp; } } f << std::endl; } // dim = 4 { printf("4d\n"); f << 4; constexpr size_t dim = 4; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 256; #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<mp_100, dim> tensor_naive(side_lens); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; printf("BDBS\n"); // time incsum + subtraction tensor_ref = tensor_copy; tensor_ref.ExsumIncsumSubtraction(box_lens); printf("BOXCOMP\n"); // time box complement tensor_test = tensor_copy; tensor_test.BoxComplementExsum(box_lens); // time summed area table + subtraction tensor_test_2 = tensor_copy; tensor_test_2.SummedAreaTable(box_lens); printf("naive\n"); // time naive mp_100* data = tensor_naive.data(); // copy and convert to extended precision for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { data[i] = mp_100(tensor.data()[i]); } tensor_naive.NaiveExsum(box_lens); printf("compute rmse\n"); // const T RMSE(T* ref, T1* arr, size_t N) { mp_100 rmse_bdbs = RMSE(tensor_naive.data(), tensor_ref.data(), N); mp_100 rmse_boxcomp = RMSE(tensor_naive.data(), tensor_test.data(), N); mp_100 rmse_sat = RMSE(tensor_naive.data(), tensor_test_2.data(), N); if (f.is_open()) { f << N << ","; f << rmse_sat << ","; f << rmse_bdbs << ","; f << rmse_boxcomp; } } f << std::endl; } // dim = 5 { printf("5d\n"); f << 5; constexpr size_t dim = 5; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 1024; #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<mp_100, dim> tensor_naive(side_lens); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; printf("BDBS\n"); // time incsum + subtraction tensor_ref = tensor_copy; tensor_ref.ExsumIncsumSubtraction(box_lens); printf("BOXCOMP\n"); // time box complement tensor_test = tensor_copy; tensor_test.BoxComplementExsum(box_lens); // time summed area table + subtraction tensor_test_2 = tensor_copy; tensor_test_2.SummedAreaTable(box_lens); printf("naive\n"); // time naive mp_100* data = tensor_naive.data(); // copy and convert to extended precision for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { data[i] = mp_100(tensor.data()[i]); } tensor_naive.NaiveExsum(box_lens); printf("compute rmse\n"); // const T RMSE(T* ref, T1* arr, size_t N) { mp_100 rmse_bdbs = RMSE(tensor_naive.data(), tensor_ref.data(), N); mp_100 rmse_boxcomp = RMSE(tensor_naive.data(), tensor_test.data(), N); mp_100 rmse_sat = RMSE(tensor_naive.data(), tensor_test_2.data(), N); if (f.is_open()) { f << N << ","; f << rmse_sat << ","; f << rmse_bdbs << ","; f << rmse_boxcomp; } } f << std::endl; } } class PerfTest : public ::testing::Test { protected: PerfTest() { // You can do set-up work for each test here. } ~PerfTest() override { // You can do clean-up work that doesn't throw exceptions here. } // If the constructor and destructor are not enough for setting up // and cleaning up each test, you can define the following methods: void SetUp() override { // Code here will be called immediately after the constructor (right // before each test). } void TearDown() override { // Code here will be called immediately after each test (right // before the destructor). } // Class members declared here can be used by all tests in the test suite // for Foo. 
template <typename T1, typename T2> std::vector<T1> copy_array(T2* arr, const size_t N) { std::vector<T1> copy; for (size_t i = 0; i < N; ++i) { copy.emplace_back(T1(arr[i])); } return copy; } }; /* // size 2^24 was previously used for timing results TEST_F(AccuracyTest, Float) { // initialize type and size typedef float dtype; const size_t N = 1 << 2; typedef const vector<size_t> index_t; index_t side_lens {N, N + 2}; exsum_tensor::Tensor_2D<dtype> tensor(side_lens); //tensor.RandFill(1, 1); //tensor.Print(); tensor.SetElt(index_t {0, 0}, 1); tensor.SetElt(index_t {0, 1}, 7); tensor.SetElt(index_t {0, 2}, 3); tensor.SetElt(index_t {0, 3}, 2); tensor.SetElt(index_t {0, 4}, 4); tensor.SetElt(index_t {0, 5}, 2); tensor.SetElt(index_t {1, 0}, 6); tensor.SetElt(index_t {1, 1}, 3); tensor.SetElt(index_t {1, 2}, 0); tensor.SetElt(index_t {1, 3}, 9); tensor.SetElt(index_t {1, 4}, 3); tensor.SetElt(index_t {1, 5}, 6); tensor.SetElt(index_t {2, 0}, 8); tensor.SetElt(index_t {2, 1}, 4); tensor.SetElt(index_t {2, 2}, 4); tensor.SetElt(index_t {2, 3}, 1); tensor.SetElt(index_t {2, 4}, 9); tensor.SetElt(index_t {2, 5}, 5); tensor.SetElt(index_t {3, 0}, 3); tensor.SetElt(index_t {3, 1}, 7); tensor.SetElt(index_t {3, 2}, 6); tensor.SetElt(index_t {3, 3}, 1); tensor.SetElt(index_t {3, 4}, 4); tensor.SetElt(index_t {3, 5}, 3); tensor.Print(); index_t box_lens {2 , 3}; exsum_tensor::Tensor_2D<dtype> incsum_copy(tensor); exsum_tensor::Tensor_2D<dtype> exsum_copy(tensor); exsum_tensor::Tensor_2D<dtype> exsum_copy2(tensor); exsum_tensor::Tensor_2D<dtype> corners(tensor); exsum_tensor::Tensor_2D<dtype> incsum(tensor); exsum_tensor::Tensor_2D<dtype> box_comp(tensor); incsum_copy.IncsumCheck(box_lens); exsum_copy.ExsumCheckSubtraction(box_lens); exsum_copy2.ExsumCheckNaive(box_lens); corners.CornersExsum(box_lens); incsum.Incsum(box_lens); box_comp.BoxComplementExsum(box_lens); std::cout << "=================== Results ==========================" << std::endl; tensor.Print(); //incsum_copy.Print(); //exsum_copy.Print(); exsum_copy2.Print(); corners.Print(); //incsum.Print(); box_comp.Print(); } */ TEST_F(PerfTest, Float) { // initialize type and size typedef float dtype; int trials = 1; int lower_pow2 = 3; int upper_pow2 = 4; std::ofstream f("2D-exsum-float-performance.csv"); // lines separate the different algorithms // columns are the CSV values if (f.is_open()) { f << "Algorithm Num"; for (int p = lower_pow2; p < upper_pow2; p++) { const size_t N = 1 << p; f << ",runtime(ms) for "; f << N << " elems"; } f << std::endl; f << "X-data"; for (int p = lower_pow2; p < upper_pow2; p++) { const size_t N = 1 << p; f << "," << N; } f << std::endl; } f.precision(10); for (int j = 0; j < 1; j++) { f << j; for (int p = lower_pow2; p < upper_pow2; p++) { const size_t N = 1 << p; typedef const vector<size_t> index_t; index_t side_lens {N, N}; exsum_tensor::Tensor_2D<dtype> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor_2D<dtype> tensor_copy(tensor); index_t box_lens {4 , 4}; exsum_tensor::Tensor_2D<dtype> box_comp(tensor_copy); std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { box_comp = tensor_copy; fasttime_t start = gettime(); box_comp.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times.emplace_back(elapsed); } tensor.ExsumCheckSubtraction(box_lens); std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size()/2, elapsed_times.end()); double median_time = elapsed_times[elapsed_times.size()/2]; for 
(size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor.data()[i], box_comp.data()[i], N >> 3); } f << "," << median_time * 1000; } f << std::endl; } f.close(); } /* TEST_F(AccuracyTest, Double) { // initialize type and size typedef double dtype; const size_t N = 1 << 2; typedef const vector<size_t> index_t; index_t side_lens {N, N + 2, N}; exsum_tensor::Tensor_3D<dtype> tensor(side_lens); tensor.RandFill(1, 1); tensor.SetElt(index_t {1, 1, 2}, 3); //tensor.Print(); exsum_tensor::Tensor_3D<dtype> exsum_test(tensor); index_t box_lens {2, 3, 2}; tensor.Print(); fasttime_t start = gettime(); tensor.ExsumCheckNaive(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); std::cout << "=====" << std::endl; std::cout << elapsed * 1000 << std::endl; tensor.Print(); start = gettime(); // exsum_test.BoxComplementExsum(box_lens); // exsum_test.BoxComplementExsum_space(box_lens); exsum_test.BoxComplementExsum_space_new(box_lens); // exsum_test.CornersExsum_dN(box_lens); // exsum_test.CornersExsum_leaf(box_lens, 3); end = gettime(); elapsed = tdiff(start, end); std::cout << "=====" << std::endl; std::cout << elapsed * 1000 << std::endl; exsum_test.Print(); for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_DOUBLE_EQ(tensor.data()[i], exsum_test.data()[i]); } } */ TEST_F(PerfTest, Double) { // initialize type and size typedef double dtype; typedef const vector<size_t> index_t; int trials; #ifdef DEBUG trials = 1; #else trials = 3; #endif int lower = 2; int upper; #ifdef DEBUG upper = lower + 1; #else upper = 24; #endif #ifdef DELAY lower = 4; upper = 8; #endif int increment = 4; size_t box_len; #ifdef DEBUG box_len = 2; #else box_len = 4; #endif #ifdef DELAY std::ofstream f("3D-exsum-double-performance-" + std::to_string(DELAY) + ".csv"); #else std::ofstream f("3D-exsum-double-performance.csv"); #endif typedef void (exsum_tensor::Tensor_3D<dtype>::*fn)(index_t&); static fn funcs[] = { &exsum_tensor::Tensor_3D<dtype>::ExsumCheckSubtraction, &exsum_tensor::Tensor_3D<dtype>::SummedAreaTable, &exsum_tensor::Tensor_3D<dtype>::CornersExsum_dN, // spine &exsum_tensor::Tensor_3D<dtype>::CornersExsum_Space, // space = 1 &exsum_tensor::Tensor_3D<dtype>::CornersExsum_leaf_2, // space = 2 &exsum_tensor::Tensor_3D<dtype>::CornersExsum_leaf, // space = 4 &exsum_tensor::Tensor_3D<dtype>::CornersExsum, // space = 8 &exsum_tensor::Tensor_3D<dtype>::BoxComplementExsum, }; #ifdef DELAY fasttime_t t1 = gettime(); for (int i = 0; i < trials * 10000; i++) { volatile long x = fib(DELAY); } fasttime_t t2 = gettime(); double elapsed_time = tdiff(t1, t2); double time_ms = (elapsed_time * 1000) / (trials * 10000); std::cout << "fib time (ms): " << time_ms << std::endl; #endif // lines separate the different algorithms // columns are the CSV values if (f.is_open()) { f << "Algorithm Num"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << ",runtime(ms) for "; f << N * N * N << " elems"; } #ifdef DELAY f << ", fibtime(ms)"; #endif f << std::endl; f << "X-data"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << "," << N * N * N; } #ifdef DELAY f << "," << time_ms; #endif f << std::endl; } f.precision(10); int len = sizeof(funcs) / sizeof(funcs[0]); for (int j = 0; j < len; j++) { f << j; std::cout << "Algorithm " << j << std::endl; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; index_t side_lens{N, N, N}; std::cout << "N = " << N * N * N << std::endl; 
exsum_tensor::Tensor_3D<dtype> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor_3D<dtype> tensor_copy(tensor); index_t box_lens{box_len, box_len, box_len}; exsum_tensor::Tensor_3D<dtype> tensor_test(tensor_copy); std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); (tensor_test.*funcs[j])(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times.emplace_back(elapsed); } tensor.ExsumCheckSubtraction(box_lens); std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); double median_time = elapsed_times[elapsed_times.size() / 2]; #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { // N>>3 was 0 in debugging, so just making it something tiny EXPECT_NEAR(tensor.data()[i], tensor_test.data()[i], (double)1 / 10000000); // N >> 3); } #endif f << "," << median_time * 1000; } f << std::endl; } f.close(); } TEST_F(PerfTest, ArbitraryDims) { // initialize type and size typedef double dtype; typedef vector<size_t> index_t; std::ofstream f("ND-exsum-double-performance.csv"); int trials; #ifdef DEBUG trials = 1; #else trials = 3; #endif int increment = 4; int lower = 1 << 20; int upper = (1 << 20) + increment; /* int lower = 1 << 26; int upper = (1 << 26) + increment; */ // size_t box_len = 2; double debug_diff = (double) 1/100; size_t debug_shift = 3; size_t box_len = 8; // lines separate the different algorithms // columns are the CSV values if (f.is_open()) { f << "Algorithm Num"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << ",runtime(ms) for N = "; f << N; } f << std::endl; f << "X-data"; for (int p = lower; p < upper; p += increment) { const size_t N = p * box_len; f << "," << N; } f << std::endl; } f.precision(10); // dim = 1 { f << 1; constexpr size_t dim = 1; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 4; // 4 ^ 1 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_naive(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); printf("\tBDBS trial %d, time %f\n", k, elapsed); elapsed_times_incsum[k] = elapsed; } // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); printf("\tBOXCOMP trial %d, time %f\n", k, elapsed); elapsed_times[k] = elapsed; } #ifdef DEBUG for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], debug_diff); // tiny diff check } #endif // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); printf("\tSAT trial %d, time %f\n", k, elapsed); elapsed_times_sat[k] = elapsed; } #ifdef DEBUG for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], debug_diff); } #endif // time naive std::vector<double> elapsed_times_naive(trials); for (int k = 0; k < trials; k++) { tensor_naive = tensor_copy; fasttime_t start = gettime(); tensor_naive.NaiveExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); printf("\tNAIVE trial %d, time %f\n", k, elapsed); elapsed_times_naive[k] = elapsed; } #ifdef CHECK_RESULT // check naive for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_naive.data()[i], debug_diff); // tiny diff check } #endif int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); std::nth_element( elapsed_times_naive.begin(), elapsed_times_naive.begin() + elapsed_times_naive.size() / 2, elapsed_times_naive.end()); double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; double naive_med = elapsed_times_naive[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time f << "," << N << "," 
<< naive_med << "," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "NAIVE Time = " << naive_med << std::endl; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 2 { f << 2; constexpr size_t dim = 2; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 16; // 4 ^ 2 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. / dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_naive(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_incsum[k] = elapsed; } // time naive std::vector<double> elapsed_times_naive(trials); for (int k = 0; k < trials; k++) { tensor_naive = tensor_copy; fasttime_t start = gettime(); tensor_naive.NaiveExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_naive[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); std::nth_element( elapsed_times_naive.begin(), elapsed_times_naive.begin() + elapsed_times_naive.size() / 2, elapsed_times_naive.end()); #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } // check naive for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { // printf("checking naive idx %lu\n", i); 
EXPECT_NEAR(tensor_ref.data()[i], tensor_naive.data()[i], N >> debug_shift); } #endif double naive_med = elapsed_times_naive[med_idx] * 1000; double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time // f << "," << sat_med << "," << incsum_med << "," << boxcomp_med ; f << "," << N << "," << naive_med << "," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "NAIVE Time = " << naive_med << std::endl; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 3 { f << 3; constexpr size_t dim = 3; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 64; // 4 ^ 3 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. / dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); // setup output tensors exsum_tensor::Tensor<dtype, dim> tensor_naive(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_incsum[k] = elapsed; } // time naive std::vector<double> elapsed_times_naive(trials); for (int k = 0; k < trials; k++) { tensor_naive = tensor_copy; fasttime_t start = gettime(); tensor_naive.NaiveExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_naive[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); std::nth_element( elapsed_times_naive.begin(), elapsed_times_naive.begin() + 
elapsed_times_naive.size() / 2, elapsed_times_naive.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } // check naive for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_naive.data()[i], N>>debug_shift); // tiny diff check } #endif double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; double naive_med = elapsed_times_naive[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time f << "," << N << "," << naive_med << "," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "NAIVE Time = " << naive_med << std::endl; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 4 { f << 4; constexpr size_t dim = 4; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 256; // 4 ^ 4 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. / dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); // setup output tensors exsum_tensor::Tensor<dtype, dim> tensor_naive(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } // time naive std::vector<double> elapsed_times_naive(trials); for (int k = 0; k < trials; k++) { tensor_naive = tensor_copy; fasttime_t start = gettime(); tensor_naive.NaiveExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_naive[k] = elapsed; } int med_idx = trials / 2; // sort the 
times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); std::nth_element( elapsed_times_naive.begin(), elapsed_times_naive.begin() + elapsed_times_naive.size() / 2, elapsed_times_naive.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } // check naive for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_naive.data()[i], N>>debug_shift); // tiny diff check } #endif double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; double naive_med = elapsed_times_naive[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time f << "," << N << "," << naive_med << "," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "NAIVE Time = " << naive_med << std::endl; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 5 { f << 5; constexpr size_t dim = 5; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 1024; // 4 ^ 5 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); // setup output tensors exsum_tensor::Tensor<dtype, dim> tensor_naive(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_incsum[k] = elapsed; } // time naive std::vector<double> elapsed_times_naive(trials); for (int k = 0; k < trials; k++) { tensor_naive = tensor_copy; fasttime_t start = gettime(); tensor_naive.NaiveExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_naive[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); std::nth_element( elapsed_times_naive.begin(), elapsed_times_naive.begin() + elapsed_times_naive.size() / 2, elapsed_times_naive.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } // check naive for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_naive.data()[i], N>>debug_shift); // tiny diff check } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; double naive_med = elapsed_times_naive[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time f << "," << N << "," << naive_med << "," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "NAIVE Time = " << naive_med << std::endl; std::cout << "SAT Time = " << sat_med << 
std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } return; // dim = 6 { f << 6; constexpr size_t dim = 6; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 4096; // 4 ^ 6 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. / dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time /* f << "," << N << "," << sat_med << "," << incsum_med << "," << boxcomp_med; */ f << "," << N << ",0," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 7 { f << 7; constexpr size_t dim = 7; std::cout << "\nAlgorithm " << dim << std::endl; 
for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 16384; // 4 ^ 7 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. / dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time /* f << "," << N << "," << sat_med << "," << incsum_med << "," << boxcomp_med; */ f << "," << N << ",0," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 8 { f << 8; constexpr size_t dim = 8; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 65536; // 4 ^ 8 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time /* f << "," << N << "," << sat_med << "," << incsum_med << "," << boxcomp_med; */ f << "," << N << ",0," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // dim = 9 { f << 9; constexpr size_t dim = 9; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 262144; // 4 ^ 9 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time /* f << "," << N << "," << sat_med << "," << incsum_med << "," << boxcomp_med; */ f << "," << N << ",0," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } // TODO: make this a function? not sure how to do it in gtest // dim = 10 { f << 10; constexpr size_t dim = 10; std::cout << "\nAlgorithm " << dim << std::endl; for (int p = lower; p < upper; p += increment) { size_t N; #ifdef DEBUG N = 1048576; // 4 ^ 10 #else N = p * box_len; #endif size_t n = roundUp(std::floor(std::pow(N, 1. 
/ dim)), box_len); N = std::pow(n, dim); index_t side_lens(dim); index_t box_lens(dim); std::fill(side_lens.begin(), side_lens.end(), n); std::cout << "n = " << n << std::endl; std::cout << "N = " << N << std::endl; exsum_tensor::Tensor<dtype, dim> tensor(side_lens); tensor.RandFill(0, 1); exsum_tensor::Tensor<dtype, dim> tensor_copy(tensor); std::fill(box_lens.begin(), box_lens.end(), box_len); exsum_tensor::Tensor<dtype, dim> tensor_ref(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test(tensor_copy); exsum_tensor::Tensor<dtype, dim> tensor_test_2(tensor_copy); std::cout << "size = " << tensor_test.size() << std::endl; // time box complement std::vector<double> elapsed_times(trials); for (int k = 0; k < trials; k++) { tensor_test = tensor_copy; fasttime_t start = gettime(); tensor_test.BoxComplementExsum(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times.emplace_back(elapsed); elapsed_times[k] = elapsed; } // time summed area table + subtraction std::vector<double> elapsed_times_sat(trials); for (int k = 0; k < trials; k++) { tensor_test_2 = tensor_copy; fasttime_t start = gettime(); tensor_test_2.SummedAreaTable(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); elapsed_times_sat[k] = elapsed; } // time incsum + subtraction std::vector<double> elapsed_times_incsum(trials); for (int k = 0; k < trials; k++) { tensor_ref = tensor_copy; fasttime_t start = gettime(); tensor_ref.ExsumIncsumSubtraction(box_lens); fasttime_t end = gettime(); double elapsed = tdiff(start, end); // elapsed_times_incsum.emplace_back(elapsed); elapsed_times_incsum[k] = elapsed; } int med_idx = trials / 2; // sort the times std::nth_element(elapsed_times.begin(), elapsed_times.begin() + elapsed_times.size() / 2, elapsed_times.end()); std::nth_element(elapsed_times_sat.begin(), elapsed_times_sat.begin() + elapsed_times_sat.size() / 2, elapsed_times_sat.end()); std::nth_element( elapsed_times_incsum.begin(), elapsed_times_incsum.begin() + elapsed_times_incsum.size() / 2, elapsed_times_incsum.end()); // verify result #ifdef CHECK_RESULT for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test.data()[i], N >> debug_shift); } for (size_t i = 0; i <= tensor.getAddress(tensor.getMaxIndex()); ++i) { EXPECT_NEAR(tensor_ref.data()[i], tensor_test_2.data()[i], N >> debug_shift); } #endif // write median to file double sat_med = elapsed_times_sat[med_idx] * 1000; double incsum_med = elapsed_times_incsum[med_idx] * 1000; double boxcomp_med = elapsed_times[med_idx] * 1000; // dim, vol, SAT + subtraction time, BDBS + subtraction time, BOXCOMP // time /* f << "," << N << "," << sat_med << "," << incsum_med << "," << boxcomp_med; */ f << "," << N << ",0," << sat_med << "," << incsum_med << "," << boxcomp_med; std::cout << "SAT Time = " << sat_med << std::endl; std::cout << "INCSUM Time = " << incsum_med << std::endl; std::cout << "BOXCOMP Time = " << boxcomp_med << std::endl; } f << std::endl; } f.close(); } } // namespace } // namespace excluded_sum int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
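For reference, the benchmark above reports a median rather than a mean, and the "sort the times" comments are slightly loose: std::nth_element performs a partial selection that only guarantees the middle element lands at the middle position, in expected linear time, without fully sorting the samples. Below is a minimal self-contained sketch of that pattern; the benchmark's gettime/tdiff helpers are replaced with std::chrono here, which is an assumption of this sketch, not the benchmark's actual timer.

#include <algorithm>
#include <chrono>
#include <iostream>
#include <vector>

// Times `fn` `trials` times and returns the median elapsed milliseconds.
// Mirrors the benchmark's pattern: collect all samples, then use
// std::nth_element to select the middle element without a full sort.
template <typename F>
double median_time_ms(F fn, int trials) {
  std::vector<double> elapsed(trials);
  for (int k = 0; k < trials; k++) {
    auto start = std::chrono::steady_clock::now();
    fn();
    auto end = std::chrono::steady_clock::now();
    elapsed[k] = std::chrono::duration<double, std::milli>(end - start).count();
  }
  // Partial selection: only elapsed[trials / 2] is guaranteed to be in place.
  std::nth_element(elapsed.begin(), elapsed.begin() + trials / 2, elapsed.end());
  return elapsed[trials / 2];
}

int main() {
  volatile double sink = 0;
  double med = median_time_ms([&] {
    for (int i = 0; i < 1000000; i++) sink = sink + 1.0;
  }, 11);
  std::cout << "median = " << med << " ms" << std::endl;
}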
{-# LANGUAGE FlexibleContexts #-} module DFTPinwheel where import Control.Monad as M import qualified Data.Array.Accelerate as A import Data.Array.Accelerate.LLVM.PTX import Data.Array.Repa as R import Data.Complex import Data.List as L import Data.Vector.Storable as VS import Data.Vector.Unboxed as VU import DFT.Plan import Filter.Utils import Foreign.CUDA.Driver as CUDA import FourierMethod.BlockCudaMatrix import FourierMethod.FourierSeries2D import Image.IO import Pinwheel.Base import Pinwheel.FourierSeries2D import System.Directory import System.Environment import System.FilePath import System.Random import Utils.SimpsonRule import Utils.Array import Numeric.GSL.Special.Bessel import Utils.Distribution main = do args@(deviceIDsStr:numPointsStr:numR2FreqStr:deltaStr:deltaReconStr:periodR2Str:angularFreqStr:radialFreqStr:sigmaStr:periodEnvelopeStr:stdStr:numBatchStr:_) <- getArgs let deviceIDs = read deviceIDsStr :: [Int] numPoints = read numPointsStr :: Int numR2Freqs = read numR2FreqStr :: Int delta = read deltaStr :: Double deltaRecon = read deltaReconStr :: Double periodR2 = read periodR2Str :: Double angularFreq = read angularFreqStr :: Int radialFreq = read radialFreqStr :: Int sigma = read sigmaStr :: Double periodEnvelope = read periodEnvelopeStr :: Double std = read stdStr :: Double numBatch = read numBatchStr :: Int folderPath = "output/test/DFTPinwheel" removePathForcibly folderPath createDirectoryIfMissing True folderPath initialise [] devs <- M.mapM device deviceIDs ctxs <- M.mapM (\dev -> CUDA.create dev []) devs ptxs <- M.mapM createTargetFromContext ctxs let center = div numPoints 2 pinwheel = computeUnboxedS . R.fromFunction (Z :. numPoints :. numPoints) $ \(Z :. x :. y) -> fourierMellinInvPeriod sigma (periodEnvelope * sqrt 2) angularFreq radialFreq ( deltaRecon * fromIntegral (x - center) , deltaRecon * fromIntegral (y - center)) pinwheelMat = CuMat (numPoints ^ 2) 1 . CuVecHost . VU.convert . toUnboxed $ pinwheel centerFreq = div numR2Freqs 2 -- pinwheelFreq = -- computeUnboxedS . R.fromFunction (Z :. numR2Freqs :. numR2Freqs) $ \(Z :. x :. y) -> -- fourierMellin -- sigma -- angularFreq -- radialFreq -- ( delta * fromIntegral (x - centerFreq) -- , delta * fromIntegral (y - centerFreq)) -- pinwheelFreqMat = -- CuMat 1 (numR2Freqs ^ 2) . CuVecHost . VU.convert . toUnboxed $ -- . computeS . weightedArray -- pinwheelFreq -- invHarmonics <- -- createInverseHarmonicMatriesGPU ptxs 1 numPoints numR2Freqs periodR2 delta -- pinwheelCoef <- -- computeFourierCoefficientsR2Stream -- deviceIDs -- ptxs -- numR2Freqs -- numPoints -- periodR2 -- delta -- 1 -- numBatch -- [pinwheelMat] -- pinwheelFreqSeries <- -- computeFourierSeriesR2 -- deviceIDs -- numR2Freqs -- numPoints -- periodR2 -- invHarmonics -- [pinwheelFreqMat] plotImageRepaComplex (folderPath </> "Pinwheel.png") . ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) . R.traverse pinwheel id $ \f idx@(Z :. i :. j) -> let x = fromIntegral $ i - center y = fromIntegral $ j - center r = sqrt $ x ^ 2 + y ^ 2 -- if r <= 6 -- then 0 -- else in f idx -- plotImageRepaComplex (folderPath </> "PinwheelTest.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelTest -- plotImageRepaComplex (folderPath </> "PinwheelCoefficients.png") . -- ImageRepa 8 . computeS . R.traverse pinwheelCoef id $ \f idx@(Z :. _ :. i :. 
j) -> -- let x = fromIntegral $ i - div numR2Freqs 2 -- y = fromIntegral $ j - div numR2Freqs 2 -- r = sqrt $ x ^ 2 + y ^ 2 -- in if r <= 0 -- then 0 -- else f idx -- plotImageRepaComplex (folderPath </> "PinwheelFrequency.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreq -- plotImageRepaComplex (folderPath </> "PinwheelFrequencySeries.png") . -- ImageRepa 8 $ -- pinwheelFreqSeries initVec <- (VS.fromList . L.map (\x -> x :+ 0)) <$> M.replicateM (numPoints ^ 2) randomIO lock <- getFFTWLock plan <- fst <$> idft1dGPlan lock emptyPlan [numPoints, numPoints] [0, 1] initVec -- dftPinwheel <- -- dftExecute plan (DFTPlanID IDFT1DG [numPoints, numPoints] [0, 1]) . -- VU.convert . toUnboxed . computeUnboxedS . makeFilter2D $ -- pinwheel -- plotImageRepaComplex (folderPath </> "DFTPinwheel.png") . -- ImageRepa 8 . -- computeUnboxedS . -- makeFilter2D . -- fromUnboxed (Z :. (1 :: Int) :. numPoints :. numPoints) . VS.convert $ -- dftPinwheel let centerR2Freq = div numR2Freqs 2 gaussian2D = computeUnboxedS . fromFunction (Z :. numR2Freqs :. numR2Freqs) $ \(Z :. i' :. j') -> let i = i' - centerR2Freq j = j' - centerR2Freq rho = (sqrt . fromIntegral $ i ^ 2 + j ^ 2) / periodR2 * 2*pi in (-- rho ^ (abs angularFreq) * exp (pi * fromIntegral (i ^ 2 + j ^ 2) / ((-1) * periodR2 ^ 2 * std ^ 2)) / (2 * pi * std ^ 2)) :+ 0 -- pinwheelFreqAnaticalNonGaussian = -- centerHollowArray numR2Freqs . computeUnboxedS $ -- analyticalFourierCoefficients2 -- numR2Freqs -- delta -- angularFreq -- radialFreq -- (-sigma) --(sigma - 1) -- periodR2 -- (periodEnvelope * sqrt 2) pinwheelFreqAnatical = centerHollowArray numR2Freqs . computeUnboxedS -- . R.zipWith (*) gaussian2D $ analyticalFourierCoefficients2 numR2Freqs delta angularFreq radialFreq (-sigma) --(sigma - 1) periodR2 (periodEnvelope * sqrt 2) -- pinwheelFreqAnaticalMat = -- A.use . -- A.fromList (A.Z A.:. (numR2Freqs ^ 2) A.:. (1 :: Int)) . R.toList $ -- pinwheelFreqAnatical -- pinwheelFreqAnaticalInner = -- centerHollowArray numR2Freqs . computeUnboxedS $ -- analyticalFourierCoefficients2' -- numR2Freqs -- (1 / fromIntegral (div numR2Freqs 2)) -- angularFreq -- radialFreq -- (-sigma) -- periodR2 -- (periodEnvelope * sqrt 2) -- pinwheelFreqAnaticalInnerMat = -- A.use . -- A.fromList (A.Z A.:. (numR2Freqs ^ 2) A.:. (1 :: Int)) . R.toList $ -- pinwheelFreqAnaticalInner -- pinwheelFreqSeriesAnatical = -- computeUnboxedS $ -- analyticalFourierSeries1 -- numPoints -- 1 -- delta -- angularFreq -- radialFreq -- (sigma - 1) -- periodR2 plotImageRepaComplex (folderPath </> "PinwheelCoefficientsAnalytical.png") . ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ pinwheelFreqAnatical -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalNonGaussian.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreqAnaticalNonGaussian -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalInner.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreqAnaticalInner -- plotImageRepaComplex (folderPath </> "PinwheelFrequencySeriesAnalytical.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreqSeriesAnatical -- pinwheelFreqSeriesStream <- -- computeFourierSeriesR2Stream -- deviceIDs -- ptxs -- numR2Freqs -- numPoints -- periodR2 -- delta -- numBatch -- [transposeCuMat pinwheelFreqMat] -- plotImageRepaComplex (folderPath </> "PinwheelFrequencySeriesStream.png") . -- ImageRepa 8 . computeS . 
R.traverse pinwheelFreqSeriesStream id $ \f idx@(Z :. _ :. i :. j) -> -- let x = fromIntegral $ i - div numPoints 2 -- y = fromIntegral $ j - div numPoints 2 -- r = sqrt $ x ^ 2 + y ^ 2 -- in if r <= 0 -- then 0 -- else f idx -- pinwheelCoefficientsAnalyticalSeriesStream <- -- computeFourierSeriesR2StreamAcc -- ptxs -- numR2Freqs -- numPoints -- 1 -- periodR2 -- deltaRecon -- numBatch -- pinwheelFreqAnaticalMat -- -- [ transposeCuMat . createCuMat $ -- -- [VU.convert . toUnboxed $ pinwheelFreqAnatical] -- -- ] -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalSeries.png") . -- ImageRepa 8 . -- computeS . -- extend (Z :. (1 :: Int) :. All :. All) . -- R.traverse (sumS . rotate3D $ pinwheelCoefficientsAnalyticalSeriesStream) id $ \f idx@(Z :. i :. j) -> -- let x = fromIntegral $ i - center -- y = fromIntegral $ j - center -- r = sqrt $ x ^ 2 + y ^ 2 -- -- if r <= 6 -- -- then 0 -- -- else -- in f idx pinwheelCoefficientsAnalyticalIDFT <- dftExecute plan (DFTPlanID IDFT1DG [numPoints, numPoints] [0, 1]) . VU.convert . toUnboxed . computeS . makeFilter2D $ pinwheelFreqAnatical plotImageRepaComplex (folderPath </> "PinwheelCozefficientsIDFT.png") . ImageRepa 8 . computeS . makeFilter2DInverse . fromUnboxed (Z :. (1 :: Int) :. numPoints :. numPoints) . VS.convert $ pinwheelCoefficientsAnalyticalIDFT -- pinwheelCoefficientsAnalyticalInnerSeriesStream <- -- computeFourierSeriesR2StreamAcc' -- ptxs -- numR2Freqs -- numPoints -- 1 -- periodR2 -- 1 -- (1 / fromIntegral (div numR2Freqs 2)) -- numBatch -- pinwheelFreqAnaticalMat -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalInnverSeries.png") . -- ImageRepa 8 . -- computeS . extend (Z :. (1 :: Int) :. All :. All) . sumS . rotate3D $ -- pinwheelCoefficientsAnalyticalInnerSeriesStream -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalSumSeries.png") . -- ImageRepa 8 . -- computeS . extend (Z :. (1 :: Int) :. All :. All) . sumS . rotate3D $ -- R.zipWith -- (+) -- pinwheelCoefficientsAnalyticalSeriesStream -- pinwheelCoefficientsAnalyticalInnerSeriesStream -- computeS . R.traverse pinwheelCoefficientsAnalyticalSeriesStream id $ \f idx@(Z :. _ :. i :. j) -> -- let x = fromIntegral $ i - div numPoints 2 -- y = fromIntegral $ j - div numPoints 2 -- r = sqrt $ x ^ 2 + y ^ 2 -- in if r <= 0 -- then 0 -- else f idx -- -- pinwheelCoefficientsSeriesStream <- -- computeFourierSeriesR2Stream -- deviceIDs -- ptxs -- numR2Freqs -- numPoints -- periodR2 -- delta -- numBatch -- [transposeCuMat . createCuMat $ [VU.convert . toUnboxed $ pinwheelCoef]] -- plotImageRepaComplex (folderPath </> "PinwheelCoefficientsSeriesStream.png") . -- ImageRepa 8 . computeS . R.traverse pinwheelCoefficientsSeriesStream id $ \f idx@(Z :. _ :. i :. j) -> -- let x = fromIntegral $ i - div numPoints 2 -- y = fromIntegral $ j - div numPoints 2 -- r = sqrt $ x ^ 2 + y ^ 2 -- in if r <= 0 -- then 0 -- else f idx -- let besselFilter = -- computeUnboxedS . fromFunction (Z :. numR2Freqs :. numR2Freqs) $ \(Z :. i :. j) -> -- let xFreq = fromIntegral $ i - centerFreq -- yFreq = fromIntegral $ j - centerFreq -- rho = sqrt $ xFreq ^ 2 + yFreq ^ 2 -- in if rho == 0 -- then 1 -- else (bessel_J1 (2 * pi * rho / periodR2)) / rho :+ 0 -- plotImageRepaComplex (folderPath </> "BesselFilter.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- besselFilter -- initVec1 <- -- (VS.fromList . 
L.map (\x -> x :+ 0)) <$> -- M.replicateM (numR2Freqs ^ 2) randomIO -- lock <- getFFTWLock -- plan <- -- fst <$> -- (dft1dGPlan lock emptyPlan [numR2Freqs, numR2Freqs] [0, 1] initVec1 >>= \(plan, vec) -> -- idft1dGPlan lock plan [numR2Freqs, numR2Freqs] [0, 1] vec) -- let besselVec = -- VU.convert . toUnboxed . computeS . makeFilter2D $ besselFilter -- pinwheelFreqAnaticalVec = VU.convert . toUnboxed $ pinwheelFreqAnatical -- besselVecF <- -- dftExecute plan (DFTPlanID DFT1DG [numR2Freqs, numR2Freqs] [0, 1]) besselVec -- pinwheelFreqAnaticalVecF <- -- dftExecute -- plan -- (DFTPlanID DFT1DG [numR2Freqs, numR2Freqs] [0, 1]) -- pinwheelFreqAnaticalVec -- pinwheelFreqAnatical1 <- -- fmap (fromUnboxed (Z :. numR2Freqs :. numR2Freqs) . VS.convert) . -- dftExecute plan (DFTPlanID IDFT1DG [numR2Freqs, numR2Freqs] [0, 1]) $ -- VS.zipWith (*) besselVecF pinwheelFreqAnaticalVecF -- plotImageRepaComplex -- (folderPath </> "BesselFilterConveledPinwheelFreqAnatical.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreqAnatical1 -- let pinwheelFreqAnatical2 = -- R.zipWith (-) pinwheelFreqAnatical pinwheelFreqAnatical1 -- pinwheelFreqAnaticalMat2 = -- A.use . -- A.fromList (A.Z A.:. (numR2Freqs ^ 2) A.:. (1 :: Int)) . R.toList $ -- pinwheelFreqAnatical2 -- plotImageRepaComplex (folderPath </> "pinwheelFreqAnatical2.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- pinwheelFreqAnatical2 -- pinwheelCoefficientsAnalyticalSeriesStream2 <- -- computeFourierSeriesR2StreamAcc -- ptxs -- numR2Freqs -- numPoints -- 1 -- periodR2 -- delta -- numBatch -- pinwheelFreqAnaticalMat2 -- plotImageRepaComplex -- (folderPath </> "PinwheelCoefficientsAnalyticalSeries2.png") . -- ImageRepa 8 . -- computeS . extend (Z :. (1 :: Int) :. All :. All) . sumS . rotate3D $ -- pinwheelCoefficientsAnalyticalSeriesStream2 -- dftPinwheel <- -- fmap (fromUnboxed (Z :. numR2Freqs :. numR2Freqs) . VS.convert) . -- dftExecute plan (DFTPlanID DFT1DG [numR2Freqs, numR2Freqs] [0, 1]) . -- VU.convert . toUnboxed . computeS . makeFilter2D $ -- pinwheel -- let zeroArr = -- fromFunction (Z :. numR2Freqs :. numR2Freqs) $ \(Z :. i :. j) -> -- if (sqrt . fromIntegral $ (i - centerFreq) ^ 2 + (j - centerFreq) ^ 2) < -- 1 -- then 0 -- else 1 -- plotImageRepaComplex (folderPath </> "DFTPinwheel.png") . -- ImageRepa 8 . -- computeS . -- extend (Z :. (1 :: Int) :. All :. All) . -- R.zipWith (*) zeroArr . makeFilter2D $ -- dftPinwheel -- let normFunc arr = -- let m = VU.maximum . toUnboxed . computeS . R.map magnitude $ arr -- in R.map (/ (m :+ 0)) arr -- plotImageRepaComplex (folderPath </> "Diff.png") . -- ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $ -- R.zipWith -- (\a b -> -- if magnitude b == 0 -- then 0 -- else a / b) -- (normFunc . R.zipWith (*) zeroArr . makeFilter2D $ dftPinwheel) -- (normFunc . R.zipWith (*) zeroArr $ pinwheelFreqAnatical)
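The pinwheel kernels sampled by fourierMellinInvPeriod above come from a Fourier–Mellin-type basis indexed by an angular frequency and a radial frequency. The following form is an assumption stated only for orientation — the exact normalization and the role of the envelope period live in Pinwheel.Base, which is not part of this file:

% Assumed Fourier--Mellin basis element with angular frequency m and
% radial frequency n (exact normalization defined in Pinwheel.Base):
\[
  \psi_{m,n}(r,\theta) \;=\; r^{-\sigma + i n}\, e^{i m \theta},
  \qquad r = \sqrt{x^2 + y^2},\quad \theta = \operatorname{atan2}(y, x).
\]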
using Documenter, KernelAbstractions

makedocs(
    modules = [KernelAbstractions],
    sitename = "KernelAbstractions",
    format = Documenter.HTML(
        prettyurls = get(ENV, "CI", nothing) == "true"
    ),
    pages = [
        "Home" => "index.md",
        "Writing kernels" => "kernels.md",
        "Examples" => [
            "examples/memcopy.md",
            "examples/memcopy_static.md",
            "examples/naive_transpose.md",
            "examples/performance.md",
            "examples/matmul.md",
        ],
        "API" => "api.md",
        "Extras" => [
            "extras/unrolling.md"
        ]
    ],
    doctest = true
)
------------------------------------------------------------------------ -- An investigation of nested fixpoints of the form μX.νY.… in Agda ------------------------------------------------------------------------ module MuNu where open import Codata.Musical.Notation import Codata.Musical.Colist as Colist open import Codata.Musical.Stream open import Data.Digit open import Data.Empty open import Data.List using (List; _∷_; []) open import Data.Product open import Relation.Binary.PropositionalEquality open import Relation.Nullary using (¬_) -- Christophe Raffalli discusses (essentially) the type μO. νZ. Z + O -- in his thesis. If Z is read as zero and O as one, then this type -- contains bit sequences of the form (0^⋆1)^⋆0^ω. -- It is interesting to note that currently it is not possible to -- encode this type directly in Agda. One might believe that the -- following definition should work. First we define the inner -- greatest fixpoint: data Z (O : Set) : Set where [0] : ∞ (Z O) → Z O [1] : O → Z O -- Then we define the outer least fixpoint: data O : Set where ↓ : Z O → O -- However, it is still possible to define values of the form (01)^ω: 01^ω : O 01^ω = ↓ ([0] (♯ [1] 01^ω)) -- The reason is the way the termination/productivity checker works: -- it accepts definitions by guarded corecursion as long as the guard -- contains at least one occurrence of ♯_, no matter how the types -- involved are defined. In effect ∞ has global reach. The mistake -- done above was believing that O is defined to be a least fixpoint. -- The type O really corresponds to νZ. μO. Z + O, i.e. (1^⋆0)^ω: data O′ : Set where [0] : ∞ O′ → O′ [1] : O′ → O′ mutual O→O′ : O → O′ O→O′ (↓ z) = ZO→O′ z ZO→O′ : Z O → O′ ZO→O′ ([0] z) = [0] (♯ ZO→O′ (♭ z)) ZO→O′ ([1] o) = [1] (O→O′ o) mutual O′→O : O′ → O O′→O o = ↓ (O′→ZO o) O′→ZO : O′ → Z O O′→ZO ([0] o) = [0] (♯ O′→ZO (♭ o)) O′→ZO ([1] o) = [1] (O′→O o) -- If O had actually encoded the type μO. νZ. Z + O, then we could -- have proved the following theorem: mutual ⟦_⟧O : O → Stream Bit ⟦ ↓ z ⟧O = ⟦ z ⟧Z ⟦_⟧Z : Z O → Stream Bit ⟦ [0] z ⟧Z = 0b ∷ ♯ ⟦ ♭ z ⟧Z ⟦ [1] o ⟧Z = 1b ∷ ♯ ⟦ o ⟧O Theorem : Set Theorem = ∀ o → ¬ (head ⟦ o ⟧O ≡ 0b × head (tail ⟦ o ⟧O) ≡ 1b × tail (tail ⟦ o ⟧O) ≈ ⟦ o ⟧O) -- This would have been unfortunate, though: inconsistency : Theorem → ⊥ inconsistency theorem = theorem 01^ω (refl , refl , proof) where proof : tail (tail ⟦ 01^ω ⟧O) ≈ ⟦ 01^ω ⟧O proof = refl ∷ ♯ (refl ∷ ♯ proof) -- Using the following elimination principle we can prove the theorem: data ⇑ {O} (P : O → Set) : Z O → Set where [0] : ∀ {z} → ∞ (⇑ P (♭ z)) → ⇑ P ([0] z) [1] : ∀ {o} → P o → ⇑ P ([1] o) O-Elim : Set₁ O-Elim = (P : O → Set) → (∀ {z} → ⇑ P z → P (↓ z)) → (o : O) → P o theorem : O-Elim → Theorem theorem O-elim = O-elim P helper where P : O → Set P o = ¬ (head ⟦ o ⟧O ≡ 0b × head (tail ⟦ o ⟧O) ≡ 1b × tail (tail ⟦ o ⟧O) ≈ ⟦ o ⟧O) helper : ∀ {z} → ⇑ P z → P (↓ z) helper ([1] p) (() , eq₂ , eq₃) helper ([0] p) (refl , eq₂ , eq₃) = hlp _ eq₂ (head-cong eq₃) (tail-cong eq₃) (♭ p) where hlp : ∀ z → head ⟦ z ⟧Z ≡ 1b → head (tail ⟦ z ⟧Z) ≡ 0b → tail (tail ⟦ z ⟧Z) ≈ ⟦ z ⟧Z → ⇑ P z → ⊥ hlp .([0] _) () eq₂ eq₃ ([0] p) hlp .([1] _) eq₁ eq₂ eq₃ ([1] p) = p (eq₂ , head-cong eq₃ , tail-cong eq₃) -- Fortunately it appears as if we cannot prove this elimination -- principle. 
The following code is not accepted by the termination -- checker: {- mutual O-elim : O-Elim O-elim P hyp (↓ z) = hyp (Z-elim P hyp z) Z-elim : (P : O → Set) → (∀ {z} → ⇑ P z → P (↓ z)) → (z : Z O) → ⇑ P z Z-elim P hyp ([0] z) = [0] (♯ Z-elim P hyp (♭ z)) Z-elim P hyp ([1] o) = [1] (O-elim P hyp o) -} -- If hyp were known to be contractive, then the code above would be -- correct (if not accepted by the termination checker). This is not -- the case in theorem above.
[STATEMENT] lemma targets_simps [simp]: shows "targets \<^bold>\<sharp> = {}" and "targets \<^bold>\<guillemotleft>x\<^bold>\<guillemotright> = {\<^bold>\<guillemotleft>x\<^bold>\<guillemotright>}" and "arr t \<Longrightarrow> targets \<^bold>\<lambda>\<^bold>[t\<^bold>] = {\<^bold>\<lambda>\<^bold>[Trg t\<^bold>]}" and "\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (t \<^bold>\<circ> u) = {Trg t \<^bold>\<circ> Trg u}" and "\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (\<^bold>\<lambda>\<^bold>[t\<^bold>] \<^bold>\<Zspot> u) = {subst (Trg u) (Trg t)}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (targets \<^bold>\<sharp> = {} &&& targets \<^bold>\<guillemotleft>x\<^bold>\<guillemotright> = {\<^bold>\<guillemotleft>x\<^bold>\<guillemotright>}) &&& (arr t \<Longrightarrow> targets \<^bold>\<lambda>\<^bold>[t\<^bold>] = {\<^bold>\<lambda>\<^bold>[Trg t\<^bold>]}) &&& (\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (t \<^bold>\<circ> u) = {Trg t \<^bold>\<circ> Trg u}) &&& (\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (\<^bold>\<lambda>\<^bold>[t\<^bold>] \<^bold>\<Zspot> u) = {Subst 0 (Trg u) (Trg t)}) [PROOF STEP] using targets_char\<^sub>\<Lambda> [PROOF STATE] proof (prove) using this: targets ?t = (if Arr ?t then {Trg ?t} else {}) goal (1 subgoal): 1. (targets \<^bold>\<sharp> = {} &&& targets \<^bold>\<guillemotleft>x\<^bold>\<guillemotright> = {\<^bold>\<guillemotleft>x\<^bold>\<guillemotright>}) &&& (arr t \<Longrightarrow> targets \<^bold>\<lambda>\<^bold>[t\<^bold>] = {\<^bold>\<lambda>\<^bold>[Trg t\<^bold>]}) &&& (\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (t \<^bold>\<circ> u) = {Trg t \<^bold>\<circ> Trg u}) &&& (\<lbrakk>arr t; arr u\<rbrakk> \<Longrightarrow> targets (\<^bold>\<lambda>\<^bold>[t\<^bold>] \<^bold>\<Zspot> u) = {Subst 0 (Trg u) (Trg t)}) [PROOF STEP] by auto
lemma locally_compact_closedin: fixes S :: "'a :: heine_borel set" shows "\<lbrakk>closedin (top_of_set S) t; locally compact S\<rbrakk> \<Longrightarrow> locally compact t"
#pragma once

#include <unordered_map>
#include <vector>

#include <gsl/gsl>

#include "huffman.h"
#include "interface.h"
#include "minqueue.h"

// Sizes before and after encoding, used for compression statistics.
class statistics {
 public:
  size_t original;  // size of the raw input
  size_t modified;  // size of the encoded output
};

class huffman_encoder {
  // Statistics of the current encoding.
  statistics stats;

  // Maps each input character to its Huffman code (a sequence of bits).
  std::unordered_map<char, std::vector<int>> encoding;

  // Counts the occurrences of each character in the buffer.
  std::unordered_map<char, int> count_chars(std::vector<char> buf);

  // Walks the Huffman tree rooted at `root`, accumulating the bit path
  // into `encoding` for every leaf.
  void evaluate(minqueue_node* root, std::vector<int> encoding);

 public:
  huffman_encoder();

  // Encodes the buffer, returning the concatenated bit sequence.
  std::vector<int> encode(std::vector<char> buf);

  void display_encoding();
  void display_stats();
};
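A minimal usage sketch of this interface, assuming only the declarations above; the header name and the member definitions (in the corresponding .cc file) are not shown here, so both are assumptions:

#include <string>
#include <vector>

#include "huffman_encoder.h"  // assumed header name for the class above

int main() {
  std::string text = "abracadabra";
  std::vector<char> buf(text.begin(), text.end());

  huffman_encoder encoder;
  // encode() returns the concatenated bit sequence for the whole buffer.
  std::vector<int> bits = encoder.encode(buf);

  encoder.display_encoding();  // per-character codes
  encoder.display_stats();     // original vs. encoded size
  return 0;
}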
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Abstract for my thesis
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace*{\fill}
\section*{\centering Abstract}
\addcontentsline{toc}{section}{Abstract}

Obesity has been a major global problem for more than a decade and is associated with many noncommunicable diseases, including cancer. The number of obese people, both adults and children, has risen in every country of the world, and the trend is likely to continue. Cancers are caused by the dysregulation of various molecular pathways that allow tumour cells to proliferate, survive and migrate. One of the difficulties associated with the treatment of cancers is the identification of the underlying biological pathways that drive tumorigenesis. This research aims to determine whether gene expression signatures exist that are specific to obesity across multiple cancer types, and to investigate whether any common pathways are dysregulated in cancers based on these genetic signatures. In this work no genetic signatures or differentially expressed genes were found between obese and non-obese patients that were common across multiple cancer types. However, the Akt, \gls{egfr}, \gls{tgfb} and Src pathways may have a role in promoting tumour progression in patients who are obese. It is likely that some complex mechanism underlies the relationship between obesity and cancer. A better understanding of the pathways dysregulated in cancer cells in obese patients may lead to improved clinical decisions, and contribute towards personalised treatment in the future.

\vfill
\vfill
lemma emeasure_lborel_box_finite: "emeasure lborel (box a b) < \<infinity>"
Eastern Long Island Hospital is committed to providing outstanding patient care in the Greenport, NY area, but before you commit to Eastern Long Island Hospital for a Spine X-Ray, make sure you compare prices at other medical facilities. It may save you hundreds (in some cases thousands) of dollars. View a Spine X-Ray cost comparison for Greenport and Request a Free Quote before you make a decision.
[STATEMENT] lemma gkernTr11:"\<lbrakk>Group F; Group G; f \<in> gHom F G ; a \<in> carrier F\<rbrakk> \<Longrightarrow> (iim F G f {f a}) = (gker\<^bsub>F,G\<^esub> f) \<bullet>\<^bsub>F\<^esub> a" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F\<rbrakk> \<Longrightarrow> iim F G f {f a} = gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a [PROOF STEP] apply (frule gkernTr7[of "F" "G" "f"], assumption+) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> iim F G f {f a} = gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a [PROOF STEP] apply (rule equalityI) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> iim F G f {f a} \<subseteq> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a 2. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a \<subseteq> iim F G f {f a} [PROOF STEP] (** iim F G f {f a} \<subseteq> ker\<^sub>F\<^sub>,\<^sub>Gf \<^sub>F a **) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> iim F G f {f a} \<subseteq> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a 2. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a \<subseteq> iim F G f {f a} [PROOF STEP] apply (rule subsetI, simp add:iim_def, erule conjE) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>x. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f ; x \<in> carrier F; f x = f a\<rbrakk> \<Longrightarrow> x \<in> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a 2. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a \<subseteq> iim F G f {f a} [PROOF STEP] apply (frule_tac a1 = x in gkernTr9[THEN sym, of "F" "G" "f" _ "a"], assumption+, simp, frule_tac a = x in Group.a_in_rcs[of "F" "gker\<^bsub>F,G\<^esub> f"], assumption+, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f \<rbrakk> \<Longrightarrow> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a \<subseteq> iim F G f {f a} [PROOF STEP] apply (rule subsetI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x. 
\<lbrakk>Group F; Group G; f \<in> gHom F G; a \<in> carrier F; F \<guillemotright> gker\<^bsub>F,G\<^esub> f ; x \<in> gker\<^bsub>F,G\<^esub> f \<bullet>\<^bsub>F\<^esub> a\<rbrakk> \<Longrightarrow> x \<in> iim F G f {f a} [PROOF STEP] apply (simp add:gkernel_def rcs_def iim_def, erule exE, (erule conjE)+, rotate_tac -1, frule sym, thin_tac "h \<cdot>\<^bsub>F\<^esub> a = x", simp add:gHom, thin_tac "x = h \<cdot>\<^bsub>F\<^esub> a", frule gHom_mem[of "F" "G" "f" "a"], assumption+, simp add:Group.mult_closed Group.l_unit) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
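In conventional notation, the statement gkernTr11 just proved is the standard fibre/coset identity for a group homomorphism: for f : F → G a homomorphism and a ∈ carrier F,

\[
  f^{-1}(\{f(a)\}) \;=\; (\ker f)\,a
  \;=\; \{\, k \cdot a \mid k \in F,\ f(k) = e_G \,\}.
\]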
%% Runge-Kutta 2
%
% for solving
%
% $\frac{dv}{dt}=-\alpha (t)v+\beta (t)$
%
% where $\alpha (t) = \frac{3t}{1+t}$ and $\beta (t)=2(1+t)^3 e^{-t}$
% (matching the code below; the signs match the exact solution
% v(t) = e^{-t}(1+t)^3 used for comparison)
%
% Assuming v(0)=1.0 for the period 0<t<150
clc
clear
%close all
%% Discretization of t
ti=0;   % Initial Time
tf=150; % Final Time
% Time step
% h = 0.2;
% h = 0.8;
% h = 1.1;
h = 0.68;
% Discretization of t
i=1;
t(1)=ti;
while t(i)<tf
    i=i+1;
    t(i)=t(i-1)+h;
end
t(i)=tf;
clear i;
n=length(t);
%% Discretization of v
v = zeros(n,1);
alpha = zeros(n,1);
beta = zeros(n,1);
t2 = zeros(n,1);
alpha2 = zeros(n,1);
beta2 = zeros(n,1);
for i=1:n-1
    t2(i) = t(i)+1/2*h;
    % compute variables alpha(t) and beta(t)
    alpha(i) = 3*t(i)/(1+t(i));
    alpha2(i) = 3*t2(i)/(1+t2(i));        % alpha(t + 1/2h)
    beta(i) = 2*(1+t(i))^3*exp(-t(i));
    beta2(i) = 2*(1+t2(i))^3*exp(-t2(i)); % beta(t + 1/2h)
end
% initial condition (v(0))
v(1)=1;
for i=1:n-1
    % main discrete equation (explicit midpoint RK2)
    k1 = h*(-alpha(i)*v(i)+beta(i));
    k2 = h*(-alpha2(i)*(v(i)+1/2*k1)+beta2(i));
    v(i+1) = v(i)+k2;
end
%% Exact solution
v2 = zeros(n,1);
for i=1:n
    v2(i) = exp(-t(i))*(1+t(i))^3;
end
error=v2-v;
figure
plot(t,v,'.',t,v2,'-')
%figure
%plot(t,error,'.')
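For the record, the exact solution used in the script checks out by direct differentiation, and the k1/k2 update is the explicit midpoint (RK2) rule:

\[
  v(t) = e^{-t}(1+t)^3
  \;\Longrightarrow\;
  v'(t) = e^{-t}(1+t)^2(2 - t)
        = -\frac{3t}{1+t}\,v(t) + 2(1+t)^3 e^{-t},
  \qquad v(0) = 1.
\]
\[
  k_1 = h\,f(t_i, v_i), \qquad
  k_2 = h\,f\!\left(t_i + \tfrac{h}{2},\; v_i + \tfrac{k_1}{2}\right), \qquad
  v_{i+1} = v_i + k_2 .
\]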
Formal statement is: lemma convex_translation_eq [simp]: "convex ((+) a ` s) \<longleftrightarrow> convex s" Informal statement is: A translation of a convex set is convex.
-- %logging 5 interface Interface specifier where 0 concrete : specifier -> Type record Value s where constructor MkValue specifier : s 0 DependentValue : Interface s => Value s -> Type DependentValue v = concrete (specifier v) data Record : s -> Type where MkRecord : Value s -> DependentValue {s} value -> Record s
In a matter of minutes and without a single line of code, Zapier allows you to connect Clio and Abacus, with as many as 36 possible integrations. Are you ready to find your productivity superpowers? It's easy to connect Clio + Abacus and requires absolutely zero coding experience—the only limit is your own imagination.
universe variables u v namespace quot section universe variables u_a u_b u_c variables {A : Type u_a} {B : Type u_b} {C : Type u_c} variables [s₁ : setoid A] [s₂ : setoid B] include s₁ s₂ attribute [reducible, elab_as_eliminator] protected definition lift₂ (f : A → B → C)(c : ∀ a₁ a₂ b₁ b₂, a₁ ≈ b₁ → a₂ ≈ b₂ → f a₁ a₂ = f b₁ b₂) (q₁ : quotient s₁) (q₂ : quotient s₂) : C := quot.lift (λ (a₁ : A), quot.lift (f a₁) (λ (a b : B), c a₁ a a₁ b (setoid.refl a₁)) q₂) (λ (a b : A) (H : a ≈ b), @quot.ind B s₂.r (λ (a_1 : quotient s₂), (quot.lift (f a) (λ (a_1 b : B), c a a_1 a b (setoid.refl a)) a_1) = (quot.lift (f b) (λ (a b_1 : B), c b a b b_1 (setoid.refl b)) a_1)) (λ (a' : B), c a a' b a' H (setoid.refl a')) q₂) q₁ attribute [reducible, elab_as_eliminator] protected definition lift_on₂ (q₁ : quotient s₁) (q₂ : quotient s₂) (f : A → B → C) (c : ∀ a₁ a₂ b₁ b₂, a₁ ≈ b₁ → a₂ ≈ b₂ → f a₁ a₂ = f b₁ b₂) : C := quot.lift₂ f c q₁ q₂ attribute [elab_as_eliminator] protected theorem induction_on₂ {C : quotient s₁ → quotient s₂ → Prop} (q₁ : quotient s₁) (q₂ : quotient s₂) (H : ∀ a b, C ⟦a⟧ ⟦b⟧) : C q₁ q₂ := quot.ind (λ a₁, quot.ind (λ a₂, H a₁ a₂) q₂) q₁ attribute [elab_as_eliminator] protected theorem induction_on₃ [s₃ : setoid C] {D : quotient s₁ → quotient s₂ → quotient s₃ → Prop} (q₁ : quotient s₁) (q₂ : quotient s₂) (q₃ : quotient s₃) (H : ∀ a b c, D ⟦a⟧ ⟦b⟧ ⟦c⟧) : D q₁ q₂ q₃ := quot.ind (λ a₁, quot.ind (λ a₂, quot.ind (λ a₃, H a₁ a₂ a₃) q₃) q₂) q₁ end section exact variable {A : Type u} variable [s : setoid A] include s private definition rel (q₁ q₂ : quotient s) : Prop := quot.lift_on₂ q₁ q₂ (λ a₁ a₂, a₁ ≈ a₂) (λ a₁ a₂ b₁ b₂ a₁b₁ a₂b₂, propext (iff.intro (λ a₁a₂, setoid.trans (setoid.symm a₁b₁) (setoid.trans a₁a₂ a₂b₂)) (λ b₁b₂, setoid.trans a₁b₁ (setoid.trans b₁b₂ (setoid.symm a₂b₂))))) local infix `~` := rel private lemma rel.refl : ∀ q : quotient s, q ~ q := λ q, quot.induction_on q (λ a, setoid.refl a) private lemma eq_imp_rel {q₁ q₂ : quotient s} : q₁ = q₂ → q₁ ~ q₂ := assume h, eq.rec_on h (rel.refl q₁) theorem exact' {a b : A} : ⟦a⟧ = ⟦b⟧ → a ≈ b := assume h, eq_imp_rel h end exact section universe variables u_a u_b u_c variables {A : Type u_a} {B : Type u_b} variables [s₁ : setoid A] [s₂ : setoid B] include s₁ s₂ attribute [reducible, elab_as_eliminator] protected definition rec_on_subsingleton₂ {C : quotient s₁ → quotient s₂ → Type u_c} [H : ∀ a b, subsingleton (C ⟦a⟧ ⟦b⟧)] (q₁ : quotient s₁) (q₂ : quotient s₂) (f : Π a b, C ⟦a⟧ ⟦b⟧) : C q₁ q₂:= @quot.rec_on_subsingleton _ s₁.r (λ q, C q q₂) (λ a, quot.ind (λ b, H a b) q₂) q₁ (λ a, @quot.rec_on_subsingleton _ s₂.r _ (H a) q₂ (λ b, f a b)) end end quot
||| Copyright 2016 Google Inc. ||| ||| Licensed under the Apache License, Version 2.0 (the "License"); ||| you may not use this file except in compliance with the License. ||| You may obtain a copy of the License at ||| ||| http://www.apache.org/licenses/LICENSE-2.0 ||| ||| Unless required by applicable law or agreed to in writing, software ||| distributed under the License is distributed on an "AS IS" BASIS, ||| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ||| See the License for the specific language governing permissions and ||| limitations under the License. module Protobuf.TextFormat import Data.String import Lightyear import Lightyear.Char import Lightyear.Strings import Protobuf.Core import Protobuf.Printer import Protobuf.ParseUtils import Protobuf.Util printEnum : interpEnum d -> Printer () printEnum {d=MkEnumDescriptor {k=k} _ values} i = print (name (index i values)) mutual printMessage : InterpMessage d -> Printer () printMessage {d=MkMessageDescriptor _ _} (MkMessage fields) = printFields fields printFields : InterpFields d -> Printer () printFields {d=Nil} Nil = return () printFields {d=f::fs} (x::xs) = do { printField x printFields xs } printField : interpField d -> Printer () printField {d=MkFieldDescriptor Optional _ name number} = maybe (return ()) (printSingleFieldValue name) printField {d=MkFieldDescriptor Required _ name number} = (printSingleFieldValue name) printField {d=MkFieldDescriptor Repeated _ name number} = forEach (printSingleFieldValue name) ||| Prints in the form ||| <indent>field_name: <contents>\n printSingleFieldValue : (name: String) -> interpFieldValue d -> Printer () printSingleFieldValue name x = do { printIndent print name print ": " printFieldValue x print "\n" } printFieldValue : interpFieldValue d -> Printer () printFieldValue {d=PBDouble} = print . show printFieldValue {d=PBFloat} = print . show printFieldValue {d=PBInt32} = print . show printFieldValue {d=PBInt64} = print . show printFieldValue {d=PBUInt32} = print . show printFieldValue {d=PBUInt64} = print . show printFieldValue {d=PBSInt32} = print . show printFieldValue {d=PBSInt64} = print . show printFieldValue {d=PBFixed32} = print . show printFieldValue {d=PBFixed64} = print . show printFieldValue {d=PBSFixed32} = print . show printFieldValue {d=PBSFixed64} = print . show printFieldValue {d=PBBool} = print . toLower . show printFieldValue {d=PBString} = print . show printFieldValue {d=PBBytes} = print . show printFieldValue {d=PBMessage _} = braces . printMessage printFieldValue {d=PBEnum _} = printEnum export printToTextFormat : InterpMessage d -> String printToTextFormat x = assert_total $ runPrinter (printMessage x) export implementation Show (InterpMessage d) where show = printToTextFormat --- Deserialization --- Deserialization is implemented using the Lightyear monadic parser package. --- TODO: commit to more paths to give a better error stack and faster parsing. 
-- TODO: handle escape codes including \" parseString : Parser String parseString = do { char '"' chars <- many (satisfy (\c => c /= '"')) char '"' spaces return (pack chars) } parseInteger : Parser Integer parseInteger = do { chars <- many (satisfy (\c => c /= '}' && not (isSpace c))) spaces case parseInteger (pack chars) of Nothing => fail $ "Could not parse " ++ (pack chars) ++ " as an integer" Just x => return x } parseDouble : Parser Double parseDouble = do { chars <- many (satisfy (\c => c /= '}' && not (isSpace c))) spaces case parseDouble (pack chars) of Nothing => fail $ "Could not parse " ++ (pack chars) ++ " as a double" Just x => return x } parseBool : Parser Bool parseBool = ((char 't' *!> token "rue" *> return True) <|> (char 'f' *!> token "alse" *> return False)) <?> "A boolean value (\"true\" or \"false\")" parseEnum : Parser (interpEnum d) parseEnum {d=MkEnumDescriptor enumName values} = do { chars <- many (satisfy isAlpha) spaces case findIndex (\v => name v == pack chars) values of Nothing => fail ( "An field in the enum " ++ enumName ++ " (no field named " ++ (show (pack chars)) ++ ")") Just i => return i } mutual parseMessage : Parser (InterpMessage d) parseMessage {d=MkMessageDescriptor msgName fields} = do { xs <- parseFields msgName case messageFromFieldList {fields=fields} xs of Left err => fail ("A valid message (" ++ err ++ ")") Right fs => return (MkMessage fs) } parseFields : (msgName : String) -> Parser (FieldList d) parseFields {d=d} msgName = many (do { chars <- some (satisfy (\c => isDigit c || isAlpha c || c == '_')) commitTo (do { spaces token ":" case Data.Vect.findIndex (\v => name v == pack chars) d of Nothing => fail ( "An field in the message " ++ msgName ++ " (no field named " ++ (show (pack chars)) ++ ")") Just i => do { v <- parseField return (i ** v) } }) }) parseField : Parser (singularTypeForField d) parseField {d=MkFieldDescriptor _ ty _ _} = parseFieldValue {d=ty} parseFieldValue : Parser (interpFieldValue d) parseFieldValue {d=PBDouble} = parseDouble parseFieldValue {d=PBFloat} = parseDouble parseFieldValue {d=PBInt32} = parseInteger parseFieldValue {d=PBInt64} = parseInteger parseFieldValue {d=PBUInt32} = parseInteger parseFieldValue {d=PBUInt64} = parseInteger parseFieldValue {d=PBSInt32} = parseInteger parseFieldValue {d=PBSInt64} = parseInteger parseFieldValue {d=PBFixed32} = parseInteger parseFieldValue {d=PBFixed64} = parseInteger parseFieldValue {d=PBSFixed32} = parseInteger parseFieldValue {d=PBSFixed64} = parseInteger parseFieldValue {d=PBBool} = parseBool parseFieldValue {d=PBString} = parseString parseFieldValue {d=PBBytes} = parseString parseFieldValue {d=PBMessage m} = braces parseMessage parseFieldValue {d=PBEnum e} = parseEnum export parseFromTextFormat : String -> Either String (InterpMessage d) parseFromTextFormat {d=d} = assert_total $ parse (spaces *> parseMessage {d=d})
%!TEX TS-program = xelatex
%!TEX encoding = UTF-8 Unicode

\documentclass[12pt]{article}

\usepackage{geometry}
\geometry{letterpaper}
\usepackage{fancyhdr}
\usepackage{extramarks}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{tikz}
\usepackage[plain]{algorithm}
\usepackage{algpseudocode}
\usepackage{caption}
\usepackage{booktabs}
\usepackage{graphics}

\usepackage{xltxtra,fontspec,xunicode}
\usepackage[slantfont,boldfont]{xeCJK}
%\setCJKmainfont{宋体}
\setmainfont{Optima}
\defaultfontfeatures{Mapping=tex-text}
\XeTeXlinebreaklocale "zh"
\XeTeXlinebreakskip = 0pt plus 1pt minus 0.1pt

\usepackage{listings}
\usepackage{color}
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstset{frame=tb,
  language=Java,
  aboveskip=3mm,
  belowskip=3mm,
  showstringspaces=false,
  columns=flexible,
  basicstyle={\small\ttfamily},
  numbers=none,
  numberstyle=\tiny\color{gray},
  keywordstyle=\color{blue},
  commentstyle=\color{dkgreen},
  stringstyle=\color{mauve},
  breaklines=true,
  breakatwhitespace=true,
  tabsize=3
}

\topmargin=-0.45in
\evensidemargin=0in
\oddsidemargin=0in
\textwidth=6.5in
\textheight=9.0in
\headsep=0.25in
\linespread{1.1}

\pagestyle{fancy}
\lhead{\hmwkAuthorName}
\rhead{\hmwkClass}
\chead{\hmwkTitle}
\renewcommand\headrulewidth{0.4pt}
\renewcommand\footrulewidth{0.4pt}
\setlength\parindent{0pt}

% Homework Details
\newcommand{\hmwkTitle}{homework\ \#5}
\newcommand{\hmwkClass}{Deep Reinforcement Learning}
\newcommand{\hmwkAuthorName}{Tianxiao Hu}

\begin{document}

\pagebreak

\section{Problem 1}
\begin{figure}[!h]
\centering
\includegraphics[width=5in]{1.png}
\caption{Comparison of an agent with histogram-based exploration and an agent with no exploration on PointMass.}
\end{figure}

\newpage
\section{Problem 2}
\begin{figure}[!h]
\centering
\includegraphics[width=5in]{2.png}
\caption{Comparison of an agent with KDE-based exploration and an agent with no exploration on PointMass.}
\end{figure}

\newpage
\section{Problem 3}
\begin{figure}[!h]
\centering
\includegraphics[width=5in]{3.png}
\caption{Comparison of an agent with EX2-based exploration and an agent with no exploration on PointMass.}
\end{figure}

\newpage
\section{Problem 4}
\begin{figure}[!h]
\centering
\includegraphics[width=5in]{4.png}
\caption{Comparison of two agents with EX2-based exploration and another agent with no exploration on HalfCheetah.}
\end{figure}

(1) The learning curve goes up and down because, even after the agents have reached the optimal states, the reward bonus still incentivizes them to explore elsewhere. The agents therefore never stop exploring, which results in suboptimal returns.

(2) The bonus parameter affects the agent's performance: a larger bonus keeps driving the agent to explore elsewhere, which yields a different learning curve.

\newpage
\end{document}
theory CJ_DDL imports Main (* Christoph Benzmüller & Xavier Parent & Ali Farjami, 2018 *) begin (* DDL: Dyadic Deontic Logic by Carmo and Jones *) typedecl i (*type for possible worlds*) type_synonym \<tau> = "(i\<Rightarrow>bool)" type_synonym \<gamma> = "\<tau>\<Rightarrow>\<tau>" type_synonym \<rho> = "\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" consts av::"i\<Rightarrow>\<tau>" pv::"i\<Rightarrow>\<tau>" ob::"\<tau>\<Rightarrow>(\<tau>\<Rightarrow>bool)" (*accessibility relations*) cw::i (*current world*) axiomatization where ax_3a: "\<forall>w.\<exists>x. av(w)(x)" and ax_4a: "\<forall>w x. av(w)(x) \<longrightarrow> pv(w)(x)" and ax_4b: "\<forall>w. pv(w)(w)" and ax_5a: "\<forall>X.\<not>ob(X)(\<lambda>x. False)" and ax_5b: "\<forall>X Y Z. (\<forall>w. ((Y(w) \<and> X(w)) \<longleftrightarrow> (Z(w) \<and> X(w)))) \<longrightarrow> (ob(X)(Y) \<longleftrightarrow> ob(X)(Z))" and ax_5ca: "\<forall>X \<beta>. ((\<forall>Z. \<beta>(Z) \<longrightarrow> ob(X)(Z)) \<and> (\<exists>Z. \<beta>(Z))) \<longrightarrow> (((\<exists>y. ((\<lambda>w. \<forall>Z. (\<beta> Z) \<longrightarrow> (Z w))(y) \<and> X(y))) \<longrightarrow> ob(X)(\<lambda>w. \<forall>Z. (\<beta> Z) \<longrightarrow> (Z w))))" and ax_5c: "\<forall>X Y Z. (((\<exists>w. (X(w) \<and> Y(w) \<and> Z(w))) \<and> ob(X)(Y) \<and> ob(X)(Z)) \<longrightarrow> ob(X)(\<lambda>w. Y(w) \<and> Z(w)))" and ax_5d: "\<forall>X Y Z. ((\<forall>w. Y(w) \<longrightarrow> X(w)) \<and> ob(X)(Y) \<and> (\<forall>w. X(w) \<longrightarrow> Z(w))) \<longrightarrow> ob(Z)(\<lambda>w. (Z(w) \<and> \<not>X(w)) \<or> Y(w))" and ax_5e: "\<forall>X Y Z. ((\<forall>w. Y(w) \<longrightarrow> X(w)) \<and> ob(X)(Z) \<and> (\<exists>w. Y(w) \<and> Z(w))) \<longrightarrow> ob(Y)(Z)" abbreviation ddlneg::\<gamma> ("\<^bold>\<not>_"[52]53) where "\<^bold>\<not>A \<equiv> \<lambda>w. \<not>A(w)" abbreviation ddland::\<rho> (infixr"\<^bold>\<and>"51) where "A\<^bold>\<and>B \<equiv> \<lambda>w. A(w)\<and>B(w)" abbreviation ddlor::\<rho> (infixr"\<^bold>\<or>"50) where "A\<^bold>\<or>B \<equiv> \<lambda>w. A(w)\<or>B(w)" abbreviation ddlimp::\<rho> (infixr"\<^bold>\<rightarrow>"49) where "A\<^bold>\<rightarrow>B \<equiv> \<lambda>w. A(w)\<longrightarrow>B(w)" abbreviation ddlequiv::\<rho> (infixr"\<^bold>\<leftrightarrow>"48) where "A\<^bold>\<leftrightarrow>B \<equiv> \<lambda>w. A(w)\<longleftrightarrow>B(w)" abbreviation ddlbox::\<gamma> ("\<^bold>\<box>") where "\<^bold>\<box>A \<equiv> \<lambda>w.\<forall>v. A(v)" (*A = (\<lambda>w. True)*) abbreviation ddlboxa::\<gamma> ("\<^bold>\<box>\<^sub>a") where "\<^bold>\<box>\<^sub>aA \<equiv> \<lambda>w. (\<forall>x. av(w)(x) \<longrightarrow> A(x))" (*in all actual worlds*) abbreviation ddlboxp::\<gamma> ("\<^bold>\<box>\<^sub>p") where "\<^bold>\<box>\<^sub>pA \<equiv> \<lambda>w. (\<forall>x. pv(w)(x) \<longrightarrow> A(x))" (*in all potential worlds*) abbreviation ddldia::\<gamma> ("\<^bold>\<diamond>") where "\<^bold>\<diamond>A \<equiv> \<^bold>\<not>\<^bold>\<box>(\<^bold>\<not>A)" abbreviation ddldiaa::\<gamma> ("\<^bold>\<diamond>\<^sub>a") where "\<^bold>\<diamond>\<^sub>aA \<equiv> \<^bold>\<not>\<^bold>\<box>\<^sub>a(\<^bold>\<not>A)" abbreviation ddldiap::\<gamma> ("\<^bold>\<diamond>\<^sub>p") where "\<^bold>\<diamond>\<^sub>pA \<equiv> \<^bold>\<not>\<^bold>\<box>\<^sub>p(\<^bold>\<not>A)" abbreviation ddlo::\<rho> ("\<^bold>O\<^bold>\<langle>_\<^bold>|_\<^bold>\<rangle>"[52]53) where "\<^bold>O\<^bold>\<langle>B\<^bold>|A\<^bold>\<rangle> \<equiv> \<lambda>w. 
ob(A)(B)" (*it ought to be \<psi>, given \<phi> *) abbreviation ddloa::\<gamma> ("\<^bold>O\<^sub>a") where "\<^bold>O\<^sub>aA \<equiv> \<lambda>w. ob(av(w))(A) \<and> (\<exists>x. av(w)(x) \<and> \<not>A(x))" (*actual obligation*) abbreviation ddlop::\<gamma> ("\<^bold>O\<^sub>p") where "\<^bold>O\<^sub>pA \<equiv> \<lambda>w. ob(pv(w))(A) \<and> (\<exists>x. pv(w)(x) \<and> \<not>A(x))" (*primary obligation*) abbreviation ddltop::\<tau> ("\<^bold>\<top>") where "\<^bold>\<top> \<equiv> \<lambda>w. True" abbreviation ddlbot::\<tau> ("\<^bold>\<bottom>") where "\<^bold>\<bottom> \<equiv> \<lambda>w. False" (*Possibilist Quantification.*) abbreviation ddlforall ("\<^bold>\<forall>") where "\<^bold>\<forall>\<Phi> \<equiv> \<lambda>w.\<forall>x. (\<Phi> x w)" abbreviation ddlforallB (binder"\<^bold>\<forall>"[8]9) where "\<^bold>\<forall>x. \<phi>(x) \<equiv> \<^bold>\<forall>\<phi>" abbreviation ddlexists ("\<^bold>\<exists>") where "\<^bold>\<exists>\<Phi> \<equiv> \<lambda>w.\<exists>x. (\<Phi> x w)" abbreviation ddlexistsB (binder"\<^bold>\<exists>"[8]9) where "\<^bold>\<exists>x. \<phi>(x) \<equiv> \<^bold>\<exists>\<phi>" abbreviation ddlvalid::"\<tau> \<Rightarrow> bool" ("\<lfloor>_\<rfloor>"[7]105) where "\<lfloor>A\<rfloor> \<equiv> \<forall>w. A w" (*Global validity*) abbreviation ddlvalidcw::"\<tau> \<Rightarrow> bool" ("\<lfloor>_\<rfloor>\<^sub>l"[7]105) where "\<lfloor>A\<rfloor>\<^sub>l \<equiv> A cw" (*Local validity (in cw)*) (* A is obliagtory *) abbreviation ddlobl::\<gamma> ("\<^bold>\<circle><_>") where "\<^bold>\<circle><A> \<equiv> \<^bold>O\<^bold>\<langle>A\<^bold>|\<^bold>\<top>\<^bold>\<rangle>" (*New syntax: A is obligatory.*) (* Consistency *) lemma True nitpick [satisfy,user_axioms,show_all,format=3] oops end
#define BOOST_TEST_MODULE pcraster aguila user_defined_classifier #include <boost/test/unit_test.hpp> BOOST_AUTO_TEST_CASE(test) { // using namespace com; bool testImplemented = false; BOOST_WARN(testImplemented); }
lemma measurable_compose_countable_restrict: assumes P: "countable {i. P i}" and f: "f \<in> M \<rightarrow>\<^sub>M count_space UNIV" and Q: "\<And>i. P i \<Longrightarrow> pred M (Q i)" shows "pred M (\<lambda>x. P (f x) \<and> Q (f x) x)"
-- Andreas, 2022-03-07, issue #5809 reported by jamestmartin -- Regression in Agda 2.6.1. -- Not reducing irrelevant projections lead to non-inferable elim-terms -- and consequently to internal errors. -- -- The fix is to treat irrelevant projections as just functions, -- retaining their parameters, so that they remain inferable -- even if not in normal form. {-# OPTIONS --irrelevant-projections #-} {-# OPTIONS --allow-unsolved-metas #-} -- {-# OPTIONS --no-double-check #-} -- {-# OPTIONS -v impossible:100 #-} -- {-# OPTIONS -v tc:40 #-} open import Agda.Builtin.Equality record Squash {ℓ} (A : Set ℓ) : Set ℓ where constructor squash field .unsquash : A open Squash .test : ∀ {ℓ} {A : Set ℓ} (x : A) (y : Squash A) → {!!} test x y = {!!} where help : unsquash (squash x) ≡ unsquash y help = refl -- WAS: internal error. -- Should succeed with unsolved metas.
State Before: ι : Type u' ι' : Type ?u.174501 R : Type u_1 K : Type ?u.174507 M : Type u_2 M' : Type ?u.174513 M'' : Type ?u.174516 V : Type u V' : Type ?u.174521 v : ι → M inst✝⁶ : Semiring R inst✝⁵ : AddCommMonoid M inst✝⁴ : AddCommMonoid M' inst✝³ : AddCommMonoid M'' inst✝² : Module R M inst✝¹ : Module R M' inst✝ : Module R M'' a b : R x y : M s : Set ι ⊢ ¬LinearIndependent R (v ∘ Subtype.val) ↔ ∃ f, f ∈ Finsupp.supported R R s ∧ ↑(Finsupp.total ι M R v) f = 0 ∧ f ≠ 0 State After: no goals Tactic: simp [linearIndependent_comp_subtype, and_left_comm]
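Unfolded, the equivalence in the goal above is the usual characterization of linear dependence of the subfamily indexed by s; writing Finsupp.total as the finite sum it denotes, the statement reads:

\[
  \neg\,\mathrm{LinearIndependent}_R\,(v \circ \iota_s)
  \;\iff\;
  \exists f \in \mathrm{supported}_R(s),\quad
  f \neq 0 \ \wedge\ \sum_{i} f(i)\cdot v(i) = 0 .
\]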
SUBROUTINE IM_CBAR ( clrbar, iret ) C************************************************************************ C* IM_CBAR * C* * C* This routine will draw a color bar for imagery. * C* * C* IM_CBAR ( CLRBAR, IRET ) * C* * C* Input parameters: * C* CLRBAR CHAR* Color bar input * C* * C* Output parameters: * C* IRET INTEGER Return code * C* 0 = normal return * C** * C* Log: * C* J. Cowie/COMET 2/95 Copied from GG_CBAR() * C* J. Cowie/COMET 7/96 Updated computation and drawing * C* J. Cowie/COMET 1/97 Changed IMGDEF common variable names * C* A. Hardy/GSC 4/98 Added calls to GQFILL and GSFILL * C* S. Jacobs/NCEP 2/99 Moved calls to GQLINE and GSLINE * C* T. Piper/GSC 3/99 Corrected prolog * C* S. Jacobs/NCEP 5/99 Moved the labels to center of color box * C* T. Piper/GSC 9/00 Fixed horizontal bar labeling bug * C* T. Piper/GSC 7/01 Fixed typo of variable sizfil * C* M. Li/SAIC 11/03 Changed calling sequence * C* T. Piper/SAIC 07/06 Put () around -2 to eliminate warning * C* T. Piper/SAIC 07/06 Changed GTEXT for units label; top, * C* horizontal case; 2nd to last arg 1 to -2* C* S. Chiswell/Unidata 11/06 Added parsing of text attributes * C* M. James/Unidata 06/10 Moved label frequency logic from IMNIDH * C* M. James/Unidata 11/13 Hydrometeor classification labeling and * C* national composite GINI support added. * C* M. James/Unidata 04/14 High-res NEXRCOMP reworked * C* M. James/Unidata 04/15 Colorbar fix for TDWR * C************************************************************************ INCLUDE 'IMGDEF.CMN' C* CHARACTER*(*) clrbar, dhc(16)*4 INTEGER idcols (256), clevst, clevsp, dhci, idx C* CHARACTER orient*1, label*10, clrtxt(2)*128 REAL size (2), pos (2), xbox (5), ybox (5) LOGICAL cbrflg, hrulim DATA dhc / 'ND', 'BI', 'GC', 'IC', + 'DS', 'WS', 'RA', 'HR', + 'BD', 'GR', 'HA', '', + '', '', 'UK', 'RF' / C------------------------------------------------------------------------ iret = 0 C C* Split clrtxt string to color bar and text attribute strings. C CALL ST_CLST ( clrbar, '|', ' ', 2, clrtxt, num, ier) C C* Parse the color bar input. C CALL IN_CBAR ( clrtxt(1), icbar, size, ilblfq, orient, cbrflg, + ixjust, iyjust, pos, ier ) IF ( .not. cbrflg ) RETURN C C* Save the current line attributes. C CALL GQLINE ( jltyp, jlthw, jwidth, jwhw, ier ) CALL GSLINE ( 1, 0, 1, 0, ier ) C C* Save Users Text Attributes C CALL GQTEXT ( itxfn, itxhw, sztext, itxwid, ibrdr, + irrotn, ijust, ier ) C C* Parse the text_info input and set text attributes. C CALL IN_TXTN ( clrtxt(2), ifont, iswhw, siztxt, + itxwd, ibordr, irotat, ijstif, ier ) C CALL GSTEXT ( ifont, iswhw, siztxt, itxwd, + ibordr, irotat, ijstif, ier ) C C* Get plot bounds in View coordinates. C CALL GQBND ( 'V', xl, yb, xr, yt, ier ) C CALL ST_LCUC ( orient, orient, ier ) C C* Determine the corners of the color bar. C IF ( orient .ne. 'H' ) THEN ihoriz = 2 ivert = 1 ELSE ihoriz = 1 ivert = 2 ENDIF IF ( ixjust .eq. 3 ) THEN xll = pos (1) - size (ihoriz) xur = pos (1) ELSE IF ( ixjust .eq. 2 ) THEN xll = pos (1) - size (ihoriz) / 2 xur = pos (1) + size (ihoriz) / 2 ELSE xll = pos (1) xur = pos (1) + size (ihoriz) ENDIF IF ( iyjust .eq. 3 ) THEN yll = pos (2) - size (ivert) yur = pos (2) ELSE IF ( iyjust .eq. 
2 ) THEN yll = pos (2) - size (ivert) / 2 yur = pos (2) + size (ivert) / 2 ELSE yll = pos (2) yur = pos (2) + size (ivert) ENDIF xll = xll * ( xr - xl ) + xl xur = xur * ( xr - xl ) + xl yll = yll * ( yt - yb ) + yb yur = yur * ( yt - yb ) + yb C CALL GQFILL ( sizfil, ifltyp, ier ) CALL GSFILL ( 1.0, 1, ier ) C C* Create data level numbers for the color bar C IF ( imtype .eq. 135) THEN imndlv = 199 END IF CALL GQCLRS ( imbank, ncolr, ier ) ratio = FLOAT (ncolr - 1) / (imndlv - 1) DO ic = 1, imndlv idcols (ic) = (ic - 1) * ratio + .5 END DO C C* Fill the color boxes C knt = 0 nflvl = imndlv clevst = 1 clevsp = nflvl SELECT CASE (imtype) C HHC CASE ( 2**(24) ) DO idx = 1, imndlv cmblev ( idx ) = '' END DO dhci = 1 DO idx = 1, imndlv,10 cmblev ( idx ) = dhc ( dhci ) dhci = dhci + 1 END DO cmblev ( imndlv ) = 'RF' C EET composite GINI 2**25 C C - this is only for labelling, not data values. data values range from C 2-71 (69 increments) for 0 <= EET < 70k ft C 0 = Missing data C 1 = Bad data / flagged C CASE ( 2**(25) ) DO idx = 1,nflvl cmblev ( idx ) = '' END DO DO idx = 3,73,10 val = idx - 3 CALL ST_INCH ( int(val), cmblev (idx), ier ) END DO DO idx = 131,200,10 val = idx - 131 CALL ST_INCH ( int(val), cmblev (idx), ier ) END DO CALL ST_INCH ( int(70), cmblev (199), ier ) cmblev ( 131 ) = 'TOP' cmblev ( 1 ) = ' ' END SELECT DO i = clevst, clevsp knt = knt + 1 IF ( orient .ne. 'H' ) THEN C C* Set up for the vertically oriented color bar C diff = ( yur - yll ) / nflvl xbox(1) = xll ybox(1) = yll + (knt-1) * diff xbox(2) = xur ybox(2) = yll + (knt-1) * diff xbox(3) = xur ybox(3) = yll + knt * diff xbox(4) = xll ybox(4) = yll + knt * diff ELSE C C* Set up for the horizontally oriented color bar C diff = ( xur - xll ) / nflvl xbox(1) = xll + (knt-1) * diff ybox(1) = yll xbox(4) = xll + knt * diff ybox(4) = yll xbox(3) = xll + knt * diff ybox(3) = yur xbox(2) = xll + (knt-1) * diff ybox(2) = yur END IF C C* Set bar color, fill it, set line color, draw it C CALL GSCOLB ( imbank, idcols(i), ier ) CALL GFILL ( 'N', 4, xbox, ybox, ier ) CALL GSCOLB ( 0, icbar, ier ) IF ( imbank .ne. 1 )CALL GLINE ( 'N', 2, xbox, ybox, ier ) C C* Plot label. C C* Label Frequency Not Implemented - Label All C* Special handling for NEXRAD 256-level products C IF ( ilblfq .ne. 0 .and. cmblev (i) .ne. ' ' ) THEN IF ( imftyp .eq. 13 ) THEN SELECT CASE ( imtype ) CASE (94,32,180,186) IF ( ( i .eq. clevsp ) .or. + ( MOD ( i + 3 , 10 ) .eq. 0 ) ) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (99,182) IF ( MOD ( i-10 , 20 ) .eq. 0 ) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (170,172,173,174,175) IF ( ( i .eq. 1 ) .or. + ( MOD ( i , 20 ) .eq. 0 ) ) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (159) IF ( MOD ( i , 16 ) .eq. 0 ) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (163) IF ( MOD ( i , 20 ) .eq. 3 .or. + (i .eq. 33) .or. (i .eq. 12)) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (161) IF ( ( i .eq. 1 ) .or. + ( MOD ( i , 30 ) .eq. 0 ) ) THEN label = cmblev (i) ELSE label = ' ' END IF CASE (134) IF ( ( i .eq. 1 ) .or. + ( MOD ( i , 12 ) .eq. 0 ) ) THEN label = cmblev (i) ELSE label = '' END IF CASE (135) IF ( ( i .ge. 3 ) .and. + ( i .le. 73 ) .and. + ( MOD ( i , 10 ) .eq. 2) ) THEN label = cmblev (i) ELSE IF ( ( i .ge. 131 ) .and. + ( i .le. 200 ) .and. + ( MOD ( i , 10 ) .eq. 0 ) ) THEN label = cmblev (i) ELSE IF ( i .eq. 2 ) THEN label = cmblev (i) ELSE IF ( ( i .eq. 71 ) .or. + ( i .eq. 199 ) .or. + ( i .eq. 
130 ) ) THEN label = cmblev (i) ELSE label = '' END IF CASE DEFAULT label = cmblev (i) END SELECT ELSE label = cmblev (i) END IF C* VERTICAL IF ( orient .ne. 'H' ) THEN iyoff = 0 C* RIGHT IF ( ilblfq .ge. 0 ) THEN ixoff = 1 xlabl = xbox(2) ylabl = ( ybox(2) + ybox(3) ) / 2.0 C* LEFT ELSE CALL ST_LSTR ( label, ixoff, ier) ixoff = (-2) * ixoff xlabl = xbox(1) ylabl = ( ybox(1) + ybox(4) ) / 2.0 END IF C* HORIZONTAL ELSE CALL ST_LSTR ( label, ixoff, ier) ixoff = ixoff * (-1) C* BOTTOM IF ( ilblfq .lt. 0 ) THEN iyoff = -2 xlabl = ( xbox(1) + xbox(4) ) / 2.0 ylabl = ybox(1) C* TOP ELSE iyoff = 1 xlabl = ( xbox(2) + xbox(3) ) / 2.0 ylabl = ybox(2) END IF END IF CALL GTEXT ( 'N', xlabl, ylabl, label, 0., + ixoff, iyoff, ier ) C END IF END IF END DO C C* Draw box around the whole bar C xbox(1) = xll ybox(1) = yll xbox(2) = xur ybox(2) = yll xbox(3) = xur ybox(3) = yur xbox(4) = xll ybox(4) = yur xbox(5) = xll ybox(5) = yll CALL GLINE ( 'N', 5, xbox, ybox, ier ) C C* Reset the line and fill attributes C CALL GSFILL ( sizfil, ifltyp, ier ) CALL GSLINE ( jltyp, jlthw, jwidth, jwhw, ier ) C C* Plot units label C C* VERTICAL IF ( orient .ne. 'H' ) THEN C* RIGHT IF ( ilblfq .ge. 0 ) THEN CALL GTEXT ( 'N', xur, yur, cmbunt, 0., -1, 2, ier ) C* LEFT ELSE CALL GTEXT ( 'N', xll, yur, cmbunt, 0., -4, 2, ier ) END IF C* HORIZONTAL ELSE C* TOP IF ( ilblfq .ge. 0 ) THEN CALL GTEXT ( 'N', xll, yll, cmbunt, 0., 1, -2, ier ) C* BOTTOM ELSE CALL GTEXT ( 'N', xur, yll, cmbunt, 0., 1, -2, ier ) END IF END IF C C* Reset User's Text Attributes C CALL GSTEXT ( itxfn, itxhw, sztext, itxwid, ibrdr, + irrotn, ijust, ier ) C* RETURN END
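The plot-label block above thins the 256-level NEXRAD colorbars with MOD arithmetic that differs per product code. A minimal Python sketch of that thinning rule, with step/offset pairs abridged from the SELECT CASE branches (the helper name is hypothetical, not part of GEMPAK):

def thin_labels(levels, step, offset=0):
    # Keep every `step`-th label (1-based index shifted by `offset`) and
    # blank the rest, as IM_CBAR does with MOD ( i + offset, step ).
    return [lev if (i + offset) % step == 0 else ' '
            for i, lev in enumerate(levels, start=1)]

# e.g. the 256-level products 94/32/180/186 label every 10th box with an
# offset of 3; product code 134 labels every 12th box with no offset.
labels_ref = thin_labels([str(v) for v in range(256)], step=10, offset=3)
labels_134 = thin_labels([str(v) for v in range(256)], step=12)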
close all; clear all; clc; rng('default'); png_export = true; pdf_export = false; load ('bin/figure_1_spherical_dict_model_1_somp_success_with_k.mat'); mf = spx.graphics.Figures(); mf.new_figure('Recovery probability with K for SOMP'); hold all; legends = cell(1, num_ss); for ns=1:num_ss S = Ss(ns); plot(Ks, bp_success_with_k(ns, :)); legends{ns} = sprintf('S=%d', S); end grid on; xlabel('Sparsity Level'); ylabel('Empirical Recovery Rate'); legend(legends);
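A rough matplotlib equivalent of the plotting section above, for readers without the spx toolbox; the success matrix here is dummy data standing in for bp_success_with_k loaded from the .mat file:

import numpy as np
import matplotlib.pyplot as plt

Ks = np.arange(1, 33)
Ss = [2, 4, 8]
rng = np.random.default_rng(0)
# placeholder for the (num_ss, len(Ks)) recovery-rate matrix in the .mat file
bp_success_with_k = np.clip(
    1.0 - Ks / 40.0 + 0.05 * rng.standard_normal((len(Ss), len(Ks))), 0, 1)

for ns, S in enumerate(Ss):
    plt.plot(Ks, bp_success_with_k[ns], label=f'S={S}')
plt.grid(True)
plt.xlabel('Sparsity Level')
plt.ylabel('Empirical Recovery Rate')
plt.title('Recovery probability with K for SOMP')
plt.legend()
plt.show()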
\newif\ifshowsolutions \input{../preamble} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % HEADER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chead{ {\vbox{ \vspace{2mm} \large Machine Learning \& Data Mining \hfill Caltech CS/CNS/EE 155 \hfill \\[1pt] Set 3\hfill January $20^\text{th}$, 2021 \\ } } } \begin{document} \pagestyle{fancy} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % POLICIES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section*{Policies} \begin{itemize} \item Due 9 PM, January $27^\text{th}$, via Gradescope. \item You are free to collaborate on all of the problems, subject to the collaboration policy stated in the syllabus. \item In this course, we will be using Google Colab for code submissions. You will need a Google account. \item We ask that you use Python 3 (set that as your Colab runtime's Python version) and sklearn version 0.22 (should be the default version for Python 3 in Colab) for your code, and that you comment your code such that the TAs can follow along and run it without any issues. \end{itemize} \section*{Submission Instructions} \begin{itemize} \item You are highly encouraged to use the submission template: \url{https://github.com/lakigigar/Caltech-CS155-2021/blob/main/psets/set3/set3template.tex} \item Submit your report as a single .pdf file to Gradescope (entry code N8XV6Z), under "Set 3 Report". \item In the report, \textbf{include any images generated by your code} along with your answers to the questions. \item Submit your code by \textbf{sharing a link in your report} to your Google Colab notebook for each problem (see naming instructions below). Make sure to set sharing permissions to at least "Anyone with the link can view". \textbf{Links that can not be run by TAs will not be counted as turned in.} Check your links in an incognito window before submitting to be sure. \item For instructions specifically pertaining to the Gradescope submission process, see \url{https://www.gradescope.com/get_started#student-submission}. \end{itemize} \section*{Google Colab Instructions} For each notebook in the course gitub \url{https://github.com/lakigigar/Caltech-CS155-2021}, you need to save a copy to your drive. \begin{enumerate} \item Open the github preview of the notebook, and click the icon to open the colab preview. \item On the colab preview, go to File $\rightarrow$ Save a copy in Drive. \item Edit your file name to “\url{lastname_firstname_originaltitle}”, e.g.”\url{devasenapathy_kriti_set3-prob2.ipynb}” \end{enumerate} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % PROBLEM 1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newpage \section{Decision Trees [30 Points]} \materials{Lecture 5} \problem[7] Consider the following data, where given information about some food you must predict whether it is healthy: \begin{table}[ht] \centering \begin{tabular}{c | c c c | c} \hline No. & Package Type & Unit Price $>$ \$5 & Contains $>$ 5 grams of fat & Healthy? \\ [0.5ex] \hline 1 & Canned & Yes & Yes & No \\ 2 & Bagged & Yes & No & Yes \\ 3 & Bagged & No & Yes & Yes \\ 4 & Canned & No & No & Yes \\ [1ex] \hline \end{tabular} \end{table} Train a decision tree by hand using top-down greedy induction. Use \emph{entropy} (with natural log) as the impurity measure. Since the data can be classified without error, the stopping criterion will be no impurity in the leaves. Submit a drawing of your tree showing the impurity reduction yielded by each split (including root) in your decision tree. \problem[4] Compared to a linear classifier, is a decision tree always preferred for classification problems? 
If not, draw a simple 2-D dataset that can be perfectly classified by a simple linear classifier but which requires an overly complex decision tree to perfectly classify. \problem[15] Consider the following 2D data set: % \begin{figure}[H] % \begin{center} % \includegraphics[width=3.3in]{plots/3C.png} % \end{center} % \end{figure} \subproblem[5] Suppose we train a decision tree on this dataset using top-down greedy induction, with the Gini index as the impurity measure. We define our stopping condition to be if no split of a node results in any reduction in impurity. Submit a drawing of the resulting tree. What is its classification error ((number of misclassified points) / (number of total points))? \subproblem[5] Submit a drawing of a two-level decision tree that classifies the above dataset with zero classification error. (You don't need to use any particular training algorithm to produce the tree.) Is there any impurity measure (i.e. any function that maps the data points under a particular node in a tree to a real number) that would have led top-down greedy induction with the same stopping condition to produce the tree you drew? If so, give an example of one, and briefly describe its pros and cons as an impurity measure for training decision trees in general (on arbitrary datasets). \subproblem[5] Suppose there are 100 data points in some 2-D dataset. What is the largest number of unique thresholds (i.e., internal nodes) you might need in order to achieve zero classification training error (on the training set)? Please justify your answer. \problem[4] Suppose in top-down greedy induction we want to split a leaf node that contains N data points composed of D continuous features. What is the worst-case complexity (big-O in terms of N and D) of the number of possible splits we must consider in order to find the one that most reduces impurity? Please justify your answer. Note: Recall that at each node-splitting step in training a DT, you must consider all possible splits that you can make. While there are an infinite number of possible decision boundaries since we are using continuous features, there are not an infinite number of boundaries that result in unique child sets (which is what we mean by ``split''). \newpage \section{Overfitting Decision Trees [20 Points, 10 EC Points]} \materials{Lecture 5} In this problem, you will use the Diabetic Retinopathy Debrecen Data Set, which contains features extracted from images to determine whether or not the images contain signs of diabetic retinopathy. Additional information about this dataset can be found at the link below: \url{https://archive.ics.uci.edu/ml/datasets/Diabetic+Retinopathy+Debrecen+Data+Set} In the following question, your goal is to predict the diagnosis of diabetic retinopathy, which is the final column in the data matrix. Use the first 900 rows as training data, and the last 251 rows as validation data. Please feel free to use additional packages such as Scikit-Learn. Include your code in your submission. \indent\problem[7] % indent for consistency Choose one of the following options: \textbf{i.} Train a decision tree classifier using Gini as the impurity measure and minimal leaf node size as early stopping criterion. Try different minimal leaf node sizes from 1 to 25 in increments of 1. Then, on a single plot, plot both training and test classification error versus leaf node size. 
To do this, fill in the \texttt{classification\_err} and \texttt{eval\_tree\_based\_model\_min\_samples} functions in the code template for this problem.

\textbf{ii.} Train a decision tree classifier using Gini as the impurity measure and maximal tree depth as early stopping criterion. Try different tree depths from 2 to 20 in increments of 1. Then, on a single plot, plot both training and test classification error versus tree depth.

To do this, fill in the \texttt{eval\_tree\_based\_model\_max\_depth} function in the code template for this problem.

\problem[3]
For either the minimal leaf node size or maximum depth parameters tested in the last question, which parameter value minimizes the test error? What effects does early stopping have on the performance of a decision tree model? Please justify your answer based on the plot you derived from the previous problem.

\problem[2] % indent for consistency
Choose one of the following options:

\textbf{i.} Train a random forest classifier using Gini as the impurity measure, minimal leaf node size as early stopping criterion, and 1,000 trees in the forest. Try different node sizes from 1 to 25 in increments of 1. Then, on a single plot, plot both training and test classification error versus leaf node size.

\textbf{ii.} Train a random forest classifier using Gini as the impurity measure, maximal tree depth as early stopping criterion, and 1,000 trees in the forest. Try different tree depths from 2 to 20 in increments of 1. Then, on a single plot, plot both training and test classification error versus tree depth.

\problem[3]
For either the minimal leaf node size or maximum depth parameters tested, which parameter value minimizes the random forest test error? What effects does early stopping have on the performance of a random forest model? Please justify your answer based on the two plots you derived.

\problem[5]
Do you observe any differences between the curves for the random forest and decision tree plots? If so, explain what could account for these differences.

\problem[7 EC]
Complete the other option from \textbf{Problem A}.

\problem[2 EC]
Complete the other option from \textbf{Problem C}.

\problem[1 EC]
From the parameters tested in \textbf{Problem F}, which parameter value minimizes the test error? From the parameters tested in \textbf{Problem G}, which parameter value minimizes the random forest test error?

\newpage
\section{The AdaBoost Algorithm [20 Points, 20 EC Points]}
\materials{Lecture 6}

In this problem, you will show that the choice of the $\alpha_t$ parameter in the AdaBoost algorithm corresponds to greedily minimizing an exponential upper bound on the loss term at each iteration.

\problem[3 EC]
Let $h_t: \mathbb{R}^m \rightarrow \{-1,1\}$ be the weak classifier obtained at step $t$, and let $\alpha_t$ be its weight. Recall that the final classifier is
$$H(x) = \text{sign}(f(x)) = \text{sign} \left(\sum\limits_{t=1}^T \alpha_{t}h_t(x) \right).$$
Suppose $\{(x_1, y_1), ..., (x_N, y_N)\} \subset \mathbb{R}^m \times \{-1,1\}$ is our training dataset.

Show that the training set error of the final classifier can be bounded from above when an exponential loss function is used:
$$E = \frac{1}{N} \sum\limits_{i=1}^N \exp(-y_{i}f(x_i)) \geq \frac{1}{N} \sum\limits_{i=1}^N \mathbbm{1}(H(x_i) \neq y_i),$$
where $\mathbbm{1}$ is the indicator function.

\problem[3 EC]
Find $D_{T + 1}(i)$ in terms of $Z_t$, $\alpha_t$, $x_i$, $y_i$, and the classifier $h_t$, where $T$ is the last timestep and $t \in \{1, \ldots, T\}$.
Recall that $Z_t$ is the normalization factor for distribution $D_{t+1}$:
$$Z_t = \sum\limits_{i=1}^N D_t(i) \exp(-\alpha_{t}y_{i}h_{t}(x_{i})).$$

\problem[2 EC]
Show that $E = \sum_{i=1}^N \frac{1}{N} e^{\sum_{t=1}^T -\alpha_t y_i h_t(x_i)}.$

\problem[5 EC]
Show that
$$E = \prod\limits_{t=1}^T Z_t.$$
\begin{hint}
Recall that $\sum_{i = 1}^N D_t(i) = 1$ because $D$ is a distribution.
\end{hint}

\problem[5 EC]
Show that the normalizer $Z_t$ can be written as
\[Z_t = (1 - \epsilon_t) \exp(-\alpha_t) + \epsilon_{t} \exp(\alpha_t),\]
where $\epsilon_t$ is the training set error of weak classifier $h_t$ for the weighted dataset:
\[\epsilon_t = \sum\limits_{i=1}^N D_t(i)\mathbbm{1}(h_t(x_i) \neq y_i).\]

\problem[2 EC]
We derived all of this because it is hard to directly minimize the training set error, but we can greedily minimize the upper bound $E$ on this error. Show that choosing $\alpha_t$ greedily to minimize $Z_t$ at each iteration leads to the choices in AdaBoost:
$$\alpha_{t}^* = \frac{1}{2} \ln \left(\frac{1 - \epsilon_t}{\epsilon_t} \right).$$

\begin{problem}[14]
Implement the \texttt{GradientBoosting.fit()} and \texttt{AdaBoost.fit()} methods in the notebook provided for you. Some important notes and guidelines follow:
\begin{itemize}
    \item For both methods, make sure to work with the class attributes provided to you. Namely, after \texttt{GradientBoosting.fit()} is called, \texttt{self.clfs} should be appropriately filled with the \texttt{self.n\_clfs} trained weak hypotheses. Similarly, after \texttt{AdaBoost.fit()} is called, \texttt{self.clfs} and \texttt{self.coefs} should be appropriately filled with the \texttt{self.n\_clfs} trained weak hypotheses and their coefficients, respectively.
    \item \texttt{AdaBoost.fit()} should additionally return an $(N, T)$ shaped numpy array \texttt{D} such that \texttt{D[:, t]} contains $D_{t+1}$ for each $t \in \{0, \ldots, \texttt{self.n\_clfs} - 1\}$.
    \item For the \texttt{AdaBoost.fit()} method, \textbf{use the 0/1 loss} instead of the exponential loss.
    \item The only Sklearn classes that you may use in implementing your boosting fit functions are the DecisionTreeRegressor and DecisionTreeClassifier, not GradientBoostingRegressor.
\end{itemize}
\end{problem}

\begin{problem}[2]
Describe and explain the behaviour of the loss curves for gradient boosting and for AdaBoost. You should consider two kinds of behaviours: the smoothness of the curves and the final values that the curves approach.
\end{problem}

\begin{problem}[2]
Compare the final loss values of the two models. Which performed better on the classification dataset?
\end{problem}

\begin{problem}[2]
For AdaBoost, where are the dataset weights the largest, and where are they the smallest?
\end{problem}
\begin{hint}
Watch how the dataset weights change across time in the animation.
\end{hint}

\end{document}
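For reference, the greedy choice asked for in the last extra-credit derivation follows from a single differentiation of $Z_t$; this is the standard AdaBoost result, sketched here in the notation of the problem set:

$$\frac{\partial Z_t}{\partial \alpha_t} = -(1-\epsilon_t)e^{-\alpha_t} + \epsilon_t e^{\alpha_t} = 0 \;\Longleftrightarrow\; e^{2\alpha_t} = \frac{1-\epsilon_t}{\epsilon_t} \;\Longleftrightarrow\; \alpha_t^* = \frac{1}{2}\ln\left(\frac{1-\epsilon_t}{\epsilon_t}\right),$$

and the second derivative $(1-\epsilon_t)e^{-\alpha_t} + \epsilon_t e^{\alpha_t} > 0$ confirms that this critical point is a minimum.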
clear all
syms a b c x
% Solve the first equation
sol1=solve(a*x^2+b*x+c)
pretty(sol1)
% Solve the second equation
sol2=solve('cos(2*x)+sin(x)=1')
% Convert the symbolic solution to numeric form
numsol2=double(sol2)
% Plot the function on the given interval
ezplot('cos(2*x)+sin(x)-1',[0, 2*pi])
hold on
grid
% Mark the obtained solutions on the plot
plot(numsol2,zeros(size(numsol2)),'rd')
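For readers without the Symbolic Math Toolbox, the same two symbolic steps in SymPy (a sketch; solveset intersects the general trigonometric solution with the requested interval):

import sympy as sp

a, b, c, x = sp.symbols('a b c x')

# Roots of the quadratic a*x**2 + b*x + c = 0 (the familiar formula).
sol1 = sp.solve(a*x**2 + b*x + c, x)
print(sol1)

# Solutions of cos(2x) + sin(x) = 1 on [0, 2*pi]; the equation reduces to
# sin(x)*(1 - 2*sin(x)) = 0, so expect {0, pi/6, 5*pi/6, pi, 2*pi}.
sol2 = sp.solveset(sp.cos(2*x) + sp.sin(x) - 1, x, sp.Interval(0, 2*sp.pi))
print(sol2)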
# Let's build a dataset: heights of 10 sorgho and 10 poacee samples in 3
# environmental conditions (A, B, C)
A=c(rep("sorgho" , 10) , rep("poacee" , 10))
B=rnorm(20,10,4)
C=rnorm(20,8,3)
D=rnorm(20,5,4)
data=data.frame(A,B,C,D)
colnames(data)=c("specie","cond_A","cond_B","cond_C")

# Let's calculate the average value for each condition and each specie with
# the *aggregate* function
bilan=aggregate(cbind(cond_A,cond_B,cond_C)~specie , data=data , mean)
rownames(bilan)=bilan[,1]
bilan=as.matrix(bilan[,-1])

# Then it is easy to make a classical barplot:
lim=1.2*max(bilan)
ze_barplot = barplot(bilan , beside=T , legend.text=T , col=c("blue" , "skyblue") , ylim=c(0,lim))

# It becomes a bit trickier when we want to add error bars representing the
# confidence interval. First I create a small function that takes the bar
# coordinates and the upper/lower interval bounds as input:
error.bar <- function(x, y, upper, lower=upper, length=0.1,...){
  arrows(x, y+upper, x, y-lower, angle=90, code=3, length=length, ...)
}

# Then I calculate the 95% confidence-interval half-width of the mean for
# each specie and condition (n = 10 observations per group):
stdev=aggregate(cbind(cond_A,cond_B,cond_C)~specie , data=data , sd)
rownames(stdev)=stdev[,1]
stdev=as.matrix(stdev[,-1]) * 1.96 / sqrt(10)

# I am ready to add the error bars to the plot using my error.bar function!
png("#4_barplot_with_IC.png" , width = 480, height = 480)
ze_barplot = barplot(bilan , beside=T , legend.text=T , col=c("blue" , "skyblue") , ylim=c(0,lim) , ylab="height")
error.bar(ze_barplot, bilan, stdev)
dev.off()
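The same grouped-barplot-with-error-bars idea in matplotlib, where the yerr argument does the work of the arrows()-based error.bar() helper; the dummy data mirrors the two species and three conditions above:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(123)
data = {"sorgho": rng.normal([10, 8, 5], [4, 3, 4], (10, 3)),
        "poacee": rng.normal([10, 8, 5], [4, 3, 4], (10, 3))}

conds = ["cond_A", "cond_B", "cond_C"]
width = 0.35
xpos = np.arange(len(conds))
for k, (specie, values) in enumerate(data.items()):
    means = values.mean(axis=0)
    ci = 1.96 * values.std(axis=0, ddof=1) / np.sqrt(len(values))  # 95% CI
    plt.bar(xpos + k * width, means, width, yerr=ci, capsize=4, label=specie)
plt.xticks(xpos + width / 2, conds)
plt.ylabel("height")
plt.legend()
plt.show()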
/- Copyright (c) 2018 Kenny Lau. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kenny Lau ! This file was ported from Lean 3 source module data.int.sqrt ! leanprover-community/mathlib commit c3291da49cfa65f0d43b094750541c0731edc932 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Data.Nat.Sqrt /-! # Square root of integers > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines the square root function on integers. `int.sqrt z` is the greatest integer `r` such that `r * r ≤ z`. If `z ≤ 0`, then `int.sqrt z = 0`. -/ namespace Int #print Int.sqrt /- /-- `sqrt z` is the square root of an integer `z`. If `z` is positive, it returns the largest integer `r` such that `r * r ≤ n`. If it is negative, it returns `0`. For example, `sqrt (-1) = 0`, `sqrt 1 = 1`, `sqrt 2 = 1` -/ @[pp_nodot] def sqrt (z : ℤ) : ℤ := Nat.sqrt <| Int.toNat z #align int.sqrt Int.sqrt -/ #print Int.sqrt_eq /- theorem sqrt_eq (n : ℤ) : sqrt (n * n) = n.natAbs := by rw [sqrt, ← nat_abs_mul_self, to_nat_coe_nat, Nat.sqrt_eq] #align int.sqrt_eq Int.sqrt_eq -/ #print Int.exists_mul_self /- theorem exists_mul_self (x : ℤ) : (∃ n, n * n = x) ↔ sqrt x * sqrt x = x := ⟨fun ⟨n, hn⟩ => by rw [← hn, sqrt_eq, ← Int.ofNat_mul, nat_abs_mul_self], fun h => ⟨sqrt x, h⟩⟩ #align int.exists_mul_self Int.exists_mul_self -/ #print Int.sqrt_nonneg /- theorem sqrt_nonneg (n : ℤ) : 0 ≤ sqrt n := coe_nat_nonneg _ #align int.sqrt_nonneg Int.sqrt_nonneg -/ end Int
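The definition above clamps negative inputs to zero and defers to the natural-number square root; a short Python analogue, with math.isqrt playing the role of Nat.sqrt:

import math

def int_sqrt(z: int) -> int:
    # Mirrors Int.sqrt: Nat.sqrt (Int.toNat z), and Int.toNat z = max(z, 0).
    return math.isqrt(max(z, 0))

assert int_sqrt(-1) == 0 and int_sqrt(1) == 1 and int_sqrt(2) == 1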
Sholay was released on 15 August 1975, Indian Independence Day, in Mumbai. Due to lacklustre reviews and a lack of effective visual marketing tools, it saw poor financial returns in its first two weeks. From the third week, however, viewership picked up owing to positive word of mouth. During the initial slow period, the director and writer considered re-shooting some scenes so that Amitabh Bachchan's character would not die. When business picked up, they abandoned this idea. After being helped additionally by a soundtrack release containing dialogue snippets, Sholay soon became an "overnight sensation". The film was then released in other distribution zones such as Delhi, Uttar Pradesh, Bengal, and Hyderabad on 11 October 1975. It became the highest-grossing Bollywood film of 1975, and film ranking website Box Office India has given the film a verdict of "All Time Blockbuster".
Some Crystal Clear Inspiration! Thanks! What a privilege to have met you in Bali, Dawn, AND to have been introduced to Kundalini Yoga! Thank you very much for this nice post and information.
import neurokit2 as nk
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal as scisig

LAD_recordings = np.load('../data/test_sets0.npy')

# 4th-order Butterworth high-pass filter with a very low normalized cutoff,
# used to remove baseline wander before ECG processing.
b, a = scisig.butter(4, 0.0003, btype='highpass')

for recording in LAD_recordings:
    # iterate over the 12 leads
    for lead in recording:
        filtered = scisig.filtfilt(b, a, lead)
        # process the *filtered* lead at a sampling rate of 257 Hz
        signals, _ = nk.ecg_process(filtered, sampling_rate=257)
        nk.ecg_plot(signals)
        plt.show()
        # stop after the first lead of the first recording (quick visual check)
        break
    break
Formal statement is: lemma prime_elem_iff_prime_abs [simp]: "prime_elem k \<longleftrightarrow> prime \<bar>k\<bar>" for k :: int Informal statement is: An integer $k$ is a prime element if and only if $|k|$ is a prime number.
#-------------------------------------------------------------- # getWaveData -- Load data for SRL .wav file with .pps & .seg # information (if available). # # Usage: waveData <- getWaveData(basename) # # Args: # basename -- The basename (with path but sans extension) to # the .wav file. This is also assumed to be the # basename of the .pps and .seg file in the same # location. # # Returns: # A list of three elements: # wav -- A class "Wave" object from package "tuneR". # pps -- A data.frame with columns time, f0, and vcd that # are the time in msec of the epoch marker, the f0 # if a voiced epoch or 0 if unvoiced, and the voicing # status (1=voiced; 0=unvoiced) respectively. # seg -- A data.frame with columns segnam, beg, end listing # the segment name, beginning time, and ending time # of the segment in microseconds. # Note: If .wav file is not found, the function returns NULL # rather than a list. If either or both of .pps or .seg # files is not found, that list element is set to NULL. # # 1/11/20 -- htb #------------------------------------------------------------- # getWaveData <- function(basename) { require(tuneR) wname <- paste0(basename,".wav") pname <- paste0(basename,".pps") sname <- paste0(basename,".seg") if (!file.exists(wname)) return(NULL) w <- readWave(wname) if(file.exists(pname)) { p <- read.table(pname) names(p) <- c("time","f0","vcd") } else { p <- NULL } if(file.exists(sname)) { s <- read.table(sname) names(s) <- c("segnam","beg","end") } else { s <- NULL } rtn <- list(wav=w, pps=p, seg=s) rtn }
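A Python sketch of the same loader, assuming the file-naming convention documented above (.wav plus optional .pps/.seg sidecars); column names follow the R version, and missing sidecars become None:

import os
import pandas as pd
from scipy.io import wavfile

def get_wave_data(basename):
    wname = basename + '.wav'
    if not os.path.exists(wname):
        return None
    rate, wav = wavfile.read(wname)

    def read_table(path, names):
        # Whitespace-delimited table, like R's read.table defaults.
        if not os.path.exists(path):
            return None
        return pd.read_csv(path, sep=r'\s+', header=None, names=names)

    return {'wav': (rate, wav),
            'pps': read_table(basename + '.pps', ['time', 'f0', 'vcd']),
            'seg': read_table(basename + '.seg', ['segnam', 'beg', 'end'])}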
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Linear model search algorithm for Phoenix.""" from model_search import block_builder from model_search.architecture import architecture_utils from model_search.proto import phoenix_spec_pb2 from model_search.search import common from model_search.search import search_algorithm import numpy as np import sklearn import sklearn.linear_model def _one_nonzero_per_row(matrix): """For each row in matrix, randomly zero all but one of the nonzero values.""" # TODO(b/172564129): can it be done without a loop? out = np.zeros_like(matrix) for i in range(matrix.shape[0]): nonzero_indices = np.flatnonzero(matrix[i]) keep = np.random.choice(nonzero_indices) out[i, keep] = matrix[i, keep] return out def _contains_row(matrix, row): for r in matrix: if np.all(r == row): return True return False class LinearModel(search_algorithm.SearchAlgorithm): """Proposes new trials using a linear model. The model suggests an architecture no deeper than those in the trials. Sometimes (by a coin flip) adds one layer of depth to the suggestion. """ def __init__(self, phoenix_spec): """Args: phoenix_spec: phoenix_spec_pb2 for the experiment.""" self._phoenix_spec = phoenix_spec self._block_indices = np.unique( [block_builder.BlockType.EMPTY_BLOCK.value] + [idx.value for idx in common.block_indices(phoenix_spec)]) def _predict_best_architecture(self, architectures, losses): """Fits a linear model for loss = f(architecture) and finds its argmin. Main computational subroutine for trial data already in feature vector form. Args: architectures: (n_trials, depth) integer matrix of architectures. losses: (n_trials) positive validation error. Returns: predicted_loss: Scalar loss predicted for the chosen architecture. ints_best: (depth) integer vector representing the architecture that minimizes loss according to the model. """ if self._phoenix_spec.linear_model.remove_outliers and len(losses) >= 10: median, decile = np.percentile(losses, [50, 90], interpolation="higher") keep = losses < min(decile, 10 * median) architectures = architectures[keep] losses = losses[keep] n_trials, depth = architectures.shape n_blocks = len(self._block_indices) # Reshaping because we have many values (layers) of one categorical feature # rather than many different categorical features. encoder = sklearn.preprocessing.OneHotEncoder( categories=[list(self._block_indices)]) flat = architectures.reshape(-1, 1) x_onehot_flat = encoder.fit_transform(flat) x = x_onehot_flat.reshape((n_trials, depth * n_blocks)) assert np.all(np.sum(x, axis=1) == depth) # Use ridge regession in case problem is underdetermined, which is likely. model = sklearn.linear_model.Ridge( alpha=self._phoenix_spec.linear_model.ridge_penalty) model = model.fit(x, losses) weights = model.coef_ weights_bylayer = weights.reshape((depth, n_blocks)) # Pick the block with minimum weight per layer. Break ties randomly. 
weights_min_bylayer = np.amin(weights_bylayer, axis=1) indicator_best = 1.0 * (weights_bylayer == weights_min_bylayer[:, None]) onehot_best = _one_nonzero_per_row(indicator_best) predicted_loss = model.predict(onehot_best.reshape((1, -1)))[0] ints_best = encoder.inverse_transform(onehot_best).flatten() assert ints_best.shape == (depth,) return predicted_loss, ints_best def _suggest_by_padding(self, architectures, losses): """Pads architectures with EMPTY_BLOCK and call _predict_best_architecture. Variable-length architectures are padded into fixed dimensionality at either head or base, as determined by spec.network_alignment. Args: architectures: List of iterables of block_builder.BlockType values (or integers). losses: Iterable of floats: objective value to be minimized. Returns: loss: Estimated loss value of best architecture according to the model. trimmed: Best architecture according to the model. """ depths = np.array([len(arch) for arch in architectures]) maxdepth = np.amax(depths) extended = np.array( [self._pad_architecture(arch, maxdepth) for arch in architectures]) loss, suggestion = self._predict_best_architecture(extended, losses) trimmed = np.array([ block for block in suggestion if block != block_builder.BlockType.EMPTY_BLOCK.value ]) return loss, trimmed def _pad_architecture(self, arch, maxdepth): """Pad with empty blocks according to spec network alignment.""" empties = [block_builder.BlockType.EMPTY_BLOCK.value] * ( maxdepth - len(arch)) align = self._phoenix_spec.linear_model.network_alignment if align == phoenix_spec_pb2.LinearModelSpec.NET_ALIGN_BASE: return empties + list(arch) elif (align == phoenix_spec_pb2.LinearModelSpec.NET_ALIGN_HEAD or align == phoenix_spec_pb2.LinearModelSpec.NET_ALIGN_UNSPECIFIED): return list(arch) + empties else: raise ValueError("Phoenix spec network_alignment unknown value") def _load_trials(self, trials): """Load trial architectures from filesystem.""" completed_trials = trials architectures = [] losses = [] for trial in completed_trials: directory = architecture_utils.DirectoryHandler.trial_dir(trial) architecture = architecture_utils.get_architecture(directory) # The location of the flatten block is fixed # by the transition from convolutional to fully-connected layers. # It should not be a part of our search problem. # It will be placed by architecture_utils.fix_architecture_order(). filtered = np.array([ block for block in architecture if block not in block_builder.FLATTEN_TYPES ]) architectures.append(filtered) losses.append(trial.final_measurement.objective_value) return architectures, np.array(losses) def get_suggestion(self, trials, hparams, my_trial_id=None, model_dir=None): """See base class SearchAlgorithm.""" architectures, losses = self._load_trials(trials) # No feasible trials yet. if len(architectures) < self._phoenix_spec.linear_model.trials_before_fit: return common.encode_architecture(hparams.initial_architecture, self._phoenix_spec.problem_type), None _, suggestion = self._suggest_by_padding(architectures, losses) # Decide whether to allow growth. # TODO(b/172564129): refactor common behavior with other search algorithms. 
    allowed_depth = common.get_allowed_depth(
        len(architectures),
        depth_thresholds=self._phoenix_spec.increase_complexity_minimum_trials,
        max_depth=self._phoenix_spec.maximum_depth)
    explore_mode = common.random(
        self._phoenix_spec.increase_complexity_probability)
    new_block = block_builder.BlockType[hparams.new_block_type]

    if suggestion.size <= allowed_depth and explore_mode:
      # increase_structure_depth expects that the architecture contains a
      # flatten block, which may not be true for the linear model's output.
      suggestion = np.array(
          architecture_utils.fix_architecture_order(
              suggestion, self._phoenix_spec.problem_type))
      suggestion = architecture_utils.increase_structure_depth(
          suggestion, new_block, self._phoenix_spec.problem_type)
    elif _contains_row(architectures, suggestion):
      # The linear model suggested an architecture we've already tried
      # in a previous trial, so we mutate it.
      # TODO(b/172564129): more intelligent _contains_row check: should handle
      # when mutate_replace output has been tried, but not just a while loop,
      # since that could run forever if the number of untried architectures
      # is small.
      suggestion = common.mutate_replace(suggestion, new_block)
    else:
      # The linear model suggested a novel architecture; use it.
      pass

    suggestion = [block_builder.BlockType(b) for b in suggestion]
    return np.array(
        architecture_utils.fix_architecture_order(
            suggestion, self._phoenix_spec.problem_type)), None
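A self-contained toy run of the core idea in _predict_best_architecture: one-hot encode the block choice at each layer, fit ridge regression of loss on that encoding, then take the minimum-weight block per layer. The block ids and losses below are invented for illustration; sparse_output is the sklearn >= 1.2 spelling (older versions use sparse=False):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.preprocessing import OneHotEncoder

blocks = [0, 1, 2]                                   # toy block vocabulary
archs = np.array([[0, 1], [1, 2], [2, 0], [1, 1]])   # (n_trials, depth)
losses = np.array([0.9, 0.3, 0.7, 0.4])

n_trials, depth = archs.shape
enc = OneHotEncoder(categories=[blocks], sparse_output=False)
x = enc.fit_transform(archs.reshape(-1, 1)).reshape(n_trials, -1)

model = Ridge(alpha=1.0).fit(x, losses)
weights = model.coef_.reshape(depth, len(blocks))
best = np.array(blocks)[weights.argmin(axis=1)]      # ties: first index here,
print('suggested architecture:', best)               # randomized in the real code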
lemma frontier_subset_eq: "frontier S \<subseteq> S \<longleftrightarrow> closed S"
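Stated in conventional notation, with $\partial S = \overline{S} \setminus S^{\circ}$: the lemma says $\partial S \subseteq S \iff S$ is closed. A one-line sketch (not the Isabelle proof): since $S^{\circ} \subseteq S$ always and $\overline{S} = \partial S \cup S^{\circ}$,
$$\partial S \subseteq S \;\Longleftrightarrow\; \overline{S} \subseteq S \;\Longleftrightarrow\; S \text{ is closed}.$$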
Endless Summer glow is an airbrush tanning, facial, and waxing service at La Renew Day Spa.
module Util.Numeric where import Control.Arrow (second) import Numeric.Log import Numeric.SpecFunctions (logGamma) average :: Fractional a => [a] -> a average = go 0 0 where go n x [] = x / fromIntegral n go n x (z : zs) = go (n + 1) (x + z) zs shiftByMax :: (Precise d, RealFloat d, Ord d) => [(a, Log d)] -> [(a, Log d)] shiftByMax xs = map (second (/ x')) xs where x' = maximum $ map snd xs exponentiateWeights :: (Precise d, RealFloat d, Ord d) => [(a, Log d)] -> [(a, d)] exponentiateWeights = map (second (exp . ln)) . shiftByMax weightedAverage :: Fractional a => [(a, a)] -> a weightedAverage = go 0 0 where go w x [] = x / w go w x ((y, ll) : ys) = go (w + ll) (ll * y + x) ys weightedAverageGeneric :: Fractional a => (a -> b -> b) -> b -> (b -> b -> b) -> [(b, a)] -> b weightedAverageGeneric scale zero plus = go 0 zero where go w x [] = scale (recip w) x go w x ((y, ll) : ys) = go (w + ll) (scale ll y `plus` x) ys logFact :: Int -> Double logFact n = logGamma (fromIntegral (n + 1))
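The shiftByMax / exponentiateWeights pair above is the usual log-domain safeguard: dividing every weight by the maximum (i.e. subtracting the max log-weight) before exponentiating maps the largest term to exp 0 = 1, so nothing overflows and at least one weight survives underflow. A NumPy sketch of the same normalization:

import numpy as np

def exponentiate_weights(log_w):
    # Subtract the max log-weight before exponentiating (shiftByMax).
    log_w = np.asarray(log_w, dtype=float)
    return np.exp(log_w - log_w.max())

print(exponentiate_weights([-1000.0, -1001.0, -1005.0]))
# -> [1.0, exp(-1), exp(-5)]; a naive np.exp would underflow all three to 0.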
#' Random diffnet network #' #' Simulates a diffusion network by creating a random dynamic network and #' adoption threshold levels. #' #' @param n Integer scalar. Number of vertices. #' @param t Integer scalar. Time length. #' @param seed.nodes Either a character scalar or a vector. Type of seed nodes (see details). #' @param seed.p.adopt Numeric scalar. Proportion of early adopters. #' @param seed.graph Baseline graph used for the simulation (see details). #' @param rgraph.args List. Arguments to be passed to rgraph. #' @param rewire Logical scalar. When TRUE, network slices are generated by rewiring #' (see \code{\link{rewire_graph}}). #' @param rewire.args List. Arguments to be passed to \code{\link{rewire_graph}}. #' @param threshold.dist Either a function to be applied via \code{\link{sapply}}, #' a numeric scalar, or a vector/matrix with \eqn{n} elements. Sets the adoption #' threshold for each node. #' @param exposure.args List. Arguments to be passed to \code{\link{exposure}}. #' @param name Character scalar. Passed to \code{\link{as_diffnet}}. #' @param behavior Character scalar. Passed to \code{\link{as_diffnet}}. #' @param stop.no.diff Logical scalar. When \code{TRUE}, the function will return #' with error if there was no diffusion. Otherwise it throws a warning. #' @return A random \code{\link{diffnet}} class object. #' @family simulation functions #' @details #' #' Instead of randomizing whether an individual adopts the innovation or not, this #' toy model randomizes threshold levels, seed adopters and network structure, so #' an individual adopts the innovation in time \eqn{T} iff his exposure is above or #' equal to his threshold. The simulation is done in the following steps: #' #' \enumerate{ #' \item Using \code{seed.graph}, a baseline graph is created. #' \item Given the baseline graph, the set of initial adopters is defined #' using \code{seed.nodes}. #' \item Afterwards, if \code{rewire=TRUE} \eqn{t-1} slices of the network are created #' by iteratively rewiring the baseline graph. #' \item The \code{threshold.dist} function is applied to each node in the graph. #' \item Simulation starts at \eqn{t=2} assigning adopters in each time period #' accordingly to each vertex's threshold and exposure. #' } #' #' When \code{seed.nodes} is a character scalar it can be \code{"marginal"}, \code{"central"} or \code{"random"}, #' So each of these values sets the initial adopters using the vertices with lowest #' degree, with highest degree or completely randomly. The number of early adoptes #' is set as \code{seed.p.adopt * n}. Please note that when marginal nodes are #' set as seed it may be the case that no diffusion process is attained as the #' chosen set of first adopters can be isolated. Any other case will be considered #' as an index (via \code{\link{[<-}} methods), hence the user can manually set the set of initial adopters, for example #' if the user sets \code{seed.nodes=c(1, 4, 7)} then nodes 1, 4 and 7 will be #' selected as initial adopters. 
#' #' The argument \code{seed.graph} can be either a function that generates a graph #' (Any class of accepted graph format (see \code{\link{netdiffuseR-graphs}})), a #' graph itself or a character scalar in which the user sets the algorithm used to #' generate the first network (network in t=1), this can be either "scale-free" #' (Barabasi-Albert model using the \code{\link{rgraph_ba}} function, the default), #' \code{"bernoulli"} (Erdos-Renyi model using the \code{\link{rgraph_er}} function), #' or \code{"small-world"} (Watts-Strogatz model using the \code{\link{rgraph_ws}} #' function). The list \code{rgraph.args} passes arguments to the chosen algorithm. #' #' When \code{rewire=TRUE}, the networks that follow t=1 will be generated using the #' \code{\link{rewire_graph}} function as \eqn{G(t) = R(G(t-1))}, where \eqn{R} #' is the rewiring algorithm. #' #' If a function, the argument \code{threshold.dist} sets the threshold for each vertex in the graph. #' It is applied using \code{sapply} as follows #' #' \preformatted{ #' sapply(1:n, threshold.dist) #' } #' #' By default sets the threshold to be random for each node in the graph. #' #' If \code{seed.graph} is provided, no random graph is generated and the simulation #' is applied using that graph instead. #' #' \code{rewire.args} has the following default options: #' #' \tabular{ll}{ #' \code{p} \tab \code{.1} \cr #' \code{undirected} \tab \code{getOption("diffnet.undirected", FALSE)} \cr #' \code{self} \tab \code{getOption("diffnet.self", FALSE)} #' } #' #' \code{exposure.args} has the following default options: #' #' \tabular{ll}{ #' \code{outgoing} \tab \code{TRUE} \cr #' \code{valued} \tab \code{getOption("diffnet.valued", FALSE)} \cr #' \code{normalized} \tab \code{TRUE} #' } #' #' @examples #' # Asimple example ----------------------------------------------------------- #' set.seed(123) #' z <- rdiffnet(100,10) #' z #' summary(z) #' #' # A more complex example: Adopt if at least one neighbor has adopted -------- #' y <- rdiffnet(100, 10, threshold.dist=function(x) 1, #' exposure.args=list(valued=FALSE, normalized=FALSE)) #' #' # Re thinking the Adoption of Tetracycline ---------------------------------- #' newMI <- rdiffnet(seed.graph = medInnovationsDiffNet$graph, #' threshold.dist = threshold(medInnovationsDiffNet), rewire=FALSE) #' #' #' @author George G. Vega Yon #' @name rdiffnet NULL rdiffnet_make_threshold <- function(x, n) { # Using sapply to compute the threshold if (inherits(x, "function")) { thr <- sapply(1:n, x) } else if ((length(x)==1) && is.numeric(x)) { thr <- rep(x, n) } else { # Setting depending on class if (any(class(x) %in% c("data.frame", "matrix"))) { thr <- as.vector(as.matrix(x)) # Must match the length of n if (length(thr) != n) stop("Incorrect length for -threshold.dist- (",length(x),")", ". It should be a vector of length ",n,".") } else if (is.vector(x)) { thr <- x # Must match the length of n if (length(thr) != n) stop("Incorrect length for -threshold.dist- (",length(x),")", ". 
It should be a vector of length ",n,".") } else { stop("-threshold.dist- must be either a numeric vector of length -n-, a numeric scalar, or a function.") } } thr } rdiffnet_check_seed_graph <- function(seed.graph, rgraph.args, t, n) { test <- class(seed.graph) if ("function" %in% test) { # Does it returns a graph test <- seed.graph() # Coercing into appropiate type if (inherits(test, "dgCMatrix")) { sgraph <- test } else if (inherits(test, "matrix")) { sgraph <- methods::as(test, "dgCMatrix") } else if (inherits(test, "array")) { sgraph <- apply(test, 3, function(x) methods::as(x, "dgCMatrix")) } else if (inherits(test, "diffnet")) { sgraph <- test$graph } else if (inherits(test, "list")) { sgraph <- test } # In the case of calling a function } else if ("character" %in% test) { # Scale-free networks ------------------------------------------------------ if (seed.graph == "scale-free") { if (!length(rgraph.args$m0)) rgraph.args$t <- n-1L sgraph <- do.call(rgraph_ba, rgraph.args) # Bernoulli graphs --------------------------------------------------------- } else if (seed.graph == "bernoulli") { rgraph.args$n <- n sgraph <- do.call(rgraph_er, rgraph.args) # Small-world network ------------------------------------------------------ } else if (seed.graph == "small-world") { rgraph.args$n <- n if (!length(rgraph.args$k)) rgraph.args$k <- 2L if (!length(rgraph.args$p)) rgraph.args$p <- .1 sgraph <- do.call(rgraph_ws, rgraph.args) } else stop("Invalid -seed.graph-. It should be either ", "'scale-free\', \'bernoulli\' or \'small-world\'.") # Creating t duplicates graph <- rep(list(sgraph), t) } else if (any(c("matrix", "dgCMatrix", "array") %in% test)) { # If not dgCMatrix if (("array" %in% test) & !("matrix" %in% test)) sgraph <- apply(seed.graph, 3, function(x) methods::as(x, "dgCMatrix")) else sgraph <- methods::as(seed.graph, "dgCMatrix") } else if ("list" %in% test) { sgraph <- seed.graph } else if ("diffnet" %in% test) { sgraph <- seed.graph$graph } else stop("Invalid argument for -seed.graph-. No support for objects of class -",test,"-.") sgraph } #' @rdname rdiffnet #' @export #' @param R Integer scalar. Number of simulations to be done. #' @param statistic A Function to be applied to each simulated diffusion network. #' @param ... Further arguments to be passed to \code{rdiffnet}. #' @param ncpus Integer scalar. Number of processors to be used (see details). #' @param cl An object of class \code{\link[parallel:makeCluster]{c("SOCKcluster", "cluster")}} #' (see details). #' @details #' The function \code{rdiffnet_multiple} is a wrapper of \code{rdiffnet} wich allows #' simulating multiple diffusion networks with the same parameters and apply #' the same function to all of them. This function is designed to allow the user #' to perform larger simulation studies in which the distribution of a particular #' statistic is observed. #' #' When \code{cl} is provided, then simulations are done via #' \code{\link[parallel:parSapply]{parSapply}}. If \code{ncpus} is greater than #' 1, then the function creates a cluster via \code{\link[parallel:makeCluster]{makeCluster}} #' which is stopped (removed) once the process is complete. #' #' @return \code{rdiffnet_multiple} returns either a vector or an array depending #' on what \code{statistic} is (see \code{\link{sapply}} and #' \code{\link[parallel:parSapply]{parSapply}}). 
#' #' @examples #' # Simulation study comparing the diffusion with diff sets of seed nodes ----- #' #' # Random seed nodes #' set.seed(1) #' ans0 <- rdiffnet_multiple(R=50, statistic=function(x) sum(!is.na(x$toa)), #' n = 100, t = 4, seed.nodes = "random", stop.no.diff=FALSE) #' #' # Central seed nodes #' set.seed(1) #' ans1 <- rdiffnet_multiple(R=50, statistic=function(x) sum(!is.na(x$toa)), #' n = 100, t = 4, seed.nodes = "central", stop.no.diff=FALSE) #' #' boxplot(cbind(Random = ans0, Central = ans1), main="Number of adopters") rdiffnet_multiple <- function( R, statistic, ..., ncpus = 1L, cl = NULL ) { # Checking the type of answer that it returns # Calling parallel if ((ncpus > 1) | length(cl)) { # Creating the cluster if (!length(cl)) { cl <- parallel::makeCluster(ncpus) on.exit(parallel::stopCluster(cl)) # Loading R packages parallel::clusterEvalQ(cl, library(netdiffuseR)) } # Calling the function parallel::parSapply(cl, X=seq_len(R), function(i, statistic, ...) { statistic(netdiffuseR::rdiffnet(...)) }, statistic = statistic, ...) } else { # If no parallel apply sapply(X=seq_len(R), function(i, statistic, ...) { statistic(netdiffuseR::rdiffnet(...)) }, statistic = statistic, ...) } } #' @rdname rdiffnet #' @export rdiffnet <- function( n, t, seed.nodes = "random", seed.p.adopt = 0.05, seed.graph = "scale-free", rgraph.args = list(), rewire = TRUE, rewire.args = list(), threshold.dist = runif(n), exposure.args = list(), name = "A diffusion network", behavior = "Random contagion", stop.no.diff = TRUE ) { # Checking options if (!length(rewire.args[["p"]])) rewire.args[["p"]] <- .1 if (!length(rewire.args[["undirected"]])) rewire.args[["undirected"]] <- getOption("diffnet.undirected", FALSE) if (!length(rewire.args[["self"]])) rewire.args[["self"]] <- getOption("diffnet.self", FALSE) if (!length(exposure.args[["outgoing"]])) exposure.args[["outgoing"]] <- TRUE if (!length(exposure.args[["valued"]])) exposure.args[["valued"]] <- getOption("diffnet.valued", FALSE) if (!length(exposure.args[["normalized"]])) exposure.args[["normalized"]] <- TRUE # Step 0.0: Creating the network seed ---------------------------------------- # Checking the class of the seed.graph sgraph <- rdiffnet_check_seed_graph(seed.graph, rgraph.args, t, n) # Checking baseline graph -------------------------------------------------- meta <- classify_graph(sgraph) # Was n set? if (!missing(n) && n != meta$n) { warning("While the user set n=",n,", nnodes(seed.graph)=", meta$n,". 
The later will be used.") n <- meta$n } if (missing(n)) n <- meta$n # If static, t must be provided, otherwise t should be missing if (meta$nper == 1) { if (missing(t)) stop("When -seed.graph- is static, -t- must be provided.") else sgraph <- rep(list(sgraph), t) } else { if (!missing(t)) warning("When -seed.graph- is dynamic, -t- shouldn't be provided.") t <- meta$nper } # Step 0.1: Rewiring or not ------------------------------------------------ # Rewiring if (rewire) sgraph <- do.call(rewire_graph, c(list(graph=sgraph), rewire.args)) sgraph <- lapply(sgraph, `attr<-`, which="undirected", value=NULL) # Number of initial adopters if ((seed.p.adopt > 1) | (seed.p.adopt < 0)) { stop("The proportion of initial adopters should be a number in [0,1]") } if (n*seed.p.adopt < 1) warning("Set of initial adopters set to 1.") n0 <- max(1, n*seed.p.adopt) # Step 0.1: Setting the seed nodes ------------------------------------------- cumadopt <- matrix(0L, ncol=t, nrow=n) toa <- matrix(NA, ncol=1, nrow= n) if (length(seed.nodes) == 1) { if (seed.nodes %in% c("central","marginal")) { # Creating a degree ranking d <- dgr(sgraph)[,1,drop=FALSE] decre <- ifelse(seed.nodes == "central", TRUE, FALSE) d <- rownames(d[order(d, decreasing = decre),,drop=FALSE]) d <- d[1:floor(n0)] d <- as.numeric(d) } else if (seed.nodes == "random") { d <- sample.int(n, floor(n0)) } else stop("Unsupported -seed.nodes- value. It must be either \"central\", \"marginal\", or \"random\"") } else if (!inherits(seed.nodes, "character")) { d <- seed.nodes } else stop("Unsupported -seed.nodes- value. See the manual for references.") # Setting seed nodes via vector toa[d] <- 1L cumadopt[d,] <- 1L # Step 3.0: Thresholds ------------------------------------------------------- thr <- rdiffnet_make_threshold(threshold.dist, n) # Running the simulation for (i in 2:t) { # Computing exposure exposure.args[c("graph", "cumadopt")] <- list(sgraph[i], cumadopt[,i,drop=FALSE]) expo <- do.call(exposure, exposure.args) whoadopts <- which( (expo >= thr) & is.na(toa)) toa[whoadopts] <- i cumadopt[whoadopts, i:t] <- 1L } reachedt <- max(toa, na.rm=TRUE) # Checking the result if (reachedt == 1) { if (stop.no.diff) stop("No diffusion in this network (Ups!) try changing the seed or the parameters.") else warning("No diffusion in this network.") } # Checking attributes isself <- any(sapply(sgraph, function(x) any(Matrix::diag(x) != 0) )) # Creating diffnet object new_diffnet( graph = sgraph, toa = as.integer(toa), self = isself, t0 = 1, t1 = t, vertex.static.attrs = data.frame(real_threshold=thr), name = name, behavior = behavior ) }
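A stripped-down Python sketch of the simulation loop at the heart of rdiffnet: a fixed random graph, uniform per-node thresholds, and adoption at time t whenever a node's normalized exposure to current adopters reaches its threshold. Graph-generation options and rewiring from the R version are omitted:

import numpy as np

rng = np.random.default_rng(123)
n, t_max, p_edge = 100, 10, 0.05

A = (rng.random((n, n)) < p_edge).astype(float)    # Bernoulli adjacency
np.fill_diagonal(A, 0)
threshold = rng.random(n)                          # runif-style thresholds
toa = np.full(n, np.nan)                           # time of adoption
toa[rng.choice(n, size=5, replace=False)] = 1      # random seed adopters

for t in range(2, t_max + 1):
    adopted = ~np.isnan(toa)
    # share of neighbors that have adopted (normalized exposure)
    exposure = A @ adopted / np.maximum(A.sum(axis=1), 1)
    newly = (exposure >= threshold) & ~adopted
    toa[newly] = t

print('adopters:', int((~np.isnan(toa)).sum()), 'of', n)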
/- Copyright (c) 2023 María Inés de Frutos-Fernández. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: María Inés de Frutos-Fernández -/ import from_mathlib.filter import from_mathlib.ring_seminorm /-! # seminorm_from_const In this file, we prove [BGR, Proposition 1.3.2/2] : starting from a power-multiplicative seminorm on a commutative ring `R` and a nonzero `c : R`, we create a new power-multiplicative seminorm for which `c` is multiplicative. ## Main Definitions * `seminorm_from_const'` : the real-valued function sending `x ∈ R` to the limit of `(f (x * c^n))/((f c)^n)`. * `seminorm_from_const` : the function `seminorm_from_const'` is a `ring_seminorm` on `R`. ## Main Results * `seminorm_from_const_is_nonarchimedean` : the function `seminorm_from_const' hf1 hc hpm` is nonarchimedean when f is nonarchimedean. * `seminorm_from_const_is_pow_mul` : the function `seminorm_from_const' hf1 hc hpm` is power-multiplicative. * `seminorm_from_const_c_is_mul` : for every `x : R`, `seminorm_from_const' hf1 hc hpm (c * x)` equals the product `seminorm_from_const' hf1 hc hpm c * seminorm_from_const' hf1 hc hpm x`. ## References * [S. Bosch, U. Güntzer, R. Remmert, *Non-Archimedean Analysis*][bosch-guntzer-remmert] ## Tags seminorm_from_const, seminorm, nonarchimedean -/ noncomputable theory open_locale topology section ring variables {R : Type*} [comm_ring R] (c : R) (f : ring_seminorm R) (hf1 : f 1 ≤ 1) (hc : 0 ≠ f c) (hpm : is_pow_mul f) /-- For a ring seminorm `f` on `R` and `c ∈ R`, the sequence given by `(f (x * c^n))/((f c)^n)`. -/ def seminorm_from_const_seq (x : R) : ℕ → ℝ := λ n, (f (x * c^n))/((f c)^n) /-- The terms in the sequence `seminorm_from_const_seq c f x` are nonnegative. -/ lemma seminorm_from_const_seq_nonneg (x : R) (n : ℕ) : 0 ≤ seminorm_from_const_seq c f x n := div_nonneg (map_nonneg f (x * c ^ n)) (pow_nonneg (map_nonneg f c) n) /-- The image of `seminorm_from_const_seq c f x` is bounded below by zero. -/ lemma seminorm_from_const_is_bounded (x : R) : bdd_below (set.range (seminorm_from_const_seq c f x)) := begin use 0, rw mem_lower_bounds, intros r hr, obtain ⟨n, hn⟩ := hr, rw ← hn, exact seminorm_from_const_seq_nonneg c f x n, end variable {f} /-- `seminorm_from_const_seq c f 0` is the constant sequence zero. -/ lemma seminorm_from_const_seq_zero (hf : f 0 = 0) : seminorm_from_const_seq c f 0 = 0 := begin simp only [seminorm_from_const_seq], ext n, rw [zero_mul, hf, zero_div], refl, end variable {c} include hc hpm /-- If `1 ≤ n`, then `seminorm_from_const_seq c f 1 n = 1`. -/ lemma seminorm_from_const_seq_one (n : ℕ) (hn : 1 ≤ n) : seminorm_from_const_seq c f 1 n = 1 := begin simp only [seminorm_from_const_seq], rw [one_mul, hpm _ hn, div_self (pow_ne_zero n (ne.symm hc))], end include hf1 /-- `seminorm_from_const_seq c f x` is antitone. 
-/ lemma seminorm_from_const_seq_antitone (x : R) : antitone (seminorm_from_const_seq c f x) := begin intros m n hmn, simp only [seminorm_from_const_seq], nth_rewrite 0 ← nat.add_sub_of_le hmn, rw [pow_add, ← mul_assoc], have hc_pos : 0 < f c := lt_of_le_of_ne (map_nonneg f _) hc, apply le_trans ((div_le_div_right (pow_pos hc_pos _)).mpr (map_mul_le_mul f _ _)), by_cases heq : m = n, { have : n - m = 0, { rw heq, exact nat.sub_self n, }, rw [this, heq, div_le_div_right (pow_pos hc_pos _), pow_zero], conv_rhs{rw ← mul_one (f (x * c ^ n))}, exact mul_le_mul_of_nonneg_left hf1 (map_nonneg f _) }, { have h1 : 1 ≤ n - m, { rw [nat.one_le_iff_ne_zero, ne.def, nat.sub_eq_zero_iff_le, not_le], exact lt_of_le_of_ne hmn heq,}, rw [hpm c h1, mul_div_assoc, div_eq_mul_inv, pow_sub₀ _ (ne.symm hc) hmn, mul_assoc, mul_comm (f c ^ m)⁻¹, ← mul_assoc (f c ^ n), mul_inv_cancel (pow_ne_zero n (ne.symm hc)), one_mul, div_eq_mul_inv] } end /-- The real-valued function sending `x ∈ R` to the limit of `(f (x * c^n))/((f c)^n)`. -/ def seminorm_from_const' (x : R) : ℝ := classical.some (real.tendsto_of_is_bounded_antitone (seminorm_from_const_is_bounded c f x) (seminorm_from_const_seq_antitone hf1 hc hpm x)) /-- We prove that `seminorm_from_const' hf1 hc hpm x` is the limit of the sequence `seminorm_from_const_seq c f x` as `n` tends to infinity. -/ lemma seminorm_from_const_is_limit (x : R) : filter.tendsto ((seminorm_from_const_seq c f x)) filter.at_top (𝓝 (seminorm_from_const' hf1 hc hpm x)) := classical.some_spec (real.tendsto_of_is_bounded_antitone (seminorm_from_const_is_bounded c f x) (seminorm_from_const_seq_antitone hf1 hc hpm x)) /-- `seminorm_from_const' hf1 hc hpm 0 = 0`. -/ lemma seminorm_from_const_zero : seminorm_from_const' hf1 hc hpm 0 = 0 := tendsto_nhds_unique (seminorm_from_const_is_limit hf1 hc hpm 0) (by simpa [seminorm_from_const_seq_zero c (map_zero _)] using tendsto_const_nhds) /-- `seminorm_from_const' hf1 hc hpm 1 = 1`. -/ lemma seminorm_from_const_is_norm_one_class : seminorm_from_const' hf1 hc hpm 1 = 1 := begin apply tendsto_nhds_unique_of_eventually_eq (seminorm_from_const_is_limit hf1 hc hpm 1) tendsto_const_nhds, simp only [filter.eventually_eq, filter.eventually_at_top, ge_iff_le], exact ⟨1, seminorm_from_const_seq_one hc hpm⟩, end /-- `seminorm_from_const' hf1 hc hpm` is submultiplicative. -/ lemma seminorm_from_const_mul (x y : R) : seminorm_from_const' hf1 hc hpm (x * y) ≤ seminorm_from_const' hf1 hc hpm x * seminorm_from_const' hf1 hc hpm y := begin have hlim : filter.tendsto (λ n, seminorm_from_const_seq c f (x * y) (2 *n)) filter.at_top (𝓝 (seminorm_from_const' hf1 hc hpm (x * y) )), { refine filter.tendsto.comp (seminorm_from_const_is_limit hf1 hc hpm (x * y)) _, apply filter.tendsto_at_top_at_top_of_monotone, { intros n m hnm, simp only [mul_le_mul_left, nat.succ_pos', hnm], }, { rintro n, use n, linarith, }}, apply le_of_tendsto_of_tendsto' hlim (filter.tendsto.mul (seminorm_from_const_is_limit hf1 hc hpm x) (seminorm_from_const_is_limit hf1 hc hpm y)), intro n, simp only [seminorm_from_const_seq], rw [div_mul_div_comm, ← pow_add, two_mul, div_le_div_right (pow_pos (lt_of_le_of_ne (map_nonneg f _) hc) _), pow_add, ← mul_assoc, mul_comm (x * y), ← mul_assoc, mul_assoc, mul_comm (c^n)], exact map_mul_le_mul f (x * c ^ n) (y * c ^ n), end /-- `seminorm_from_const' hf1 hc hpm` is invariant under negation of `x`. 
-/ lemma seminorm_from_const_neg (x : R) : seminorm_from_const' hf1 hc hpm (-x) = seminorm_from_const' hf1 hc hpm x := begin apply tendsto_nhds_unique_of_eventually_eq (seminorm_from_const_is_limit hf1 hc hpm (-x)) (seminorm_from_const_is_limit hf1 hc hpm x), simp only [filter.eventually_eq, filter.eventually_at_top], use 0, intros n hn, simp only [seminorm_from_const_seq], rw [neg_mul, map_neg_eq_map], end /-- `seminorm_from_const' hf1 hc hpm` satisfies the triangle inequality. -/ lemma seminorm_from_const_add (x y : R) : seminorm_from_const' hf1 hc hpm (x + y) ≤ seminorm_from_const' hf1 hc hpm x + seminorm_from_const' hf1 hc hpm y := begin apply le_of_tendsto_of_tendsto' (seminorm_from_const_is_limit hf1 hc hpm (x + y)) (filter.tendsto.add (seminorm_from_const_is_limit hf1 hc hpm x) (seminorm_from_const_is_limit hf1 hc hpm y)), intro n, have h_add : f ((x + y) * c ^ n) ≤ (f (x * c ^ n)) + (f (y * c ^ n)), { rw add_mul, exact map_add_le_add f _ _ }, simp only [seminorm_from_const_seq], rw div_add_div_same, exact (div_le_div_right (pow_pos (lt_of_le_of_ne (map_nonneg f _) hc) _)).mpr h_add, end /-- The function `seminorm_from_const` is a `ring_seminorm` on `R`. -/ def seminorm_from_const : ring_seminorm R := { to_fun := seminorm_from_const' hf1 hc hpm, map_zero' := seminorm_from_const_zero hf1 hc hpm, add_le' := seminorm_from_const_add hf1 hc hpm, neg' := seminorm_from_const_neg hf1 hc hpm, mul_le' := seminorm_from_const_mul hf1 hc hpm } lemma seminorm_from_const_def (x : R) : seminorm_from_const hf1 hc hpm x = seminorm_from_const' hf1 hc hpm x := rfl /-- `seminorm_from_const' hf1 hc hpm 1 ≤ 1`. -/ lemma seminorm_from_const_is_norm_le_one_class : seminorm_from_const' hf1 hc hpm 1 ≤ 1 := le_of_eq (seminorm_from_const_is_norm_one_class hf1 hc hpm) /-- The function `seminorm_from_const' hf1 hc hpm` is nonarchimedean. -/ lemma seminorm_from_const_is_nonarchimedean (hna : is_nonarchimedean f) : is_nonarchimedean (seminorm_from_const' hf1 hc hpm) := begin intros x y, apply le_of_tendsto_of_tendsto' (seminorm_from_const_is_limit hf1 hc hpm (x + y)) (filter.tendsto.max (seminorm_from_const_is_limit hf1 hc hpm x) (seminorm_from_const_is_limit hf1 hc hpm y)), intro n, have hmax : f ((x + y) * c ^ n) ≤ max (f (x * c ^ n)) (f (y * c ^ n)), { rw add_mul, exact hna _ _ }, rw le_max_iff at hmax ⊢, cases hmax; [left, right]; exact (div_le_div_right (pow_pos (lt_of_le_of_ne (map_nonneg f c) hc) _)).mpr hmax, end /-- The function `seminorm_from_const' hf1 hc hpm` is power-multiplicative. -/ lemma seminorm_from_const_is_pow_mul : is_pow_mul (seminorm_from_const' hf1 hc hpm) := begin intros x m hm, simp only [seminorm_from_const'], have hpow := filter.tendsto.pow (seminorm_from_const_is_limit hf1 hc hpm x) m, have hlim : filter.tendsto (λ n, seminorm_from_const_seq c f (x^m) (m*n)) filter.at_top (𝓝 (seminorm_from_const' hf1 hc hpm (x^m) )), { refine filter.tendsto.comp (seminorm_from_const_is_limit hf1 hc hpm (x^m)) _, apply filter.tendsto_at_top_at_top_of_monotone, { intros n k hnk, exact mul_le_mul_left' hnk m, }, { rintro n, use n, exact le_mul_of_one_le_left' hm, }}, apply tendsto_nhds_unique hlim, convert filter.tendsto.pow (seminorm_from_const_is_limit hf1 hc hpm x) m, ext n, simp only [seminorm_from_const_seq], rw [div_pow, ← hpm _ hm, ← pow_mul, mul_pow, ← pow_mul, mul_comm m n], end /-- The function `seminorm_from_const' hf1 hc hpm` is bounded above by `x`. 
-/ lemma seminorm_from_const_le_seminorm (x : R) : seminorm_from_const' hf1 hc hpm x ≤ f x := begin apply le_of_tendsto (seminorm_from_const_is_limit hf1 hc hpm x), simp only [filter.eventually_at_top, ge_iff_le], use 1, rintros n hn, apply le_trans ((div_le_div_right (pow_pos (lt_of_le_of_ne (map_nonneg f c) hc) _)).mpr (map_mul_le_mul _ _ _)), rw [hpm c hn, mul_div_assoc, div_self (pow_ne_zero n hc.symm), mul_one], end /-- If `x : R` is multiplicative for `f`, then `seminorm_from_const' hf1 hc hpm x = f x`. -/ lemma seminorm_from_const_apply_of_is_mul {x : R} (hx : ∀ y : R, f (x * y) = f x * f y) : seminorm_from_const' hf1 hc hpm x = f x := begin have hlim : filter.tendsto (seminorm_from_const_seq c f x) filter.at_top (𝓝 (f x)), { have hseq : seminorm_from_const_seq c f x = λ n, f x, { ext n, by_cases hn : n = 0, { simp only [seminorm_from_const_seq], rw [hn, pow_zero, pow_zero, mul_one, div_one], }, { simp only [seminorm_from_const_seq], rw [hx (c ^n), hpm _ (nat.one_le_iff_ne_zero.mpr hn), mul_div_assoc, div_self (pow_ne_zero n hc.symm), mul_one], }}, rw hseq, exact tendsto_const_nhds }, exact tendsto_nhds_unique (seminorm_from_const_is_limit hf1 hc hpm x) hlim, end /-- If `x : R` is multiplicative for `f`, then it is multiplicative for `seminorm_from_const' hf1 hc hpm`. -/ lemma seminorm_from_const_is_mul_of_is_mul {x : R} (hx : ∀ y : R, f (x * y) = f x * f y) (y : R) : seminorm_from_const' hf1 hc hpm (x * y) = seminorm_from_const' hf1 hc hpm x * seminorm_from_const' hf1 hc hpm y := begin have hlim : filter.tendsto (seminorm_from_const_seq c f (x * y)) filter.at_top (𝓝 (seminorm_from_const' hf1 hc hpm x * seminorm_from_const' hf1 hc hpm y)), { rw seminorm_from_const_apply_of_is_mul hf1 hc hpm hx, have hseq : seminorm_from_const_seq c f (x * y) = λ n, f x * seminorm_from_const_seq c f y n, { ext n, simp only [seminorm_from_const_seq], rw [mul_assoc, hx, mul_div_assoc], }, simpa [hseq] using filter.tendsto.const_mul _(seminorm_from_const_is_limit hf1 hc hpm y) }, exact tendsto_nhds_unique (seminorm_from_const_is_limit hf1 hc hpm (x * y)) hlim, end /-- `seminorm_from_const' hf1 hc hpm c = f c`. -/ lemma seminorm_from_const_apply_c : seminorm_from_const' hf1 hc hpm c = f c := begin have hlim : filter.tendsto (seminorm_from_const_seq c f c) filter.at_top (𝓝 (f c)), { have hseq : seminorm_from_const_seq c f c = λ n, f c, { ext n, simp only [seminorm_from_const_seq], rw [← pow_succ, hpm _ le_add_self, pow_succ, mul_div_assoc, div_self (pow_ne_zero n hc.symm), mul_one], }, rw hseq, exact tendsto_const_nhds }, exact tendsto_nhds_unique (seminorm_from_const_is_limit hf1 hc hpm c) hlim, end /-- For every `x : R`, `seminorm_from_const' hf1 hc hpm (c * x)` equals the product `seminorm_from_const' hf1 hc hpm c * seminorm_from_const' hf1 hc hpm x`. 
-/ lemma seminorm_from_const_c_is_mul (x : R) : seminorm_from_const' hf1 hc hpm (c * x) = seminorm_from_const' hf1 hc hpm c * seminorm_from_const' hf1 hc hpm x := begin have hlim : filter.tendsto (λ n, seminorm_from_const_seq c f x (n + 1)) filter.at_top (𝓝 (seminorm_from_const' hf1 hc hpm x)), { refine filter.tendsto.comp (seminorm_from_const_is_limit hf1 hc hpm x) _, apply filter.tendsto_at_top_at_top_of_monotone, { intros n m hnm, exact add_le_add_right hnm 1, }, { rintro n, use n, linarith, }}, rw seminorm_from_const_apply_c hf1 hc hpm, apply tendsto_nhds_unique (seminorm_from_const_is_limit hf1 hc hpm (c * x)), have hterm : seminorm_from_const_seq c f (c * x) = (λ n, f c * (seminorm_from_const_seq c f x (n + 1))), { simp only [seminorm_from_const_seq], ext n, rw [mul_comm c, pow_succ, pow_succ, mul_div, div_eq_mul_inv _ (f c * f c ^ n), mul_inv, ← mul_assoc, mul_comm (f c), mul_assoc _ (f c), mul_inv_cancel hc.symm, mul_one, mul_assoc, div_eq_mul_inv] }, simpa [hterm] using filter.tendsto.mul tendsto_const_nhds hlim, end end ring section field variables {K : Type*} [field K] /-- If `K` is a field, the function `seminorm_from_const` is a `ring_norm` on `K`. -/ def seminorm_from_const_ring_norm_of_field {k : K} {g : ring_seminorm K} (hg1 : g 1 ≤ 1) (hg_k : g k ≠ 0) (hg_pm : is_pow_mul g) : ring_norm K := (seminorm_from_const hg1 hg_k.symm hg_pm).to_ring_norm (ring_seminorm.ne_zero_iff.mpr⟨k, by simpa [seminorm_from_const_def, seminorm_from_const_apply_c] using hg_k⟩) lemma seminorm_from_const_ring_norm_of_field_def {k : K} {g : ring_seminorm K} (hg1 : g 1 ≤ 1) (hg_k : g k ≠ 0) (hg_pm : is_pow_mul g) (x : K) : seminorm_from_const_ring_norm_of_field hg1 hg_k hg_pm x = seminorm_from_const' hg1 hg_k.symm hg_pm x := rfl end field
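In informal notation, the construction developed in the Lean file above amounts to the following (a plain restatement of the definitions, assuming `f` is a ring seminorm with `f 1 ≤ 1`, `f c ≠ 0`, and `f` power-multiplicative):

\[ \mathrm{seminorm\_from\_const}(x) \;=\; \lim_{n\to\infty} \frac{f(x c^n)}{f(c)^n}. \]

The limit exists because the sequence is nonnegative and antitone: power-multiplicativity gives \(f(c^n) = f(c)^n\), so submultiplicativity yields

\[ \frac{f(x c^{n+1})}{f(c)^{n+1}} \;\le\; \frac{f(x c^n)\,f(c)}{f(c)^{n+1}} \;=\; \frac{f(x c^n)}{f(c)^n}, \]

and a bounded antitone real sequence converges (`real.tendsto_of_is_bounded_antitone` in the code).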
The " dog stinkhorn " ( Mutinus caninus ) is smaller , has a distinct oval or spindle @-@ shaped tip on a slender stem and lacks the bright coloring of M. elegans ; it has less of the stalk covered by gleba . The portion of the stalk below the spore mass is pitted in M. caninus , compared to " pebbly " in M. elegans . M. caninus is also less common than M. elegans . Mutinus <unk> is similar in size and shape , except it does not have a distinct color demarcation between the upper and lower parts of the stalk ; instead , the entire stem shows red pigments . The stalk of M. ravenelii is less tapered than M. elegans , and it has a clearly differentiated swollen head .
/- Copyright (c) 2018 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Simon Hudon, Patrick Massot ! This file was ported from Lean 3 source module algebra.module.pi ! leanprover-community/mathlib commit a437a2499163d85d670479f69f625f461cc5fef9 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Algebra.Module.Basic import Mathlib.Algebra.Regular.SMul import Mathlib.Algebra.Ring.Pi import Mathlib.GroupTheory.GroupAction.Pi /-! # Pi instances for modules This file defines instances for module and related structures on Pi Types -/ universe u v w variable {I : Type u} -- The indexing type variable {f : I → Type v} -- The family of types already equipped with instances variable (x y : ∀ i, f i) (i : I) namespace Pi theorem IsSMulRegular.pi {α : Type _} [∀ i, SMul α <| f i] {k : α} (hk : ∀ i, IsSMulRegular (f i) k) : IsSMulRegular (∀ i, f i) k := fun _ _ h => funext fun i => hk i (congr_fun h i : _) #align is_smul_regular.pi Pi.IsSMulRegular.pi instance smulWithZero (α) [Zero α] [∀ i, Zero (f i)] [∀ i, SMulWithZero α (f i)] : SMulWithZero α (∀ i, f i) := { Pi.instSMul with smul_zero := fun _ => funext fun _ => smul_zero _ zero_smul := fun _ => funext fun _ => zero_smul _ _ } #align pi.smul_with_zero Pi.smulWithZero instance smulWithZero' {g : I → Type _} [∀ i, Zero (g i)] [∀ i, Zero (f i)] [∀ i, SMulWithZero (g i) (f i)] : SMulWithZero (∀ i, g i) (∀ i, f i) := { Pi.smul' with smul_zero := fun _ => funext fun _ => smul_zero _ zero_smul := fun _ => funext fun _ => zero_smul _ _ } #align pi.smul_with_zero' Pi.smulWithZero' instance mulActionWithZero (α) [MonoidWithZero α] [∀ i, Zero (f i)] [∀ i, MulActionWithZero α (f i)] : MulActionWithZero α (∀ i, f i) := { Pi.mulAction _, Pi.smulWithZero _ with } #align pi.mul_action_with_zero Pi.mulActionWithZero instance mulActionWithZero' {g : I → Type _} [∀ i, MonoidWithZero (g i)] [∀ i, Zero (f i)] [∀ i, MulActionWithZero (g i) (f i)] : MulActionWithZero (∀ i, g i) (∀ i, f i) := { Pi.mulAction', Pi.smulWithZero' with } #align pi.mul_action_with_zero' Pi.mulActionWithZero' variable (I f) instance module (α) {r : Semiring α} {m : ∀ i, AddCommMonoid <| f i} [∀ i, Module α <| f i] : @Module α (∀ i : I, f i) r (@Pi.addCommMonoid I f m) := { Pi.distribMulAction _ with add_smul := fun _ _ _ => funext fun _ => add_smul _ _ _ zero_smul := fun _ => funext fun _ => zero_smul α _ } #align pi.module Pi.module /- Extra instance to short-circuit type class resolution. For unknown reasons, this is necessary for certain inference problems. E.g., for this to succeed: ```lean example (β X : Type _) [NormedAddCommGroup β] [NormedSpace ℝ β] : Module ℝ (X → β) := inferInstance ``` See: https://leanprover.zulipchat.com/#narrow/stream/113488-general/topic/Typeclass.20resolution.20under.20binders/near/281296989 -/ /-- A special case of `Pi.module` for non-dependent types. Lean struggles to elaborate definitions elsewhere in the library without this. -/ instance Function.module (α β : Type _) [Semiring α] [AddCommMonoid β] [Module α β] : Module α (I → β) := Pi.module _ _ _ #align function.module Pi.Function.module variable {I f} instance module' {g : I → Type _} {r : ∀ i, Semiring (f i)} {m : ∀ i, AddCommMonoid (g i)} [∀ i, Module (f i) (g i)] : Module (∀ i, f i) (∀ i, g i) where add_smul := by intros ext1 apply add_smul zero_smul := by intros ext1 -- Porting note: not sure why `apply zero_smul` fails here. 
rw [zero_smul] #align pi.module' Pi.module' instance noZeroSMulDivisors (α) {_ : Semiring α} {_ : ∀ i, AddCommMonoid <| f i} [∀ i, Module α <| f i] [∀ i, NoZeroSMulDivisors α <| f i] : NoZeroSMulDivisors α (∀ i : I, f i) := ⟨fun {_ _} h => or_iff_not_imp_left.mpr fun hc => funext fun i => (smul_eq_zero.mp (congr_fun h i)).resolve_left hc⟩ /-- A special case of `Pi.noZeroSMulDivisors` for non-dependent types. Lean struggles to synthesize this instance by itself elsewhere in the library. -/ instance _root_.Function.noZeroSMulDivisors {ι α β : Type _} {_ : Semiring α} {_ : AddCommMonoid β} [Module α β] [NoZeroSMulDivisors α β] : NoZeroSMulDivisors α (ι → β) := Pi.noZeroSMulDivisors _ #align function.no_zero_smul_divisors Function.noZeroSMulDivisors end Pi
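As a small usage sketch of the instances above (a hypothetical check, assuming this file is available as Mathlib.Algebra.Module.Pi), instance search should now close goals such as:

import Mathlib.Algebra.Module.Pi

-- Minimal sketch: given a module structure on `M`, the `Pi.module` /
-- `Pi.Function.module` instances defined above let `inferInstance`
-- find a module structure on plain function types.
example {ι R M : Type} [Semiring R] [AddCommMonoid M] [Module R M] :
    Module R (ι → M) :=
  inferInstance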
data Tree elem = Empty | Node (Tree elem) elem (Tree elem) Functor Tree where map func Empty = Empty map func (Node left e right) = Node (map func left) (func e) (map func right) Foldable Tree where foldr func acc Empty = acc foldr func acc (Node left e right) = let leftfold = foldr func acc left rightfold = foldr func leftfold right in func e rightfold
[STATEMENT] lemma homeomorphism_injective_closed_map: assumes contf: "continuous_on S f" and imf: "f ` S = T" and injf: "inj_on f S" and oo: "\<And>U. closedin (top_of_set S) U \<Longrightarrow> closedin (top_of_set T) (f ` U)" obtains g where "homeomorphism S T f g" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>g. homeomorphism S T f g \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] proof [PROOF STATE] proof (state) goal (1 subgoal): 1. (\<And>g. homeomorphism S T f g \<Longrightarrow> thesis) \<Longrightarrow> homeomorphism S T f ?g2 [PROOF STEP] have "continuous_on T (inv_into S f)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. continuous_on T (inv_into S f) [PROOF STEP] by (metis contf continuous_on_inverse_closed_map imf injf inv_into_f_f oo) [PROOF STATE] proof (state) this: continuous_on T (inv_into S f) goal (1 subgoal): 1. (\<And>g. homeomorphism S T f g \<Longrightarrow> thesis) \<Longrightarrow> homeomorphism S T f ?g2 [PROOF STEP] with imf injf contf [PROOF STATE] proof (chain) picking this: f ` S = T inj_on f S continuous_on S f continuous_on T (inv_into S f) [PROOF STEP] show "homeomorphism S T f (inv_into S f)" [PROOF STATE] proof (prove) using this: f ` S = T inj_on f S continuous_on S f continuous_on T (inv_into S f) goal (1 subgoal): 1. homeomorphism S T f (inv_into S f) [PROOF STEP] by (auto simp: homeomorphism_def) [PROOF STATE] proof (state) this: homeomorphism S T f (inv_into S f) goal: No subgoals! [PROOF STEP] qed
theory HoareLogicTutorial imports Main "~~/src/HOL/Hoare/Hoare_Logic" begin (* The minimum of two integers x and y: *) lemma Min: "VARS (z :: int) {True} IF x \<le> y THEN z := x ELSE z := y FI { z = min x y }" apply vcg apply (simp add: min_def) done (* Iteratively copy an integer variable x to y: *) lemma Copy: "VARS (a :: int) y {0 \<le> x} a := x; y := 0; WHILE a \<noteq> 0 INV { x=y+a } DO y := y + 1 ; a := a - 1 OD {x = y}" apply vcg_simp done (* Multiplication *) lemma Multipl: "VARS (z :: int) i {0 \<le> y} i := y; z := 0; WHILE i \<noteq> 0 INV { z = (y - i) * x } DO z := z + x; i := i - 1 OD {z = x * y}" apply vcg apply (auto simp add: algebra_simps) done (* Iterative multiplication through addition: *) lemma Multi: "VARS (a :: int) z {0 \<le> y} a := 0; z := 0; WHILE a \<noteq> y INV {z = x * a} DO z := z + x ; a := a + 1 OD {z = x * y}" (* "Replace Inv with your invariant." *) apply vcg_simp apply (erule conjE) apply (simp add: distrib_left) done (* A factorial algorithm: *) lemma DownFact: "VARS (z :: nat) (y::nat) {True} z := x; y := 1; WHILE z > 0 INV { Inv } DO y := y * z; z := z - 1 OD {y = fact x}" (* "Replace Inv with your invariant." *) oops (* Integer division of x by y: *) lemma Div: "VARS (r :: int) d {y \<noteq> 0} r := x; d := 0; WHILE y \<le> r INV { Inv } DO r := r - y; d := d + 1 OD { Postcondition }" (* "Replace Inv with your invariant." *) (* "Replace Postcondition with your postcondition." *) oops end
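For the factorial exercise left open above ("Replace Inv with your invariant"), one invariant that works — offered here as a hint, not as part of the original tutorial file — is

\[ y \cdot z! \;=\; x!. \]

It holds on entry (\(y = 1\), \(z = x\)), is preserved by the body (after \(y := y \cdot z;\ z := z - 1\) we get \(yz \cdot (z-1)! = y \cdot z!\)), and on exit \(z = 0\) gives \(y = x!\). For the division exercise, \(x = d \cdot y + r\) is a suitable invariant; the exit condition \(\neg(y \le r)\) then yields the postcondition \(x = d \cdot y + r \wedge r < y\).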
business law services | The Law Office of Wesley Scott Jones, P.C. Forming a Corporation to start or operate a business in North Carolina can be beneficial to the business owners in two important ways: 1) it can help prevent the business owner from paying too much in federal and state income taxes and 2) it can help protect the business owners from incurring Personal Liability for acts carried out by the business. So what are the main steps in forming a North Carolina Corporation? You must select a Business Name and make sure that name is available for use in North Carolina. You must file proper and complete Articles of Incorporation with the North Carolina Secretary of State. Before filing the Articles of Incorporation, you must determine the number of shares the corporation will be authorized to issue (Issued Shares), the class of shares to be issued, who will serve as the Registered Agent, what will be the Registered Agent Address, and who will serve as the Incorporator. After the Articles of Incorporation have been filed, you must hold an Organizational Meeting. At the Organizational Meeting, you must elect Directors, appoint Officers, adopt a set of Bylaws, adopt a Corporate Seal, and issue Stock Certificates to the business owners (called Shareholders). Before operating the business, you will want to apply for a Federal Identification Number (EIN Number) and State Identification Number for banking and tax purposes. If you qualify and wish to be treated as an S-Corporation for tax purposes, you must complete and file IRS Form 2553. Other Considerations: Should you file an Assumed Name Certificate? Do you need a Privilege License to operate your business? Do the business owners need a Shareholders Agreement? When should you file your first Annual Report with the North Carolina Secretary of State’s office? What Corporate Formalities should you follow to keep your corporation valid as a legal entity? If you are planning to form a North Carolina Corporation, the foregoing topics are just a few that a business owner must successfully navigate through to begin operating a business. If you need help or advice, call an experienced Business Attorney. Call Wesley Scott Jones now at 910-256-5800 for a free telephone consultation. Wesley Jones is a Business Lawyer in Wilmington, North Carolina, serving all of Southeastern North Carolina including New Hanover County (e.g. Wilmington, Kure Beach, Wrightsville Beach, Carolina Beach and the areas of Ogden, Masonboro, Myrtle Grove, Landfall, and Mayfair), all of Pender County (e.g. Burgaw, Surf City, Hampstead and Topsail Beach) and all of Brunswick County (e.g. Bald Head Island, Bolivia, Calabash, Leland, Shallotte, Southport, Saint James, Ocean Isle, and Oak Island). 
The following are just a few of the Business Services that Wesley Scott Jones provides to clients: Forming new Corporations and Limited Liability Companies, drafting Articles of Incorporation, Articles of Organization, Shareholders Agreements, Organizational Minutes, Operating Agreements, Annual Meeting Minutes, Bylaws, Annual Reports, Assignments, Board of Director’s Meeting Minutes, Business Entity Startup and Formation, Representing clients who are Buying or Selling a Business, Business Dissolution and Liquidation, Contract Review and Drafting (including Non-Compete or Noncompetition Agreements, Confidentiality Agreements, and Nondisclosure Agreements), Due Diligence Research, Leases, Licensing, Non-Profit Corporations, Non-Solicitation Agreements, Professional Malpractice, Promissory Notes, and Regulatory Compliance. The Parties will need a Bill of Sale and Assignment of Property. The Buyer should determine if it needs to form a new Business Entity with which to buy the Seller’s assets. The Buyer should consider whether it wants or needs a Noncompetition Agreement with the Seller and/or the Seller’s Shareholders/Members, and/or Key Employees. If Intellectual Property is involved, the parties will need an Assignment of Trademarks, Patents, and/or Copyrights, as applicable. If the Seller’s website domain, email addresses, and/or telephone numbers are involved, the parties will need an Assignment Agreement to cover these items. The parties will need an Assignment of Contracts, Leases, and/or Liabilities as applicable. If the Buyer wants the Seller’s owners and/or Key Employees to assist Buyer with Buyer’s new business for a period of time after Closing, the Buyer will need an Employment/Consulting Agreement with these parties. If the Buyer is not paying cash at Closing, the Seller will at a minimum want a Promissory Note signed by the Buyer, and a Seller should consider whether additional security is needed in the form of a Personal Guarantee Agreement signed by the Buyer’s individual Owners, a Security Agreement on transferred personal property, or a Deed of Trust on transferred Real Property. The Buyer will want appropriate Approvals by the Seller’s Stockholders/Members and Board of Directors/Members. The Seller will want appropriate Approvals by the Buyer’s Stockholders/Members and Board of Directors/Members. If you are planning to Purchase the Assets of an existing business, the foregoing documents are just a few that a potential Buyer and Seller may want to include in an Asset Purchase Agreement. If you need help or advice, call an experienced Business Attorney. Call Wesley Scott Jones now at 910-256-5800 for a free telephone consultation. 
The following are just a few of the Business Services that Wesley Scott Jones provides to clients: Annual Meeting Minutes, Annual Reports, Assignments, Board of Director’s Meeting Minutes, Business Entity Startup and Formation, Buying or Selling a Business, Business Dissolution and Liquidation, Bylaws, Contract Review and Drafting (including Non-Compete or Noncompetition Agreements, Confidentiality Agreements, and Nondisclosure Agreements), Due Diligence Research, Drafting Shareholder and Operating Agreements, Drafting Shareholders and Directors Meeting Minutes, Leases, Licensing, Limited Liability Company Formation, LLCs, Meeting Minutes, Non-Profit Corporations, Non-Solicitation Agreements, Organizational Minutes, Partnership Formation, Professional Malpractice, Promissory Notes, Regulations, Shareholder Agreements, Shareholder’s Meeting Minutes, and Starting and Operating a Franchisee Business. Do you need a Nondisclosure Agreement with the Seller? Is there an existing Broker/Finder Agreement that affects the transaction? Have you determined all of the Related Parties on which due diligence should be performed (Seller, Stockholders/Members, Subsidiaries, or other Affiliated Entities)? What is the Lien Status of the property to be conveyed? Can the Seller produce a Certificate of Good Standing from the Secretary of State? Are there any Third Party Consents required by Contracts with third parties that are to be assigned by Seller and assumed by Buyer? Have you reviewed the Seller’s Financial Information? If you are planning to Purchase the Assets of an existing business, these are just a few of the considerations that must be thought through and preferably verified prior to Closing on an Asset Purchase transaction in North Carolina. If you need help, call an experienced Business Attorney for advice. Call Wesley Jones now at 910-256-5800 for a free telephone consultation. If you are buying a new business, either by an Asset Purchase or Stock Purchase, you should consider whether you should require the Seller and its owners and affiliates to sign a Non-Compete Agreement. When you buy an existing business, part of what you are buying is the established know-how and goodwill that the Seller has accumulated through the years that makes the business successful. You will be making an enormous investment into this business, so you probably want to make sure the Seller, to whom you just paid a lot of money, will not set up shop right across the street in competition with you. Even if you are starting a new business or operating an existing business, you do not want Key Employees to steal your proprietary secrets and know-how that you have perfected over the years. Way too often, employees will work for an employer for a number of years, acquire their customer lists, pricing guides, and other business operation methods, only to decide that they can do it better. Why work for the owner when you can be the owner? Most businesses can benefit from having Key Employees sign a Non-Compete Agreement. The terms of a Noncompetition Agreement will vary based upon your particular situation. However, all Noncompetition Agreements in North Carolina must be supported by adequate and New Consideration and they must be reasonable in scope as to Geographical and Time restrictions. 
If you are Buying a New Business and want to protect yourself from the Seller competing directly against you and the new business, OR if you have a New or Current Business where key employees have access to all of your proprietary business information, call an experienced lawyer to help you through the process. Call Wesley Jones now at 910-256-5800 for a free telephone consultation. Wesley Jones is a Business Law Attorney serving all of Southeastern North Carolina including New Hanover County (e.g. Wilmington, Kure Beach, Wrightsville Beach, Carolina Beach and the areas of Ogden, Masonboro, Myrtle Grove, Landfall, and Mayfair), all of Pender County (e.g. Burgaw, Surf City, Hampstead and Topsail Beach) and all of Brunswick County (e.g. Bald Head Island, Bolivia, Calabash, Leland, Shallotte, Southport, Saint James, Ocean Isle, and Oak Island). When taking the leap to start a business, it pays to have a few skilled professionals behind you. Although you may be a natural with managing finances and employees along with marketing and delivering the goods or services the firm will provide, there’s true peace of mind when all of the bases are truly covered. Local business attorney Wesley Scott Jones has helped hundreds of clients in the greater Wilmington area develop a blueprint for success. As with most endeavors, creating a foolproof plan is key. With that idea in mind, please consider these tips for streamlining a business startup. Hire an attorney, such as Wes, who is well versed in all aspects of business and contract law. Hire a local accountant who is also familiar with the territory. As a business owner, you will discover that in the long run, having experts on your team in those two disciplines is priceless. Please note that the above tips are merely suggestions and each company will have its own list of areas to address. Because every business is unique and laws and financial guidelines vary state by state and even county by county, having skilled professionals at your disposal is just the responsible thing to do. Attorney Wesley Scott Jones would be happy to discuss more about the essential steps you’ll need to consider for your specific company. To schedule an appointment for a free consultation, please give his office a call now at 910-256-5800. Wesley Jones is a Business Lawyer serving all of Southeastern North Carolina including New Hanover County (e.g. Wilmington, Kure Beach, Wrightsville Beach, Carolina Beach and the areas of Ogden, Masonboro, Myrtle Grove, Landfall, and Mayfair), all of Pender County (e.g. Burgaw, Surf City, Hampstead and Topsail Beach) and all of Brunswick County (e.g. Bald Head Island, Bolivia, Calabash, Leland, Shallotte, Southport, Saint James, Ocean Isle, and Oak Island).
open import Agda.Builtin.Nat open import Agda.Builtin.Bool test1 : Nat → Nat test1 zero = 0 test1 (suc zero) = 1 test1 (suc n) = {!n!} test2 : Nat → Nat → Nat test2 zero zero = zero test2 zero (suc y) = y test2 x y = {!x!} test3 : Bool → Bool → Bool → Bool test3 true true true = true test3 x true false = x test3 false y true = y test3 true false z = z test3 x y z = {!x y z!}
(* Author: Tobias Nipkow, TU Muenchen *) section \<open>Sum and product over lists\<close> theory Groups_List imports List begin locale monoid_list = monoid begin definition F :: "'a list \<Rightarrow> 'a" where eq_foldr [code]: "F xs = foldr f xs \<^bold>1" lemma Nil [simp]: "F [] = \<^bold>1" by (simp add: eq_foldr) lemma Cons [simp]: "F (x # xs) = x \<^bold>* F xs" by (simp add: eq_foldr) lemma append [simp]: "F (xs @ ys) = F xs \<^bold>* F ys" by (induct xs) (simp_all add: assoc) end locale comm_monoid_list = comm_monoid + monoid_list begin lemma rev [simp]: "F (rev xs) = F xs" by (simp add: eq_foldr foldr_fold fold_rev fun_eq_iff assoc left_commute) end locale comm_monoid_list_set = list: comm_monoid_list + set: comm_monoid_set begin lemma distinct_set_conv_list: "distinct xs \<Longrightarrow> set.F g (set xs) = list.F (map g xs)" by (induct xs) simp_all lemma set_conv_list [code]: "set.F g (set xs) = list.F (map g (remdups xs))" by (simp add: distinct_set_conv_list [symmetric]) end subsection \<open>List summation\<close> context monoid_add begin sublocale sum_list: monoid_list plus 0 defines sum_list = sum_list.F .. end context comm_monoid_add begin sublocale sum_list: comm_monoid_list plus 0 rewrites "monoid_list.F plus 0 = sum_list" proof - show "comm_monoid_list plus 0" .. then interpret sum_list: comm_monoid_list plus 0 . from sum_list_def show "monoid_list.F plus 0 = sum_list" by simp qed sublocale sum: comm_monoid_list_set plus 0 rewrites "monoid_list.F plus 0 = sum_list" and "comm_monoid_set.F plus 0 = sum" proof - show "comm_monoid_list_set plus 0" .. then interpret sum: comm_monoid_list_set plus 0 . from sum_list_def show "monoid_list.F plus 0 = sum_list" by simp from sum_def show "comm_monoid_set.F plus 0 = sum" by (auto intro: sym) qed end text \<open>Some syntactic sugar for summing a function over a list:\<close> syntax (ASCII) "_sum_list" :: "pttrn => 'a list => 'b => 'b" ("(3SUM _<-_. _)" [0, 51, 10] 10) syntax "_sum_list" :: "pttrn => 'a list => 'b => 'b" ("(3\<Sum>_\<leftarrow>_. _)" [0, 51, 10] 10) translations \<comment> \<open>Beware of argument permutation!\<close> "\<Sum>x\<leftarrow>xs. b" == "CONST sum_list (CONST map (\<lambda>x. b) xs)" context includes lifting_syntax begin lemma sum_list_transfer [transfer_rule]: "(list_all2 A ===> A) sum_list sum_list" if [transfer_rule]: "A 0 0" "(A ===> A ===> A) (+) (+)" unfolding sum_list.eq_foldr [abs_def] by transfer_prover end text \<open>TODO duplicates\<close> lemmas sum_list_simps = sum_list.Nil sum_list.Cons lemmas sum_list_append = sum_list.append lemmas sum_list_rev = sum_list.rev lemma (in monoid_add) fold_plus_sum_list_rev: "fold plus xs = plus (sum_list (rev xs))" proof fix x have "fold plus xs x = sum_list (rev xs @ [x])" by (simp add: foldr_conv_fold sum_list.eq_foldr) also have "\<dots> = sum_list (rev xs) + x" by simp finally show "fold plus xs x = sum_list (rev xs) + x" . 
qed lemma (in comm_monoid_add) sum_list_map_remove1: "x \<in> set xs \<Longrightarrow> sum_list (map f xs) = f x + sum_list (map f (remove1 x xs))" by (induct xs) (auto simp add: ac_simps) lemma (in monoid_add) size_list_conv_sum_list: "size_list f xs = sum_list (map f xs) + size xs" by (induct xs) auto lemma (in monoid_add) length_concat: "length (concat xss) = sum_list (map length xss)" by (induct xss) simp_all lemma (in monoid_add) length_product_lists: "length (product_lists xss) = foldr (*) (map length xss) 1" proof (induct xss) case (Cons xs xss) then show ?case by (induct xs) (auto simp: length_concat o_def) qed simp lemma (in monoid_add) sum_list_map_filter: assumes "\<And>x. x \<in> set xs \<Longrightarrow> \<not> P x \<Longrightarrow> f x = 0" shows "sum_list (map f (filter P xs)) = sum_list (map f xs)" using assms by (induct xs) auto lemma sum_list_filter_le_nat: fixes f :: "'a \<Rightarrow> nat" shows "sum_list (map f (filter P xs)) \<le> sum_list (map f xs)" by(induction xs; simp) lemma (in comm_monoid_add) distinct_sum_list_conv_Sum: "distinct xs \<Longrightarrow> sum_list xs = Sum (set xs)" by (induct xs) simp_all lemma sum_list_upt[simp]: "m \<le> n \<Longrightarrow> sum_list [m..<n] = \<Sum> {m..<n}" by(simp add: distinct_sum_list_conv_Sum) context ordered_comm_monoid_add begin lemma sum_list_nonneg: "(\<And>x. x \<in> set xs \<Longrightarrow> 0 \<le> x) \<Longrightarrow> 0 \<le> sum_list xs" by (induction xs) auto lemma sum_list_nonpos: "(\<And>x. x \<in> set xs \<Longrightarrow> x \<le> 0) \<Longrightarrow> sum_list xs \<le> 0" by (induction xs) (auto simp: add_nonpos_nonpos) lemma sum_list_nonneg_eq_0_iff: "(\<And>x. x \<in> set xs \<Longrightarrow> 0 \<le> x) \<Longrightarrow> sum_list xs = 0 \<longleftrightarrow> (\<forall>x\<in> set xs. x = 0)" by (induction xs) (simp_all add: add_nonneg_eq_0_iff sum_list_nonneg) end context canonically_ordered_monoid_add begin lemma sum_list_eq_0_iff [simp]: "sum_list ns = 0 \<longleftrightarrow> (\<forall>n \<in> set ns. n = 0)" by (simp add: sum_list_nonneg_eq_0_iff) lemma member_le_sum_list: "x \<in> set xs \<Longrightarrow> x \<le> sum_list xs" by (induction xs) (auto simp: add_increasing add_increasing2) lemma elem_le_sum_list: "k < size ns \<Longrightarrow> ns ! k \<le> sum_list (ns)" by (rule member_le_sum_list) simp end lemma (in ordered_cancel_comm_monoid_diff) sum_list_update: "k < size xs \<Longrightarrow> sum_list (xs[k := x]) = sum_list xs + x - xs ! k" apply(induction xs arbitrary:k) apply (auto simp: add_ac split: nat.split) apply(drule elem_le_sum_list) by (simp add: local.add_diff_assoc local.add_increasing) lemma (in monoid_add) sum_list_triv: "(\<Sum>x\<leftarrow>xs. r) = of_nat (length xs) * r" by (induct xs) (simp_all add: distrib_right) lemma (in monoid_add) sum_list_0 [simp]: "(\<Sum>x\<leftarrow>xs. 0) = 0" by (induct xs) (simp_all add: distrib_right) text\<open>For non-Abelian groups \<open>xs\<close> needs to be reversed on one side:\<close> lemma (in ab_group_add) uminus_sum_list_map: "- sum_list (map f xs) = sum_list (map (uminus \<circ> f) xs)" by (induct xs) simp_all lemma (in comm_monoid_add) sum_list_addf: "(\<Sum>x\<leftarrow>xs. f x + g x) = sum_list (map f xs) + sum_list (map g xs)" by (induct xs) (simp_all add: algebra_simps) lemma (in ab_group_add) sum_list_subtractf: "(\<Sum>x\<leftarrow>xs. f x - g x) = sum_list (map f xs) - sum_list (map g xs)" by (induct xs) (simp_all add: algebra_simps) lemma (in semiring_0) sum_list_const_mult: "(\<Sum>x\<leftarrow>xs. c * f x) = c * (\<Sum>x\<leftarrow>xs. 
f x)" by (induct xs) (simp_all add: algebra_simps) lemma (in semiring_0) sum_list_mult_const: "(\<Sum>x\<leftarrow>xs. f x * c) = (\<Sum>x\<leftarrow>xs. f x) * c" by (induct xs) (simp_all add: algebra_simps) lemma (in ordered_ab_group_add_abs) sum_list_abs: "\<bar>sum_list xs\<bar> \<le> sum_list (map abs xs)" by (induct xs) (simp_all add: order_trans [OF abs_triangle_ineq]) lemma sum_list_mono: fixes f g :: "'a \<Rightarrow> 'b::{monoid_add, ordered_ab_semigroup_add}" shows "(\<And>x. x \<in> set xs \<Longrightarrow> f x \<le> g x) \<Longrightarrow> (\<Sum>x\<leftarrow>xs. f x) \<le> (\<Sum>x\<leftarrow>xs. g x)" by (induct xs) (simp, simp add: add_mono) lemma sum_list_strict_mono: fixes f g :: "'a \<Rightarrow> 'b::{monoid_add, strict_ordered_ab_semigroup_add}" shows "\<lbrakk> xs \<noteq> []; \<And>x. x \<in> set xs \<Longrightarrow> f x < g x \<rbrakk> \<Longrightarrow> sum_list (map f xs) < sum_list (map g xs)" proof (induction xs) case Nil thus ?case by simp next case C: (Cons _ xs) show ?case proof (cases xs) case Nil thus ?thesis using C.prems by simp next case Cons thus ?thesis using C by(simp add: add_strict_mono) qed qed text \<open>A much more general version of this monotonicity lemma can be formulated with multisets and the multiset order\<close> lemma sum_list_mono2: fixes xs :: "'a ::ordered_comm_monoid_add list" shows "\<lbrakk> length xs = length ys; \<And>i. i < length xs \<longrightarrow> xs!i \<le> ys!i \<rbrakk> \<Longrightarrow> sum_list xs \<le> sum_list ys" apply(induction xs ys rule: list_induct2) by(auto simp: nth_Cons' less_Suc_eq_0_disj imp_ex add_mono) lemma (in monoid_add) sum_list_distinct_conv_sum_set: "distinct xs \<Longrightarrow> sum_list (map f xs) = sum f (set xs)" by (induct xs) simp_all lemma (in monoid_add) interv_sum_list_conv_sum_set_nat: "sum_list (map f [m..<n]) = sum f (set [m..<n])" by (simp add: sum_list_distinct_conv_sum_set) lemma (in monoid_add) interv_sum_list_conv_sum_set_int: "sum_list (map f [k..l]) = sum f (set [k..l])" by (simp add: sum_list_distinct_conv_sum_set) text \<open>General equivalence between \<^const>\<open>sum_list\<close> and \<^const>\<open>sum\<close>\<close> lemma (in monoid_add) sum_list_sum_nth: "sum_list xs = (\<Sum> i = 0 ..< length xs. xs ! i)" using interv_sum_list_conv_sum_set_nat [of "(!) xs" 0 "length xs"] by (simp add: map_nth) lemma sum_list_map_eq_sum_count: "sum_list (map f xs) = sum (\<lambda>x. count_list xs x * f x) (set xs)" proof(induction xs) case (Cons x xs) show ?case (is "?l = ?r") proof cases assume "x \<in> set xs" have "?l = f x + (\<Sum>x\<in>set xs. count_list xs x * f x)" by (simp add: Cons.IH) also have "set xs = insert x (set xs - {x})" using \<open>x \<in> set xs\<close>by blast also have "f x + (\<Sum>x\<in>insert x (set xs - {x}). count_list xs x * f x) = ?r" by (simp add: sum.insert_remove eq_commute) finally show ?thesis . next assume "x \<notin> set xs" hence "\<And>xa. xa \<in> set xs \<Longrightarrow> x \<noteq> xa" by blast thus ?thesis by (simp add: Cons.IH \<open>x \<notin> set xs\<close>) qed qed simp lemma sum_list_map_eq_sum_count2: assumes "set xs \<subseteq> X" "finite X" shows "sum_list (map f xs) = sum (\<lambda>x. count_list xs x * f x) X" proof- let ?F = "\<lambda>x. 
count_list xs x * f x" have "sum ?F X = sum ?F (set xs \<union> (X - set xs))" using Un_absorb1[OF assms(1)] by(simp) also have "\<dots> = sum ?F (set xs)" using assms(2) by(simp add: sum.union_disjoint[OF _ _ Diff_disjoint] del: Un_Diff_cancel) finally show ?thesis by(simp add:sum_list_map_eq_sum_count) qed lemma sum_list_replicate: "sum_list (replicate n c) = of_nat n * c" by(induction n)(auto simp add: distrib_right) lemma sum_list_nonneg: "(\<And>x. x \<in> set xs \<Longrightarrow> (x :: 'a :: ordered_comm_monoid_add) \<ge> 0) \<Longrightarrow> sum_list xs \<ge> 0" by (induction xs) simp_all lemma sum_list_Suc: "sum_list (map (\<lambda>x. Suc(f x)) xs) = sum_list (map f xs) + length xs" by(induction xs; simp) lemma (in monoid_add) sum_list_map_filter': "sum_list (map f (filter P xs)) = sum_list (map (\<lambda>x. if P x then f x else 0) xs)" by (induction xs) simp_all text \<open>Summation of a strictly ascending sequence with length \<open>n\<close> can be upper-bounded by summation over \<open>{0..<n}\<close>.\<close> lemma sorted_wrt_less_sum_mono_lowerbound: fixes f :: "nat \<Rightarrow> ('b::ordered_comm_monoid_add)" assumes mono: "\<And>x y. x\<le>y \<Longrightarrow> f x \<le> f y" shows "sorted_wrt (<) ns \<Longrightarrow> (\<Sum>i\<in>{0..<length ns}. f i) \<le> (\<Sum>i\<leftarrow>ns. f i)" proof (induction ns rule: rev_induct) case Nil then show ?case by simp next case (snoc n ns) have "sum f {0..<length (ns @ [n])} = sum f {0..<length ns} + f (length ns)" by simp also have "sum f {0..<length ns} \<le> sum_list (map f ns)" using snoc by (auto simp: sorted_wrt_append) also have "length ns \<le> n" using sorted_wrt_less_idx[OF snoc.prems(1), of "length ns"] by auto finally have "sum f {0..<length (ns @ [n])} \<le> sum_list (map f ns) + f n" using mono add_mono by blast thus ?case by simp qed subsection \<open>Horner sums\<close> context comm_semiring_0 begin definition horner_sum :: \<open>('b \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'b list \<Rightarrow> 'a\<close> where horner_sum_foldr: \<open>horner_sum f a xs = foldr (\<lambda>x b. f x + a * b) xs 0\<close> lemma horner_sum_simps [simp]: \<open>horner_sum f a [] = 0\<close> \<open>horner_sum f a (x # xs) = f x + a * horner_sum f a xs\<close> by (simp_all add: horner_sum_foldr) lemma horner_sum_eq_sum_funpow: \<open>horner_sum f a xs = (\<Sum>n = 0..<length xs. ((*) a ^^ n) (f (xs ! n)))\<close> proof (induction xs) case Nil then show ?case by simp next case (Cons x xs) then show ?case by (simp add: sum.atLeast0_lessThan_Suc_shift sum_distrib_left del: sum.op_ivl_Suc) qed end context includes lifting_syntax begin lemma horner_sum_transfer [transfer_rule]: \<open>((B ===> A) ===> A ===> list_all2 B ===> A) horner_sum horner_sum\<close> if [transfer_rule]: \<open>A 0 0\<close> and [transfer_rule]: \<open>(A ===> A ===> A) (+) (+)\<close> and [transfer_rule]: \<open>(A ===> A ===> A) (*) (*)\<close> by (unfold horner_sum_foldr) transfer_prover end context comm_semiring_1 begin lemma horner_sum_eq_sum: \<open>horner_sum f a xs = (\<Sum>n = 0..<length xs. f (xs ! 
n) * a ^ n)\<close> proof - have \<open>(*) a ^^ n = (*) (a ^ n)\<close> for n by (induction n) (simp_all add: ac_simps) then show ?thesis by (simp add: horner_sum_eq_sum_funpow ac_simps) qed lemma horner_sum_append: \<open>horner_sum f a (xs @ ys) = horner_sum f a xs + a ^ length xs * horner_sum f a ys\<close> using sum.atLeastLessThan_shift_bounds [of _ 0 \<open>length xs\<close> \<open>length ys\<close>] atLeastLessThan_add_Un [of 0 \<open>length xs\<close> \<open>length ys\<close>] by (simp add: horner_sum_eq_sum sum_distrib_left sum.union_disjoint ac_simps nth_append power_add) end context linordered_semidom begin lemma horner_sum_nonnegative: \<open>0 \<le> horner_sum of_bool 2 bs\<close> by (induction bs) simp_all end context unique_euclidean_semiring_numeral begin lemma horner_sum_bound: \<open>horner_sum of_bool 2 bs < 2 ^ length bs\<close> proof (induction bs) case Nil then show ?case by simp next case (Cons b bs) moreover define a where \<open>a = 2 ^ length bs - horner_sum of_bool 2 bs\<close> ultimately have *: \<open>2 ^ length bs = horner_sum of_bool 2 bs + a\<close> by simp have \<open>1 < a * 2\<close> if \<open>0 < a\<close> using that add_mono [of 1 a 1 a] by (simp add: mult_2_right discrete) with Cons show ?case by (simp add: algebra_simps *) qed end lemma nat_horner_sum [simp]: \<open>nat (horner_sum of_bool 2 bs) = horner_sum of_bool 2 bs\<close> by (induction bs) (auto simp add: nat_add_distrib horner_sum_nonnegative) context unique_euclidean_semiring_numeral begin lemma horner_sum_less_eq_iff_lexordp_eq: \<open>horner_sum of_bool 2 bs \<le> horner_sum of_bool 2 cs \<longleftrightarrow> lexordp_eq (rev bs) (rev cs)\<close> if \<open>length bs = length cs\<close> proof - have \<open>horner_sum of_bool 2 (rev bs) \<le> horner_sum of_bool 2 (rev cs) \<longleftrightarrow> lexordp_eq bs cs\<close> if \<open>length bs = length cs\<close> for bs cs using that proof (induction bs cs rule: list_induct2) case Nil then show ?case by simp next case (Cons b bs c cs) with horner_sum_nonnegative [of \<open>rev bs\<close>] horner_sum_nonnegative [of \<open>rev cs\<close>] horner_sum_bound [of \<open>rev bs\<close>] horner_sum_bound [of \<open>rev cs\<close>] show ?case by (auto simp add: horner_sum_append not_le Cons intro: add_strict_increasing2 add_increasing) qed from that this [of \<open>rev bs\<close> \<open>rev cs\<close>] show ?thesis by simp qed lemma horner_sum_less_iff_lexordp: \<open>horner_sum of_bool 2 bs < horner_sum of_bool 2 cs \<longleftrightarrow> ord_class.lexordp (rev bs) (rev cs)\<close> if \<open>length bs = length cs\<close> proof - have \<open>horner_sum of_bool 2 (rev bs) < horner_sum of_bool 2 (rev cs) \<longleftrightarrow> ord_class.lexordp bs cs\<close> if \<open>length bs = length cs\<close> for bs cs using that proof (induction bs cs rule: list_induct2) case Nil then show ?case by simp next case (Cons b bs c cs) with horner_sum_nonnegative [of \<open>rev bs\<close>] horner_sum_nonnegative [of \<open>rev cs\<close>] horner_sum_bound [of \<open>rev bs\<close>] horner_sum_bound [of \<open>rev cs\<close>] show ?case by (auto simp add: horner_sum_append not_less Cons intro: add_strict_increasing2 add_increasing) qed from that this [of \<open>rev bs\<close> \<open>rev cs\<close>] show ?thesis by simp qed end subsection \<open>Further facts about \<^const>\<open>List.n_lists\<close>\<close> lemma length_n_lists: "length (List.n_lists n xs) = length xs ^ n" by (induct n) (auto simp add: comp_def length_concat sum_list_triv) lemma distinct_n_lists: assumes 
"distinct xs" shows "distinct (List.n_lists n xs)" proof (rule card_distinct) from assms have card_length: "card (set xs) = length xs" by (rule distinct_card) have "card (set (List.n_lists n xs)) = card (set xs) ^ n" proof (induct n) case 0 then show ?case by simp next case (Suc n) moreover have "card (\<Union>ys\<in>set (List.n_lists n xs). (\<lambda>y. y # ys) ` set xs) = (\<Sum>ys\<in>set (List.n_lists n xs). card ((\<lambda>y. y # ys) ` set xs))" by (rule card_UN_disjoint) auto moreover have "\<And>ys. card ((\<lambda>y. y # ys) ` set xs) = card (set xs)" by (rule card_image) (simp add: inj_on_def) ultimately show ?case by auto qed also have "\<dots> = length xs ^ n" by (simp add: card_length) finally show "card (set (List.n_lists n xs)) = length (List.n_lists n xs)" by (simp add: length_n_lists) qed subsection \<open>Tools setup\<close> lemmas sum_code = sum.set_conv_list lemma sum_set_upto_conv_sum_list_int [code_unfold]: "sum f (set [i..j::int]) = sum_list (map f [i..j])" by (simp add: interv_sum_list_conv_sum_set_int) lemma sum_set_upt_conv_sum_list_nat [code_unfold]: "sum f (set [m..<n]) = sum_list (map f [m..<n])" by (simp add: interv_sum_list_conv_sum_set_nat) subsection \<open>List product\<close> context monoid_mult begin sublocale prod_list: monoid_list times 1 defines prod_list = prod_list.F .. end context comm_monoid_mult begin sublocale prod_list: comm_monoid_list times 1 rewrites "monoid_list.F times 1 = prod_list" proof - show "comm_monoid_list times 1" .. then interpret prod_list: comm_monoid_list times 1 . from prod_list_def show "monoid_list.F times 1 = prod_list" by simp qed sublocale prod: comm_monoid_list_set times 1 rewrites "monoid_list.F times 1 = prod_list" and "comm_monoid_set.F times 1 = prod" proof - show "comm_monoid_list_set times 1" .. then interpret prod: comm_monoid_list_set times 1 . from prod_list_def show "monoid_list.F times 1 = prod_list" by simp from prod_def show "comm_monoid_set.F times 1 = prod" by (auto intro: sym) qed end text \<open>Some syntactic sugar:\<close> syntax (ASCII) "_prod_list" :: "pttrn => 'a list => 'b => 'b" ("(3PROD _<-_. _)" [0, 51, 10] 10) syntax "_prod_list" :: "pttrn => 'a list => 'b => 'b" ("(3\<Prod>_\<leftarrow>_. _)" [0, 51, 10] 10) translations \<comment> \<open>Beware of argument permutation!\<close> "\<Prod>x\<leftarrow>xs. b" \<rightleftharpoons> "CONST prod_list (CONST map (\<lambda>x. b) xs)" context includes lifting_syntax begin lemma prod_list_transfer [transfer_rule]: "(list_all2 A ===> A) prod_list prod_list" if [transfer_rule]: "A 1 1" "(A ===> A ===> A) (*) (*)" unfolding prod_list.eq_foldr [abs_def] by transfer_prover end lemma prod_list_zero_iff: "prod_list xs = 0 \<longleftrightarrow> (0 :: 'a :: {semiring_no_zero_divisors, semiring_1}) \<in> set xs" by (induction xs) simp_all end
subroutine sslope(sinsolelslope,solelslope,effdec,slb,hrangl) use simsphere_mod implicit none real :: effdec, slb, hrangl real :: sinsolelslope,solelslope,dipan,dipaz ! INCLUDE 'modvars.h' ! subroutine calculates solar elevation angle and azimuth ! for sloping terrain when slope is non-zero. ! elevation for northwest corner of square box is ZNW, etc. ! grid size (called xmeshl) is in same units as ZNW ! calculate dip angle and azimuth angle of sloping surface plane ! dipaz = 0.0 ! dipan = 0.0 ! edip = (ZNW + ZSW - ZNE - ZSE)/2.0 ! sdip = (ZNW - ZSW + ZNE - ZSE)/2.0 ! if ( edip. ne. 0 ) then ! dipan = abs (atan (edip / xmeshl / (cos(atan(sdip/edip))))) ! dipaz = 3. * 3.14159/ 2. + atan( sdip / edip) ! if ( edip. lt. 0. ) then ! dipaz = dipaz - 3.14159 ! end if ! else ! dipan = abs (atan (sdip / xmeshl )) ! dipaz = 3.14159 ! if ( sdip. gt. 0. ) then ! dipaz = 0.0 ! end if ! end if dipaz = ASPECT / 57.2958 ! if (dipaz. gt. 6.2835) then ! azimuthangle = azimuthangle - 360. ! endif dipan = SLOPE / 57.2958 ! compute solar elevation angle function for sloping terrain sinsolelslope = cos(dipan) * ( sin(slb) * sin(effdec) + cos(slb) * & cos(effdec) * cos(hrangl)) + sin(dipan) * & ( cos(dipaz) * ( tan(slb) * ( sin(slb) * sin(effdec) +& cos(slb) * cos(effdec) * cos(hrangl)) - sin(effdec) / & cos(slb)) + sin(dipaz) * cos(effdec) * sin(hrangl)) solelslope = asin(sinsolelslope) if ( solelslope <= 0.01) then solelslope = 0.01 end if call albedo(sinsolelslope) ! note that solar elevation can be less than zero for non-zero slope ! although greater than zero for flat slope. ! we use solar elevation for slope in albedo but not for path ! which depends on solar angle with respect to flat plane return end
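In mathematical notation (the symbol names here are editorial, matching the code's variables: φ = slb, presumably the site latitude; δ = effdec, the effective solar declination; h = hrangl, the hour angle; β = dipan, the slope dip; A = dipaz, the slope azimuth), the subroutine computes

\[ \sin\theta_s = \cos\beta\,(\sin\varphi\sin\delta + \cos\varphi\cos\delta\cos h) + \sin\beta\left(\cos A\left(\tan\varphi\,(\sin\varphi\sin\delta + \cos\varphi\cos\delta\cos h) - \frac{\sin\delta}{\cos\varphi}\right) + \sin A\,\cos\delta\sin h\right), \]

which reduces to the familiar flat-terrain elevation formula \(\sin\theta = \sin\varphi\sin\delta + \cos\varphi\cos\delta\cos h\) when the dip \(\beta = 0\).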
/* specfunc/hyperg.c * * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Author: G. Jungman */ /* Miscellaneous implementations of use * for evaluation of hypergeometric functions. */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_errno.h> #include "gsl_sf_exp.h" #include "gsl_sf_gamma.h" #include "error.h" #include "hyperg.h" #define SUM_LARGE (1.0e-5*GSL_DBL_MAX) int gsl_sf_hyperg_1F1_series_e(const double a, const double b, const double x, gsl_sf_result * result ) { double an = a; double bn = b; double n = 1.0; double del = 1.0; double abs_del = 1.0; double max_abs_del = 1.0; double sum_val = 1.0; double sum_err = 0.0; while(abs_del/fabs(sum_val) > GSL_DBL_EPSILON) { double u, abs_u; if(bn == 0.0) { DOMAIN_ERROR(result); } if(an == 0.0 || n > 1000.0) { result->val = sum_val; result->err = sum_err; result->err += 2.0 * GSL_DBL_EPSILON * n * fabs(sum_val); return GSL_SUCCESS; } u = x * (an/(bn*n)); abs_u = fabs(u); if(abs_u > 1.0 && max_abs_del > GSL_DBL_MAX/abs_u) { result->val = sum_val; result->err = fabs(sum_val); GSL_ERROR ("overflow", GSL_EOVRFLW); } del *= u; sum_val += del; if(fabs(sum_val) > SUM_LARGE) { result->val = sum_val; result->err = fabs(sum_val); GSL_ERROR ("overflow", GSL_EOVRFLW); } abs_del = fabs(del); max_abs_del = GSL_MAX_DBL(abs_del, max_abs_del); sum_err += 2.0*GSL_DBL_EPSILON*abs_del; an += 1.0; bn += 1.0; n += 1.0; } result->val = sum_val; result->err = sum_err; result->err += abs_del; result->err += 2.0 * GSL_DBL_EPSILON * n * fabs(sum_val); return GSL_SUCCESS; } int gsl_sf_hyperg_1F1_large_b_e(const double a, const double b, const double x, gsl_sf_result * result) { if(fabs(x/b) < 1.0) { const double u = x/b; const double v = 1.0/(1.0-u); const double pre = pow(v,a); const double uv = u*v; const double uv2 = uv*uv; const double t1 = a*(a+1.0)/(2.0*b)*uv2; const double t2a = a*(a+1.0)/(24.0*b*b)*uv2; const double t2b = 12.0 + 16.0*(a+2.0)*uv + 3.0*(a+2.0)*(a+3.0)*uv2; const double t2 = t2a*t2b; result->val = pre * (1.0 - t1 + t2); result->err = pre * GSL_DBL_EPSILON * (1.0 + fabs(t1) + fabs(t2)); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); return GSL_SUCCESS; } else { DOMAIN_ERROR(result); } } int gsl_sf_hyperg_U_large_b_e(const double a, const double b, const double x, gsl_sf_result * result, double * ln_multiplier ) { double N = floor(b); /* b = N + eps */ double eps = b - N; if(fabs(eps) < GSL_SQRT_DBL_EPSILON) { double lnpre_val; double lnpre_err; gsl_sf_result M; if(b > 1.0) { double tmp = (1.0-b)*log(x); gsl_sf_result lg_bm1; gsl_sf_result lg_a; gsl_sf_lngamma_e(b-1.0, &lg_bm1); gsl_sf_lngamma_e(a, &lg_a); lnpre_val = tmp + x + lg_bm1.val - lg_a.val; lnpre_err = lg_bm1.err + lg_a.err + GSL_DBL_EPSILON * (fabs(x) + fabs(tmp)); gsl_sf_hyperg_1F1_large_b_e(1.0-a, 2.0-b, -x, &M); } else { gsl_sf_result lg_1mb; gsl_sf_result lg_1pamb; 
gsl_sf_lngamma_e(1.0-b, &lg_1mb); gsl_sf_lngamma_e(1.0+a-b, &lg_1pamb); lnpre_val = lg_1mb.val - lg_1pamb.val; lnpre_err = lg_1mb.err + lg_1pamb.err; gsl_sf_hyperg_1F1_large_b_e(a, b, x, &M); } if(lnpre_val > GSL_LOG_DBL_MAX-10.0) { result->val = M.val; result->err = M.err; *ln_multiplier = lnpre_val; GSL_ERROR ("overflow", GSL_EOVRFLW); } else { gsl_sf_result epre; int stat_e = gsl_sf_exp_err_e(lnpre_val, lnpre_err, &epre); result->val = epre.val * M.val; result->err = epre.val * M.err + epre.err * fabs(M.val); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); *ln_multiplier = 0.0; return stat_e; } } else { double omb_lnx = (1.0-b)*log(x); gsl_sf_result lg_1mb; double sgn_1mb; gsl_sf_result lg_1pamb; double sgn_1pamb; gsl_sf_result lg_bm1; double sgn_bm1; gsl_sf_result lg_a; double sgn_a; gsl_sf_result M1, M2; double lnpre1_val, lnpre2_val; double lnpre1_err, lnpre2_err; double sgpre1, sgpre2; gsl_sf_hyperg_1F1_large_b_e( a, b, x, &M1); gsl_sf_hyperg_1F1_large_b_e(1.0-a, 2.0-b, x, &M2); gsl_sf_lngamma_sgn_e(1.0-b, &lg_1mb, &sgn_1mb); gsl_sf_lngamma_sgn_e(1.0+a-b, &lg_1pamb, &sgn_1pamb); gsl_sf_lngamma_sgn_e(b-1.0, &lg_bm1, &sgn_bm1); gsl_sf_lngamma_sgn_e(a, &lg_a, &sgn_a); lnpre1_val = lg_1mb.val - lg_1pamb.val; lnpre1_err = lg_1mb.err + lg_1pamb.err; lnpre2_val = lg_bm1.val - lg_a.val - omb_lnx - x; lnpre2_err = lg_bm1.err + lg_a.err + GSL_DBL_EPSILON * (fabs(omb_lnx)+fabs(x)); sgpre1 = sgn_1mb * sgn_1pamb; sgpre2 = sgn_bm1 * sgn_a; if(lnpre1_val > GSL_LOG_DBL_MAX-10.0 || lnpre2_val > GSL_LOG_DBL_MAX-10.0) { double max_lnpre_val = GSL_MAX(lnpre1_val,lnpre2_val); double max_lnpre_err = GSL_MAX(lnpre1_err,lnpre2_err); double lp1 = lnpre1_val - max_lnpre_val; double lp2 = lnpre2_val - max_lnpre_val; double t1 = sgpre1*exp(lp1); double t2 = sgpre2*exp(lp2); result->val = t1*M1.val + t2*M2.val; result->err = fabs(t1)*M1.err + fabs(t2)*M2.err; result->err += GSL_DBL_EPSILON * exp(max_lnpre_err) * (fabs(t1*M1.val) + fabs(t2*M2.val)); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); *ln_multiplier = max_lnpre_val; GSL_ERROR ("overflow", GSL_EOVRFLW); } else { double t1 = sgpre1*exp(lnpre1_val); double t2 = sgpre2*exp(lnpre2_val); result->val = t1*M1.val + t2*M2.val; result->err = fabs(t1) * M1.err + fabs(t2)*M2.err; result->err += GSL_DBL_EPSILON * (exp(lnpre1_err)*fabs(t1*M1.val) + exp(lnpre2_err)*fabs(t2*M2.val)); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); *ln_multiplier = 0.0; return GSL_SUCCESS; } } } /* [Carlson, p.109] says the error in truncating this asymptotic series * is less than the absolute value of the first neglected term. * * A termination argument is provided, so that the series will * be summed at most up to n=n_trunc. If n_trunc is set negative, * then the series is summed until it appears to start diverging. 
*/ int gsl_sf_hyperg_2F0_series_e(const double a, const double b, const double x, int n_trunc, gsl_sf_result * result ) { const int maxiter = 2000; double an = a; double bn = b; double n = 1.0; double sum = 1.0; double del = 1.0; double abs_del = 1.0; double max_abs_del = 1.0; double last_abs_del = 1.0; while(abs_del/fabs(sum) > GSL_DBL_EPSILON && n < maxiter) { double u = an * (bn/n * x); double abs_u = fabs(u); if(abs_u > 1.0 && (max_abs_del > GSL_DBL_MAX/abs_u)) { result->val = sum; result->err = fabs(sum); GSL_ERROR ("overflow", GSL_EOVRFLW); } del *= u; sum += del; abs_del = fabs(del); if(abs_del > last_abs_del) break; /* series is probably starting to grow */ last_abs_del = abs_del; max_abs_del = GSL_MAX(abs_del, max_abs_del); an += 1.0; bn += 1.0; n += 1.0; if(an == 0.0 || bn == 0.0) break; /* series terminated */ if(n_trunc >= 0 && n >= n_trunc) break; /* reached requested timeout */ } result->val = sum; result->err = GSL_DBL_EPSILON * n + abs_del; if(n >= maxiter) GSL_ERROR ("error", GSL_EMAXITER); else return GSL_SUCCESS; }
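For reference, the series being summed are the standard hypergeometric series (textbook definitions, not restated in the source):

\[ {}_1F_1(a;b;x) = \sum_{n=0}^{\infty} \frac{(a)_n}{(b)_n} \frac{x^n}{n!}, \qquad {}_2F_0(a,b;x) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{x^n}{n!}, \]

where \((a)_n = a(a+1)\cdots(a+n-1)\) is the Pochhammer symbol. Both loops track the ratio of successive terms rather than the terms themselves: in gsl_sf_hyperg_1F1_series_e the update u = x * (an/(bn*n)) is exactly \(t_n/t_{n-1} = \frac{(a+n-1)\,x}{(b+n-1)\,n}\), and in gsl_sf_hyperg_2F0_series_e it is \((a+n-1)(b+n-1)\,x/n\); comparing max_abs_del against GSL_DBL_MAX/abs_u before multiplying is what lets the code flag overflow without triggering it.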
"Hello, World! "
total record Bar where constructor MkBar good : (Int -> Int) -> Bar bad : (Bar -> Int) -> Bar
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

def var_vs_target(dataframe, kind, ref=None, col=4, xsize=12, **kwargs):
    # Plot every column of the given dtype kind against the reference (target)
    # column: countplots for categoricals, boxplots with a mean line otherwise.
    df_col = dataframe.select_dtypes(kind)
    col_idx = df_col.columns[df_col.columns != ref]
    length = len(col_idx)
    row = len(list(range(0, length, col)))
    fig, axs = plt.subplots(row, col, figsize=(xsize, ((xsize/col)*.75)*row), constrained_layout=True)
    for i, ax in enumerate(axs.flat):
        if i < length:
            if kind == 'category':
                sns.countplot(data=dataframe, x=col_idx[i], hue=ref, ax=ax, alpha=.9, **kwargs)
            else:
                sns.boxplot(data=dataframe, x=ref, y=col_idx[i], ax=ax, width=.4, saturation=.8, **kwargs)
                mean = df_col[col_idx[i]].mean()
                ax.axhline(y=mean, c='r', ls='--', label='mean')
                ax.legend(loc='best', framealpha=.3)
        else:
            ax.remove()

def hist(dataframe, col=4, ref=None, xsize=12, **kwargs):
    # Histogram of every column except the reference, overlaid with per-class
    # KDEs (numeric columns) or per-class bars (categorical columns).
    col_idx = dataframe.columns[dataframe.columns != ref]
    length = len(col_idx)
    row = len(list(range(0, length, col)))
    label = dataframe[ref].unique()
    g0 = dataframe[dataframe[ref] == 0]
    g1 = dataframe[dataframe[ref] == 1]
    fig, axs = plt.subplots(row, col, figsize=(xsize, ((xsize/col)*.75)*row), constrained_layout=True)
    for i, ax in enumerate(axs.flat):
        if i < length:
            # Look up the dtype by column name: indexing dataframe.dtypes by
            # position would misalign whenever ref is not the last column.
            if dataframe[col_idx[i]].dtype.name != 'category':
                ax.hist(dataframe[col_idx[i]], histtype='stepfilled', alpha=.6, density=True, color='gray', **kwargs)
                sns.kdeplot(g0[col_idx[i]], ax=ax)
                sns.kdeplot(g1[col_idx[i]], ax=ax)
            else:
                ax.hist(dataframe[col_idx[i]], histtype='bar', alpha=.8)
                ax.hist(g0[col_idx[i]], histtype='bar', alpha=.8)
            ax.set_title(col_idx[i])
            ax.legend(label, title=ref, loc='best', framealpha=.3)
        else:
            ax.remove()

def corr_matrix(dataframe, **kwargs):
    # Lower-triangle correlation heatmap.
    plt.figure(figsize=(9,7))
    sns.heatmap(dataframe.corr(), annot=True, cmap='coolwarm',
                mask=np.triu(dataframe.corr(), k=1), square=True, fmt='2.1f', **kwargs)
    plt.show()
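A minimal usage sketch for the helpers above — the dataset and column names are illustrative, not part of the original:

import seaborn as sns
import matplotlib.pyplot as plt

# Illustrative data: seaborn's bundled titanic dataset; 'survived' plays
# the role of the binary target (ref) column.
df = sns.load_dataset('titanic')[['age', 'fare', 'class', 'survived']]

var_vs_target(df, kind='number', ref='survived')    # boxplots of numeric columns vs the target
var_vs_target(df, kind='category', ref='survived')  # countplots of categorical columns
corr_matrix(df.select_dtypes('number'))             # lower-triangle correlation heatmap
plt.show()  # corr_matrix shows itself; this displays the remaining figures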
module Data.QuadTree.LensProofs.Valid-LensBCD where open import Haskell.Prelude renaming (zero to Z; suc to S) open import Data.Lens.Lens open import Data.Logic open import Data.QuadTree.InternalAgda open import Agda.Primitive open import Data.Lens.Proofs.LensLaws open import Data.Lens.Proofs.LensPostulates open import Data.Lens.Proofs.LensComposition open import Data.QuadTree.Implementation.QuadrantLenses open import Data.QuadTree.Implementation.Definition open import Data.QuadTree.Implementation.ValidTypes open import Data.QuadTree.Implementation.SafeFunctions open import Data.QuadTree.Implementation.PublicFunctions open import Data.QuadTree.Implementation.DataLenses -- The lens laws have been proven for LensA, and the proof is quite long. -- The implementation of lens b/c/d is basically identical, so I won't bother to prove them for now --- Lens laws for lensB postulate ValidLens-LensB-ViewSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ViewSet (lensB {t} {dep}) ValidLens-LensB-SetView : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetView (lensB {t} {dep}) ValidLens-LensB-SetSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetSet (lensB {t} {dep}) ValidLens-LensB : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ValidLens (VQuadrant t {S dep}) (VQuadrant t {dep}) ValidLens-LensB {t} {dep} = CValidLens lensB (ValidLens-LensB-ViewSet) (ValidLens-LensB-SetView) (ValidLens-LensB-SetSet) --- Lens laws for lensC postulate ValidLens-LensC-ViewSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ViewSet (lensC {t} {dep}) ValidLens-LensC-SetView : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetView (lensC {t} {dep}) ValidLens-LensC-SetSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetSet (lensC {t} {dep}) ValidLens-LensC : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ValidLens (VQuadrant t {S dep}) (VQuadrant t {dep}) ValidLens-LensC {t} {dep} = CValidLens lensC (ValidLens-LensC-ViewSet) (ValidLens-LensC-SetView) (ValidLens-LensC-SetSet) --- Lens laws for lensD postulate ValidLens-LensD-ViewSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ViewSet (lensD {t} {dep}) ValidLens-LensD-SetView : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetView (lensD {t} {dep}) ValidLens-LensD-SetSet : {t : Set} {{eqT : Eq t}} {dep : Nat} -> SetSet (lensD {t} {dep}) ValidLens-LensD : {t : Set} {{eqT : Eq t}} {dep : Nat} -> ValidLens (VQuadrant t {S dep}) (VQuadrant t {dep}) ValidLens-LensD {t} {dep} = CValidLens lensD (ValidLens-LensD-ViewSet) (ValidLens-LensD-SetView) (ValidLens-LensD-SetSet)
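For readers skimming the postulates: ViewSet, SetView and SetSet are the standard lens laws. Stated informally (the exact argument order is assumed to follow Data.Lens.Proofs.LensLaws, which is not shown here):

\[ \begin{aligned} \text{ViewSet}:&\quad \mathrm{view}\ l\ (\mathrm{set}\ l\ v\ s) = v \\ \text{SetView}:&\quad \mathrm{set}\ l\ (\mathrm{view}\ l\ s)\ s = s \\ \text{SetSet}:&\quad \mathrm{set}\ l\ v_2\ (\mathrm{set}\ l\ v_1\ s) = \mathrm{set}\ l\ v_2\ s \end{aligned} \]

i.e. the usual get-put, put-get and put-put laws.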
From stdpp Require Import gmap. From iris.bi Require Import interface. From iris.proofmode Require Import tactics. (** This file constructs a simple non step-indexed linear separation logic as predicates over heaps (modeled as maps from integer locations to integer values). It shows that Iris's [bi] canonical structure can be inhabited, and the Iris proof mode can be used to prove lemmas in this separation logic. *) Definition loc := Z. Definition val := Z. Record heapProp := HeapProp { heapProp_holds :> gmap loc val → Prop; }. Arguments heapProp_holds : simpl never. Add Printing Constructor heapProp. Section ofe. Inductive heapProp_equiv' (P Q : heapProp) : Prop := { heapProp_in_equiv : ∀ σ, P σ ↔ Q σ }. Instance heapProp_equiv : Equiv heapProp := heapProp_equiv'. Instance heapProp_equivalence : Equivalence (≡@{heapProp}). Proof. split; repeat destruct 1; constructor; naive_solver. Qed. Canonical Structure heapPropO := discreteO heapProp. End ofe. (** logical entailement *) Inductive heapProp_entails (P Q : heapProp) : Prop := { heapProp_in_entails : ∀ σ, P σ → Q σ }. (** logical connectives *) Definition heapProp_emp_def : heapProp := {| heapProp_holds σ := σ = ∅ |}. Definition heapProp_emp_aux : seal (@heapProp_emp_def). Proof. by eexists. Qed. Definition heapProp_emp := unseal heapProp_emp_aux. Definition heapProp_emp_eq : @heapProp_emp = @heapProp_emp_def := seal_eq heapProp_emp_aux. Definition heapProp_pure_def (φ : Prop) : heapProp := {| heapProp_holds _ := φ |}. Definition heapProp_pure_aux : seal (@heapProp_pure_def). Proof. by eexists. Qed. Definition heapProp_pure := unseal heapProp_pure_aux. Definition heapProp_pure_eq : @heapProp_pure = @heapProp_pure_def := seal_eq heapProp_pure_aux. Definition heapProp_and_def (P Q : heapProp) : heapProp := {| heapProp_holds σ := P σ ∧ Q σ |}. Definition heapProp_and_aux : seal (@heapProp_and_def). Proof. by eexists. Qed. Definition heapProp_and := unseal heapProp_and_aux. Definition heapProp_and_eq: @heapProp_and = @heapProp_and_def := seal_eq heapProp_and_aux. Definition heapProp_or_def (P Q : heapProp) : heapProp := {| heapProp_holds σ := P σ ∨ Q σ |}. Definition heapProp_or_aux : seal (@heapProp_or_def). Proof. by eexists. Qed. Definition heapProp_or := unseal heapProp_or_aux. Definition heapProp_or_eq: @heapProp_or = @heapProp_or_def := seal_eq heapProp_or_aux. Definition heapProp_impl_def (P Q : heapProp) : heapProp := {| heapProp_holds σ := P σ → Q σ |}. Definition heapProp_impl_aux : seal (@heapProp_impl_def). Proof. by eexists. Qed. Definition heapProp_impl := unseal heapProp_impl_aux. Definition heapProp_impl_eq : @heapProp_impl = @heapProp_impl_def := seal_eq heapProp_impl_aux. Definition heapProp_forall_def {A} (Ψ : A → heapProp) : heapProp := {| heapProp_holds σ := ∀ a, Ψ a σ |}. Definition heapProp_forall_aux : seal (@heapProp_forall_def). Proof. by eexists. Qed. Definition heapProp_forall {A} := unseal heapProp_forall_aux A. Definition heapProp_forall_eq : @heapProp_forall = @heapProp_forall_def := seal_eq heapProp_forall_aux. Definition heapProp_exist_def {A} (Ψ : A → heapProp) : heapProp := {| heapProp_holds σ := ∃ a, Ψ a σ |}. Definition heapProp_exist_aux : seal (@heapProp_exist_def). Proof. by eexists. Qed. Definition heapProp_exist {A} := unseal heapProp_exist_aux A. Definition heapProp_exist_eq : @heapProp_exist = @heapProp_exist_def := seal_eq heapProp_exist_aux. Definition heapProp_sep_def (P Q : heapProp) : heapProp := {| heapProp_holds σ := ∃ σ1 σ2, σ = σ1 ∪ σ2 ∧ σ1 ##ₘ σ2 ∧ P σ1 ∧ Q σ2 |}. 
Definition heapProp_sep_aux : seal (@heapProp_sep_def). Proof. by eexists. Qed. Definition heapProp_sep := unseal heapProp_sep_aux. Definition heapProp_sep_eq: @heapProp_sep = @heapProp_sep_def := seal_eq heapProp_sep_aux. Definition heapProp_wand_def (P Q : heapProp) : heapProp := {| heapProp_holds σ := ∀ σ', σ ##ₘ σ' → P σ' → Q (σ ∪ σ') |}. Definition heapProp_wand_aux : seal (@heapProp_wand_def). Proof. by eexists. Qed. Definition heapProp_wand := unseal heapProp_wand_aux. Definition heapProp_wand_eq: @heapProp_wand = @heapProp_wand_def := seal_eq heapProp_wand_aux. Definition heapProp_persistently_def (P : heapProp) : heapProp := {| heapProp_holds σ := P ∅ |}. Definition heapProp_persistently_aux : seal (@heapProp_persistently_def). Proof. by eexists. Qed. Definition heapProp_persistently := unseal heapProp_persistently_aux. Definition heapProp_persistently_eq: @heapProp_persistently = @heapProp_persistently_def := seal_eq heapProp_persistently_aux. (** Iris's [bi] class requires the presence of a later modality, but for non step-indexed logics, it can be defined as the identity. *) Definition heapProp_later (P : heapProp) : heapProp := P. Definition unseal_eqs := (heapProp_emp_eq, heapProp_pure_eq, heapProp_and_eq, heapProp_or_eq, heapProp_impl_eq, heapProp_forall_eq, heapProp_exist_eq, heapProp_sep_eq, heapProp_wand_eq, heapProp_persistently_eq). Ltac unseal := rewrite !unseal_eqs /=. Section mixins. (** Enable [simpl] locally, which is useful for proofs in the model. *) Local Arguments heapProp_holds !_ _ /. Lemma heapProp_bi_mixin : BiMixin heapProp_entails heapProp_emp heapProp_pure heapProp_and heapProp_or heapProp_impl (@heapProp_forall) (@heapProp_exist) heapProp_sep heapProp_wand heapProp_persistently. Proof. split. - (* [PreOrder heapProp_entails] *) split; repeat destruct 1; constructor; naive_solver. - (* [P ≡ Q ↔ (P ⊢ Q) ∧ (Q ⊢ P)] *) intros P Q; split. + intros [HPQ]; split; split; naive_solver. + intros [[HPQ] [HQP]]; split; naive_solver. - (* [Proper (iff ==> dist n) bi_pure] *) unseal=> n φ1 φ2 Hφ; split; naive_solver. - (* [NonExpansive2 bi_and] *) unseal=> n P1 P2 [HP] Q1 Q2 [HQ]; split; naive_solver. - (* [NonExpansive2 bi_or] *) unseal=> n P1 P2 [HP] Q1 Q2 [HQ]; split; naive_solver. - (* [NonExpansive2 bi_impl] *) unseal=> n P1 P2 [HP] Q1 Q2 [HQ]; split; naive_solver. - (* [Proper (pointwise_relation _ (dist n) ==> dist n) (bi_forall A)] *) unseal=> A n Φ1 Φ2 HΦ; split=> σ /=; split=> ? x; by apply HΦ. - (* [Proper (pointwise_relation _ (dist n) ==> dist n) (bi_exist A)] *) unseal=> A n Φ1 Φ2 HΦ; split=> σ /=; split=> -[x ?]; exists x; by apply HΦ. - (* [NonExpansive2 bi_sep] *) unseal=> n P1 P2 [HP] Q1 Q2 [HQ]; split; naive_solver. - (* [NonExpansive2 bi_wand] *) unseal=> n P1 P2 [HP] Q1 Q2 [HQ]; split; naive_solver. - (* [NonExpansive2 bi_persistently] *) unseal=> n P1 P2 [HP]; split; naive_solver. - (* [φ → P ⊢ ⌜ φ ⌝] *) unseal=> φ P ?; by split. - (* [(φ → True ⊢ P) → ⌜ φ ⌝ ⊢ P] *) unseal=> φ P HP; split=> σ ?. by apply HP. - (* [P ∧ Q ⊢ P] *) unseal=> P Q; split=> σ [??]; done. - (* [P ∧ Q ⊢ Q] *) unseal=> P Q; split=> σ [??]; done. - (* [(P ⊢ Q) → (P ⊢ R) → P ⊢ Q ∧ R] *) unseal=> P Q R [HPQ] [HPR]; split=> σ; split; auto. - (* [P ⊢ P ∨ Q] *) unseal=> P Q; split=> σ; by left. - (* [Q ⊢ P ∨ Q] *) unseal=> P Q; split=> σ; by right. - (* [(P ⊢ R) → (Q ⊢ R) → P ∨ Q ⊢ R] *) unseal=> P Q R [HPQ] [HQR]; split=> σ [?|?]; auto. - (* [(P ∧ Q ⊢ R) → P ⊢ Q → R] *) unseal=> P Q R HPQR; split=> σ ??. by apply HPQR. 
- (* [(P ⊢ Q → R) → P ∧ Q ⊢ R] *) unseal=> P Q R HPQR; split=> σ [??]. by apply HPQR. - (* [(∀ a, P ⊢ Ψ a) → P ⊢ ∀ a, Ψ a] *) unseal=> A P Ψ HPΨ; split=> σ ? a. by apply HPΨ. - (* [(∀ a, Ψ a) ⊢ Ψ a] *) unseal=> A Ψ a; split=> σ ?; done. - (* [Ψ a ⊢ ∃ a, Ψ a] *) unseal=> A Ψ a; split=> σ ?. by exists a. - (* [(∀ a, Φ a ⊢ Q) → (∃ a, Φ a) ⊢ Q] *) unseal=> A Φ Q HΦQ; split=> σ [a ?]. by apply (HΦQ a). - (* [(P ⊢ Q) → (P' ⊢ Q') → P ∗ P' ⊢ Q ∗ Q'] *) unseal=> P P' Q Q' [HPQ] [HP'Q']; split; naive_solver. - (* [P ⊢ emp ∗ P] *) unseal=> P; split=> σ ? /=. eexists ∅, σ. rewrite left_id_L. split_and!; done || apply map_disjoint_empty_l. - (* [emp ∗ P ⊢ P] *) unseal=> P; split; intros ? (?&σ&->&?&->&?). by rewrite left_id_L. - (* [P ∗ Q ⊢ Q ∗ P] *) unseal=> P Q; split; intros ? (σ1&σ2&->&?&?&?). exists σ2, σ1. by rewrite map_union_comm. - (* [(P ∗ Q) ∗ R ⊢ P ∗ (Q ∗ R)] *) unseal=> P Q R; split; intros ? (?&σ3&->&?&(σ1&σ2&->&?&?&?)&?). exists σ1, (σ2 ∪ σ3). split_and!; [by rewrite assoc_L|solve_map_disjoint|done|]. exists σ2, σ3; split_and!; [done|solve_map_disjoint|done..]. - (* [(P ∗ Q ⊢ R) → P ⊢ Q -∗ R] *) unseal=> P Q R [HPQR]; split=> σ1 ? σ2 ??. apply HPQR. by exists σ1, σ2. - (* [(P ⊢ Q -∗ R) → P ∗ Q ⊢ R] *) unseal=> P Q R [HPQR]; split; intros ? (σ1&σ2&->&?&?&?). by apply HPQR. - (* [(P ⊢ Q) → <pers> P ⊢ <pers> Q] *) unseal=> P Q [HPQ]; split=> σ. apply HPQ. - (* [<pers> P ⊢ <pers> <pers> P] *) unseal=> P; split=> σ; done. - (* [emp ⊢ <pers> emp] *) unseal; split=> σ; done. - (* [(∀ a, <pers> (Ψ a)) ⊢ <pers> (∀ a, Ψ a)] *) unseal=> A Ψ; split=> σ; done. - (* [<pers> (∃ a, Ψ a) ⊢ ∃ a, <pers> (Ψ a)] *) unseal=> A Ψ; split=> σ; done. - (* [<pers> P ∗ Q ⊢ <pers> P] *) unseal=> P Q; split; intros ? (σ1&σ2&->&?&?&?); done. - (* [<pers> P ∧ Q ⊢ P ∗ Q] *) unseal=> P Q; split=> σ [??]. eexists ∅, σ. rewrite left_id_L. split_and!; done || apply map_disjoint_empty_l. Qed. Lemma heapProp_bi_later_mixin : BiLaterMixin heapProp_entails heapProp_pure heapProp_or heapProp_impl (@heapProp_forall) (@heapProp_exist) heapProp_sep heapProp_persistently heapProp_later. Proof. eapply bi_later_mixin_id; [done|apply heapProp_bi_mixin]. Qed. End mixins. Canonical Structure heapPropI : bi := {| bi_ofe_mixin := ofe_mixin_of heapProp; bi_bi_mixin := heapProp_bi_mixin; bi_bi_later_mixin := heapProp_bi_later_mixin |}. Instance heapProp_pure_forall : BiPureForall heapPropI. Proof. intros A φ. rewrite /bi_forall /bi_pure /=. unseal. by split. Qed. Lemma heapProp_proofmode_test {A} (P Q R : heapProp) (Φ Ψ : A → heapProp) : P ∗ Q -∗ □ R -∗ □ (R -∗ ∃ x, Φ x) -∗ ∃ x, Φ x ∗ Φ x ∗ P ∗ Q. Proof. iIntros "[HP HQ] #HR #HRΦ". iDestruct ("HRΦ" with "HR") as (x) "#HΦ". iExists x. iFrame. by iSplitL. Qed.
The brothers Asa and Nelson Tift received the contract to convert the blockade runner into an ironclad in early 1862 with the name of Atlanta, after the city in Georgia. This was largely financed by contributions from the women of Savannah. Fingal was cut down to her main deck and large wooden sponsons were built out from the sides of her hull to support her casemate. After the conversion, Atlanta was 204 feet (62.2 m) long overall and had a beam of 41 feet (12 m). Her depth of hold was now 17 feet (5.2 m) and she now had a draft of 15 feet 9 inches (4.8 m). Atlanta now displaced 1,006 long tons (1,022 t) and her speed was estimated at 7–10 knots (13–19 km/h; 8.1–11.5 mph).
Formal statement is: lemma map_poly_eq_0_iff: assumes "f 0 = 0" "\<And>x. x \<in> set (coeffs p) \<Longrightarrow> x \<noteq> 0 \<Longrightarrow> f x \<noteq> 0" shows "map_poly f p = 0 \<longleftrightarrow> p = 0"
Informal statement is: If $f(0) = 0$ and $f(x) \neq 0$ for every nonzero coefficient $x$ of $p$, then $\operatorname{map\_poly} f\, p = 0$ if and only if $p = 0$.
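A worked instance (ours, for illustration): take $f(x) = 2x$ on $\mathbb{Z}$, so $f(0) = 0$ and $f(x) \neq 0$ whenever $x \neq 0$; then for $p = 3x^2 + 1$ we get $\operatorname{map\_poly} f\, p = 6x^2 + 2 \neq 0$, matching $p \neq 0$. The second hypothesis matters: the same $f$ on $\mathbb{Z}/4\mathbb{Z}$ sends the nonzero coefficient $2$ to $0$, so $\operatorname{map\_poly} f\,(2x) = 0$ even though $2x \neq 0$.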
myTestRule {
# Input parameter is:
#   Input string - %-separated key=value strings
# Output parameter is:
#   String array buffer
  writeLine("stdout", "Input string is *Str");
  msiString2KeyValPair(*Str, *Keyval);
  writeKeyValPairs("stdout", *Keyval, " : ");
  msiString2StrArray(*Str, *Stray);
  msiStrArray2String(*Stray, *Str2);
  writeLine("stdout", "After conversion to array and back, string is");
  writeLine("stdout", *Str2);
}
INPUT *Str="key1=value1%key2=value2%key3=value3"
OUTPUT ruleExecOut
```python %matplotlib inline ``` Audio manipulation with torchaudio ================================== ``torchaudio`` provides powerful audio I/O functions, preprocessing transforms and dataset. In this tutorial, we will look into how to prepare audio data and extract features that can be fed to NN models. ```python # When running this tutorial in Google Colab, install the required packages # with the following. # !pip install torchaudio librosa boto3 import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms as T print(torch.__version__) print(torchaudio.__version__) ``` Preparing data and utility functions (skip this section) -------------------------------------------------------- ```python #@title Prepare data and utility functions. {display-mode: "form"} #@markdown #@markdown You do not need to look into this cell. #@markdown Just execute once and you are good to go. #@markdown #@markdown In this tutorial, we will use a speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/), which is licensed under Creative Commos BY 4.0. #------------------------------------------------------------------------------- # Preparation of data and helper functions. #------------------------------------------------------------------------------- import io import os import math import tarfile import multiprocessing import scipy import librosa import boto3 from botocore import UNSIGNED from botocore.config import Config import requests import matplotlib import matplotlib.pyplot as plt import pandas as pd import time from IPython.display import Audio, display [width, height] = matplotlib.rcParams['figure.figsize'] if width < 10: matplotlib.rcParams['figure.figsize'] = [width * 2.5, height] _SAMPLE_DIR = "_sample_data" SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav" SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav") SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav") SAMPLE_RIR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/room-response/rm1/impulse/Lab41-SRI-VOiCES-rm1-impulse-mc01-stu-clo.wav" SAMPLE_RIR_PATH = os.path.join(_SAMPLE_DIR, "rir.wav") SAMPLE_NOISE_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/distractors/rm1/babb/Lab41-SRI-VOiCES-rm1-babb-mc01-stu-clo.wav" SAMPLE_NOISE_PATH = os.path.join(_SAMPLE_DIR, "bg.wav") SAMPLE_MP3_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.mp3" SAMPLE_MP3_PATH = os.path.join(_SAMPLE_DIR, "steam.mp3") SAMPLE_GSM_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.gsm" SAMPLE_GSM_PATH = os.path.join(_SAMPLE_DIR, "steam.gsm") SAMPLE_TAR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit.tar.gz" SAMPLE_TAR_PATH = os.path.join(_SAMPLE_DIR, "sample.tar.gz") SAMPLE_TAR_ITEM = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" S3_BUCKET = "pytorch-tutorial-assets" S3_KEY = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no") os.makedirs(YESNO_DATASET_PATH, exist_ok=True) os.makedirs(_SAMPLE_DIR, exist_ok=True) def _fetch_data(): uri = [ (SAMPLE_WAV_URL, SAMPLE_WAV_PATH), (SAMPLE_WAV_SPEECH_URL, 
SAMPLE_WAV_SPEECH_PATH), (SAMPLE_RIR_URL, SAMPLE_RIR_PATH), (SAMPLE_NOISE_URL, SAMPLE_NOISE_PATH), (SAMPLE_MP3_URL, SAMPLE_MP3_PATH), (SAMPLE_GSM_URL, SAMPLE_GSM_PATH), (SAMPLE_TAR_URL, SAMPLE_TAR_PATH), ] for url, path in uri: with open(path, 'wb') as file_: file_.write(requests.get(url).content) _fetch_data() def _download_yesno(): if os.path.exists(os.path.join(YESNO_DATASET_PATH, "waves_yesno.tar.gz")): return torchaudio.datasets.YESNO(root=YESNO_DATASET_PATH, download=True) YESNO_DOWNLOAD_PROCESS = multiprocessing.Process(target=_download_yesno) YESNO_DOWNLOAD_PROCESS.start() def _get_sample(path, resample=None): effects = [ ["remix", "1"] ] if resample: effects.extend([ ["lowpass", f"{resample // 2}"], ["rate", f'{resample}'], ]) return torchaudio.sox_effects.apply_effects_file(path, effects=effects) def get_speech_sample(*, resample=None): return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample) def get_sample(*, resample=None): return _get_sample(SAMPLE_WAV_PATH, resample=resample) def get_rir_sample(*, resample=None, processed=False): rir_raw, sample_rate = _get_sample(SAMPLE_RIR_PATH, resample=resample) if not processed: return rir_raw, sample_rate rir = rir_raw[:, int(sample_rate*1.01):int(sample_rate*1.3)] rir = rir / torch.norm(rir, p=2) rir = torch.flip(rir, [1]) return rir, sample_rate def get_noise_sample(*, resample=None): return _get_sample(SAMPLE_NOISE_PATH, resample=resample) def print_stats(waveform, sample_rate=None, src=None): if src: print("-" * 10) print("Source:", src) print("-" * 10) if sample_rate: print("Sample Rate:", sample_rate) print("Shape:", tuple(waveform.shape)) print("Dtype:", waveform.dtype) print(f" - Max: {waveform.max().item():6.3f}") print(f" - Min: {waveform.min().item():6.3f}") print(f" - Mean: {waveform.mean().item():6.3f}") print(f" - Std Dev: {waveform.std().item():6.3f}") print() print(waveform) print() def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].specgram(waveform[c], Fs=sample_rate) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) figure.suptitle(title) plt.show(block=False) def play_audio(waveform, sample_rate): waveform = waveform.numpy() num_channels, num_frames = waveform.shape if num_channels == 1: display(Audio(waveform[0], rate=sample_rate)) elif num_channels == 2: display(Audio((waveform[0], waveform[1]), rate=sample_rate)) else: raise ValueError("Waveform with more than 2 channels are not supported.") def inspect_file(path): print("-" * 10) print("Source:", path) print("-" * 10) print(f" - File size: {os.path.getsize(path)} bytes") print(f" - {torchaudio.info(path)}") def plot_spectrogram(spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None): fig, axs = 
plt.subplots(1, 1) axs.set_title(title or 'Spectrogram (db)') axs.set_ylabel(ylabel) axs.set_xlabel('frame') im = axs.imshow(librosa.power_to_db(spec), origin='lower', aspect=aspect) if xmax: axs.set_xlim((0, xmax)) fig.colorbar(im, ax=axs) plt.show(block=False) def plot_mel_fbank(fbank, title=None): fig, axs = plt.subplots(1, 1) axs.set_title(title or 'Filter bank') axs.imshow(fbank, aspect='auto') axs.set_ylabel('frequency bin') axs.set_xlabel('mel bin') plt.show(block=False) def get_spectrogram( n_fft = 400, win_len = None, hop_len = None, power = 2.0, ): waveform, _ = get_speech_sample() spectrogram = T.Spectrogram( n_fft=n_fft, win_length=win_len, hop_length=hop_len, center=True, pad_mode="reflect", power=power, ) return spectrogram(waveform) def plot_pitch(waveform, sample_rate, pitch): figure, axis = plt.subplots(1, 1) axis.set_title("Pitch Feature") axis.grid(True) end_time = waveform.shape[1] / sample_rate time_axis = torch.linspace(0, end_time, waveform.shape[1]) axis.plot(time_axis, waveform[0], linewidth=1, color='gray', alpha=0.3) axis2 = axis.twinx() time_axis = torch.linspace(0, end_time, pitch.shape[1]) ln2 = axis2.plot( time_axis, pitch[0], linewidth=2, label='Pitch', color='green') axis2.legend(loc=0) plt.show(block=False) def plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc): figure, axis = plt.subplots(1, 1) axis.set_title("Kaldi Pitch Feature") axis.grid(True) end_time = waveform.shape[1] / sample_rate time_axis = torch.linspace(0, end_time, waveform.shape[1]) axis.plot(time_axis, waveform[0], linewidth=1, color='gray', alpha=0.3) time_axis = torch.linspace(0, end_time, pitch.shape[1]) ln1 = axis.plot(time_axis, pitch[0], linewidth=2, label='Pitch', color='green') axis.set_ylim((-1.3, 1.3)) axis2 = axis.twinx() time_axis = torch.linspace(0, end_time, nfcc.shape[1]) ln2 = axis2.plot( time_axis, nfcc[0], linewidth=2, label='NFCC', color='blue', linestyle='--') lns = ln1 + ln2 labels = [l.get_label() for l in lns] axis.legend(lns, labels, loc=0) plt.show(block=False) DEFAULT_OFFSET = 201 SWEEP_MAX_SAMPLE_RATE = 48000 DEFAULT_LOWPASS_FILTER_WIDTH = 6 DEFAULT_ROLLOFF = 0.99 DEFAULT_RESAMPLING_METHOD = 'sinc_interpolation' def _get_log_freq(sample_rate, max_sweep_rate, offset): """Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2] offset is used to avoid negative infinity `log(offset + x)`. 
""" half = sample_rate // 2 start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2) return torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset def _get_inverse_log_freq(freq, sample_rate, offset): """Find the time where the given frequency is given by _get_log_freq""" half = sample_rate // 2 return sample_rate * (math.log(1 + freq / offset) / math.log(1 + half / offset)) def _get_freq_ticks(sample_rate, offset, f_max): # Given the original sample rate used for generating the sweep, # find the x-axis value where the log-scale major frequency values fall in time, freq = [], [] for exp in range(2, 5): for v in range(1, 10): f = v * 10 ** exp if f < sample_rate // 2: t = _get_inverse_log_freq(f, sample_rate, offset) / sample_rate time.append(t) freq.append(f) t_max = _get_inverse_log_freq(f_max, sample_rate, offset) / sample_rate time.append(t_max) freq.append(f_max) return time, freq def plot_sweep(waveform, sample_rate, title, max_sweep_rate=SWEEP_MAX_SAMPLE_RATE, offset=DEFAULT_OFFSET): x_ticks = [100, 500, 1000, 5000, 10000, 20000, max_sweep_rate // 2] y_ticks = [1000, 5000, 10000, 20000, sample_rate//2] time, freq = _get_freq_ticks(max_sweep_rate, offset, sample_rate // 2) freq_x = [f if f in x_ticks and f <= max_sweep_rate // 2 else None for f in freq] freq_y = [f for f in freq if f >= 1000 and f in y_ticks and f <= sample_rate // 2] figure, axis = plt.subplots(1, 1) axis.specgram(waveform[0].numpy(), Fs=sample_rate) plt.xticks(time, freq_x) plt.yticks(freq_y, freq_y) axis.set_xlabel('Original Signal Frequency (Hz, log scale)') axis.set_ylabel('Waveform Frequency (Hz)') axis.xaxis.grid(True, alpha=0.67) axis.yaxis.grid(True, alpha=0.67) figure.suptitle(f'{title} (sample rate: {sample_rate} Hz)') plt.show(block=True) def get_sine_sweep(sample_rate, offset=DEFAULT_OFFSET): max_sweep_rate = sample_rate freq = _get_log_freq(sample_rate, max_sweep_rate, offset) delta = 2 * math.pi * freq / sample_rate cummulative = torch.cumsum(delta, dim=0) signal = torch.sin(cummulative).unsqueeze(dim=0) return signal def benchmark_resample( method, waveform, sample_rate, resample_rate, lowpass_filter_width=DEFAULT_LOWPASS_FILTER_WIDTH, rolloff=DEFAULT_ROLLOFF, resampling_method=DEFAULT_RESAMPLING_METHOD, beta=None, librosa_type=None, iters=5 ): if method == "functional": begin = time.time() for _ in range(iters): F.resample(waveform, sample_rate, resample_rate, lowpass_filter_width=lowpass_filter_width, rolloff=rolloff, resampling_method=resampling_method) elapsed = time.time() - begin return elapsed / iters elif method == "transforms": resampler = T.Resample(sample_rate, resample_rate, lowpass_filter_width=lowpass_filter_width, rolloff=rolloff, resampling_method=resampling_method, dtype=waveform.dtype) begin = time.time() for _ in range(iters): resampler(waveform) elapsed = time.time() - begin return elapsed / iters elif method == "librosa": waveform_np = waveform.squeeze().numpy() begin = time.time() for _ in range(iters): librosa.resample(waveform_np, sample_rate, resample_rate, res_type=librosa_type) elapsed = time.time() - begin return elapsed / iters ``` Audio I/O ========= torchaudio integrates ``libsox`` and provides a rich set of audio I/O. Quering audio metadata ---------------------- ``torchaudio.info`` function fetches metadata of audio. You can provide a path-like object or file-like object. 
```python metadata = torchaudio.info(SAMPLE_WAV_PATH) print(metadata) ``` Where - ``sample_rate`` is the sampling rate of the audio - ``num_channels`` is the number of channels - ``num_frames`` is the number of frames per channel - ``bits_per_sample`` is bit depth - ``encoding`` is the sample coding format The values ``encoding`` can take are one of the following - ``"PCM_S"``: Signed integer linear PCM - ``"PCM_U"``: Unsigned integer linear PCM - ``"PCM_F"``: Floating point linear PCM - ``"FLAC"``: Flac, `Free Lossless Audio Codec <https://xiph.org/flac/>`__ - ``"ULAW"``: Mu-law, [`wikipedia <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`__] - ``"ALAW"``: A-law [`wikipedia <https://en.wikipedia.org/wiki/A-law_algorithm>`__] - ``"MP3"`` : MP3, MPEG-1 Audio Layer III - ``"VORBIS"``: OGG Vorbis [`xiph.org <https://xiph.org/vorbis/>`__] - ``"AMR_NB"``: Adaptive Multi-Rate [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_audio_codec>`__] - ``"AMR_WB"``: Adaptive Multi-Rate Wideband [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_Wideband>`__] - ``"OPUS"``: Opus [`opus-codec.org <https://opus-codec.org/>`__] - ``"GSM"``: GSM-FR [`wikipedia <https://en.wikipedia.org/wiki/Full_Rate>`__] - ``"UNKNOWN"`` None of avobe **Note** - ``bits_per_sample`` can be ``0`` for formats with compression and/or variable bit rate. (such as mp3) - ``num_frames`` can be ``0`` for GSM-FR format. ```python metadata = torchaudio.info(SAMPLE_MP3_PATH) print(metadata) metadata = torchaudio.info(SAMPLE_GSM_PATH) print(metadata) ``` Querying file-like object ~~~~~~~~~~~~~~~~~~~~~~~~~ ``info`` function works on file-like object as well. ```python print("Source:", SAMPLE_WAV_URL) with requests.get(SAMPLE_WAV_URL, stream=True) as response: metadata = torchaudio.info(response.raw) print(metadata) ``` **Note** When passing file-like object, ``info`` function does not read all the data, instead it only reads the beginning portion of data. Therefore, depending on the audio format, it cannot get the correct metadata, including the format itself. The following example illustrates this. - Use ``format`` argument to tell what audio format it is. - The returned metadata has ``num_frames = 0`` ```python print("Source:", SAMPLE_MP3_URL) with requests.get(SAMPLE_MP3_URL, stream=True) as response: metadata = torchaudio.info(response.raw, format="mp3") print(f"Fetched {response.raw.tell()} bytes.") print(metadata) ``` Loading audio data into Tensor ------------------------------ To load audio data, you can use ``torchaudio.load``. This function accepts path-like object and file-like object. The returned value is a tuple of waveform (``Tensor``) and sample rate (``int``). By default, the resulting tensor object has ``dtype=torch.float32`` and its value range is normalized within ``[-1.0, 1.0]``. For the list of supported format, please refer to `the torchaudio documentation <https://pytorch.org/audio>`__. ```python waveform, sample_rate = torchaudio.load(SAMPLE_WAV_SPEECH_PATH) print_stats(waveform, sample_rate=sample_rate) plot_waveform(waveform, sample_rate) plot_specgram(waveform, sample_rate) play_audio(waveform, sample_rate) ``` Loading from file-like object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``torchaudio``\ ’s I/O functions now support file-like object. This allows to fetch audio data and decode at the same time from the location other than local file system. The following examples illustrates this. 
```python # Load audio data as HTTP request with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response: waveform, sample_rate = torchaudio.load(response.raw) plot_specgram(waveform, sample_rate, title="HTTP datasource") # Load audio from tar file with tarfile.open(SAMPLE_TAR_PATH, mode='r') as tarfile_: fileobj = tarfile_.extractfile(SAMPLE_TAR_ITEM) waveform, sample_rate = torchaudio.load(fileobj) plot_specgram(waveform, sample_rate, title="TAR file") # Load audio from S3 client = boto3.client('s3', config=Config(signature_version=UNSIGNED)) response = client.get_object(Bucket=S3_BUCKET, Key=S3_KEY) waveform, sample_rate = torchaudio.load(response['Body']) plot_specgram(waveform, sample_rate, title="From S3") ``` Tips on slicing ~~~~~~~~~~~~~~~ Providing ``num_frames`` and ``frame_offset`` arguments will slice the resulting Tensor object while decoding. The same result can be achieved using the regular Tensor slicing, (i.e. ``waveform[:, frame_offset:frame_offset+num_frames]``) however, providing ``num_frames`` and ``frame_offset`` arguments is more efficient. This is because the function will stop data acquisition and decoding once it finishes decoding the requested frames. This is advantageous when the audio data are transfered via network as the data transfer will stop as soon as the necessary amount of data is fetched. The following example illustrates this; ```python # Illustration of two different decoding methods. # The first one will fetch all the data and decode them, while # the second one will stop fetching data once it completes decoding. # The resulting waveforms are identical. frame_offset, num_frames = 16000, 16000 # Fetch and decode the 1 - 2 seconds print("Fetching all the data...") with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response: waveform1, sample_rate1 = torchaudio.load(response.raw) waveform1 = waveform1[:, frame_offset:frame_offset+num_frames] print(f" - Fetched {response.raw.tell()} bytes") print("Fetching until the requested frames are available...") with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response: waveform2, sample_rate2 = torchaudio.load( response.raw, frame_offset=frame_offset, num_frames=num_frames) print(f" - Fetched {response.raw.tell()} bytes") print("Checking the resulting waveform ... ", end="") assert (waveform1 == waveform2).all() print("matched!") ``` Saving audio to file -------------------- To save audio data in the formats intepretable by common applications, you can use ``torchaudio.save``. This function accepts path-like object and file-like object. When passing file-like object, you also need to provide ``format`` argument so that the function knows which format it should be using. In case of path-like object, the function will detemine the format based on the extension. If you are saving to a file without extension, you need to provide ``format`` argument. When saving as WAV format, the default encoding for ``float32`` Tensor is 32-bit floating-point PCM. You can provide ``encoding`` and ``bits_per_sample`` argument to change this. For example, to save data in 16 bit signed integer PCM, you can do the following. **Note** Saving data in encodings with lower bit depth reduces the resulting file size but loses precision. ```python waveform, sample_rate = get_sample() print_stats(waveform, sample_rate=sample_rate) # Save without any encoding option. 
# The function will pick up the encoding which # the provided data fit path = "save_example_default.wav" torchaudio.save(path, waveform, sample_rate) inspect_file(path) # Save as 16-bit signed integer Linear PCM # The resulting file occupies half the storage but loses precision path = "save_example_PCM_S16.wav" torchaudio.save( path, waveform, sample_rate, encoding="PCM_S", bits_per_sample=16) inspect_file(path) ``` ``torchaudio.save`` can also handle other formats. To name a few; ```python waveform, sample_rate = get_sample(resample=8000) formats = [ "mp3", "flac", "vorbis", "sph", "amb", "amr-nb", "gsm", ] for format in formats: path = f"save_example.{format}" torchaudio.save(path, waveform, sample_rate, format=format) inspect_file(path) ``` Saving to file-like object ~~~~~~~~~~~~~~~~~~~~~~~~~~ Similar to the other I/O functions, you can save audio into file-like object. When saving to file-like object, ``format`` argument is required. ```python waveform, sample_rate = get_sample() # Saving to Bytes buffer buffer_ = io.BytesIO() torchaudio.save(buffer_, waveform, sample_rate, format="wav") buffer_.seek(0) print(buffer_.read(16)) ``` Resampling ========== To resample an audio waveform from one freqeuncy to another, you can use ``transforms.Resample`` or ``functional.resample``. ``transforms.Resample`` precomputes and caches the kernel used for resampling, while ``functional.resample`` computes it on the fly, so using ``transforms.Resample`` will result in a speedup if resampling multiple waveforms using the same parameters (see Benchmarking section). Both resampling methods use `bandlimited sinc interpolation <https://ccrma.stanford.edu/~jos/resample/>`__ to compute signal values at arbitrary time steps. The implementation involves convolution, so we can take advantage of GPU / multithreading for performance improvements. When using resampling in multiple subprocesses, such as data loading with multiple worker processes, your application might create more threads than your system can handle efficiently. Setting ``torch.set_num_threads(1)`` might help in this case. Because a finite number of samples can only represent a finite number of frequencies, resampling does not produce perfect results, and a variety of parameters can be used to control for its quality and computational speed. We demonstrate these properties through resampling a logarithmic sine sweep, which is a sine wave that increases exponentially in frequency over time. The spectrograms below show the frequency representation of the signal, where the x-axis labels correspond to the frequency of the original waveform (in log scale), the y-axis corresponds to the frequency of the plotted waveform, and the color intensity refers to amplitude. ```python sample_rate = 48000 resample_rate = 32000 waveform = get_sine_sweep(sample_rate) plot_sweep(waveform, sample_rate, title="Original Waveform") play_audio(waveform, sample_rate) resampler = T.Resample(sample_rate, resample_rate, dtype=waveform.dtype) resampled_waveform = resampler(waveform) plot_sweep(resampled_waveform, resample_rate, title="Resampled Waveform") play_audio(waveform, sample_rate) ``` Controling resampling quality with parameters --------------------------------------------- Lowpass filter width ~~~~~~~~~~~~~~~~~~~~ Because the filter used for interpolation extends infinitely, the ``lowpass_filter_width`` parameter is used to control for the width of the filter to use to window the interpolation. 
It is also referred to as the number of zero crossings, since the interpolation passes through zero at every time unit. Using a larger ``lowpass_filter_width`` provides a sharper, more precise filter, but is more computationally expensive. ```python sample_rate = 48000 resample_rate = 32000 resampled_waveform = F.resample(waveform, sample_rate, resample_rate, lowpass_filter_width=6) plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=6") resampled_waveform = F.resample(waveform, sample_rate, resample_rate, lowpass_filter_width=128) plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=128") ``` Rolloff ~~~~~~~ The ``rolloff`` parameter is represented as a fraction of the Nyquist frequency, which is the maximal frequency representable by a given finite sample rate. ``rolloff`` determines the lowpass filter cutoff and controls the degree of aliasing, which takes place when frequencies higher than the Nyquist are mapped to lower frequencies. A lower rolloff will therefore reduce the amount of aliasing, but it will also reduce some of the higher frequencies. ```python sample_rate = 48000 resample_rate = 32000 resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.99) plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.99") resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.8) plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.8") ``` Window function ~~~~~~~~~~~~~~~ By default, torchaudio’s resample uses the Hann window filter, which is a weighted cosine function. It additionally supports the Kaiser window, which is a near optimal window function that contains an additional ``beta`` parameter that allows for the design of the smoothness of the filter and width of impulse. This can be controlled using the ``resampling_method`` parameter. 
```python sample_rate = 48000 resample_rate = 32000 resampled_waveform = F.resample(waveform, sample_rate, resample_rate, resampling_method="sinc_interpolation") plot_sweep(resampled_waveform, resample_rate, title="Hann Window Default") resampled_waveform = F.resample(waveform, sample_rate, resample_rate, resampling_method="kaiser_window") plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Default") ``` Comparison against librosa -------------------------- torchaudio’s resample function can be used to produce results similar to that of librosa (resampy)’s kaiser window resampling, with some noise ```python sample_rate = 48000 resample_rate = 32000 ### kaiser_best resampled_waveform = F.resample( waveform, sample_rate, resample_rate, lowpass_filter_width=64, rolloff=0.9475937167399596, resampling_method="kaiser_window", beta=14.769656459379492 ) plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Best (torchaudio)") librosa_resampled_waveform = torch.from_numpy( librosa.resample(waveform.squeeze().numpy(), sample_rate, resample_rate, res_type='kaiser_best')).unsqueeze(0) plot_sweep(librosa_resampled_waveform, resample_rate, title="Kaiser Window Best (librosa)") mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item() print("torchaudio and librosa kaiser best MSE:", mse) ### kaiser_fast resampled_waveform = F.resample( waveform, sample_rate, resample_rate, lowpass_filter_width=16, rolloff=0.85, resampling_method="kaiser_window", beta=8.555504641634386 ) plot_specgram(resampled_waveform, resample_rate, title="Kaiser Window Fast (torchaudio)") librosa_resampled_waveform = torch.from_numpy( librosa.resample(waveform.squeeze().numpy(), sample_rate, resample_rate, res_type='kaiser_fast')).unsqueeze(0) plot_sweep(librosa_resampled_waveform, resample_rate, title="Kaiser Window Fast (librosa)") mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item() print("torchaudio and librosa kaiser fast MSE:", mse) ``` Performance Benchmarking ------------------------ Below are benchmarks for downsampling and upsampling waveforms between two pairs of sampling rates. We demonstrate the performance implications that the ``lowpass_filter_wdith``, window type, and sample rates can have. Additionally, we provide a comparison against ``librosa``\ ’s ``kaiser_best`` and ``kaiser_fast`` using their corresponding parameters in ``torchaudio``. To elaborate on the results: - a larger ``lowpass_filter_width`` results in a larger resampling kernel, and therefore increases computation time for both the kernel computation and convolution - using ``kaiser_window`` results in longer computation times than the default ``sinc_interpolation`` because it is more complex to compute the intermediate window values - a large GCD between the sample and resample rate will result in a simplification that allows for a smaller kernel and faster kernel computation. 
```python
configs = {
    "downsample (48 -> 44.1 kHz)": [48000, 44100],
    "downsample (16 -> 8 kHz)": [16000, 8000],
    "upsample (44.1 -> 48 kHz)": [44100, 48000],
    "upsample (8 -> 16 kHz)": [8000, 16000],
}

for label in configs:
    times, rows = [], []
    sample_rate = configs[label][0]
    resample_rate = configs[label][1]
    waveform = get_sine_sweep(sample_rate)

    # sinc 64 zero-crossings
    f_time = benchmark_resample("functional", waveform, sample_rate, resample_rate, lowpass_filter_width=64)
    t_time = benchmark_resample("transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=64)
    times.append([None, 1000 * f_time, 1000 * t_time])
    rows.append("sinc (width 64)")

    # sinc 16 zero-crossings
    f_time = benchmark_resample("functional", waveform, sample_rate, resample_rate, lowpass_filter_width=16)
    t_time = benchmark_resample("transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=16)
    times.append([None, 1000 * f_time, 1000 * t_time])
    rows.append("sinc (width 16)")

    # kaiser best
    lib_time = benchmark_resample("librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_best")
    f_time = benchmark_resample(
        "functional", waveform, sample_rate, resample_rate, lowpass_filter_width=64,
        rolloff=0.9475937167399596, resampling_method="kaiser_window",
        beta=14.769656459379492)
    t_time = benchmark_resample(
        "transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=64,
        rolloff=0.9475937167399596, resampling_method="kaiser_window",
        beta=14.769656459379492)
    times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
    rows.append("kaiser_best")

    # kaiser fast
    lib_time = benchmark_resample("librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_fast")
    f_time = benchmark_resample(
        "functional", waveform, sample_rate, resample_rate, lowpass_filter_width=16,
        rolloff=0.85, resampling_method="kaiser_window",
        beta=8.555504641634386)
    t_time = benchmark_resample(
        "transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=16,
        rolloff=0.85, resampling_method="kaiser_window",
        beta=8.555504641634386)
    times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
    rows.append("kaiser_fast")

    df = pd.DataFrame(times, columns=["librosa", "functional", "transforms"], index=rows)
    df.columns = pd.MultiIndex.from_product([[f"{label} time (ms)"], df.columns])
    display(df.round(2))
```

Data Augmentation
=================

``torchaudio`` provides a variety of ways to augment audio data.

Applying effects and filtering
------------------------------

The ``torchaudio.sox_effects`` module provides ways to apply filters like the ``sox`` command directly on Tensor objects and file-object audio sources.

There are two functions for this:

- ``torchaudio.sox_effects.apply_effects_tensor`` for applying effects on a Tensor
- ``torchaudio.sox_effects.apply_effects_file`` for applying effects on other audio sources

Both functions take effects in the form of ``List[List[str]]``. This mostly corresponds to how the ``sox`` command works, but one caveat is that the ``sox`` command adds some effects automatically, whereas torchaudio’s implementation does not.

For the list of available effects, please refer to `the sox documentation <http://sox.sourceforge.net/sox.html>`__.

**Tip** If you need to load and resample your audio data on-the-fly, then you can use ``torchaudio.sox_effects.apply_effects_file`` with the ``"rate"`` effect.

**Note** ``apply_effects_file`` accepts a file-like object or a path-like object.
Similar to ``torchaudio.load``, when the audio format cannot be detected from either the file extension or the header, you can provide the ``format`` argument to tell the function what format the audio source is in.

**Note** This process is not differentiable.

```python
# Load the data
waveform1, sample_rate1 = get_sample(resample=16000)

# Define effects
effects = [
  ["lowpass", "-1", "300"],  # apply single-pole lowpass filter
  ["speed", "0.8"],  # reduce the speed
                     # This only changes sample rate, so it is necessary to
                     # add `rate` effect with original sample rate after this.
  ["rate", f"{sample_rate1}"],
  ["reverb", "-w"],  # Reverberation gives a dramatic feeling
]

# Apply effects
waveform2, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(
    waveform1, sample_rate1, effects)

plot_waveform(waveform1, sample_rate1, title="Original", xlim=(-.1, 3.2))
plot_waveform(waveform2, sample_rate2, title="Effects Applied", xlim=(-.1, 3.2))
print_stats(waveform1, sample_rate=sample_rate1, src="Original")
print_stats(waveform2, sample_rate=sample_rate2, src="Effects Applied")
```

Note that the number of frames and the number of channels differ from those of the original after the effects are applied. Let’s listen to the audio. Doesn’t it sound more dramatic?

```python
plot_specgram(waveform1, sample_rate1, title="Original", xlim=(0, 3.04))
play_audio(waveform1, sample_rate1)
plot_specgram(waveform2, sample_rate2, title="Effects Applied", xlim=(0, 3.04))
play_audio(waveform2, sample_rate2)
```

Simulating room reverberation
-----------------------------

`Convolution reverb <https://en.wikipedia.org/wiki/Convolution_reverb>`__ is a technique used to make clean audio sound as if it had been recorded in a different environment. Using a Room Impulse Response (RIR), we can make clean speech sound as if it had been uttered in a conference room.

For this process, we need RIR data. The following data come from the VOiCES dataset, but you can record your own: just turn on a microphone and clap your hands.

```python
sample_rate = 8000

rir_raw, _ = get_rir_sample(resample=sample_rate)

plot_waveform(rir_raw, sample_rate, title="Room Impulse Response (raw)", ylim=None)
plot_specgram(rir_raw, sample_rate, title="Room Impulse Response (raw)")
play_audio(rir_raw, sample_rate)
```

First, we need to clean up the RIR. We extract the main impulse, normalize the signal power, then flip the time axis.

```python
rir = rir_raw[:, int(sample_rate*1.01):int(sample_rate*1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])

print_stats(rir)
plot_waveform(rir, sample_rate, title="Room Impulse Response", ylim=None)
```

Then we convolve the speech signal with the RIR filter.

```python
speech, _ = get_speech_sample(resample=sample_rate)

speech_ = torch.nn.functional.pad(speech, (rir.shape[1]-1, 0))
augmented = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]

plot_waveform(speech, sample_rate, title="Original", ylim=None)
plot_waveform(augmented, sample_rate, title="RIR Applied", ylim=None)

plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)

plot_specgram(augmented, sample_rate, title="RIR Applied")
play_audio(augmented, sample_rate)
```

Adding background noise
-----------------------

To add background noise to audio data, you can simply add the audio Tensor and the noise Tensor. A common way to adjust the intensity of the noise is to change the Signal-to-Noise Ratio (SNR).
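The defining formulas are restated just below; as a quick reference, here is a small helper of ours (not part of the tutorial) that inverts the dB definition. Note that SNR is a power ratio, so amplitude scaling uses its square root:

```python
import math

def snr_db_to_power_ratio(snr_db: float) -> float:
    # Inverse of SNR_dB = 10 * log10(SNR):  SNR = 10 ** (SNR_dB / 10)
    return 10 ** (snr_db / 10)

def snr_db_to_amplitude_ratio(snr_db: float) -> float:
    # Amplitudes scale with the square root of the power ratio.
    return math.sqrt(snr_db_to_power_ratio(snr_db))

assert math.isclose(snr_db_to_power_ratio(20), 100.0)
assert math.isclose(snr_db_to_amplitude_ratio(20), 10.0)
```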
[`wikipedia <https://en.wikipedia.org/wiki/Signal-to-noise_ratio>`__]

\begin{align}\mathrm{SNR} = \frac{P_\mathrm{signal}}{P_\mathrm{noise}}\end{align}

\begin{align}\mathrm{SNR_{dB}} = 10 \log_{10}\left(\mathrm{SNR}\right)\end{align}

```python
sample_rate = 8000
speech, _ = get_speech_sample(resample=sample_rate)

noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, :speech.shape[1]]

plot_waveform(noise, sample_rate, title="Background noise")
plot_specgram(noise, sample_rate, title="Background noise")
play_audio(noise, sample_rate)

speech_power = speech.norm(p=2)
noise_power = noise.norm(p=2)

for snr_db in [20, 10, 3]:
    snr = 10 ** (snr_db / 10)  # invert SNR_dB = 10 * log10(SNR)
    # The norms above are amplitudes, so scale by the square root of the power ratio.
    scale = math.sqrt(snr) * noise_power / speech_power
    noisy_speech = (scale * speech + noise) / 2
    plot_waveform(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
    plot_specgram(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
    play_audio(noisy_speech, sample_rate)
```

Applying codec to Tensor object
-------------------------------

``torchaudio.functional.apply_codec`` can apply codecs to a Tensor object.

**Note** This process is not differentiable.

```python
waveform, sample_rate = get_speech_sample(resample=8000)

plot_specgram(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)

configs = [
    ({"format": "wav", "encoding": 'ULAW', "bits_per_sample": 8}, "8 bit mu-law"),
    ({"format": "gsm"}, "GSM-FR"),
    ({"format": "mp3", "compression": -9}, "MP3"),
    ({"format": "vorbis", "compression": -1}, "Vorbis"),
]
for param, title in configs:
    augmented = F.apply_codec(waveform, sample_rate, **param)
    plot_specgram(augmented, sample_rate, title=title)
    play_audio(augmented, sample_rate)
```

Simulating a phone recording
----------------------------

Combining the previous techniques, we can simulate audio that sounds like a person talking over a phone in an echoey room with people talking in the background.

```python
sample_rate = 16000
speech, _ = get_speech_sample(resample=sample_rate)

plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)

# Apply RIR
rir, _ = get_rir_sample(resample=sample_rate, processed=True)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1]-1, 0))
speech = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]

plot_specgram(speech, sample_rate, title="RIR Applied")
play_audio(speech, sample_rate)

# Add background noise
# Because the noise is recorded in the actual environment, we consider that
# the noise contains the acoustic features of the environment. Therefore, we add
# the noise after RIR application.
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, :speech.shape[1]]

snr_db = 8
# Square root of the power ratio, since the norms are amplitudes.
scale = math.sqrt(10 ** (snr_db / 10)) * noise.norm(p=2) / speech.norm(p=2)
speech = (scale * speech + noise) / 2

plot_specgram(speech, sample_rate, title="BG noise added")
play_audio(speech, sample_rate)

# Apply filtering and change sample rate
speech, sample_rate = torchaudio.sox_effects.apply_effects_tensor(
    speech,
    sample_rate,
    effects=[
        ["lowpass", "4000"],
        ["compand", "0.02,0.05", "-60,-60,-30,-10,-20,-8,-5,-8,-2,-8", "-8", "-7", "0.05"],
        ["rate", "8000"],
    ],
)

plot_specgram(speech, sample_rate, title="Filtered")
play_audio(speech, sample_rate)

# Apply telephony codec
speech = F.apply_codec(speech, sample_rate, format="gsm")

plot_specgram(speech, sample_rate, title="GSM Codec Applied")
play_audio(speech, sample_rate)
```

Feature Extractions
===================

``torchaudio`` implements feature extractions commonly used in the audio domain.
They are available in ``torchaudio.functional`` and ``torchaudio.transforms``. ``functional`` module implements features as a stand alone functions. They are stateless. ``transforms`` module implements features in object-oriented manner, using implementations from ``functional`` and ``torch.nn.Module``. Because all the transforms are subclass of ``torch.nn.Module``, they can be serialized using TorchScript. For the complete list of available features, please refer to the documentation. In this tutorial, we will look into conversion between time domain and frequency domain (``Spectrogram``, ``GriffinLim``, ``MelSpectrogram``) and augmentation technique called SpecAugment. Spectrogram ----------- To get the frequency representation of audio signal, you can use ``Spectrogram`` transform. ```python waveform, sample_rate = get_speech_sample() n_fft = 1024 win_length = None hop_length = 512 # define transformation spectrogram = T.Spectrogram( n_fft=n_fft, win_length=win_length, hop_length=hop_length, center=True, pad_mode="reflect", power=2.0, ) # Perform transformation spec = spectrogram(waveform) print_stats(spec) plot_spectrogram(spec[0], title='torchaudio') ``` GriffinLim ---------- To recover a waveform from spectrogram, you can use ``GriffinLim``. ```python torch.random.manual_seed(0) waveform, sample_rate = get_speech_sample() plot_waveform(waveform, sample_rate, title="Original") play_audio(waveform, sample_rate) n_fft = 1024 win_length = None hop_length = 512 spec = T.Spectrogram( n_fft=n_fft, win_length=win_length, hop_length=hop_length, )(waveform) griffin_lim = T.GriffinLim( n_fft=n_fft, win_length=win_length, hop_length=hop_length, ) waveform = griffin_lim(spec) plot_waveform(waveform, sample_rate, title="Reconstructed") play_audio(waveform, sample_rate) ``` Mel Filter Bank --------------- ``torchaudio.functional.create_fb_matrix`` can generate the filter bank to convert frequency bins to Mel-scale bins. Since this function does not require input audio/features, there is no equivalent transform in ``torchaudio.transforms``. ```python n_fft = 256 n_mels = 64 sample_rate = 6000 mel_filters = F.create_fb_matrix( int(n_fft // 2 + 1), n_mels=n_mels, f_min=0., f_max=sample_rate/2., sample_rate=sample_rate, norm='slaney' ) plot_mel_fbank(mel_filters, "Mel Filter Bank - torchaudio") ``` Comparison against librosa ~~~~~~~~~~~~~~~~~~~~~~~~~~ As a comparison, here is the equivalent way to get the mel filter bank with ``librosa``. ```python mel_filters_librosa = librosa.filters.mel( sample_rate, n_fft, n_mels=n_mels, fmin=0., fmax=sample_rate/2., norm='slaney', htk=True, ).T plot_mel_fbank(mel_filters_librosa, "Mel Filter Bank - librosa") mse = torch.square(mel_filters - mel_filters_librosa).mean().item() print('Mean Square Difference: ', mse) ``` MelSpectrogram -------------- Mel-scale spectrogram is a combination of Spectrogram and mel scale conversion. In ``torchaudio``, there is a transform ``MelSpectrogram`` which is composed of ``Spectrogram`` and ``MelScale``. 
```python waveform, sample_rate = get_speech_sample() n_fft = 1024 win_length = None hop_length = 512 n_mels = 128 mel_spectrogram = T.MelSpectrogram( sample_rate=sample_rate, n_fft=n_fft, win_length=win_length, hop_length=hop_length, center=True, pad_mode="reflect", power=2.0, norm='slaney', onesided=True, n_mels=n_mels, mel_scale="htk", ) melspec = mel_spectrogram(waveform) plot_spectrogram( melspec[0], title="MelSpectrogram - torchaudio", ylabel='mel freq') ``` Comparison against librosa ~~~~~~~~~~~~~~~~~~~~~~~~~~ As a comparison, here is the equivalent way to get Mel-scale spectrogram with ``librosa``. ```python melspec_librosa = librosa.feature.melspectrogram( waveform.numpy()[0], sr=sample_rate, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=True, pad_mode="reflect", power=2.0, n_mels=n_mels, norm='slaney', htk=True, ) plot_spectrogram( melspec_librosa, title="MelSpectrogram - librosa", ylabel='mel freq') mse = torch.square(melspec - melspec_librosa).mean().item() print('Mean Square Difference: ', mse) ``` MFCC ---- ```python waveform, sample_rate = get_speech_sample() n_fft = 2048 win_length = None hop_length = 512 n_mels = 256 n_mfcc = 256 mfcc_transform = T.MFCC( sample_rate=sample_rate, n_mfcc=n_mfcc, melkwargs={ 'n_fft': n_fft, 'n_mels': n_mels, 'hop_length': hop_length, 'mel_scale': 'htk', } ) mfcc = mfcc_transform(waveform) plot_spectrogram(mfcc[0]) ``` Comparing against librosa ~~~~~~~~~~~~~~~~~~~~~~~~~ ```python melspec = librosa.feature.melspectrogram( y=waveform.numpy()[0], sr=sample_rate, n_fft=n_fft, win_length=win_length, hop_length=hop_length, n_mels=n_mels, htk=True, norm=None) mfcc_librosa = librosa.feature.mfcc( S=librosa.core.spectrum.power_to_db(melspec), n_mfcc=n_mfcc, dct_type=2, norm='ortho') plot_spectrogram(mfcc_librosa) mse = torch.square(mfcc - mfcc_librosa).mean().item() print('Mean Square Difference: ', mse) ``` Pitch ----- ```python waveform, sample_rate = get_speech_sample() pitch = F.detect_pitch_frequency(waveform, sample_rate) plot_pitch(waveform, sample_rate, pitch) play_audio(waveform, sample_rate) ``` Kaldi Pitch (beta) ------------------ Kaldi Pitch feature [1] is pitch detection mechanism tuned for ASR application. This is a beta feature in torchaudio, and only ``functional`` form is available. 1. A pitch extraction algorithm tuned for automatic speech recognition Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S. Khudanpur 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. [`abstract <https://ieeexplore.ieee.org/document/6854049>`__], [`paper <https://danielpovey.com/files/2014_icassp_pitch.pdf>`__] ```python waveform, sample_rate = get_speech_sample(resample=16000) pitch_feature = F.compute_kaldi_pitch(waveform, sample_rate) pitch, nfcc = pitch_feature[..., 0], pitch_feature[..., 1] plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc) play_audio(waveform, sample_rate) ``` Feature Augmentation ==================== SpecAugment ----------- `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__ is a popular augmentation technique applied on spectrogram. ``torchaudio`` implements ``TimeStrech``, ``TimeMasking`` and ``FrequencyMasking``. 
TimeStretch
~~~~~~~~~~~

```python
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()

rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(F.complex_norm(spec_[0]), title=f"Stretched x{rate}", aspect='equal', xmax=304)

plot_spectrogram(F.complex_norm(spec[0]), title="Original", aspect='equal', xmax=304)

rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(F.complex_norm(spec_[0]), title=f"Stretched x{rate}", aspect='equal', xmax=304)
```

TimeMasking
~~~~~~~~~~~

```python
torch.random.manual_seed(4)

spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")

masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)

plot_spectrogram(spec[0], title="Masked along time axis")
```

FrequencyMasking
~~~~~~~~~~~~~~~~

```python
torch.random.manual_seed(4)

spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")

masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)

plot_spectrogram(spec[0], title="Masked along frequency axis")
```

Datasets
========

``torchaudio`` provides easy access to common, publicly accessible datasets. Please check out the official documentation for the list of available datasets.

Here, we take the ``YESNO`` dataset and look into how to use it.

```python
YESNO_DOWNLOAD_PROCESS.join()

dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)

for i in [1, 3, 5]:
    waveform, sample_rate, label = dataset[i]
    plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
    play_audio(waveform, sample_rate)
```
Formal statement is:
lemma trivial_limit_at_right_real [simp]: "\<not> trivial_limit (at_right x)"
  for x :: "'a::{no_top,dense_order,linorder_topology}"
Informal statement is: For any point $x$ in a dense linear order with no top element (for example, any real number), the filter `at_right x` of punctured right-neighborhoods of $x$ is not trivial.
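An informal sketch of why this holds (our addition, not part of the source statement pair), rendered in LaTeX:

```latex
% Our informal sketch: every basic right-neighborhood of x is nonempty.
% no_top:      \forall x.\ \exists z.\ x < z
% dense_order: x < z \implies \exists y.\ x < y < z
\[
  \forall x.\ \forall z > x.\ (x, z) \neq \emptyset
  \quad\Longrightarrow\quad \mathrm{at\_right}\ x \neq \bot .
\]
```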
      subroutine best_theta(best,cal,srtd,hes,tdip,tvctpar,vtx,mode
     &     ,montecarlo)
C
C best_theta returns the best reconstructed angle for the scattered electron
C given the CAL, SRTD, HES and track dip angle for the electron and the
C measured vertex.
C
C INPUT:
C   Real cal(4)
C     cal(1) = x cal from elecpo
C     cal(2) = y cal from elecpo
C     cal(3) = z cal from elecpo
C     cal(4) = electron energy (not used by best_theta)
C
C   Real srtd(4)
C     srtd(1) = x from srtdelec
C     srtd(2) = y from srtdelec
C     srtd(3) = z from srtdelec (not used)
C     srtd(4) = srtd mips
C       (if mips is in the range 0.4 to 1000 then srtd can be used)
C       (NO srtd mips are returned by srtdelec if less than 0.4)
C
C   Real hes(4)
C     hes(1) = x from hesclu
C     hes(2) = y from hesclu
C     hes(3) = z from hesclu (not used)
C     hes(4) = hes mips
C       (if mips is in the range 5 to 1000 then hes can be used)
C       (NO HES mips are returned by hesclu if less than 5 mips)
C
C   Real tdip = tracking dip value from VCTRHL
C
C   Real tvctpar = tracking angle from VCTPAR (not used yet; set to -1.)
C
C   Real vtx(3) = vertex (x,y,z) position
C
C   Integer mode = mode to run best_theta
C     mode = 1 means just calculate the best angle and quit.
C              If called twice for the same event, the calculation
C              is not re-done, but the old result is returned for speed.
C              (Recommended)
C
C     mode = 2 means do the full calculation each time.  Calculate
C              all angles (cal,hes,tdip...) and leave them in the
C              common block.  If called more than once per event,
C              recalculate each time.
C
C   Logical Montecarlo = True if montecarlo
C
C
C Author: Mike Wodarczyk  13-June-1998  Version 1.0
C
C Revision 1: Mike Wodarczyk 15-June-1998 Version 1.1
C     Modified HES crack code so that Module 12 works.
C     Modified HES Z in data and MC.
C     Modified SRTD outer edge to handle SRTD crack along
C     Rcal half edges.
C
C Revision 2: Mike Wodarczyk 18-June-1998 Version 1.2
C     Fixed Lower y srtd edge cut.
C
C Revision 3: Jonathan Scott 21-April-1999
C     Require tdip input to be tracking angle (not cot(theta))
C
      Implicit None
#include "best_theta.inc"
      Real cal(4),srtd(4),hes(4),tdip,tvctpar,vtx(3)
      Integer mode
      Logical Montecarlo
      Real best
      real dataRCALz,mcRCALz, rcalz
      parameter ( dataRCALz = -153.88 )
      parameter ( mcRCALz = -152.13 )
      real datahesz,mchesz, hesz
      parameter ( datahesz = -155.21 )  ! best position for no bias
      parameter ( mchesz = -154.14 )    ! mc hes Z
      real dataFhesz,mcFhesz, fhesz
      parameter ( dataFhesz = 227.2 )
      parameter ( mcFhesz = 227.2 )
      real datasrtdz, mcsrtdz, srtdz
      parameter ( datasrtdz = -149.31 ) ! same as HES Z shift
      parameter ( mcsrtdz = -148.25 )   ! midpoint of 2 srtd planes
      integer whatToUse
      integer useCAL, useHES, useSRTD, useVCTRHL, useVCTPAR
      parameter ( useCAL = 1 )
      parameter ( useHES = 2 )
      parameter ( useSRTD = 3 )
      parameter ( useVCTRHL = 4 )
      parameter ( useVCTPAR = 5 )
      real calradius
      real modpos
      real hesgap
C leave a gap of 2cm on BOTH SIDES of Module crack
      parameter ( hesgap = 2.0 )
      real SRTDedge
C Use SRTD away from the edge by SRTDedge cm
      parameter ( SRTDedge = 2.0 )
      real MAXMIP
      PARAMETER ( MAXMIP = 1000. ) ! stay away from high default values.

      if ( mode.eq.1.and.cal(1) .eq. savecal(1) .and. cal(2) .eq.
     &     savecal(2) ) then
C
C Don't re-calculate if the user just asks again for the best theta
C
         best = lastBestTheta
         return
      endif
C
C Save cal position as a record of the event
C
      savecal(1) = cal(1)
      savecal(2) = cal(2)

      if (montecarlo) then
         rcalz = mcrcalz
         hesz = mchesz
         fhesz = mcfhesz
         srtdz = mcsrtdz
      else
         rcalz = datarcalz
         hesz = datahesz
         fhesz = datafhesz
         srtdz = datasrtdz
      endif

      WhatToUse = useCAL        ! start by using cal
      hesexpected = 0
      hesfound = 0
      calradius = sqrt(cal(1)**2+cal(2)**2)
C -------------------------------
C See if we can use HES
C -------------------------------
C
C IN RCAL use HES over CAL when not in module cracks and
C the position is more than 20 cm from the RCAL beampipe
C (near the beampipe the hes diodes end and the SRTD
C takes over anyway.)
C
      if ( cal(3).lt.-140 .and. calRadius.gt.20.) then
C in the RCAL more than 20 cm from beampipe.
         modpos = abs(cal(1)) - 20.33/2. + hesgap
         if ( modpos - int(modpos/20.33)*20.33 .gt. 2*hesgap .or.
     &        modpos.lt.0 ) then
C
C The cal says that we are NOT IN THE MODULE CRACK so I expect there
C to be a HES MIP.
C
            hesexpected = 1
            if (hes(4).ge.5..and.hes(4).lt.MAXMIP) then
               hesfound = 1
               WhatToUse = UseHES
            endif
         endif                  ! not in crack
      endif                     ! in rcal
C
C In FCAL use the hes whenever available
C
      if ( cal(3).gt.220.and.hes(4).gt.5.and.hes(4).lt.MAXMIP) then
         WhatToUse = UseHES
      endif
C -------------------------------
C See if we can use SRTD
C
C require a cal position to be in RCAL and the SRTD mips to be o.k.
C then require that the electron is more than SRTDedge cm away from
C the srtd edge.
C
C -------------------------------
      if ( cal(3).lt.-140 .and.srtd(4).gt.0.4.and.srtd(4).lt.MAXMIP )
     &     then
         if (srtd(1).gt.(-34+srtdedge).and.srtd(1).lt.(34-srtdedge).and
     &        .(srtd(1).gt.(-10-srtdedge).and.srtd(2).gt.(-28+srtdedge)
     &        .or.srtd(1).le.(-10-srtdedge).and.srtd(2).gt.-40+srtdedge)
     &        .and.(srtd(1).ge.(10+srtdedge).and.srtd(2).lt.(40-srtdedge
     &        ).or.srtd(1).lt.(10+srtdedge).and.srtd(2).lt.28-srtdedge)
     &        )then
            WhatToUse = UseSRTD
         endif
      endif
C -------------------------------
C See if we can use VCTRHL dip angle
C
C We must be more than 80 cm away from 0,0 for there to be
C enough stereo hits for the track to give good resolution.
C -------------------------------
      if ( calRadius.gt.80) then
         WhatToUse = UseVCTRHL
      endif
C -------------------------------
C See if we can use VCTPAR angle
C
C VCTPAR angle must be in range and the VCTRHL dip requirements
C must already be satisfied.
C -------------------------------
c      if ( tvctpar.gt.0.and.tvctpar.lt.3.2.and.useVCTRHL) then
c         WhatToUse = UseVCTPAR
c      endif
C -------------------------------------
C Now I know what to use.
C calculate the best angle.
C
C if mode = 2 then calculate all angles.
C -------------------------------------
C
C Start with Elecpo
C
      if ( WhatToUse .eq. UseCal .or. mode.eq.2) then
         if ( cal(3).lt.-140 ) then
C ... USE SHIFTED RCAL
            thetacal = atan2(sqrt(cal(1)**2+cal(2)**2),(rcalz-vtx(3)))
         else
            thetacal = atan2(sqrt(cal(1)**2+cal(2)**2),(cal(3)-vtx(3)))
         endif
      endif
C
C Calculate HES theta
C
      if ( WhatToUse .eq. UseHES .or. (mode.eq.2.and.hes(4).ge.5.and
     $     .hes(4).lt.MAXMIP)) then
         if ( cal(3).lt.-140 ) then
C ... USE SHIFTED RCAL
            thetahes = atan2(sqrt(hes(1)**2+hes(2)**2),(hesz-vtx(3)))
         else
            thetahes = atan2(sqrt(hes(1)**2+hes(2)**2),(fhesz-vtx(3)))
         endif
      endif
C
C Calculate SRTD Theta
C
      if ( WhatToUse .eq. UseSRTD .or. (mode.eq.2.and.srtd(4).gt.0.4.and
     &     .srtd(4).lt.MAXMIP)) then
         thetasrtd = atan2(sqrt(srtd(1)**2+srtd(2)**2),
     &        (srtdz-vtx(3)))
      endif
C
C Calculate VCTRHL Theta
C
      if ( WhatToUse .eq. UseVCTRHL .or.(mode.eq.2.and.calradius.gt.50))
     &     then
         thetavctrhl = tdip
         if (thetaVCTRHL.lt.0) thetaVCTRHL = 3.14159 + thetaVCTRHL
      endif
C
C Calculate VCTPAR Theta
C     (not needed since VCTPAR is already an angle)

      best = -1.
      if (whatToUse .eq. useCal) best = thetacal
      if (whatToUse .eq. useHES) best = thetaHES
      if (whatToUse .eq. useSRTD) best = thetaSRTD
      if (whatToUse .eq. useVCTRHL) best = thetaVCTRHL
      if (whatToUse .eq. useVCTPAR) best = tvctpar

      lastbestTheta = best
      return
      end
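For reference, the polar-angle reconstruction shared by all detector branches above can be summarized as follows (our summary of the code, not part of the original source):

```latex
% (x, y) is the detector hit, z_det the data/MC-dependent detector z-plane,
% and z_vtx the event vertex z-position.
\[
  \theta \;=\; \operatorname{atan2}\!\left(\sqrt{x^{2}+y^{2}},\; z_{\mathrm{det}} - z_{\mathrm{vtx}}\right),
\]
% with the VCTRHL branch additionally mapping negative track angles
% into range via \theta \mapsto \pi + \theta.
```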
      MODULE SETDBG_I
      INTERFACE
!...Translated by Pacific-Sierra Research 77to90  4.3E  14:04:58   1/ 3/07
!...Modified by Charlotte Froese Fischer
!                     Gediminas Gaigalas  10/05/17
      SUBROUTINE SETDBG (DEBUG, fullname)
      LOGICAL :: DEBUG
      CHARACTER(LEN=*) :: fullname
      END SUBROUTINE SETDBG
      END INTERFACE
      END MODULE
{-# OPTIONS --without-K #-}
module well-typed-syntax-interpreter where
open import common public
open import well-typed-syntax
import well-typed-syntax-pre-interpreter
open import well-typed-syntax-eq-dec

max-level : Level
max-level = well-typed-syntax-pre-interpreter.max-level

Context⇓ : (Γ : Context) → Set (lsuc max-level)
Context⇓ = well-typed-syntax-pre-interpreter.inner.Context⇓
             (λ ℓ P Γ' dummy val → context-pick-if {P = P} dummy val)
             (λ ℓ P dummy val → context-pick-if-refl {P = P} {dummy})

Typ⇓ : {Γ : Context} → Typ Γ → Context⇓ Γ → Set max-level
Typ⇓ = well-typed-syntax-pre-interpreter.inner.Typ⇓
         (λ ℓ P Γ' dummy val → context-pick-if {P = P} dummy val)
         (λ ℓ P dummy val → context-pick-if-refl {P = P} {dummy})

Term⇓ : ∀ {Γ : Context} {T : Typ Γ} → Term T → (Γ⇓ : Context⇓ Γ) → Typ⇓ T Γ⇓
Term⇓ = well-typed-syntax-pre-interpreter.inner.Term⇓
          (λ ℓ P Γ' dummy val → context-pick-if {P = P} dummy val)
          (λ ℓ P dummy val → context-pick-if-refl {P = P} {dummy})
(*
 * Copyright (c) 2021 BedRock Systems, Inc.
 * This software is distributed under the terms of the BedRock Open-Source License.
 * See the LICENSE-BedRock file in the repository root for details.
 *)
Require Export iris.bi.embedding.
Require Import bedrock.lang.bi.prelude.

(** * Composing embeddings *)
(** Given embeddings [Embed PROP1 PROP2] and [Embed PROP2 PROP3],
[compose_embed PROP2] is the induced embedding [Embed PROP1 PROP3]. Its
[BiEmbed], etc instances are available after [Import compose_embed_instances]. *)

Definition compose_embed_def {A B C} `{!Embed B C, !Embed A B} : Embed A C :=
  λ P, embed (embed P).
Definition compose_embed_aux : seal (@compose_embed_def). Proof. by eexists. Qed.
Definition compose_embed := compose_embed_aux.(unseal).
Definition compose_embed_eq : @compose_embed = _ := compose_embed_aux.(seal_eq).
#[global] Arguments compose_embed {_}%type_scope _%type_scope {_}%type_scope
  {_ _} _%bi_scope : assert.

Section instances.
  Context {PROP1 PROP2 PROP3 : bi}.
  Context `{!BiEmbed PROP2 PROP3, !BiEmbed PROP1 PROP2}.

  #[local] Ltac unseal :=
    unfold embed, bi_embed_embed; cbn;
    rewrite !compose_embed_eq; unfold compose_embed_def; cbn.

  Lemma compose_embedding_mixin : BiEmbedMixin PROP1 PROP3 (compose_embed PROP2).
  Proof.
    split.
    - intros n P1 P2 ?. unseal. solve_proper.
    - intros P1 P2 ?. unseal. solve_proper.
    - intros P. unseal. by rewrite !embed_emp_valid.
    - intros. unseal. by rewrite !embed_interal_inj.
    - unseal. by rewrite !embed_emp_2.
    - intros. unseal. by rewrite !embed_impl_2.
    - intros. unseal. by rewrite !embed_forall_2.
    - intros. unseal. by rewrite !embed_exist_1.
    - intros. unseal. by rewrite !embed_sep.
    - intros. unseal. by rewrite !embed_wand_2.
    - intros. unseal. by rewrite !embed_persistently.
  Qed.
  #[local] Instance compose_embedding : BiEmbed PROP1 PROP3 :=
    {| bi_embed_mixin := compose_embedding_mixin |}.

  Lemma embed_embed P : embed P ⊣⊢@{PROP3} embed (embed P).
  Proof. rewrite {1}/embed/bi_embed_embed/=. by rewrite compose_embed_eq. Qed.

  #[local] Instance compose_embed_emp
      `{!BiEmbedEmp PROP2 PROP3, !BiEmbedEmp PROP1 PROP2} :
    BiEmbedEmp PROP1 PROP3.
  Proof. rewrite/BiEmbedEmp. by rewrite embed_embed !embed_emp_1. Qed.

  #[local] Instance compose_embed_later
      `{!BiEmbedLater PROP2 PROP3, !BiEmbedLater PROP1 PROP2} :
    BiEmbedLater PROP1 PROP3.
  Proof. intros P. by rewrite !embed_embed !embed_later. Qed.

  #[local] Instance compose_embed_internal_eq
      `{!BiInternalEq PROP1, !BiInternalEq PROP2, !BiInternalEq PROP3}
      `{!BiEmbedInternalEq PROP2 PROP3, !BiEmbedInternalEq PROP1 PROP2} :
    BiEmbedInternalEq PROP1 PROP3.
  Proof. intros A x y. by rewrite embed_embed !embed_internal_eq_1. Qed.

  #[local] Instance compose_embed_bupd
      `{!BiBUpd PROP1, !BiBUpd PROP2, !BiBUpd PROP3}
      `{!BiEmbedBUpd PROP2 PROP3, !BiEmbedBUpd PROP1 PROP2} :
    BiEmbedBUpd PROP1 PROP3.
  Proof. intros P. by rewrite !embed_embed !embed_bupd. Qed.

  #[local] Instance compose_embed_fupd
      `{!BiFUpd PROP1, !BiFUpd PROP2, !BiFUpd PROP3}
      `{!BiEmbedFUpd PROP2 PROP3, !BiEmbedFUpd PROP1 PROP2} :
    BiEmbedFUpd PROP1 PROP3.
  Proof. intros E1 E2 P. by rewrite !embed_embed !embed_fupd. Qed.

  #[local] Instance compose_embed_plainly
      `{!BiPlainly PROP1, !BiPlainly PROP2, !BiPlainly PROP3}
      `{!BiEmbedPlainly PROP2 PROP3, !BiEmbedPlainly PROP1 PROP2} :
    BiEmbedPlainly PROP1 PROP3.
  Proof. intros P. by rewrite !embed_embed !embed_plainly. Qed.
End instances.

Module compose_embed_instances.
  #[export] Hint Resolve
    compose_embedding compose_embed_emp compose_embed_later
    compose_embed_internal_eq compose_embed_bupd compose_embed_fupd
    compose_embed_plainly
  : typeclass_instances.
End compose_embed_instances.
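In ordinary notation, the key lemma [embed_embed] says that the sealed composite embedding is propositionally the composition of the two given ones. Writing the embeddings with brackets, this is (our paraphrase, not part of the source file):

```latex
\[
  \lceil P \rceil_{1 \to 3}
  \;\dashv\vdash\;
  \big\lceil\, \lceil P \rceil_{1 \to 2} \,\big\rceil_{2 \to 3}
\]
```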
import pq_induction_principles

universe u

section pq_group_equalizer_eta_L_eta

variables {G : Type u} [group G]

lemma of_inj : function.injective (of : G → pq_group G) :=
begin
  intros x y hxy,
  have hxy1 := congr_arg counit hxy,
  simp only [counit_of] at hxy1,
  assumption,
end

lemma of_mul_eq (x y : G) :
  of (of x * of y) = of (of x) * of (of y) ↔ of x * of y = of (x * y) :=
begin
  split,
  { intro hxy,
    have hxy1 := congr_arg (L_of_morph counit (functoriality_group_to_pq counit)) hxy,
    simp only [monoid_hom.map_mul, L_of_morph_of, counit_of] at hxy1,
    symmetry,
    exact hxy1, },
  { intro hxy,
    have hxy1 := congr_arg (L_of_morph of of_is_pq_morphism) hxy,
    simp only [monoid_hom.map_mul, L_of_morph_of] at hxy1,
    rw hxy,
    rw hxy1, },
end

lemma of_mul_eq_three (x y z : G) :
  of (of x * of y * of z) = of (of x) * of (of y) * of (of z) ↔
    of x * of y * of z = of (x * y * z) :=
begin
  split,
  { intro hxy,
    have hxy1 := congr_arg (L_of_morph counit (functoriality_group_to_pq counit)) hxy,
    simp only [monoid_hom.map_mul, L_of_morph_of, counit_of] at hxy1,
    symmetry,
    exact hxy1, },
  { intro hxy,
    have hxy1 := congr_arg (L_of_morph of of_is_pq_morphism) hxy,
    simp only [monoid_hom.map_mul, L_of_morph_of] at hxy1,
    rw hxy,
    rw hxy1, },
end

lemma counit_prod_map_of (x : list G) : counit (list.map of x).prod = x.prod :=
begin
  induction x,
  { simp only [list.prod_nil, list.map, monoid_hom.map_one], },
  { simp only [monoid_hom.map_mul, list.prod_cons, list.map, counit_of, x_ih], },
end

lemma L_of_prod_map_of (x : list G) :
  (L_of_morph of of_is_pq_morphism) ((list.map of x).prod) = (list.map (of ∘ of) x).prod :=
begin
  induction x,
  { simp only [list.prod_nil, list.map, monoid_hom.map_one], },
  { simp only [monoid_hom.map_mul, function.comp_app, list.prod_cons, list.map],
    simp only [x_ih, L_of_morph_of], },
end

lemma of_mul_eq_list (x : list G) :
  of ((x.map of).prod) = (x.map (of ∘ of)).prod ↔ of (x.prod) = (x.map of).prod :=
begin
  induction x with g x hx,
  { simp only [list.prod_nil, list.map, of_1_eq_unit, eq_self_iff_true], },
  { simp only [function.comp_app, list.prod_cons, list.map],
    split,
    { intro hxg,
      have hxg1 := congr_arg (L_of_morph counit (functoriality_group_to_pq counit)) hxg,
      simp only [monoid_hom.map_mul, L_of_morph_of, counit_of, counit_prod_map_of] at hxg1,
      rw hxg1,
      apply congr_arg,
      clear hxg1,
      clear hxg,
      clear hx,
      induction x,
      { simp only [list.prod_nil, list.map, monoid_hom.map_one], },
      { simp only [monoid_hom.map_mul, function.comp_app, list.prod_cons, list.map],
        simp only [L_of_morph_of, counit_of, x_ih], }, },
    { intro hxg,
      have hxg1 := congr_arg (L_of_morph of of_is_pq_morphism) hxg,
      simp only [monoid_hom.map_mul, L_of_morph_of, L_of_prod_map_of] at hxg1,
      rw ←hxg1,
      rw hxg, }, },
end

theorem eta_eq_L_eta (x : pq_group G) :
  (of x = L_of_morph of of_is_pq_morphism x) ↔ (∃ y, x = of y) :=
begin
  split,
  { revert x,
    refine pq_group_list _,
    intros x hx,
    rw L_of_prod_map_of at hx,
    rw of_mul_eq_list at hx,
    use x.prod,
    symmetry,
    assumption,
    /-
    revert x,
    refine pq_group_word_induction _ _,
    { intro h1,
      use 1,
      rw of_1_eq_unit,
      /- conv {
        to_rhs,
        congr,
        rw ←gpow_zero (1 : G),
      },
      rw of_pow_eq_pow_of,
      simp only [gpow_zero], -/ },
    { intros x y hx hxy,
      simp only [monoid_hom.map_mul, L_of_morph_of] at hxy,
      --have hxy1 := congr_arg (L_of_morph counit (functoriality_group_bundled_to_pq counit)) hxy,
      --simp only [monoid_hom.map_mul, L_of_morph_of, counit_of] at hxy1,
      suffices : of x = (L_of_morph of of_is_pq_morphism) x,
      { specialize hx this,
        cases hx with z hz,
        use (y * z),
        rw hz,
        rw ←of_mul_eq,
        simp only [hz, L_of_morph_of] at hxy,
        assumption, },
      clear hx,
      revert x,
      refine pq_group_word_induction _ _,
      { simp only [forall_prop_of_true, mul_one, monoid_hom.map_one],
        exact of_1_eq_unit, },
      { intros x g hx hxy,
        simp only [monoid_hom.map_mul, L_of_morph_of] at *,
        sorry, },
      /- suffices : ∃ z : G, of (of y) * (L_of_morph of of_is_pq_morphism) x = of (of z),
      { cases this with z hz,
        use z,
        rw ←hxy at hz,
        apply of_inj,
        assumption, },
      sorry, -/ },
    -/ },
  { intro hx,
    cases hx with y hy,
    rw hy,
    rw L_of_morph_of, },
end

end pq_group_equalizer_eta_L_eta
      subroutine GSMCAMRPOP(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,icoloredboxlo0,icoloredboxlo1
     &     ,icoloredboxhi0,icoloredboxhi1
     &     ,dx
     &     ,alpha
     &     ,beta
     &     )
      implicit none
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1)
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1)
      integer icoloredboxlo0,icoloredboxlo1
      integer icoloredboxhi0,icoloredboxhi1
      REAL*8 dx
      REAL*8 alpha
      REAL*8 beta
      REAL*8 lambda, dxinv, sum_b, lphi
      integer i,j
      integer idir
      dxinv = (1.0d0)/(dx*dx)
      sum_b = 0.0
      do idir = 0, 2 -1
         sum_b = sum_b + (2.0d0)*dxinv
      enddo
      lambda = (1.0d0)/(alpha - beta*sum_b)
      do j = icoloredBoxlo1,icoloredBoxhi1,2
      do i = icoloredBoxlo0,icoloredBoxhi0,2
         lphi =
     &        ( phi(i+1,j  )
     &        + phi(i-1,j  )
     $        -(2.0d0)*phi(i  ,j  ))
     $        +( phi(i  ,j+1)
     &        + phi(i  ,j-1)
     $        -(2.0d0)*phi(i  ,j  ))
         lphi = lphi*dxinv
         phi(i,j) =
     $        phi( i,j) +
     &        lambda*( rhs( i,j) - lphi)
      enddo
      enddo
      return
      end

      subroutine GSRBHELMHOLTZ(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,nrhscomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,dx
     &     ,alpha
     &     ,beta
     &     ,redBlack
     &     )
      implicit none
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer nrhscomp
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1,
     &     0:nrhscomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      REAL*8 dx
      REAL*8 alpha
      REAL*8 beta
      integer redBlack
      REAL*8 lambda, dxinv, sum_b, lphi, helmop
      integer i,j
      integer n,ncomp,idir,indtot,imin,imax
      dxinv = (1.0d0)/(dx*dx)
      sum_b = 0.0
      do idir = 0, 2 -1
         sum_b = sum_b + (2.0d0)*dxinv
      enddo
      lambda = -(1.0d0)/(alpha - beta*sum_b)
      ncomp = nphicomp
      if (ncomp .ne. nrhscomp) then
         call MAYDAYERROR()
      endif
      do n = 0, ncomp - 1
         do j=iregionlo1, iregionhi1
            imin = iregionlo0
            indtot = imin + j
            imin = imin + abs(mod(indtot + redBlack, 2))
            imax = iregionhi0
            do i = imin, imax, 2
               lphi = (phi(i+1,j,n)
     &              + phi(i-1,j,n)
     &              + phi(i,j+1,n)
     &              + phi(i,j-1,n)
     &              -(2.0d0)*2*phi(i,j,n))*dxinv
               helmop = alpha*phi(i,j,n) + beta*lphi
               phi(i,j,n) = phi(i,j,n) +
     &              lambda*(helmop - rhs(i,j,n))
            enddo
         enddo
      enddo
      return
      end

      subroutine GSRBLAPLACIAN(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,nrhscomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,dx
     &     ,redBlack
     &     )
      implicit none
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer nrhscomp
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1,
     &     0:nrhscomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      REAL*8 dx
      integer redBlack
      REAL*8 lambda, dxinv, sum_b, lphi, lap
      integer i,j
      integer n,ncomp,idir,indtot,imin,imax
      dxinv = (1.0d0)/(dx*dx)
      sum_b = 0.0
      do idir = 0, 2 -1
         sum_b = sum_b + (2.0d0)*dxinv
      enddo
      lambda = -(1.0d0)/sum_b
      ncomp = nphicomp
      if (ncomp .ne. nrhscomp) then
         call MAYDAYERROR()
      endif
      do n = 0, ncomp - 1
         do j=iregionlo1, iregionhi1
            imin = iregionlo0
            indtot = imin + j
            imin = imin + abs(mod(indtot + redBlack, 2))
            imax = iregionhi0
            do i = imin, imax, 2
               lphi = (
     &              phi(i+1,j,n)
     &              + phi(i-1,j,n)
     &              + phi(i,j+1,n)
     &              + phi(i,j-1,n)
     &              ) * dxinv
               phi(i,j,n) = lambda*(rhs(i,j,n)-lphi)
            enddo
         enddo
      enddo
      return
      end

      subroutine GSRBLAZY(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,lphi
     &     ,ilphilo0,ilphilo1
     &     ,ilphihi0,ilphihi1
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,icoloredboxlo0,icoloredboxlo1
     &     ,icoloredboxhi0,icoloredboxhi1
     &     ,alpha
     &     ,beta
     &     ,dx
     &     )
      implicit none
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1)
      integer ilphilo0,ilphilo1
      integer ilphihi0,ilphihi1
      REAL*8 lphi(
     &     ilphilo0:ilphihi0,
     &     ilphilo1:ilphihi1)
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1)
      integer icoloredboxlo0,icoloredboxlo1
      integer icoloredboxhi0,icoloredboxhi1
      REAL*8 alpha
      REAL*8 beta
      REAL*8 dx
      integer i,j, idir
      REAL*8 dxinv, sum_b, lambda
      dxinv = (1.0d0)/(dx*dx)
      sum_b = 0.0
      do idir = 0, 2 -1
         sum_b = sum_b + (2.0d0)*dxinv
      enddo
      lambda = -(1.0d0)/(alpha - beta*sum_b)
      do j = icoloredBoxlo1,icoloredBoxhi1,2
      do i = icoloredBoxlo0,icoloredBoxhi0,2
         phi(i,j) =
     $        phi( i,j) -
     &        lambda*(
     $        rhs( i,j) -
     $        lphi( i,j))
      enddo
      enddo
      return
      end

      subroutine AMRPMULTICOLOR(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,weight
     &     ,alpha
     &     ,beta
     &     ,dx
     &     ,icoloredboxlo0,icoloredboxlo1
     &     ,icoloredboxhi0,icoloredboxhi1
     &     )
      implicit none
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1)
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1)
      REAL*8 weight
      REAL*8 alpha
      REAL*8 beta
      REAL*8 dx(0:1)
      integer icoloredboxlo0,icoloredboxlo1
      integer icoloredboxhi0,icoloredboxhi1
      integer i,j
      REAL*8 laplphi, dx0,dx1
      dx0 = beta/(dx(0) * dx(0))
      dx1 = beta/(dx(1) * dx(1))
      do j = icoloredBoxlo1,icoloredBoxhi1,2
      do i = icoloredBoxlo0,icoloredBoxhi0,2
         laplphi =
     &        ( phi(i+1,j  )
     &        + phi(i-1,j  )
     $        -(2.0d0)*phi(i  ,j  ))*dx0
     $        +( phi(i  ,j+1)
     &        + phi(i  ,j-1)
     $        -(2.0d0)*phi(i  ,j  ))*dx1
         laplphi = laplphi + alpha * phi(i,j)
         phi(i,j) = phi(i,j) +
     &        weight*(rhs(i,j) - laplphi)
      enddo
      enddo
      return
      end

      subroutine OPERATORLAP(
     &     lofphi
     &     ,ilofphilo0,ilofphilo1
     &     ,ilofphihi0,ilofphihi1
     &     ,nlofphicomp
     &     ,phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,dx
     &     ,alpha
     &     ,beta
     &     )
      implicit none
      integer nlofphicomp
      integer ilofphilo0,ilofphilo1
      integer ilofphihi0,ilofphihi1
      REAL*8 lofphi(
     &     ilofphilo0:ilofphihi0,
     &     ilofphilo1:ilofphihi1,
     &     0:nlofphicomp-1)
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      REAL*8 dx
      REAL*8 alpha
      REAL*8 beta
      REAL*8 dxinv, lap
      integer n,ncomp
      integer i,j
      ncomp = nphicomp
      if (ncomp .ne. nlofphicomp) then
         call MAYDAYERROR()
      endif
      dxinv = (1.0d0)/(dx*dx)
      do n = 0, ncomp-1
         do j = iregionlo1,iregionhi1
         do i = iregionlo0,iregionhi0
            lap = ( phi(i+1,j  ,n)
     &           + phi(i-1,j  ,n)
     &           + phi(i  ,j+1,n)
     &           + phi(i  ,j-1,n)
     &           -(2*2)*phi(i,j,n) )
     &           * dxinv
            lofphi(i,j,n) = alpha*phi(i,j,n)+beta*lap
         enddo
         enddo
      enddo
      return
      end

      subroutine OPERATORLAPRES(
     &     r
     &     ,irlo0,irlo1
     &     ,irhi0,irhi1
     &     ,nrcomp
     &     ,phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,nrhscomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,dx
     &     ,alpha
     &     ,beta
     &     )
      implicit none
      integer nrcomp
      integer irlo0,irlo1
      integer irhi0,irhi1
      REAL*8 r(
     &     irlo0:irhi0,
     &     irlo1:irhi1,
     &     0:nrcomp-1)
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer nrhscomp
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1,
     &     0:nrhscomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      REAL*8 dx
      REAL*8 alpha
      REAL*8 beta
      REAL*8 dxinv, lap
      integer n,ncomp
      integer i,j
      ncomp = nphicomp
      dxinv = (1.0d0)/(dx*dx)
      do n = 0, ncomp-1
         do j = iregionlo1,iregionhi1
         do i = iregionlo0,iregionhi0
            lap = ( phi(i+1,j  ,n)
     &           + phi(i-1,j  ,n)
     &           + phi(i  ,j+1,n)
     &           + phi(i  ,j-1,n)
     &           -(2*2)*phi(i,j,n) )
     &           * dxinv
            r(i,j,n) = -alpha*phi(i,j,n) -beta*lap +
     &           rhs(i,j,n)
         enddo
         enddo
      enddo
      return
      end

      subroutine RESTRICTRES(
     &     res
     &     ,ireslo0,ireslo1
     &     ,ireshi0,ireshi1
     &     ,nrescomp
     &     ,phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,rhs
     &     ,irhslo0,irhslo1
     &     ,irhshi0,irhshi1
     &     ,nrhscomp
     &     ,alpha
     &     ,beta
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,dx
     &     )
      implicit none
      integer nrescomp
      integer ireslo0,ireslo1
      integer ireshi0,ireshi1
      REAL*8 res(
     &     ireslo0:ireshi0,
     &     ireslo1:ireshi1,
     &     0:nrescomp-1)
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer nrhscomp
      integer irhslo0,irhslo1
      integer irhshi0,irhshi1
      REAL*8 rhs(
     &     irhslo0:irhshi0,
     &     irhslo1:irhshi1,
     &     0:nrhscomp-1)
      REAL*8 alpha
      REAL*8 beta
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      REAL*8 dx
      REAL*8 denom,dxinv,lofphi
      integer n,ncomp
      integer i,j
      integer ii,jj
      ncomp = nphicomp
      dxinv = (1.0d0) / (dx*dx)
      denom = 2 *2
      do n = 0, ncomp-1
         do j = iregionlo1,iregionhi1
         do i = iregionlo0,iregionhi0
            ii = i/2
            jj = j/2
            lofphi = alpha * phi(i,j,n)
     &           + beta *
     &           ( phi(i+1,j  ,n)
     &           + phi(i-1,j  ,n)
     &           + phi(i  ,j+1,n)
     &           + phi(i  ,j-1,n)
     &           - phi(i  ,j  ,n) * 2 * 2
     &           ) * dxinv
            res(ii,jj,n) = res(ii,jj,n)
     &           + (rhs(i,j,n) - lofphi) / denom
         enddo
         enddo
      enddo
      return
      end

      subroutine PROLONG(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,coarse
     &     ,icoarselo0,icoarselo1
     &     ,icoarsehi0,icoarsehi1
     &     ,ncoarsecomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,m
     &     )
      implicit none
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer ncoarsecomp
      integer icoarselo0,icoarselo1
      integer icoarsehi0,icoarsehi1
      REAL*8 coarse(
     &     icoarselo0:icoarsehi0,
     &     icoarselo1:icoarsehi1,
     &     0:ncoarsecomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      integer m
      INTEGER ncomp, n
      integer i,j
      integer ii,jj
      ncomp = nphicomp
      do n = 0, ncomp-1
         do j = iregionlo1,iregionhi1
         do i = iregionlo0,iregionhi0
            ii = i/m
            jj = j/m
            phi(i,j,n) = phi(i,j,n) +
     &           coarse(ii,jj,n)
         enddo
         enddo
      enddo
      return
      end

      subroutine PROLONG_2(
     &     phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,coarse
     &     ,icoarselo0,icoarselo1
     &     ,icoarsehi0,icoarsehi1
     &     ,ncoarsecomp
     &     ,iregionlo0,iregionlo1
     &     ,iregionhi0,iregionhi1
     &     ,m
     &     )
      implicit none
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer ncoarsecomp
      integer icoarselo0,icoarselo1
      integer icoarsehi0,icoarsehi1
      REAL*8 coarse(
     &     icoarselo0:icoarsehi0,
     &     icoarselo1:icoarsehi1,
     &     0:ncoarsecomp-1)
      integer iregionlo0,iregionlo1
      integer iregionhi0,iregionhi1
      integer m
      INTEGER ncomp, n
      integer i,j
      integer offs(2)
      integer ic,jc
      REAL*8 f0, den, fx(2)
      den = (1.0d0)/(4**2)
      fx(1) = (3.0d0)*den
      fx(2) = (3.0d0)**2*den
      f0 = (1.0d0)*den
      ncomp = nphicomp
      do j = iregionlo1,iregionhi1
      do i = iregionlo0,iregionhi0
         ic = i/m
         jc = j/m
         offs(1) = 2*mod(i,2) - 1
         offs(2) = 2*mod(j,2) - 1
         do n = 0, ncomp-1
            phi(i,j,n) = phi(i,j,n)
     $           + fx(2)*
     $           coarse(ic,jc,n)
     $           + f0*coarse(ic+offs(1),jc+offs(2),n)
            phi(i,j,n) = phi(i,j,n)
     $           + fx(2 -1)*
     $           (
     $           coarse(ic+offs(1),jc,n)
     $           + coarse(ic,jc+offs(2),n) )
         enddo
      enddo
      enddo
      return
      end

      subroutine NEWGETFLUX(
     &     flux
     &     ,ifluxlo0,ifluxlo1
     &     ,ifluxhi0,ifluxhi1
     &     ,nfluxcomp
     &     ,phi
     &     ,iphilo0,iphilo1
     &     ,iphihi0,iphihi1
     &     ,nphicomp
     &     ,iboxlo0,iboxlo1
     &     ,iboxhi0,iboxhi1
     &     ,beta_dx
     &     ,a_idir
     &     )
      implicit none
      integer CHF_ID(0:5,0:5)
      data CHF_ID/ 1,0,0,0,0,0 ,0,1,0,0,0,0 ,0,0,1,0,0,0 ,0,0,0,1,0,0 ,0
     &,0,0,0,1,0 ,0,0,0,0,0,1 /
      integer nfluxcomp
      integer ifluxlo0,ifluxlo1
      integer ifluxhi0,ifluxhi1
      REAL*8 flux(
     &     ifluxlo0:ifluxhi0,
     &     ifluxlo1:ifluxhi1,
     &     0:nfluxcomp-1)
      integer nphicomp
      integer iphilo0,iphilo1
      integer iphihi0,iphihi1
      REAL*8 phi(
     &     iphilo0:iphihi0,
     &     iphilo1:iphihi1,
     &     0:nphicomp-1)
      integer iboxlo0,iboxlo1
      integer iboxhi0,iboxhi1
      REAL*8 beta_dx
      integer a_idir
      INTEGER ncomp,n
      integer ii, jj
      integer i , j
      ncomp = nphicomp
      ii = CHF_ID(a_idir, 0)
      jj = CHF_ID(a_idir, 1)
      do n = 0, ncomp-1
         do j = iboxlo1,iboxhi1
         do i = iboxlo0,iboxhi0
            flux(i,j,n) =
     &           (phi(i,j,n)-
     &           phi(i-ii,j-jj,n))*beta_dx
         enddo
         enddo
      enddo
      return
      end
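All of these relaxation kernels discretize the same Helmholtz-type operator on a uniform 2D grid. For reference, here is our summary (not part of the source file) of the stencil in `OPERATORLAP` and the point update used by the `GSRB`-style smoothers:

```latex
\[
(L\varphi)_{i,j} = \alpha\,\varphi_{i,j}
  + \frac{\beta}{\Delta x^{2}}
  \big(\varphi_{i+1,j}+\varphi_{i-1,j}+\varphi_{i,j+1}+\varphi_{i,j-1}-4\,\varphi_{i,j}\big),
\]
\[
\varphi_{i,j} \leftarrow \varphi_{i,j}
  + \lambda\,\big((L\varphi)_{i,j}-\mathrm{rhs}_{i,j}\big),
\qquad
\lambda = -\Big(\alpha - \frac{4\beta}{\Delta x^{2}}\Big)^{-1}.
\]
```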
(*  Title:      Well-Quasi-Orders
    Author:     Christian Sternagel <[email protected]>
    Maintainer: Christian Sternagel
    License:    LGPL
*)

section \<open>A Proof of Higman's Lemma via Open Induction\<close>

theory Higman_OI
imports
  Open_Induction.Open_Induction
  Minimal_Elements
  Almost_Full
begin

subsection \<open>Some facts about the suffix relation\<close>

lemma wfp_on_strict_suffix: "wfp_on strict_suffix A"
  by (rule wfp_on_mono [OF subset_refl, of _ _ "measure_on length A"])
     (auto simp: strict_suffix_def suffix_def)

lemma po_on_strict_suffix: "po_on strict_suffix A"
  by (force simp: strict_suffix_def po_on_def transp_on_def irreflp_on_def)

subsection \<open>Lexicographic Order on Infinite Sequences\<close>

lemma antisymp_on_LEX:
  assumes "irreflp_on P A" and "antisymp_on P A"
  shows "antisymp_on (LEX P) (SEQ A)"
proof
  fix f g assume SEQ: "f \<in> SEQ A" "g \<in> SEQ A" and "LEX P f g" and "LEX P g f"
  then obtain i j where "P (f i) (g i)" and "P (g j) (f j)"
    and "\<forall>k<i. f k = g k" and "\<forall>k<j. g k = f k" by (auto simp: LEX_def)
  then have "P (f (min i j)) (f (min i j))"
    using assms(2) and SEQ
    by (cases "i = j") (auto simp: antisymp_on_def min_def, force)
  with assms(1) and SEQ show "f = g" by (auto simp: irreflp_on_def)
qed

lemma LEX_trans:
  assumes "transp_on P A" and "f \<in> SEQ A" and "g \<in> SEQ A" and "h \<in> SEQ A"
    and "LEX P f g" and "LEX P g h"
  shows "LEX P f h"
  using assms
  by (auto simp: LEX_def transp_on_def) (metis less_trans linorder_neqE_nat)

lemma qo_on_LEXEQ: "transp_on P A \<Longrightarrow> qo_on (LEXEQ P) (SEQ A)"
  by (auto simp: qo_on_def reflp_on_def transp_on_def [of "LEXEQ P"] dest: LEX_trans)

context minimal_element
begin

lemma glb_LEX_lexmin:
  assumes "chain_on (LEX P) C (SEQ A)" and "C \<noteq> {}"
  shows "glb (LEX P) C (lexmin C)"
proof
  have "C \<subseteq> SEQ A" using assms by (auto simp: chain_on_def)
  then have "lexmin C \<in> SEQ A" using \<open>C \<noteq> {}\<close> by (intro lexmin_SEQ_mem)
  note * = \<open>C \<subseteq> SEQ A\<close> \<open>C \<noteq> {}\<close>
  note lex = LEX_imp_less [folded irreflp_on_def, OF po [THEN po_on_imp_irreflp_on]]
  \<comment> \<open>\<open>lexmin C\<close> is a lower bound\<close>
  show "lb (LEX P) C (lexmin C)"
  proof
    fix f assume "f \<in> C"
    then show "LEXEQ P (lexmin C) f"
    proof (cases "f = lexmin C")
      define i where "i = (LEAST i. f i \<noteq> lexmin C i)"
      case False
      then have neq: "\<exists>i. f i \<noteq> lexmin C i" by blast
      from LeastI_ex [OF this, folded i_def]
        and not_less_Least [where P = "\<lambda>i. f i \<noteq> lexmin C i", folded i_def]
      have neq: "f i \<noteq> lexmin C i" and eq: "\<forall>j<i. f j = lexmin C j" by auto
      then have **: "f \<in> eq_upto C (lexmin C) i" "f i \<in> ith (eq_upto C (lexmin C) i) i"
        using \<open>f \<in> C\<close> by force+
      moreover from ** have "\<not> P (f i) (lexmin C i)"
        using lexmin_minimal [OF *, of "f i" i] and \<open>f \<in> C\<close> and \<open>C \<subseteq> SEQ A\<close> by blast
      moreover obtain g where "g \<in> eq_upto C (lexmin C) (Suc i)"
        using eq_upto_lexmin_non_empty [OF *] by blast
      ultimately have "P (lexmin C i) (f i)"
        using neq and \<open>C \<subseteq> SEQ A\<close> and assms(1) and lex [of g f i] and lex [of f g i]
        by (auto simp: eq_upto_def chain_on_def)
      with eq show ?thesis by (auto simp: LEX_def)
    qed simp
  qed
  \<comment> \<open>\<open>lexmin C\<close> is greater than or equal to any other lower bound\<close>
  fix f assume lb: "lb (LEX P) C f"
  then show "LEXEQ P f (lexmin C)"
  proof (cases "f = lexmin C")
    define i where "i = (LEAST i. f i \<noteq> lexmin C i)"
    case False
    then have neq: "\<exists>i. f i \<noteq> lexmin C i" by blast
    from LeastI_ex [OF this, folded i_def]
      and not_less_Least [where P = "\<lambda>i. f i \<noteq> lexmin C i", folded i_def]
    have neq: "f i \<noteq> lexmin C i" and eq: "\<forall>j<i. f j = lexmin C j" by auto
    obtain h where "h \<in> eq_upto C (lexmin C) (Suc i)" and "h \<in> C"
      using eq_upto_lexmin_non_empty [OF *] by (auto simp: eq_upto_def)
    then have [simp]: "\<And>j. j < Suc i \<Longrightarrow> h j = lexmin C j" by auto
    with lb and \<open>h \<in> C\<close> have "LEX P f h" using neq by (auto simp: lb_def)
    then have "P (f i) (h i)"
      using neq and eq and \<open>C \<subseteq> SEQ A\<close> and \<open>h \<in> C\<close> by (intro lex) auto
    with eq show ?thesis by (auto simp: LEX_def)
  qed simp
qed

lemma dc_on_LEXEQ: "dc_on (LEXEQ P) (SEQ A)"
proof
  fix C assume "chain_on (LEXEQ P) C (SEQ A)" and "C \<noteq> {}"
  then have chain: "chain_on (LEX P) C (SEQ A)" by (auto simp: chain_on_def)
  then have "C \<subseteq> SEQ A" by (auto simp: chain_on_def)
  then have "lexmin C \<in> SEQ A" using \<open>C \<noteq> {}\<close> by (intro lexmin_SEQ_mem)
  have "glb (LEX P) C (lexmin C)" by (rule glb_LEX_lexmin [OF chain \<open>C \<noteq> {}\<close>])
  then have "glb (LEXEQ P) C (lexmin C)" by (auto simp: glb_def lb_def)
  with \<open>lexmin C \<in> SEQ A\<close> show "\<exists>f \<in> SEQ A. glb (LEXEQ P) C f" by blast
qed

end

text \<open>
  Properties that only depend on finite initial segments of a sequence
  (i.e., which are open with respect to the product topology).
\<close>
definition "pt_open_on Q A \<longleftrightarrow>
  (\<forall>f\<in>A. Q f \<longleftrightarrow> (\<exists>n. (\<forall>g\<in>A. (\<forall>i<n. g i = f i) \<longrightarrow> Q g)))"

lemma pt_open_onD:
  "pt_open_on Q A \<Longrightarrow> Q f \<Longrightarrow> f \<in> A \<Longrightarrow>
    (\<exists>n. (\<forall>g\<in>A. (\<forall>i<n. g i = f i) \<longrightarrow> Q g))"
  unfolding pt_open_on_def by blast

lemma pt_open_on_good:
  "pt_open_on (good Q) (SEQ A)"
proof (unfold pt_open_on_def, intro ballI)
  fix f assume f: "f \<in> SEQ A"
  show "good Q f = (\<exists>n. \<forall>g\<in>SEQ A. (\<forall>i<n. g i = f i) \<longrightarrow> good Q g)"
  proof
    assume "good Q f"
    then obtain i and j where *: "i < j" "Q (f i) (f j)" by auto
    have "\<forall>g\<in>SEQ A. (\<forall>i<Suc j. g i = f i) \<longrightarrow> good Q g"
    proof (intro ballI impI)
      fix g assume "g \<in> SEQ A" and "\<forall>i<Suc j. g i = f i"
      then show "good Q g" using * by (force simp: good_def)
    qed
    then show "\<exists>n. \<forall>g\<in>SEQ A. (\<forall>i<n. g i = f i) \<longrightarrow> good Q g" ..
  next
    assume "\<exists>n. \<forall>g\<in>SEQ A. (\<forall>i<n. g i = f i) \<longrightarrow> good Q g"
    with f show "good Q f" by blast
  qed
qed

context minimal_element
begin

lemma pt_open_on_imp_open_on_LEXEQ:
  assumes "pt_open_on Q (SEQ A)"
  shows "open_on (LEXEQ P) Q (SEQ A)"
proof
  fix C assume chain: "chain_on (LEXEQ P) C (SEQ A)" and ne: "C \<noteq> {}"
    and "\<exists>g\<in>SEQ A. glb (LEXEQ P) C g \<and> Q g"
  then obtain g where g: "g \<in> SEQ A" and "glb (LEXEQ P) C g" and Q: "Q g" by blast
  then have glb: "glb (LEX P) C g" by (auto simp: glb_def lb_def)
  from chain have "chain_on (LEX P) C (SEQ A)" and C: "C \<subseteq> SEQ A"
    by (auto simp: chain_on_def)
  note * = glb_LEX_lexmin [OF this(1) ne]
  have "lexmin C \<in> SEQ A" using ne and C by (intro lexmin_SEQ_mem)
  from glb_unique [OF _ g this glb *]
    and antisymp_on_LEX [OF po_on_imp_irreflp_on [OF po] po_on_imp_antisymp_on [OF po]]
  have [simp]: "lexmin C = g" by auto
  from assms [THEN pt_open_onD, OF Q g]
  obtain n :: nat where **: "\<And>h. h \<in> SEQ A \<Longrightarrow> (\<forall>i<n. h i = g i) \<longrightarrow> Q h" by blast
  from eq_upto_lexmin_non_empty [OF C ne, of n]
  obtain f where "f \<in> eq_upto C g n" by auto
  then have "f \<in> C" and "Q f" using ** [of f] and C by force+
  then show "\<exists>f\<in>C. Q f" by blast
qed

lemma open_on_good:
  "open_on (LEXEQ P) (good Q) (SEQ A)"
  by (intro pt_open_on_imp_open_on_LEXEQ pt_open_on_good)

end

lemma open_on_LEXEQ_imp_pt_open_on_counterexample:
  fixes a b :: "'a"
  defines "A \<equiv> {a, b}" and "P \<equiv> (\<lambda>x y. False)" and "Q \<equiv> (\<lambda>f. \<forall>i. f i = b)"
  assumes [simp]: "a \<noteq> b"
  shows "minimal_element P A"
    and "open_on (LEXEQ P) Q (SEQ A)"
    and "\<not> pt_open_on Q (SEQ A)"
proof -
  show "minimal_element P A"
    by standard (auto simp: P_def po_on_def irreflp_on_def transp_on_def wfp_on_def)
  show "open_on (LEXEQ P) Q (SEQ A)"
    by (auto simp: P_def open_on_def chain_on_def SEQ_def glb_def lb_def LEX_def)
  show "\<not> pt_open_on Q (SEQ A)"
  proof
    define f :: "nat \<Rightarrow> 'a" where "f \<equiv> (\<lambda>x. b)"
    have "f \<in> SEQ A" by (auto simp: A_def f_def)
    moreover assume "pt_open_on Q (SEQ A)"
    ultimately have "Q f \<longleftrightarrow> (\<exists>n. (\<forall>g\<in>SEQ A. (\<forall>i<n. g i = f i) \<longrightarrow> Q g))"
      unfolding pt_open_on_def by blast
    moreover have "Q f" by (auto simp: Q_def f_def)
    moreover have "\<exists>g\<in>SEQ A. (\<forall>i<n. g i = f i) \<and> \<not> Q g" for n
      by (intro bexI [of _ "f(n := a)"]) (auto simp: f_def Q_def A_def)
    ultimately show False by blast
  qed
qed

lemma higman:
  assumes "almost_full_on P A"
  shows "almost_full_on (list_emb P) (lists A)"
proof
  interpret minimal_element strict_suffix "lists A"
    by (unfold_locales) (intro po_on_strict_suffix wfp_on_strict_suffix)+
  fix f presume "f \<in> SEQ (lists A)"
  with qo_on_LEXEQ [OF po_on_imp_transp_on [OF po_on_strict_suffix]]
    and dc_on_LEXEQ and open_on_good
  show "good (list_emb P) f"
  proof (induct rule: open_induct_on)
    case (less f)
    define h where "h i = hd (f i)" for i
    show ?case
    proof (cases "\<exists>i. f i = []")
      case False
      then have ne: "\<forall>i. f i \<noteq> []" by auto
      with \<open>f \<in> SEQ (lists A)\<close> have "\<forall>i. h i \<in> A" by (auto simp: h_def ne_lists)
      from almost_full_on_imp_homogeneous_subseq [OF assms this]
      obtain \<phi> :: "nat \<Rightarrow> nat"
        where mono: "\<And>i j. i < j \<Longrightarrow> \<phi> i < \<phi> j"
          and P: "\<And>i j. i < j \<Longrightarrow> P (h (\<phi> i)) (h (\<phi> j))" by blast
      define f' where "f' i = (if i < \<phi> 0 then f i else tl (f (\<phi> (i - \<phi> 0))))" for i
      have f': "f' \<in> SEQ (lists A)"
        using ne and \<open>f \<in> SEQ (lists A)\<close> by (auto simp: f'_def dest: list.set_sel)
      have [simp]:
        "\<And>i. \<phi> 0 \<le> i \<Longrightarrow> h (\<phi> (i - \<phi> 0)) # f' i = f (\<phi> (i - \<phi> 0))"
        "\<And>i. i < \<phi> 0 \<Longrightarrow> f' i = f i"
        using ne by (auto simp: f'_def h_def)
      moreover have "strict_suffix (f' (\<phi> 0)) (f (\<phi> 0))" using ne by (auto simp: f'_def)
      ultimately have "LEX strict_suffix f' f" by (auto simp: LEX_def)
      with LEX_imp_not_LEX [OF this] have "strict (LEXEQ strict_suffix) f' f"
        using po_on_strict_suffix [of UNIV]
        unfolding po_on_def irreflp_on_def transp_on_def by blast
      from less(2) [OF f' this] have "good (list_emb P) f'" .
      then obtain i j where "i < j" and emb: "list_emb P (f' i) (f' j)"
        by (auto simp: good_def)
      consider "j < \<phi> 0" | "\<phi> 0 \<le> i" | "i < \<phi> 0" and "\<phi> 0 \<le> j" by arith
      then show ?thesis
      proof (cases)
        case 1
        with \<open>i < j\<close> and emb show ?thesis by (auto simp: good_def)
      next
        case 2
        with \<open>i < j\<close> and P have "P (h (\<phi> (i - \<phi> 0))) (h (\<phi> (j - \<phi> 0)))" by auto
        with emb have "list_emb P (h (\<phi> (i - \<phi> 0)) # f' i) (h (\<phi> (j - \<phi> 0)) # f' j)"
          by auto
        then have "list_emb P (f (\<phi> (i - \<phi> 0))) (f (\<phi> (j - \<phi> 0)))"
          using 2 and \<open>i < j\<close> by auto
        moreover with 2 and \<open>i <j\<close> have "\<phi> (i - \<phi> 0) < \<phi> (j - \<phi> 0)"
          using mono by auto
        ultimately show ?thesis by (auto simp: good_def)
      next
        case 3
        with emb have "list_emb P (f i) (f' j)" by auto
        moreover have "f (\<phi> (j - \<phi> 0)) = h (\<phi> (j - \<phi> 0)) # f' j" using 3 by auto
        ultimately have "list_emb P (f i) (f (\<phi> (j - \<phi> 0)))" by auto
        moreover have "i < \<phi> (j - \<phi> 0)" using mono [of 0 "j - \<phi> 0"] and 3 by force
        ultimately show ?thesis by (auto simp: good_def)
      qed
    qed auto
  qed
qed blast

end
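In conventional terms, the final theorem `higman` states Higman's lemma (our paraphrase of the formal statement, rendered in LaTeX):

```latex
% list_emb P is homeomorphic embedding of finite lists: u embeds into v
% when u is obtained from v by deleting elements and weakening along P.
\[
  \mathrm{almost\_full}(P,\, A)
  \;\Longrightarrow\;
  \mathrm{almost\_full}(\preceq_{P},\, A^{*}).
\]
```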
 ZGERQF Example Program Results

 Minimum-norm solution
 ( -2.8501,  6.4683) (  1.6264, -0.7799) (  6.9290,  4.6481) (  1.4048,  3.2400)
(* This program is free software; you can redistribute it and/or      *)
(* modify it under the terms of the GNU Lesser General Public License *)
(* as published by the Free Software Foundation; either version 2.1   *)
(* of the License, or (at your option) any later version.             *)
(*                                                                    *)
(* This program is distributed in the hope that it will be useful,    *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of     *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the      *)
(* GNU Lesser General Public License for more details.                *)
(*                                                                    *)
(* You should have received a copy of the GNU Lesser General Public   *)
(* License along with this program; if not, write to the Free         *)
(* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA *)
(* 02110-1301 USA                                                     *)

(**********************************************************************
    Proof of Huffman algorithm: Extraction.v

    Extraction for the huffman algorithm

    Create a file huffman.ml where the function huffman is the algorithm

    [email protected] (2003)
**********************************************************************)

From Huffman Require Import Huffman.
From Huffman Require Import Code.
From Huffman Require Import ISort.
From Coq Require Extraction.

Extraction Inline list_length_induction.
Extraction NoInline code insert isort map frequency_list huffman encode decode.
Extraction "huffman.ml" code huffman encode decode.
According to a study by Robert <unk>, by 2002 there had been at least eight adaptations of the play as a musical, though "never with conspicuous success". The earliest such version was a 1927 American show entitled Oh Earnest. The journalist Mark <unk> comments, "The libretto of a 1957 musical adaptation, Half in Earnest, deposited in the British Library, is scarcely more encouraging. The curtain rises on Algy strumming away at the piano, singing 'I can play <unk>, Lane'. Other songs include — almost predictably — 'A <unk> I Must Go'."
postulate
  X : Set
  T : X → Set
  H : ∀ x → T x → Set

g : ∀ x → T x → Set
g x t = ∀ x → H x t
#############################################################################
##
##  equivalent mappings
##  stack.gi
##  Sergio Siccha
##
##  Copyright 2017 by the authors.
##  This file is free software, see license file.
##
##  Implementation of MyStack
##
#############################################################################

###############################
# Operation StackCreate
# Input:
#   length -
# Filters:
#   IsInt
#
# Output:
#   stack;
###############################
InstallMethod( StackCreate,
"Initialize empty stack of given length.",
[ IsInt ],
function( length )
    local stack;
    stack := rec( elements := [], last := 0 );
    stack.elements[ length+1 ] := fail; ## force allocation of storage
    Objectify( StackType, stack );
    return stack;
end );

###############################
# Operation StackPush
# Input:
#   obj -
# Filters:
#   IsObject
#
# Output:
#   none
###############################
InstallMethod( StackPush,
"Push obj to stack.",
[ IsMyStack, IsObject ],
function( stack, obj )
    stack!.last := stack!.last + 1;
    stack!.elements[ stack!.last ] := obj;
end );

###############################
# Operation StackPop
# Input:
#   stack -
# Filters:
#   IsMyStack
#
# Output:
#   last element of stack
###############################
InstallMethod( StackPop,
"Pop the last object that was added to stack",
[ IsMyStack ],
function( stack )
    stack!.last := stack!.last - 1;
    return stack!.elements[ stack!.last + 1 ];
end );

###############################
# Operation StackPopAll
# Input:
#   stack -
# Filters:
#   IsMyStack
#
# Output:
#   all elements of stack
###############################
InstallMethod( StackPopAll,
"Pop all objects of stack.",
[ IsMyStack ],
function( stack )
    local last;
    ## record the old top before resetting; the original code read `last`
    ## without ever assigning it, which was a bug.
    last := stack!.last;
    stack!.last := 0;
    return stack!.elements{ [ 1 .. last ] };
end );

###############################
# Operation StackPeek
# Input:
#   stack -
# Filters:
#   IsMyStack
#
# Output:
#   last element of stack
###############################
InstallMethod( StackPeek,
"Peek the last object that was added to stack",
[ IsMyStack ],
function( stack )
    return stack!.elements[ stack!.last ];
end );

###############################
# Operation StackIsEmpty
# Input:
#   stack -
# Filters:
#   IsMyStack
#
# Output:
#   stack!.last = 0;
###############################
InstallMethod( StackIsEmpty,
"Checks whether stack is empty.",
[ IsMyStack ],
function( stack )
    return stack!.last = 0;
end );
Harrison confessed that it was not easy for him to come up with the lyrics to "Crazy in Love" in that length of time. Two hours later, he had penned the verses and the hook in spite of being hung over. Harrison had also made provision for a backing track; he played all the instruments on the track. The bridge was written by Beyoncé, who was inspired by looking at herself in the mirror; she was not wearing matching clothes and her hair was untidy as she kept saying, "I'm looking so crazy right now." Harrison sang back to her and said, "That's the hook." It also inspired the title of the song. After Beyoncé had filled up the middle eight, she came up with the catchphrase, "Uh-oh, uh-oh, you know", alongside Harrison.
stem <- c("甲", "乙", "丙", "丁", "戊", "己", "庚", "辛", "壬", "癸")
branch <- c("子", "丑", "寅", "卯", "辰", "巳", "午", "未", "申", "酉", "戌", "亥")

stembranch <- function(year) {
  # R vectors are 1-indexed, so shift the modulus into 1..10 and 1..12.
  # The original `(year-3)%%10` produced index 0 (an empty result) for
  # years such as 1983 or 2013; year 4 CE (甲子) anchors the cycle.
  return(paste0(stem[(year - 4) %% 10 + 1], branch[(year - 4) %% 12 + 1]))
}
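For comparison, here is the same sexagenary-cycle lookup sketched in Python (our addition; Python's 0-based indexing makes the offset arithmetic slightly simpler):

```python
stem = ["甲", "乙", "丙", "丁", "戊", "己", "庚", "辛", "壬", "癸"]
branch = ["子", "丑", "寅", "卯", "辰", "巳", "午", "未", "申", "酉", "戌", "亥"]

def stembranch(year: int) -> str:
    # Year 4 CE is 甲子, the first year of the 60-year cycle.
    return stem[(year - 4) % 10] + branch[(year - 4) % 12]

assert stembranch(1984) == "甲子"
```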
using NNPACK, Test, Random

Random.seed!(144)

@testset "activation" begin
    xs = rand(5, 5)
    @test all(sum(softmax(xs), dims = 1) .≈ Float32(1))
    @test sum(softmax(vec(xs))) ≈ Float32(1)
end

@testset "conv2d" begin
    x = reshape(Float32[1:16;], 4, 4, 1, 1)
    w = reshape(Float32[1:9;], 3, 3, 1, 1)

    @test dropdims(conv(x, w, pad=1), dims=(3,4)) ≈ Float32.([
        29  99 207 263
        62 192 372 446
        83 237 417 485
        75 198 330 365])

    x = reshape(Float64[1:20;], 5, 4, 1, 1)
    w = reshape(Float64[1:4;], 2, 2, 1, 1)

    @test dropdims(conv(x, w), dims = (3,4)) ≈ Float32.([
        29 79 129;
        39 89 139;
        49 99 149;
        59 109 159 ])

    @test_throws Exception dropdims(conv(x, w; stride=2), dims = (3,4)) ≈ Float32.([
        29 49;
        129 149 ])

    @test dropdims(conv(x, w; pad=1), dims = (3,4)) ≈ Float32.([
        1  9 29 49 48;
        4 29 79 129 115;
        7 39 89 139 122;
        10 49 99 149 129;
        13 59 109 159 136;
        10 40 70 100 80 ])

    @test size(∇conv_filter(reshape(rand(4,3), 4, 3, 1, 1), x, w)) == size(w)
    @test size(∇conv_data(reshape(rand(4,3), 4, 3, 1, 1), x, w)) == size(x)
end

@testset "maxpool2d" begin
    x = reshape(Float32[1:16;], 4, 4, 1, 1)

    @test dropdims(maxpool(x, (2,2)), dims = (3,4)) ≈ Float32.([
        6.0 14.0;
        8.0 16.0 ])

    x = reshape(Float64[1:20;], 5, 4, 1, 1)

    @test_throws Exception dropdims(maxpool(x, (2,2)), dims = (3,4)) ≈ Float32.([7 17; 9 19])
    @test_throws Exception dropdims(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) ≈ Float32.([7 17; 9 19])
    @test_throws Exception dropdims(maxpool(x, (2,2); pad=(1,1)), dims = (3,4)) ≈ Float32.([
        1 11 16;
        3 13 18;
        5 15 20; ])

    x = reshape(Float64[1:16;], 4, 4, 1, 1)

    @test dropdims(maxpool(x, (2,2)), dims = (3,4)) == [6 14; 8 16]
    @test dropdims(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) == [6 14; 8 16]
    @test dropdims(maxpool(x, (3,3); stride=(1,1), pad=(1,1)), dims = (3,4)) == [
        6.0 10.0 14.0 14.0;
        7.0 11.0 15.0 15.0;
        8.0 12.0 16.0 16.0;
        8.0 12.0 16.0 16.0; ]
end
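The spatial shapes checked in these tests follow the standard convolution/pooling output-size formula (our note, not part of the test file):

```latex
\[
  n_{\text{out}} \;=\; \left\lfloor \frac{n_{\text{in}} + 2p - k}{s} \right\rfloor + 1,
\]
% e.g. a 5x4 input with a 2x2 kernel, stride 1, no padding
% gives a 4x3 output, matching the second conv2d test above.
```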
(* Title:     Quantum-Semantics/vars.thy
   Author:    David Sanan, Nanyang Technological University
   Copyright 2020
   License:   BSD
*)

theory vars
  imports HOL.Complex
begin

subsection \<open>Syntax\<close>

text \<open>Datatype for quantum programs\<close>

class nat_abs=
  fixes from_nat ::"nat \<Rightarrow> 'a"
  fixes to_nat ::"'a \<Rightarrow> nat"
  fixes subset_nat :: "'a set"
  assumes "a \<in> subset_nat \<Longrightarrow> from_nat (to_nat a) = a "
  assumes "to_nat (from_nat b) = b"

class real_abs= nat_abs+
  fixes from_real ::"real \<Rightarrow> 'a"
  fixes to_real ::"'a \<Rightarrow> real"
  fixes subset_real :: "'a set"
  assumes "a \<in> subset_real \<Longrightarrow> from_real (to_real a) = a "
  assumes "to_real (from_real b) = b"

locale vars =
  fixes variables :: "'v set"
  fixes v_domain :: "'v \<Rightarrow> ('b::nat_abs) set"
  fixes get_value :: "'s \<Rightarrow> 'v \<Rightarrow> 'b"
  fixes set_value ::"'s \<Rightarrow> 'v \<Rightarrow> 'b \<Rightarrow> 's"
  fixes conv::"('v \<Rightarrow> 'b) \<Rightarrow> 'v set \<Rightarrow> 's"
  assumes "finite variables"
  assumes "v \<in> variables \<Longrightarrow> (get_value s v) \<in> v_domain v"
  assumes eq_get_set:"v \<in> variables \<Longrightarrow> x \<in> (v_domain v) \<Longrightarrow>
                      get_value (set_value s v x) v = x"
  assumes abs_elem: "s = conv (get_value s) variables"
begin

definition not_access_v::"('s \<Rightarrow> 'n) \<Rightarrow> 'v \<Rightarrow> bool"
where "not_access_v f v \<equiv>
  \<forall>s f1 f2. f1 \<in> v_domain v \<and> f2 \<in> v_domain v \<longrightarrow>
    f (conv ((get_value s)(v:=f1)) variables) =
    f (conv ((get_value s)(v:=f2)) variables)"

definition access_v::"('s \<Rightarrow> 'n) \<Rightarrow> 'v \<Rightarrow> bool"
where "access_v f v \<equiv>
  \<exists>s f1 f2. f1 \<in> v_domain v \<and> f2 \<in> v_domain v \<and>
    f (conv ((get_value s)(v:=f1)) variables) \<noteq>
    f (conv ((get_value s)(v:=f2)) variables)"

definition not_access_locals::"('s \<Rightarrow> 'n) \<Rightarrow> bool"
where "not_access_locals f \<equiv> \<forall>v\<in>variables. not_access_v f v"

lemma access_neg:"access_v f v = (\<not> (not_access_v f v))"
  unfolding access_v_def not_access_v_def by auto

definition modify_v:: "('s \<Rightarrow> 's) \<Rightarrow> 'v \<Rightarrow> bool"
where "modify_v f v \<equiv> \<exists>s. get_value (f s) v \<noteq> get_value s v"

definition not_modify_v::"('s \<Rightarrow> 's) \<Rightarrow> 'v \<Rightarrow> bool"
where "not_modify_v f v \<equiv> \<forall>s. get_value (f s) v = get_value s v"

lemma modify_neg:"modify_v f v = ( \<not> (not_modify_v f v))"
  unfolding modify_v_def not_modify_v_def by auto

end

end
Require Import Rsequence.
Require Import Rseries_def Rseries_base_facts Rseries_pos_facts Rseries_cv_facts.
Require Import Lra Rtactic.

Local Open Scope R_scope.
Local Open Scope Rseq_scope.

Lemma Rser_cv_square_inv : {l | Rser_cv (fun i => / (Rseq_shift INR i) ^ 2)%R l}.
Proof.
apply Rser_cv_sig_shift_rev.
apply Rser_pos_maj_cv with (/ (Rseq_shift INR * Rseq_shifts INR 2)).
 intro n ; left ; apply Rinv_0_lt_compat, pow_lt, lt_0_INR, lt_O_Sn.
 intro n ; left ; apply Rinv_0_lt_compat, Rmult_lt_0_compat ;
  apply lt_0_INR, lt_O_Sn.
 intro p ; unfold Rseq_shift, Rseq_shifts, Rseq_mult, Rseq_inv, pow ;
  rewrite Rmult_1_r, Rinv_mult_distr, Rinv_mult_distr ;
  try (now (apply Rgt_not_eq, lt_0_INR, lt_0_Sn)).
 apply Rmult_le_compat ;
  try (now (left ; apply Rinv_0_lt_compat, lt_0_INR, lt_0_Sn)).
  apply Rle_Rinv ; try (now (apply lt_0_INR, lt_O_Sn)) ;
   apply le_INR ; apply le_n_Sn.
  reflexivity.
 exists 1 ; apply Rseq_cv_eq_compat with (1 - (/ Rseq_shifts INR 2)).
  intro p ; unfold Rseq_minus, Rseq_constant ; induction p.
   unfold Rseq_mult, Rseq_inv, Rseq_shift, Rseq_shifts ; simpl ; field.
   rewrite Rseq_sum_simpl, IHp ;
    unfold Rseq_constant, Rseq_mult, Rseq_inv, Rseq_shifts, Rseq_shift, Rminus ;
    rewrite Rplus_assoc ; apply Rplus_eq_compat_l.
   simpl ; field ; split ; apply Rgt_not_eq.
    destruct p ; [| assert (H := lt_0_INR _ (lt_0_Sn p))] ; lra.
    destruct p ; [| assert (H := lt_0_INR _ (lt_0_Sn p))] ; lra.
  rewrite <- Rminus_0_r ; apply Rseq_cv_minus_compat ;
   [apply Rseq_constant_cv |].
  apply Rseq_cv_pos_infty_inv_compat ;
   eapply Rseq_cv_pos_infty_eq_compat ;
   [| eapply Rseq_cv_finite_plus_pos_infty_l ;
      [eapply (Rseq_poly_cv 1) | eexact (Rseq_constant_cv 2)]].
  intro p ; unfold Rseq_shifts, Rseq_poly, Rseq_plus, Rseq_constant, pow ;
   rewrite plus_INR ; simpl ; ring.
  apply lt_O_Sn.
Qed.

Lemma Rser_cv_inv_poly : forall d, (2 <= d)%nat -> {l | Rser_abs_cv (Rseq_inv_poly d) l}.
Proof.
intros d Hd.
unfold Rser_abs_cv.
cut ({l | Rser_cv (Rseq_inv_poly d) l}).
 intros [l Hl]; exists l.
 eapply Rser_cv_eq_compat; [|apply Hl]; intro i.
 destruct i; unfold Rseq_abs; symmetry; apply Rabs_right.
  apply Rle_refl.
  unfold Rseq_inv_poly.
  apply Rle_ge; apply pow_le.
  apply Rlt_le; apply Rinv_0_lt_compat; INR_solve.
 apply Rser_pos_maj_cv_shift with (fun (i : nat) => (/ (INR (S i) ^ 2))%R).
  unfold Rseq_inv_poly; intro n; rewrite <- Rinv_pow; [|INR_solve].
  split.
   apply Rlt_le; apply Rinv_0_lt_compat; apply pow_lt; INR_solve.
   apply Rle_Rinv.
    unfold pow; INR_solve; simpl mult; apply lt_O_Sn.
    apply pow_lt; INR_solve.
    apply Rle_pow; auto; INR_solve.
  apply Rser_cv_square_inv.
Qed.
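The heart of `Rser_cv_square_inv` above is a classical majorization-plus-telescoping argument; informally (our summary of the proof script, not part of the source file):

```latex
\[
  \frac{1}{(n+2)^{2}} \;\le\; \frac{1}{(n+1)(n+2)}
  \;=\; \frac{1}{n+1} - \frac{1}{n+2},
\qquad
  \sum_{n=0}^{N} \frac{1}{(n+1)(n+2)} \;=\; 1 - \frac{1}{N+2}
  \;\xrightarrow[N\to\infty]{}\; 1,
\]
% so the shifted series of 1/(n+1)^2 converges by positive comparison,
% which is exactly what Rser_pos_maj_cv and the telescoping partial-sum
% computation establish.
```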
import category_theory.category.basic import category_theory.functor import algebra.category.Group.images import algebra.category.Group.colimits import algebra.category.Group.abelian import algebra.category.Module.monoidal import algebra.category.Ring.basic import category_theory.abelian.basic import category_theory.limits.shapes.finite_limits import topology.instances.real import topology.category.Top import topology.category.UniformSpace /-! This is a demo of the category theory library in mathlib, as part of "Lean for the Curious Mathematician 2020". You can get this file by: * installing Lean if necessary: https://leanprover-community.github.io/get_started.html#regular-install * `leanproject get mathlib` * `code mathlib` * open the file `docs/tutorial/lftcm2020/src/demos/category_theory.lean` If you've already got a copy of `mathlib`, you should update it now, using ``` cd /path/to/mathlib/ git pull leanproject get-cache ``` There are also exercises associated with this demo, in `exercise_sources/thursday/category_theory/` with hints at `hints/category_theory/` and (partial) solutions at `solutions/thursday/category_theory/` Any of Exercises 1-7 should be approachable after the demo. The later exercises are quite hard, and will take you longer than the afternoon problem session! -/ open category_theory /-! ## Categories Categories are implemented in mathlib as a typeclass, parametrised by the type of objects. Thus to talk about an arbitrary category, we can write -/ variables (C : Type) [category C] /-! There is special notation for the morphisms in a category: if `X Y : C`, we write * `X ⟶ Y` for the type of morphisms from `X` to `Y`. (To enter the special arrow `⟶`, type `\hom`, or hover over the symbol to see the hint.) * `𝟙 X` is a the identity morphisms on `X` (i.e., a term of type `X ⟶ X`). * If `f : X ⟶ Y` and `g : Y ⟶ Z`, then we write `f ≫ g` for the composition, a morphism `X ⟶ Z`. -/ example {W X Y Z : C} (f : W ⟶ X) (g : X ⟶ Y) (h : Y ⟶ Z) : (f ≫ (𝟙 X ≫ g)) ≫ h = f ≫ g ≫ h := begin rw category.id_comp, rw category.assoc, -- alternatively, just `simp` will do end /-! ## Functors To introduce functors, we'll need a second category around. -/ variables (D : Type) [category D] /-! We write a functor as `F : C ⥤ D`. (Unlike categories, which are partially unbundled, a functor is "fully bundled", containing the function on objects as field. This parallels the design for algebraic structures.) -/ example (F : C ⥤ D) (X : C) : F.map (𝟙 X) = 𝟙 (F.obj X) := F.map_id X example (F : C ⥤ D) {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) : F.map (f ≫ g) = F.map f ≫ F.map g := F.map_comp f g /-! The identity functor is written as `𝟭 C`, and functor composition is written `⋙`. -/ example (F : C ⥤ D) {X Y : C} (f : X ⟶ Y) : (𝟭 C ⋙ F).map (f ≫ 𝟙 Y) = F.map f := begin rw functor.comp_map, rw functor.map_comp, rw category_theory.functor.map_id, -- yuck! we really should fix this rw functor.id_map, rw functor.map_comp, rw category_theory.functor.map_id, rw category.comp_id, -- or just replace the entire proof with `by simp` end /-! To build a functor `F : C ⥤ D` we need to specify four fields * `obj : C → D` * `map : ∀ {X Y : C} (f : X ⟶ Y), obj X ⟶ obj Y` * `map_id'` and `map_comp'`, expressing the functor laws. -/ example {X : C} : C ⥤ Type* := { obj := λ Y, X ⟶ Y, map := λ Y Y' f g, g ≫ f, map_id' := λ X, begin funext, simp, end, map_comp' := λ X Y Z f g, begin funext, simp, end } /-! 
However Lean will automatically attempt to fill in the `map_id'` and `map_comp'` fields itself, because these fields are marked with `auto_param`. This lets us specify a tactic to use to try to synthesize the field. (In fact, the whole category theory library started off as an experiment to see how far we could push this automation.) -/ example {X : C} : C ⥤ Type* := { obj := λ Y, X ⟶ Y, map := λ Y Y' f g, g ≫ f, } /-! Lean automatically checked functoriality here! This was pretty easy: we just need to use `category.comp_id` and `category.assoc`. The more powerful we make the `simp` lemmas, the more boring goals can be discharged automatically. Most of the `auto_param`s appearing in mathlib so far are in the `category_theory` library, where they are nearly all filled using the tactic `tidy`, which repeatedly attempts to use one of a list of "conservative" tactics. You can see what `tidy` is doing using `tidy?`: -/ example {X : C} : C ⥤ Type* := { obj := λ Y, X ⟶ Y, map := λ Y Y' f g, g ≫ f, map_id' := by tidy?, map_comp' := by tidy? } /-! Sebastien's talk on differential geometry tomorrow will give another example of `auto_param` being used. You can also watch me doing a speed-run https://youtu.be/oz3z2NSNY8c of Floris's "pointed map" exercises from yesterday, taking advantage of `auto_param`. -/ /-! ## Natural transformations The collection of functors from `C` to `D` has been given the structure of a category: to talk about the natural transformations, you just write `F ⟶ G` using the usual "morphism" arrow. If `α : F ⟶ G`, then `α.app X` is the component at `X`, i.e. a morphism `F.obj X ⟶ G.obj X`. -/ example {F G : C ⥤ D} {α : F ⟶ G} {X Y : C} (f : X ⟶ Y) : F.map f ≫ α.app Y = α.app X ≫ G.map f := α.naturality f -- or just `by simp` /-! Again, to construct a natural transformation `F ⟶ G` we need to provide two fields * `app : Π X : C, F.obj X ⟶ G.obj X` and * `naturality'`, which often is provided by automation. -/ /-! ## A note on universes Before we go on, we should mention a slight complication: out in the world we meet both small and large categories. In set-theoretic foundations, this distinction is about whether the objects form a set or merely a class. In the type-theoretic foundations used in Lean, this distinction is about whether the objects and morphisms live in the same universe, or if the objects live one universe higher up. Rather than making separate definitions for the two cases, we simply allow the objects and morphisms to live in two unrelated universes. To talk about a general category we thus write -/ universes u v variables (E : Type u) [category.{v} E] /-! This says that the objects live in universe `u`, while the morphisms live in universe `v`. In fact, the definition `category` is paramaterised by two universe levels, and when we write `category.{v} E` Lean actually understands this as `category.{v u} E`, automatically filling in the second argument from the universe level of `E`. There are abbreviations available for the two standard cases: * if `E : Type (u+1)`, then `large_category E` means `category.{u (u+1)} E` * if `E : Type u`, then `small_category E` means `category.{u u} E`. However you'll rarely use these except when setting up particular examples. All the "concrete" categories, like `Group`, `Ring`, and `Top`, described below, are instances of `large_category`. Typically the indexing diagrams for limits and colimits are instances of `small_category`. 
If you're talking about an arbitrary category, and you don't mind whether it is small or large, you should just allow two independent universe variables, as above. -/ /-! ## Concrete categories We've set up a number of concrete categories in mathlib. -/ example (R S : CommRing) (f : R ⟶ S) (x y : R) : f (x * y) = f x * f y := by simp /-! Note here we have a particularly succinct way of introducing a commutative ring: we just write `R : CommRing`, rather than `(R : Type) [comm_ring R]`. Rather than writing `f : R →+* S` for a `ring_hom`, we can just use the morphism arrow, and Lean works out the appropriate notion automatically. There's a coercion from `CommRing` to `Type`, so we can still talk about elements by writing `x : R`, and morphisms automatically behave properly as functions (e.g. in `f (x * y)`). -/ /-! ## Limits and colimits We talk about limits using the following notions: * For `F : J ⥤ C`, `c : cone F` consists of * `c.X : C` an object in `C`, and * `c.π`, a natural transformation with components `c.π.app j : c.X ⟶ F.obj j`. * For `c : cone F`, `is_limit c` expresses that `c` is a limit cone. * `has_limit F`, a typeclass specifying a particular choice of limit cone for a functor `F`. * `has_limits C`, a typeclass specifying a choice of limit for any functor into `C`. (There are also all the dual notions, `cocone`, `is_colimit`, `has_colimit`, etc.) There are also typeclasses for various "special shapes", in particular * `has_equalizers` * `has_pullbacks` * `has_binary_products` / `has_finite_products` / `has_products` * `has_terminal` A related typeclass `has_zero_morphisms C` specifies a choice of zero morphism in each hom space, satisfying the usual axioms (equivalent to `C` being enriched in pointed sets), and using that we can also express some other special shapes, including * `has_kernels` * `has_binary_biproducts` / `has_finite_biproducts` * `has_zero_object` For most of the concrete categories, these instances are all available when appropriate. -/ /-! ### Examples of using (co)limits in `Top` -/ noncomputable theory open category_theory.limits def R : Top := Top.of ℝ def I : Top := Top.of (set.Icc 0 1 : set ℝ) def pt : Top := Top.of unit -- Let's construct the mapping cylinder. def to_pt (X : Top) : X ⟶ pt := { to_fun := λ _, unit.star, continuous_to_fun := continuous_const } def I₀ : pt ⟶ I := { to_fun := λ _, ⟨(0 : ℝ), by norm_num [set.left_mem_Icc]⟩, continuous_to_fun := continuous_const } def I₁ : pt ⟶ I := { to_fun := λ _, ⟨(1 : ℝ), by norm_num [set.right_mem_Icc]⟩, continuous_to_fun := continuous_const } -- We now construct a cylinder as a categorical limit. -- `limits.prod` is a shorthand for constructing a limit over the two point diagram: def cylinder (X : Top) : Top := prod X I -- To define a map to the cylinder, we give a map to each factor. -- `prod.lift` is a helper method, providing a wrapper around `limit.lift` for binary products. def cylinder₀ (X : Top) : X ⟶ cylinder X := prod.lift (𝟙 X) (to_pt X ≫ I₀) def cylinder₁ (X : Top) : X ⟶ cylinder X := prod.lift (𝟙 X) (to_pt X ≫ I₁) /-- The mapping cylinder is the pushout of the diagram ``` X ↙ ↘ Y (X x I) ``` (`pushout` is implemented just as a wrapper around `colimit`) -/ def mapping_cylinder {X Y : Top} (f : X ⟶ Y) : Top := pushout f (cylinder₁ X) /-! It's perhaps worth admitting here that constructing objects using categorical (co)limits typically gives quite ghastly "definitional" properties --- if you want to use these objects, you're going to have to work through their universal properties. 
This is not necessarily a bad thing, but takes some getting used to. -/ /-! ## Applications We're only just getting to the point in mathlib where we're ready to do the sorts of mathematics that rely on category theory as a basic language. There's lots more to come --- big chunks of algebraic geometry, homological algebra, quantum topology, etc. One important way in which we'll use the category theory library is to achieve polymorphism. We don't want to separately prove theorems about sheaves of sets, sheaves of rings, etc. Instead we'd like to talk about sheaves in an arbitrary category, possibly with some additional typeclasses providing extra structure (`has_products`, `concrete_category`, `monoidal_category`, etc), and prove our theorems there. -/ /-! ## Odds and ends There's a bunch in mathlib's `category_theory/` folder that hasn't been mentioned at all here, including: * Adjunctions * Equivalences * Monads * Abelian categories * Monoidal categories * ... Built on top of the category theory library we have things like * (Co)homology of chain complexes in `algebra.homology.homology`. * The (pre)sheaf of continuous functions in `topology.sheaves.sheaf_of_functions`. * The Giry monad in `measure_theory.category.Meas`. -/ #print category_theory.adjunction.right_adjoint_preserves_limits #print category_theory.abelian -- When this tutorial was written we didn't have a single instance of `abelian` in the library. example : abelian AddCommGroup.{0} := by apply_instance example (R : Ring) : abelian (Module R) := by apply_instance example (R : CommRing.{u}) : monoidal_category (Module.{u} R) := by apply_instance example : reflective (forget₂ CpltSepUniformSpace UniformSpace) := by apply_instance
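/-!
An addendum to the demo (not part of the original file): the natural transformations section
above describes the `app` and `naturality'` fields but never builds one. As a minimal sketch,
the identity morphisms `𝟙 (F.obj X)` assemble into a natural transformation from `F` to itself,
with `naturality'` discharged by the `auto_param` automation. (Of course mathlib already
provides this: it is just `𝟙 F`, the identity morphism in the functor category.)
-/
example (F : C ⥤ D) : F ⟶ F :=
{ app := λ X, 𝟙 (F.obj X) }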
(***************************************************************************** * Featherweight-OCL --- A Formal Semantics for UML-OCL Version OCL 2.5 * for the OMG Standard. * http://www.brucker.ch/projects/hol-testgen/ * * UML_Tools.thy --- * This file is part of HOL-TestGen. * * Copyright (c) 2012-2015 Université Paris-Saclay, Univ. Paris-Sud, France * 2013-2015 IRT SystemX, France * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************)

(* < *)
theory UML_Tools
imports UML_Logic
begin

lemmas substs1 = StrongEq_L_subst2_rev
                 foundation15[THEN iffD2, THEN StrongEq_L_subst2_rev]
                 foundation7'[THEN iffD2,
                              THEN foundation15[THEN iffD2, THEN StrongEq_L_subst2_rev]]
                 foundation14[THEN iffD2, THEN StrongEq_L_subst2_rev]
                 foundation13[THEN iffD2, THEN StrongEq_L_subst2_rev]

lemmas substs2 = StrongEq_L_subst3_rev
                 foundation15[THEN iffD2, THEN StrongEq_L_subst3_rev]
                 foundation7'[THEN iffD2,
                              THEN foundation15[THEN iffD2, THEN StrongEq_L_subst3_rev]]
                 foundation14[THEN iffD2, THEN StrongEq_L_subst3_rev]
                 foundation13[THEN iffD2, THEN StrongEq_L_subst3_rev]

lemmas substs4 = StrongEq_L_subst4_rev
                 foundation15[THEN iffD2, THEN StrongEq_L_subst4_rev]
                 foundation7'[THEN iffD2,
                              THEN foundation15[THEN iffD2, THEN StrongEq_L_subst4_rev]]
                 foundation14[THEN iffD2, THEN StrongEq_L_subst4_rev]
                 foundation13[THEN iffD2, THEN StrongEq_L_subst4_rev]

lemmas substs = substs1 substs2 substs4 [THEN iffD2] substs4

thm substs

ML\<open>
fun ocl_subst_asm_tac ctxt = FIRST'(map (fn C => (eresolve0_tac [C]) THEN' (simp_tac ctxt))
                                        @{thms "substs"})

val ocl_subst_asm = fn ctxt => SIMPLE_METHOD (ocl_subst_asm_tac ctxt 1);

val _ = Theory.setup
          (Method.setup (Binding.name "ocl_subst_asm")
                        (Scan.succeed (ocl_subst_asm))
                        "ocl substitution step")
\<close>

lemma test1 : "\<tau> \<Turnstile> A \<Longrightarrow> \<tau> \<Turnstile> (A and B \<triangleq> B)"
  apply(tactic "ocl_subst_asm_tac @{context} 1")
  apply(simp)
  done

lemma test2 : "\<tau> \<Turnstile> A \<Longrightarrow> \<tau> \<Turnstile> (A and B \<triangleq> B)"
  by(ocl_subst_asm, simp)

lemma test3 : "\<tau> \<Turnstile> A \<Longrightarrow> \<tau> \<Turnstile> (A and A)"
  by(ocl_subst_asm, simp)

lemma test4 : "\<tau> \<Turnstile> not A \<Longrightarrow> \<tau> \<Turnstile> (A and B \<triangleq> false)"
  by(ocl_subst_asm, simp)

lemma test5 : "\<tau> \<Turnstile> (A \<triangleq> null) \<Longrightarrow> \<tau> \<Turnstile> (B \<triangleq> null) \<Longrightarrow> \<not> (\<tau> \<Turnstile> (A and B))"
  by(ocl_subst_asm,ocl_subst_asm,simp)

lemma test6 : "\<tau> \<Turnstile> not A \<Longrightarrow> \<not> (\<tau> \<Turnstile> (A and B))"
  by(ocl_subst_asm, simp)

lemma test7 : "\<not> (\<tau> \<Turnstile> (\<upsilon> A)) \<Longrightarrow> \<tau> \<Turnstile> (not B) \<Longrightarrow> \<not> (\<tau> \<Turnstile> (A and B))"
  by(ocl_subst_asm,ocl_subst_asm,simp)

(* a proof that shows that not everything is humpty dumpty ... *)
lemma X: "\<not> (\<tau> \<Turnstile> (invalid and B))"
  apply(insert foundation8[of "\<tau>" "B"], elim disjE,
        simp add:defined_bool_split, elim disjE)
  apply(ocl_subst_asm, simp)
  apply(ocl_subst_asm, simp)
  apply(ocl_subst_asm, simp)
  apply(ocl_subst_asm, simp)
  done

(* easier is: *)
(* just to show the power of this extremely useful foundational rule: *)
lemma X': "\<not> (\<tau> \<Turnstile> (invalid and B))"
  by(simp add:foundation10')

lemma Y: "\<not> (\<tau> \<Turnstile> (null and B))"
  by(simp add: foundation10')

lemma Z: "\<not> (\<tau> \<Turnstile> (false and B))"
  by(simp add: foundation10')

lemma Z': "(\<tau> \<Turnstile> (true and B)) = (\<tau> \<Turnstile> B)"
  by(simp)

end
(* > *)
Set Warnings "-notation-overridden,-parsing". Require Export Tactics. Require Export Logic. Require Export Lists. Require Coq.omega.Omega. Inductive ev : nat -> Prop := | ev_0 : ev 0 | ev_SS : forall n : nat, ev n -> ev (S (S n)). Fail Inductive wrong_ev (n : nat) : Prop := | wrong_ev_0 : wrong_ev 0 | wrong_ev_SS : forall n, wrong_ev n -> wrong_ev (S (S n)). Theorem ev_4 : ev 4. Proof. apply ev_SS. apply ev_SS. apply ev_0. Qed. Theorem ev_4' : ev 4. Proof. apply (ev_SS 2 (ev_SS 0 ev_0)). Qed. Theorem ev_plus4 : forall n, ev n -> ev (4 + n). Proof. intros n. simpl. intros Hn. apply ev_SS. apply ev_SS. apply Hn. Qed. (* Exercise: 1 star (ev_double) *) Theorem ev_double : forall n, ev (double n). Proof. intros. rewrite double_plus. induction n. - simpl. apply ev_0. - simpl. rewrite <- plus_n_Sm. apply ev_SS. apply IHn. Qed. (* Using Evidence in Proofs *) Theorem ev_minus2 : forall n, ev n -> ev (pred (pred n)). Proof. intros n E. inversion E as [| n' E']. - (* E = ev_0 *) simpl. apply ev_0. - (* E = ev_SS n' E' *) simpl. apply E'. Qed. Theorem ev_minus2' : forall n, ev n -> ev (pred (pred n)). Proof. intros n E. destruct E. - simpl. apply ev_0. - simpl. apply E. Qed. Theorem evSS_ev : forall n, ev (S (S n)) -> ev n. Proof. intros n E. inversion E as [| n' E']. apply E'. Qed. Theorem one_not_even : ~ ev 1. Proof. intros H. inversion H. Qed. (* Exercise: 1 star (SSSSev_even) *) Theorem SSSSev_even : forall n, ev (S (S (S (S n)))) -> ev n. Proof. intros n E. inversion E as [| n' E']. inversion E'. apply H1. Qed. (* Exercise: 1 star (even5_nonsense) *) Theorem even5_nonsense : ev 5 -> 2 + 2 = 9. Proof. intros. inversion H. inversion H1. inversion H3. Qed. Lemma ev_even_firsttry : forall n, ev n -> exists k, n = double k. Proof. intros n E. inversion E as [| p E']. - (* E = ev_0 *) exists 0. reflexivity. - (* E = ev_SS n' E' *) assert (I : (exists r, p = double r) -> (exists k, S (S p) = double k)). { intros [r Hr]. rewrite Hr. exists (S r). reflexivity. } apply I. Abort. Lemma ev_even : forall n, ev n -> exists k, n = double k. Proof. intros n E. induction E as [| n' E' IH]. - (* E = ev_0 *) exists 0. reflexivity. - (* E = ev_SS n' E' with IH : exists k', n' = double k' *) destruct IH as [k' Hk']. rewrite Hk'. exists (S k'). simpl. reflexivity. Qed. Theorem ev_even_iff : forall n, ev n <-> exists k, n = double k. Proof. intros. unfold iff. split. - apply ev_even. - intros [k' Hk']. rewrite Hk'. apply ev_double. Qed. (* Exercise: 2 stars (ev_sum) *) Theorem ev_sum : forall n m, ev n -> ev m -> ev (n + m). Proof. intros n m En Em. induction En. - simpl. apply Em. - simpl. apply ev_SS. apply IHEn. Qed. (* Exercise: 4 stars, advanced, optional (ev' ev) *) Inductive ev' : nat -> Prop := | ev'_0 : ev' 0 | ev'_1 : ev' 2 | ev'_sum : forall n m, ev' n -> ev' m -> ev' (n + m). Theorem ev'_ev : forall n, ev' n <-> ev n. Proof. intros. split. - intros E. induction E. + apply ev_0. + apply ev_SS. apply ev_0. + apply ev_sum. * apply IHE1. * apply IHE2. - intros E. induction E. + apply ev'_0. + assert (I : forall n, S (S n) = n + 2). { - intros. induction n0. + reflexivity. + simpl. rewrite IHn0. reflexivity. } rewrite I. apply ev'_sum. * apply IHE. * apply ev'_1. Qed. (* Exercise: 3 stars, advanced, recommended (ev_ev__ev) *) Theorem ev_ev__ev : forall n m, ev (n + m) -> ev n -> ev m. Proof. intros n m Enm En. induction En. - simpl in Enm. apply Enm. - simpl in Enm. apply IHEn. apply evSS_ev in Enm. apply Enm. Qed. 
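(* An addendum (not part of the original file): a small usage example of
   [ev_even] above, extracting the "double" witness for 4. *)
Example ev_even_example : exists k, 4 = double k.
Proof. apply ev_even. apply ev_4. Qed.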
(* Exercise: 3 stars, optional (ev_plus_plus) *) Theorem ev_plus_plus : forall n m p, ev (n+m) -> ev (n+p) -> ev (m+p). Proof. intros. apply ev_sum with (n + m) (n + p) in H. - rewrite plus_swap in H. rewrite <- plus_assoc in H. rewrite plus_assoc with n n (m + p) in H. apply ev_ev__ev with (n + n) (m + p) in H. + apply H. + rewrite <- double_plus. apply ev_double. - apply H0. Qed. Module Playground. Inductive le : nat -> nat -> Prop := | le_n : forall n, le n n | le_S : forall n m, (le n m) -> (le n (S m)). Notation "m <= n" := (le m n). Theorem test_le1 : 3 <= 3. Proof. apply le_n. Qed. Theorem test_le2 : 3 <= 6. Proof. apply le_S. apply le_S. apply le_S. apply le_n. Qed. Theorem test_le3 : (2 <= 1) -> 2 + 2 = 5. Proof. intros H. inversion H. inversion H2. Qed. End Playground. Definition lt (n m:nat) := le (S n) m. Notation "m < n" := (lt m n). Inductive square_of : nat -> nat -> Prop := | sq : forall n:nat, square_of n (n * n). Inductive next_nat : nat -> nat -> Prop := | nn : forall n:nat, next_nat n (S n). Inductive next_even : nat -> nat -> Prop := | ne_1 : forall n, ev (S n) -> next_even n (S n) | ne_2 : forall n, ev (S (S n)) -> next_even n (S (S n)). (* Exercise: 3 stars, optional (le_exercises) *) Lemma le_trans : forall m n o, m <= n -> n <= o -> m <= o. Proof. intros m n o Hmn Hmo. induction Hmo. - apply Hmn. - apply le_S. apply IHHmo. Qed. Theorem O_le_n : forall n, 0 <= n. Proof. induction n. - apply le_n. - apply le_S. apply IHn. Qed. Theorem n_le_m__Sn_le_Sm : forall n m, n <= m -> S n <= S m. Proof. intros n m H. induction H. - apply le_n. - apply le_S. apply IHle. Qed. Theorem le_Sn_le : forall n m, S n <= m -> n <= m. Proof. intros. apply le_trans with (S n). - apply le_S. apply le_n. - apply H. Qed. Theorem Sn_le_Sm__n_le_m : forall n m, S n <= S m -> n <= m. Proof. intros n m H. inversion H. - apply le_n. - apply le_trans with (S n). + apply le_S. apply le_n. + apply H1. Qed. Theorem le_plus_l : forall a b, a <= a + b. Proof. intros. induction a. - simpl. apply O_le_n. - simpl. apply n_le_m__Sn_le_Sm. apply IHa. Qed. Theorem le_plus_r : forall a b, b <= a + b. Proof. intros. induction a. - simpl. apply le_n. - simpl. apply le_S. apply IHa. Qed. Theorem plus_lt : forall n1 n2 m, n1 + n2 < m -> n1 < m /\ n2 < m. Proof. unfold lt. intros n1 n2 m H. split. - apply le_trans with (S (n1 + n2)). + apply n_le_m__Sn_le_Sm. apply le_plus_l. + apply H. - apply le_trans with (S (n1 + n2)). + apply n_le_m__Sn_le_Sm. apply le_plus_r. + apply H. Qed. Theorem lt_S : forall n m, n < m -> n < S m. Proof. unfold lt. intros. apply le_Sn_le in H. apply n_le_m__Sn_le_Sm. apply H. Qed. Theorem ble_n_Sn : forall n, leb n (S n) = true. Proof. intros n. induction n. - reflexivity. - simpl. rewrite -> IHn. reflexivity. Qed. Theorem n_ble_m__Sn_ble_Sm : forall n m, leb n m = true -> leb (S n) (S m) = true. Proof. intros. simpl. apply H. Qed. Theorem Sn_ble_Sm__n_ble_m : forall n m, leb (S n) (S m) = true -> leb n m = true. Proof. intros. simpl in H. apply H. Qed. (* if (S 4) <= 5 then 4 <= 5. YES if 4 <= (S 3) then 4 <= 3. NO if 4 <= 4 then (S 4) <= 5 NO if 4 <= 4 then 4 <= (S 4) YES if (S 4) <= (S 5) then 4 <= 5 YES if (S 4) <= (S 4) then (S 4) <= 4 NO if (S 4) <= (S 4) then 4 <= (S 4) YES if 4 <= 5 then (S 4) <= (S 5) YES if leb 4 (S 3) = true then leb 4 3 = true NO if leb (S 3) 4 = true then leb 3 4 = true YES if leb 4 4 = true then leb 4 (S 4) = true YES if leb (S 4) (S 5) = true then leb 4 5 = true YES if leb 4 5 = true then leb (S 4) (S 5) = true YES O_le_n : 0 <= n. 
le_n_Sm_le : n <= m -> n <= (S m)
n_le_m__Sn_le_Sm : n ≤ m → S n ≤ S m.
Sn_le_Sm__n_le_m : S n ≤ S m → n ≤ m. *)

Theorem ble_Sn_m_n_m : forall n m,
  leb (S n) m = true -> leb n m = true.
Proof.
  intros. generalize dependent n. induction m.
  - intros n contra. inversion contra.
  - intros n H. induction n.
    + reflexivity.
    + simpl. apply IHm. apply Sn_ble_Sm__n_ble_m in H. apply H.
Qed.

Theorem ble_n_m_n_Sm : forall n m,
  leb n m = true -> leb n (S m) = true.
Proof. intros. apply ble_Sn_m_n_m. simpl. apply H. Qed.

Theorem le_n_Sm_le : forall n m, n <= m -> n <= (S m).
Proof.
  intros. apply le_trans with (S n).
  - apply le_S. apply le_n.
  - apply n_le_m__Sn_le_Sm. apply H.
Qed.

Theorem le_Sn_m_le : forall n m, (S n) <= m -> n <= m.
Proof.
  intros. induction m.
  - inversion H.
  - apply le_S. apply Sn_le_Sm__n_le_m in H. apply H.
Qed.

Theorem leb_complete : forall n m, leb n m = true -> n <= m.
Proof.
  intros. generalize dependent m. induction n.
  - intros. apply O_le_n.
  - intros m H. induction m.
    + inversion H.
    + apply n_le_m__Sn_le_Sm. apply IHn. simpl in H. apply H.
Qed.

Theorem leb_correct : forall n m, n <= m -> leb n m = true.
Proof.
  intros. induction m.
  - inversion H. reflexivity.
  - inversion H.
    + simpl. symmetry. apply leb_refl.
    + apply ble_n_m_n_Sm. apply IHm. apply H1.
Qed.

Theorem leb_true_trans : forall n m o,
  leb n m = true -> leb m o = true -> leb n o = true.
Proof.
  intros. apply leb_correct.
  apply leb_complete in H. apply leb_complete in H0.
  apply le_trans with m.
  - apply H.
  - apply H0.
Qed.

(* Exercise: 2 stars, optional (leb_iff) *)
Theorem leb_iff : forall n m, leb n m = true <-> n <= m.
Proof.
  intros n m. split.
  - intros H. apply leb_complete. apply H.
  - intros H. apply leb_correct. apply H.
Qed.

(* Exercise: 3 stars, recommended (R_provability) *)
Inductive R : nat -> nat -> nat -> Prop :=
| c1 : R 0 0 0
| c2 : forall m n o, R m n o -> R (S m) n (S o)
| c3 : forall m n o, R m n o -> R m (S n) (S o)
| c4 : forall m n o, R (S m) (S n) (S (S o)) -> R m n o
| c5 : forall m n o, R m n o -> R n m o.

Lemma c2_inverse : forall m n o, R (S m) n (S o) -> R m n o.
Proof. intros. apply c3 in H. apply c4 in H. apply H. Qed.

Lemma c3_inverse : forall m n o, R m (S n) (S o) -> R m n o.
Proof. intros. apply c2 in H. apply c4 in H. apply H. Qed.

Lemma c4_inverse : forall m n o, R m n o -> R (S m) (S n) (S (S o)).
Proof. intros. apply c2 in H. apply c3 in H. apply H. Qed.

Theorem test_R1 : R 1 1 2.
Proof. apply c2. apply c3. apply c1. Qed.

Theorem test_R2 : R 2 2 6.
Proof. apply c2. apply c3. apply c2. apply c3. Abort. (* Not Provable *)

(* If we dropped constructor c5 from the definition of R, would the set of
   provable propositions change? Briefly (1 sentence) explain your answer.

   No, it wouldn't: constructors c2 and c3 are symmetric in m and n, so
   together they already cover everything c5 provides. *)

(* If we dropped constructor c4 from the definition of R, would the set of
   provable propositions change? Briefly (1 sentence) explain your answer.

   No, it wouldn't: c4 only steps back down from R (S m) (S n) (S (S o)) to
   R m n o, so it cannot prove anything that c1-c3 do not already prove. *)

Lemma R_m_n_O : forall o, R 0 0 o -> o = 0.
Proof. Admitted.

(* Exercise: 3 stars, optional (R_fact) *)
(* One direction, [R_plus], is sketched below, after the [R'] examples. *)
Definition fR : nat -> nat -> nat := plus.

Example test_R_fR_equ1: R 2 3 5 <-> fR 2 3 = 5.
Proof.
  split.
  - reflexivity.
  - intros. apply c2. apply c3. apply c2. apply c3. apply c3. apply c1.
Qed.

Lemma fR_O_n : forall n, fR O n = n.
Proof. intros. reflexivity. Qed.

Lemma fR_n_O : forall n, fR n O = n.
Proof.
  intros. induction n.
  - reflexivity.
  - simpl. apply eq_S.
apply IHn. Qed. Lemma fR_m_Sn : forall m n, fR m (S n) = S(fR m n). Proof. intros. induction m. - reflexivity. - simpl. apply eq_S. apply IHm. Qed. Lemma fR_n_m_O : forall m n, fR m n = 0 -> m = 0 /\ n = 0. Proof. intros. induction n. - split. + induction m. * reflexivity. * inversion H. + reflexivity. - split. + induction m. * reflexivity. * inversion H. + rewrite fR_m_Sn in H. inversion H. Qed. Lemma n_m_O_fR : forall m n, m = 0 /\ n = 0 -> fR m n = 0. Proof. intros. induction n. - destruct H. rewrite H. reflexivity. - rewrite fR_m_Sn. inversion H. inversion H1. Qed. Theorem R_equiv_fR : forall m n o, R m n o <-> fR m n = o. Proof. Abort. (* Exercise: 4 stars, advanced (subsequence) *) Inductive subseq {X}: list X -> list X -> Prop := | sc_nil : forall l : list X, subseq [] l | sc_eq : forall (x : X) (l1 l2:list X), subseq l1 l2 -> subseq (x :: l1) (x::l2) | sc_eatl2 : forall (x : X) (l1 l2:list X), subseq l1 l2 -> subseq l1 (x :: l2). Theorem subseq_test1 : subseq [1;2;3] [1;2;3]. Proof. apply sc_eq. apply sc_eq. apply sc_eq. apply sc_nil. Qed. Theorem subseq_test2 : subseq [1;2;3] [1;1;1;2;2;3]. Proof. apply sc_eq. apply sc_eatl2. apply sc_eatl2. apply sc_eq. apply sc_eatl2. apply sc_eq. apply sc_nil. Qed. Theorem subseq_test3 : subseq [1;2;3] [1;2;7;3]. Proof. apply sc_eq. apply sc_eq. apply sc_eatl2. apply sc_eq. apply sc_nil. Qed. Theorem subseq_test4 : subseq [1;2;3] [5;6;1;9;9;2;7;3;8]. Proof. apply sc_eatl2. apply sc_eatl2. apply sc_eq. apply sc_eatl2. apply sc_eatl2. apply sc_eq. apply sc_eatl2. apply sc_eq. apply sc_eatl2. apply sc_nil. Qed. Theorem subseq_test5 : subseq [1;2;3] [1;2]. Proof. apply sc_eq. apply sc_eq. Abort. Theorem subseq_test6 : subseq [1;2;3] []. Proof. Abort. Theorem subseq_test7 : subseq [] [1;2]. Proof. apply sc_nil. Qed. Theorem subseq_refl {X:Type}: forall l : list X, subseq l l. Proof. intros l. induction l. - apply sc_nil. - apply sc_eq. apply IHl. Qed. Lemma sc_eatl2_inverse : forall (x : nat) (l1 l2 : list nat), subseq l1 l2 -> subseq l1 (x :: l2). Proof. intros. apply sc_eatl2. apply H. Qed. Theorem subseq_app {X}: forall l1 l2 l3 : list X, subseq l1 l2 -> subseq l1 (l2 ++ l3). Proof. intros. induction H. - apply sc_nil. - simpl. apply sc_eq. apply IHsubseq. - simpl. apply sc_eatl2. apply IHsubseq. Qed. Theorem subseq_shrink : forall (x : nat) (l1 l2 : list nat), subseq (x :: l1) l2 -> subseq l1 l2. Proof. intros. generalize dependent x. generalize dependent l1. induction l2. - intros. inversion H. - intros. apply sc_eatl2. inversion H. + apply H1. + apply IHl2 with x0. apply H2. Qed. Theorem subseq_trans {X}: forall l1 l2 l3 : list X, subseq l1 l2 -> subseq l2 l3 -> subseq l1 l3. Proof. intros l1 l2 l3 H1 H2. generalize dependent l1. induction H2. - intros. inversion H1. apply sc_nil. - intros. inversion H1. + intros. apply sc_nil. + apply sc_eq. apply IHsubseq. apply H3. + rewrite <- H0. apply sc_eatl2. apply IHsubseq. rewrite H0. apply H3. - intros. apply IHsubseq in H1. apply sc_eatl2. apply H1. Qed. Theorem subseq_nil {X}: forall l : list X, subseq l [] -> l = []. Proof. intros. induction l. - reflexivity. - inversion H. Qed. (* Exercise: 2 stars, optional (R_provability2) *) Inductive R' : nat -> list nat -> Prop := | c'1 : R' 0 [] | c'2 : forall n l, R' n l -> R' (S n) (n :: l) | c'3 : forall n l, R' (S n) l -> R' n l. Example test_R'1 : R' 2 [1;0]. Proof. apply c'2. apply c'2. apply c'1. Qed. Example test_R'2 : R' 1 [1;2;1;0]. Proof. apply c'3. apply c'2. apply c'3. apply c'3. apply c'2. apply c'2. apply c'2. apply c'1. Qed. 
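(* An addendum (not part of the original exercise set): a hedged sketch of
   one direction of the [R_fact] exercise above, showing that [R] relates
   m and n to m + n.  Only constructors [c1], [c2] and [c3] are needed,
   which also backs up the claim that dropping [c4] and [c5] loses nothing. *)
Lemma R_plus : forall m n, R m n (m + n).
Proof.
  induction m as [| m' IHm'].
  - (* m = 0 *) induction n as [| n' IHn'].
    + simpl. apply c1.
    + simpl. apply c3. apply IHn'.
  - (* m = S m' *) intros n. simpl. apply c2. apply IHm'.
Qed.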
Example test_R'3 : R' 6 [3;2;1;0].
Proof. apply c'3. Abort.

(* Case Study: Regular Expressions *)

Inductive reg_exp {T : Type} : Type :=
| EmptySet : reg_exp
| EmptyStr : reg_exp
| Char : T -> reg_exp
| App : reg_exp -> reg_exp -> reg_exp
| Union : reg_exp -> reg_exp -> reg_exp
| Star : reg_exp -> reg_exp.

Inductive exp_match {T} : list T -> reg_exp -> Prop :=
| MEmpty : exp_match [] EmptyStr
| MChar : forall x, exp_match [x] (Char x)
| MApp : forall s1 re1 s2 re2,
    exp_match s1 re1 -> exp_match s2 re2 -> exp_match (s1 ++ s2) (App re1 re2)
| MUnionL : forall s1 re1 re2, exp_match s1 re1 -> exp_match s1 (Union re1 re2)
| MUnionR : forall s1 re1 re2, exp_match s1 re2 -> exp_match s1 (Union re1 re2)
| MStar0 : forall re, exp_match [] (Star re)
| MStarApp : forall s1 s2 re,
    exp_match s1 re -> exp_match s2 (Star re) -> exp_match (s1 ++ s2) (Star re).

Notation "s =~ re" := (exp_match s re) (at level 80).

Example reg_exp_ex1 : [1] =~ Char 1.
Proof. apply MChar. Qed.

Example reg_exp_ex12 : [1; 2] =~ App (Char 1) (Char 2).
Proof.
  apply (MApp [1] _ [2]).
  - apply MChar.
  - apply MChar.
Qed.

Example reg_exp_ex3 : ~ ([1; 2] =~ Char 1).
Proof. intros contra. inversion contra. Qed.

Fixpoint reg_exp_of_list {T} (l : list T) :=
  match l with
  | [] => EmptyStr
  | x :: l' => App (Char x) (reg_exp_of_list l')
  end.

Example reg_exp_ex4 : [1; 2; 3] =~ reg_exp_of_list [1; 2; 3].
Proof.
  simpl.
  apply (MApp [1]). { apply MChar. }
  apply (MApp [2]). { apply MChar. }
  apply (MApp [3]). { apply MChar. }
  apply MEmpty.
Qed.

Lemma MStar1 : forall T s (re : @reg_exp T), s =~ re -> s =~ Star re.
Proof.
  intros. rewrite <- app_nil_r with (l:=s).
  apply (MStarApp s [] re).
  - apply H.
  - apply MStar0.
Qed.

(* Exercise: 3 stars (exp_match ex1) *)
Lemma empty_is_empty : forall T (s : list T), ~ (s =~ EmptySet).
Proof. unfold not. intros T s contra. inversion contra. Qed.

Lemma MUnion' : forall T (s : list T) (re1 re2 : @reg_exp T),
  s =~ re1 \/ s =~ re2 -> s =~ Union re1 re2.
Proof.
  intros T s re1 re2 [H1 | H2].
  - apply MUnionL. apply H1.
  - apply MUnionR. apply H2.
Qed.

Lemma MApp' : forall T (s1 s2 : list T) (re1 re2 : @reg_exp T),
  s1 =~ re1 /\ s2 =~ re2 -> s1 ++ s2 =~ App re1 re2.
Proof.
  intros T s1 s2 re1 re2 [H1 H2].
  apply MApp.
  - apply H1.
  - apply H2.
Qed.

Lemma MStar' : forall T (ss : list (list T)) (re : reg_exp),
  (forall s, In s ss -> s =~ re) -> fold app ss [] =~ Star re.
Proof.
  intros. induction ss.
  - simpl. simpl in H. apply MStar0.
  - simpl. apply MStarApp.
    + simpl in H. apply H. left. reflexivity.
    + simpl in H. apply IHss. intros. apply H. right. apply H0.
Qed.

Lemma app_plus : forall T x (l : list T), x :: l = [x] ++ l.
Proof. reflexivity. Qed.

Lemma reg_exp_of_list_refl : forall T (s : list T), s =~ reg_exp_of_list s.
Proof.
  intros. induction s.
  - simpl. apply MEmpty.
  - simpl. rewrite app_plus. apply MApp.
    + apply MChar.
    + apply IHs.
Qed.

(* Exercise: 4 stars, optional (reg_exp_of_list_spec) *)
Lemma reg_exp_of_list_empty : forall T (s : list T),
  s = [] -> [] =~ reg_exp_of_list s.
Proof.
  intros. induction s.
  - simpl. apply MEmpty.
  - simpl. inversion H.
Qed.

Lemma reg_exp_of_list_basic : forall T (s1 s2 : list T),
  s1 = s2 -> s1 =~ reg_exp_of_list s2.
Proof.
  intros. generalize dependent s1. induction s2.
  - intros. simpl. rewrite H. apply MEmpty.
  - intros. simpl. rewrite H. rewrite app_plus. apply (MApp [x] _ _).
    + apply MChar.
    + apply reg_exp_of_list_refl.
Qed.

Lemma reg_exp_of_list_basic_inv : forall T (s1 s2 : list T),
  s1 =~ reg_exp_of_list s2 -> s1 = s2.
Proof.
  intros. generalize dependent s1. induction s2 as [| x2].
- intros. simpl in H. inversion H. reflexivity. - intros. inversion H. inversion H3. apply f_equal. apply IHs2. apply H4. Qed. Lemma reg_exp_of_list_spec : forall T (s1 s2 : list T), s1 =~ reg_exp_of_list s2 <-> s1 = s2. Proof. split. - apply reg_exp_of_list_basic_inv. - apply reg_exp_of_list_basic. Qed. Fixpoint re_chars {T} (re : reg_exp) : list T := match re with | EmptySet => [] | EmptyStr => [] | Char x => [x] | App re1 re2 => re_chars re1 ++ re_chars re2 | Union re1 re2 => re_chars re1 ++ re_chars re2 | Star re => re_chars re end. Theorem in_re_match : forall T (s : list T) (re : reg_exp) (x : T), s =~ re -> In x s -> In x (re_chars re). Proof. intros T s re x Hmatch Hin. induction Hmatch as [| x' | s1 re1 s2 re2 Hmatch1 IH1 Hmatch2 IH2 | s1 re1 re2 Hmatch IH | re1 s2 re2 Hmatch IH | re | s1 s2 re Hmatch1 IH1 Hmatch2 IH2]. - apply Hin. - apply Hin. - simpl. rewrite In_app_iff in *. destruct Hin as [Hin | Hin]. + left. apply (IH1 Hin). + right. apply (IH2 Hin). - simpl. rewrite In_app_iff. left. apply (IH Hin). - simpl. rewrite In_app_iff. right. apply (IH Hin). - destruct Hin. - simpl. apply In_app_iff in Hin. destruct Hin as [Hin | Hin]. + apply (IH1 Hin). + apply (IH2 Hin). Qed. (* Exercise: 4 stars (re_not_empty) *) Fixpoint re_not_empty {T : Type} (re : @reg_exp T) : bool := match re with | EmptySet => false | EmptyStr => true | Char _ => true | App re1 re2 => andb (re_not_empty re1) (re_not_empty re2) | Union re1 re2 => orb (re_not_empty re1) (re_not_empty re2) | Star _ => true end. (* Compute (re_not_empty (Star (EmptySet))). *) Compute (re_not_empty (Char [1;2;3;4])). Compute (re_not_empty (App (Char 1) (EmptySet))). Lemma re_not_empty_correct : forall T (re : @reg_exp T), (exists s, s =~ re) <-> re_not_empty re = true. Proof. intros T re. split. { intros H. induction re. - (* EmptySet *) inversion H. inversion H0. - (* EmptyStr *) reflexivity. - (* Char _ *) reflexivity. - (* App _ _ *) simpl. rewrite andb_true_iff. split. + (* re1 *) apply IHre1. inversion H. inversion H0. exists s1. apply H4. + (* re2 *) apply IHre2. inversion H. inversion H0. exists s2. apply H5. - (* Union _ _ *) simpl. apply orb_true_iff. inversion H. inversion H0. + left. apply IHre1. exists x. apply H3. + right. apply IHre2. exists x. apply H3. - (* Star *) reflexivity. } { induction re. - (* EmptySet *) intros. inversion H. - (* EmptyStr *) exists []. apply MEmpty. - (* Char _ *) exists [t]. apply MChar. - (* App _ _ *) intros. inversion H. apply andb_true_iff in H1. destruct H1. apply IHre1 in H0. apply IHre2 in H1. inversion H0. inversion H1. exists (x ++ x0). apply MApp. + apply H2. + apply H3. - (* Union _ _ *) intros. inversion H. apply orb_true_iff in H1. destruct H1. + apply IHre1 in H0. inversion H0. exists x. apply MUnionL. apply H1. + apply IHre2 in H0. inversion H0. exists x. apply MUnionR. apply H1. - (* Star _ *) intros. exists []. apply MStar0. } Qed. (* The remember tactic *) Lemma star_app': forall T (s1 s2 : list T) (re re' : reg_exp), re' = Star re -> s1 =~ re' -> s2 =~ Star re -> s1 ++ s2 =~ Star re. Proof. induction re'. - intros. inversion H. - intros. inversion H. - intros. inversion H. - intros. inversion H. - intros. inversion H. - intros. inversion H. inversion H0. + simpl. apply H1. + subst. apply IHre'. Abort. Lemma star_app: forall T (s1 s2 : list T) (re : @reg_exp T), s1 =~ Star re -> s2 =~ Star re -> s1 ++ s2 =~ Star re. Proof. intros T s1 s2 re H1. remember (Star re) as re'. generalize dependent s2. induction H1. - (* MEmpty *) inversion Heqre'. 
- (* MChar *) inversion Heqre'. - (* MApp *) inversion Heqre'. - (* MUnionL *) inversion Heqre'. - (* MUnionR *) inversion Heqre'. - (* MStar0 *) intros. simpl. apply H. - (* MStarApp *) inversion Heqre'. intros. rewrite H0 in IHexp_match1. rewrite H0 in IHexp_match2. rewrite <- app_assoc. apply MStarApp. + rewrite H0 in H1_. apply H1_. + apply IHexp_match2. * reflexivity. * apply H. Qed. Lemma MStar'' : forall T (s : list T) (re : reg_exp), s =~ Star re -> exists ss : list (list T), s = fold app ss [] /\ forall s', In s' ss -> s' =~ re. Proof. intros. remember (Star re) as re'. generalize dependent re. induction H. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. exists []. split. + reflexivity. + intros. inversion H. - intros. remember (s1 ++ s2) as ss. Admitted. (* - intros. induction (s1 ++ s2). + exists []. split. * reflexivity. * simpl. intros. inversion H1. + exists [x :: l]. split. * simpl. rewrite app_nil_r. reflexivity. * simpl. destruct IHl. destruct H1. intros. apply H2. simpl in H3. destruct H3. { - intros. remember (s1 ++ s2) as ss. induction re. + intros. inversion Heqre'. exists [s1 ++ s2]. split. * simpl. symmetry. apply app_nil_r. * rewrite H2. exists [s1 ++ s2]. split. + simpl. symmetry. apply app_nil_r. + intros. simpl in H1. destruct H1. * *) (* intros T s re. remember (Star re) as re'. generalize dependent re'. induction re. - intros. induction s. + exists []. split. * reflexivity. * intros. inversion H0. + exists ([x :: s]). split. * simpl. rewrite app_nil_r. reflexivity. * intros. simpl in H0. destruct H0. { - inversion H0. exists [s]. split. + simpl. symmetry. apply app_nil_r. + intros. inversion H. + exists [s]. simpl. split. * rewrite app_nil_r. apply H0. * intros. induction Heqre'. apply MEmpty. intros T s re. remember (Star re) as re'. generalize dependent re. induction re'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. exists [s]. split. + simpl. symmetry. apply app_nil_r. + intros. inversion H. * rewrite <- H2 in H. simpl in H0. intros. simpl in H0. destruct H0. * rewrite Heqre' in H. apply MStar0. in H. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. exists [s]. split. + simpl. rewrite app_nil_r. reflexivity. + apply IHre'. intros. simpl in H0. destruct H0. * *) (* Almost succeeded intros T s re H. remember (Star re) as re'. generalize dependent re. induction H. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. inversion Heqre'. - intros. exists []. split. + reflexivity. + intros. inversion H. - intros. exists [s1 ++ s2]. split. + simpl. symmetry. apply app_nil_r. + simpl. inversion Heqre'. intros. simpl in H1. destruct H1. * apply IHexp_match2. rewrite <- H1. - simpl. rewrite app_nil_r. reflexivity. - apply MStar1 in H. induction H. + intros. simpl in H. inversion H + rewrite <- H0. Search Star. induction re'. + inversion Heqre'. + inversion Heqre'. + inversion Heqre'. + inversion Heqre'. + inversion Heqre'. + inversion Heqre'. rewrite H1 in IHre'. apply IHre'. { Search Star. induction re. - inversion H. Search MStar'. inversion Heqre'. *) (* Exercise: 5 stars, advanced (pumping) *) Module Pumping. 
Fixpoint pumping_constant {T} (re : @reg_exp T) : nat :=
  match re with
  | EmptySet => 0
  | EmptyStr => 1
  | Char _ => 2
  | App re1 re2 => pumping_constant re1 + pumping_constant re2
  | Union re1 re2 => pumping_constant re1 + pumping_constant re2
  | Star _ => 1
  end.

Fixpoint napp {T} (n : nat) (l : list T) : list T :=
  match n with
  | 0 => []
  | S n' => l ++ napp n' l
  end.

Lemma napp_plus : forall T (n m : nat) (l : list T),
  napp (n + m) l = napp n l ++ napp m l.
Proof.
  intros. induction n.
  - reflexivity.
  - simpl. rewrite IHn. apply app_assoc.
Qed.

Import Coq.omega.Omega.

Lemma pumping : forall T (re : @reg_exp T) s,
  s =~ re ->
  pumping_constant re <= length s ->
  exists s1 s2 s3,
    s = s1 ++ s2 ++ s3 /\ s2 <> [] /\ forall m, s1 ++ napp m s2 ++ s3 =~ re.
Proof.
  intros T re s Hmatch.
  induction Hmatch as [ | x
                      | s1 re1 s2 re2 Hmatch1 IH1 Hmatch2 IH2
                      | s1 re1 re2 Hmatch IH
                      | re1 s2 re2 Hmatch IH
                      | re
                      | s1 s2 re Hmatch1 IH1 Hmatch2 IH2 ].
  - (* MEmpty *) simpl. omega.
  - (* MChar *) simpl. omega.
  - (* MApp *) simpl. intros. exists s1. exists s2. exists []. simpl. split.
    + rewrite app_nil_r. reflexivity.
    + split.
Abort.

End Pumping.

Theorem filter_not_empty_In : forall n l,
  filter (beq_nat n) l <> [] -> In n l.
Proof.
  intros n l. induction l as [|m l' IHl'].
  - simpl. intros. apply H. reflexivity.
  - simpl. destruct (beq_nat n m) eqn:H.
    + intros. left. apply beq_nat_true_iff in H. rewrite H. reflexivity.
    + intros. right. apply IHl'. apply H0.
Qed.

Inductive reflect (P : Prop) : bool -> Prop :=
| ReflectT : P -> reflect P true
| ReflectF : ~P -> reflect P false.

Theorem iff_reflect : forall P b, (P <-> b = true) -> reflect P b.
Proof.
  intros. destruct b.
  - apply ReflectT. rewrite H. reflexivity.
  - apply ReflectF. rewrite H. unfold not. intros. inversion H0.
Qed.

(* Exercise: 2 stars, recommended (reflect_iff) *)
Theorem reflect_iff : forall P b, reflect P b -> (P <-> b = true).
Proof.
  intros P b H. destruct b.
  - split.
    + intros. reflexivity.
    + intros. inversion H. apply H1.
  - split.
    + intros. inversion H. exfalso. apply H1. apply H0.
    + intros. inversion H0.
Qed.

Lemma beq_natP : forall n m, reflect (n = m) (beq_nat n m).
Proof.
  intros n m. apply iff_reflect. rewrite beq_nat_true_iff. reflexivity.
Qed.

Theorem filter_not_empty_In' : forall n l,
  filter (beq_nat n) l <> [] -> In n l.
Proof.
  intros n l. induction l as [| m l' IHl'].
  - simpl. intros H. apply H. reflexivity.
  - simpl. destruct (beq_natP n m) as [H | H].
    + intros _. rewrite H. left. reflexivity.
    + intros. unfold not in H. right. apply IHl'. apply H0.
Qed.

(* Exercise: 3 stars, recommended (beq_natP_practice) *)
Fixpoint count n l :=
  match l with
  | [] => 0
  | m :: l' => (if beq_nat n m then 1 else 0) + count n l'
  end.

Theorem beq_natP_practice : forall n l, count n l = 0 -> ~(In n l).
Proof.
  intros. induction l as [|m l' IHl'].
  - simpl. unfold not. intros contra. apply contra.
  - simpl. unfold not. intros. simpl in H.
    destruct (beq_natP n m) as [H1 | H1].
    + inversion H.
    + simpl in H. unfold not in H1. destruct H0.
      * symmetry in H0. apply H1 in H0. apply H0.
      * apply IHl' in H0.
        { - apply H0. }
        { - apply H. }
Qed.

(* Additional Exercises *)

(* Exercise: 3 stars, recommended (nostutter_defn) *)
Inductive nostutter {X:Type} : list X -> Prop :=
| NSEmpty : nostutter []
| NSSingleEl : forall n : X, nostutter [n]
| NSRepeated : forall (m n : X) (xs : list X),
    m <> n -> nostutter (n :: xs) -> nostutter (m :: n :: xs).

Example test_nostutter_1: nostutter [3;1;4;1;5;6].
Proof. repeat constructor; apply beq_nat_false_iff; auto. Qed.
Example test_nostutter_2: nostutter (@nil nat). Proof. repeat constructor. Qed. Example test_nostutter_3: nostutter [5]. Proof. repeat constructor; apply beq_nat_false; auto. Qed. Example test_nostutter_4: not (nostutter [3;1;1;4]). Proof. intro. repeat match goal with h: nostutter _|- _ => inversion h; clear h; subst end. contradiction H1; auto. Qed. (* Exercise: 4 stars, advanced (filter_challenge) *) Inductive inordermerge {X:Type} : list X -> list X -> list X -> Prop := | IOMEmpty : inordermerge [] [] [] | IOMMatchesL1 : forall (n : X) (l1 l2 l : list X), inordermerge l1 l2 l -> inordermerge (n :: l1) l2 (n :: l) | IOMMatchesL2 : forall (n : X) (l1 l2 l : list X), inordermerge l1 l2 l -> inordermerge l1 (n :: l2) (n :: l). Example test_inordermerge_1 : inordermerge [1;6;2] [4;3] [1;4;6;2;3]. Proof. repeat constructor. Qed. Example test_inordermerge_2 : inordermerge [1;6;2] [] [1;6;2]. Proof. repeat constructor. Qed. Example test_inordermerge_3 : not (inordermerge [1;2;3] [4;5;6] [1;4;6]). Proof. intro. inversion H. clear H. subst. inversion H3. clear H3. subst. inversion H1. Qed. Lemma head_same {X:Type}: forall (x:X) (l1 l2 : list X), l1 = l2 -> x :: l1 = x :: l2. Proof. intros. simpl. inversion H. reflexivity. Qed. (** **** Exercise: 3 stars (all_forallb) *) (** Inductively define a property [all] of lists, parameterized by a type [X] and a property [P : X -> Prop], such that [all X P l] asserts that [P] is true for every element of the list [l]. *) Inductive all {X:Type} : (X -> Prop) -> list X -> Prop := | all_nil : forall (P:X -> Prop), all P [] | all_match : forall (x:X) (l:list X) (P:X -> Prop), P x -> all P l -> all P (x::l). Example test_all_1 : all ev [0;2;4]. Proof. repeat constructor. Qed. Example test_all_2: ~(all ev [2;3;4;5]). Proof. intro. inversion H. clear H. subst. inversion H4. clear H4. subst. inversion H2. inversion H0. Qed. (** Recall the function [forallb], from the exercise [forall_exists_challenge] in chapter [Poly]: *) Fixpoint forallb X (test : X -> bool) (l : list X) : bool := match l with | [] => true | x :: l' => test x && forallb X test l' end. Lemma all_app {X:Type}: forall (x:X) (l:list X) (P: X -> Prop), all P (x :: l) -> P x /\ all P l. Proof. intros. split. - inversion H. apply H3. - inversion H. apply H4. Qed. (** Using the property [all], write down a specification for [forallb], and prove that it satisfies the specification. Try to make your specification as precise as possible. Are there any important properties of the function [forallb] which are not captured by your specification? *) Theorem all_forallb {X:Type} : forall (test : X -> bool) (P: X -> Prop) (l : list X), (forall n, test n = true /\ P n) -> all P l -> forallb X test l = true. Proof. intros. induction H0. - reflexivity. - simpl. apply andb_true_iff. split. + destruct H with (n:=x). apply H2. + apply IHall. apply H. Qed. Theorem filter_challenge {X:Type} : forall (l l1 l2: list X) (test:X->bool), (forall x, In x l1 -> test x = true) -> (forall x, In x l2 -> test x = false) -> inordermerge l1 l2 l -> filter test l = l1. Proof. intros l l1 l2 test Hl1 Hl2 H. induction H. - reflexivity. - simpl. assert (test n = true) as C. + apply Hl1. simpl. left. reflexivity. + rewrite C. apply head_same. apply IHinordermerge. * intros. apply Hl1. simpl. right. trivial. * intros. apply Hl2. trivial. - simpl. assert (test n = false) as C. + apply Hl2. simpl. left. reflexivity. + rewrite C. apply IHinordermerge. * apply Hl1. * intros. apply Hl2. simpl. right. apply H0. Qed. 
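(* An addendum (not part of the original file): the [all_forallb] question
   above asks what the specification misses; one answer is completeness, so
   here is a hedged sketch of the converse direction, stated with the
   concrete property [fun x => test x = true]. *)
Theorem forallb_all {X:Type} : forall (test : X -> bool) (l : list X),
  forallb X test l = true -> all (fun x => test x = true) l.
Proof.
  intros test l. induction l as [| x l' IHl'].
  - intros _. apply all_nil.
  - simpl. intros H. apply andb_true_iff in H. destruct H as [H1 H2].
    apply all_match.
    + apply H1.
    + apply IHl'. apply H2.
Qed.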
Theorem subseq_pad_l2 : forall (x : nat) (l1 l2 : list nat), subseq l1 l2 -> subseq l1 (x :: l2). Proof. intros. generalize dependent x. generalize dependent l2. induction l1. - intros. apply sc_nil. - intros. simpl in *. apply sc_eatl2. apply H. Qed. (* Exercise: 5 stars, advanced (filter_challenge_2) *) Theorem filter_challenge_2 {X} : forall (ls l : list X) (test:X->bool), subseq ls l -> (forall n, In n ls -> test n = true) -> length ls <= length (filter test l). Proof. intros ls l test Hsubseq G. induction Hsubseq. - apply O_le_n. - simpl. assert (test x = true) as C. + apply G. simpl. left. reflexivity. + rewrite C. simpl. apply n_le_m__Sn_le_Sm. apply IHHsubseq. intros. apply G. simpl. right. apply H. - simpl. destruct (test x) eqn:C. + simpl. apply le_S. apply IHHsubseq. intros. apply G. apply H. + apply IHHsubseq. intros. apply G. apply H. Qed. (* Exercise: 4 stars, optional (palindromes) *) Inductive matchlast {X} : X -> list X -> list X -> Prop := | match_same : forall (x:X), matchlast x [x] [] | match_tail : forall (x y:X) (l m:list X), matchlast x l m -> matchlast x (y :: l) (y :: m). Inductive pal {X} : list X -> Prop := | pal_nil : pal [] | pal_single : forall x, pal [x] | pal_match : forall (x:X) (l m:list X), pal l -> matchlast x m l -> pal (x :: m). Inductive pal2 {X} : list X -> Prop := | c : forall l, l = rev l -> pal2 l. Lemma matchx_last {X} : forall (x:X) (m:list X), matchlast x (m ++ [x]) m. Proof. induction m. - simpl. apply match_same. - simpl. apply match_tail. apply IHm. Qed. Theorem pal_app_rev {X}: forall (l:list X), pal (l ++ rev l). Proof. induction l. - simpl. apply pal_nil. - simpl. rewrite app_plus. apply pal_match with (l0:=(l ++ rev l)). + apply IHl. + rewrite app_assoc. inversion IHl. * simpl. apply match_same. * apply match_tail. apply match_same. * simpl. apply match_tail. apply matchx_last. Qed. Lemma rev_l {X}: forall (x:X) (l:list X), l = rev l -> x :: l = rev l ++ [x]. Proof. intros. rewrite app_plus. symmetry. rewrite <- rev_involutive. rewrite rev_app_distr. rewrite <- H. simpl. symmetry. rewrite <- rev_involutive. Abort. Theorem pal2_rev {X}: forall (l:list X), pal2 l -> l = rev l. Proof. induction l. - intros. reflexivity. - intros. simpl. rewrite app_plus. inversion H. simpl in H0. rewrite <- H0. reflexivity. Qed. Lemma tail_same {X:Type}: forall (x:X) (l1 l2 : list X), l1 = l2 -> l1 ++ [x] = l2 ++ [x]. Proof. intros. rewrite H. reflexivity. Qed. Theorem pal2_app_rev {X}: forall (l:list X), pal2 (l ++ rev l). Proof. intros. induction l. - simpl. apply c. reflexivity. - simpl. rewrite app_plus. apply c. inversion IHl. rewrite rev_app_distr. rewrite rev_app_distr. rewrite rev_app_distr. rewrite rev_involutive. simpl. rewrite app_plus. symmetry. rewrite app_plus. rewrite <- app_assoc. reflexivity. Qed. Lemma pal_prefix {X} : forall (x:X) (l1 l2:list X), pal ([x] ++ l1) -> l1 = l2 ++ [x]. Proof. Admitted. Theorem pal_rev {X}: forall (l:list X), pal l -> l = rev l. Proof. intros. inversion H. - reflexivity. - reflexivity. - inversion H1. + reflexivity. + subst. simpl in *. Abort. Theorem test_pal_1 : pal [1]. Proof. apply pal_single. Qed. Hint Resolve pal_nil pal_single pal_match match_same match_tail. Theorem test_pal_2 : pal [1;2;1]. Proof. apply pal_match with (l:=[2]). - apply pal_single. - apply match_tail. apply match_same. Qed. Theorem test_pal_3 : pal [1;2;3;2;1]. Proof. apply pal_match with (l:=[2;3;2]). - apply pal_match with (l:=[3]). + apply pal_single. + apply match_tail. apply match_same. - apply match_tail. apply match_tail. 
apply match_tail. apply match_same.
Qed.

Theorem test_pal_4 : pal [1;2;3;3;2;1].
Proof.
  apply pal_match with (l:=[2;3;3;2]).
  - apply pal_match with (l:=[3;3]).
    + apply pal_match with (l:=[]).
      * apply pal_nil.
      * apply match_same.
    + apply match_tail. apply match_tail. apply match_same.
  - apply match_tail. apply match_tail. apply match_tail.
    apply match_tail. apply match_same.
Qed.

Lemma rev_eq_pal_length: forall (X: Type) (n: nat) (l: list X),
  length l <= n -> l = rev l -> pal l.
Proof.
  intros. induction n.
  - inversion H. assert (G: l = []).
    { - induction l.
        + reflexivity.
        + inversion H2. }
    rewrite G. apply pal_nil.
  - induction l.
    + apply pal_nil.
    + simpl in *. inversion H.
Abort.

(* Exercise: 5 stars, optional (palindrome_converse) *)
Theorem palindrome_converse {X} : forall (l:list X), l = rev l -> pal l.
Proof. intros. Abort.

(* Exercise: 4 stars, advanced, optional (NoDup) *)
(* Fixpoint In (A : Type) (x : A) (l : list A) : Prop :=
   match l with
   | [] => False
   | x' :: l' => x' = x \/ In A x l'
   end *)
Inductive NoDup {X}: list X -> Prop :=
| NoDup_nil : NoDup []
| NoDup_element : forall (x:X) (l:list X),
    NoDup l -> ~ In x l -> NoDup (x :: l).

Example tests_nodup_1 : NoDup [1;2;3;4].
Proof.
  apply NoDup_element.
  - apply NoDup_element.
    + apply NoDup_element.
      * apply NoDup_element.
        { - apply NoDup_nil. }
        { - auto. }
      * simpl. unfold not. intros. destruct H. inversion H. apply H.
    + simpl. unfold not. intros. destruct H.
      * inversion H.
      * destruct H.
        { - inversion H. }
        { - apply H. }
  - unfold not. intros. destruct H.
    + inversion H.
    + destruct H.
      * inversion H.
      * simpl in H. destruct H.
        { - inversion H. }
        { - apply H. }
Qed.

Example tests_nodup_2 : NoDup [1;2;1].
Proof.
  apply NoDup_element.
  - apply NoDup_element.
    + apply NoDup_element.
      * apply NoDup_nil.
      * simpl. auto.
    + unfold not. intros. destruct H. inversion H. apply H.
  - unfold not. intros. destruct H. inversion H.
Abort.

Fixpoint disjoint {X} (l1 l2: list X) : Prop :=
  ~ exists x, In x l1 /\ In x l2.

Lemma disjoint_empty {X:Type} : forall (l:list X), disjoint l [].
Proof.
  intros. induction l.
  - simpl. unfold not. intros. destruct H. destruct H. apply H.
  - simpl. unfold not. intros. destruct H. destruct H. apply H0.
Qed.

Lemma disjoint_refl_false {X} : forall (l:list X), l <> [] -> ~ disjoint l l.
Proof.
  intros. induction l.
  - simpl. unfold not. intros. apply H. reflexivity.
  - simpl. unfold not. intros. apply H0. exists x. split.
    + left. reflexivity.
    + left. reflexivity.
Qed.

Lemma In_empty {X:Type} : forall x:X, ~ In x [].
Proof. intros. unfold not. intros. inversion H. Qed.

Theorem NoDup_disjoint_app {X} : forall l1 l2 : list X,
  disjoint l1 l2 -> NoDup (l1 ++ l2).
Proof.
  (* The exploratory attempt below was left in a broken state (unbalanced
     subproofs, stray [admit]s, and a truncated final step); it is kept as a
     comment so that the file compiles, and the proof is aborted.  One of the
     missing ingredients is sketched after this theorem as [disjoint_In].

  intros. induction l2. - induction l1. + simpl. apply NoDup_nil. + simpl.
  rewrite app_nil_r in *. apply NoDup_element. * apply IHl1.
  apply disjoint_empty. * unfold not. intros. induction IHl1.
  { - inversion H0. } { - apply IHn. + apply disjoint_empty.
  + unfold not in H1. exists x. split. { - simpl. left. reflexivity. }
  { - simpl. intros. induction l1. - induction l2. + apply NoDup_nil.
  + apply NoDup_element. * simpl in *. apply IHl2. unfold not in *. intros.
  destruct H0. destruct H0. apply H0. * admit. - induction l2. + simpl.
  rewrite app_nil_r in *. apply NoDup_element. * apply IHl1.
  apply disjoint_empty. * destruct IHl1. { - apply disjoint_empty. }
  { - unfold not. intros. inversion H0. } { - simpl. unfold not in *. intros.
  apply H0. destruct H1. + rewrite H1. admit. + intros.
  induction (l1 ++ l2). - apply NoDup_nil. - apply NoDup_element.
  + apply IHl. + unfold not. intros. induction l1. * intros. induction l2.
  - induction l1. + simpl. apply NoDup_nil. + simpl.
  rewrite app_nil_r in *. apply NoDup_element. * apply IHl1.
  apply disjoint_empty. * unfold not. intros. induction H. exists x. split.
  { - simpl. left. reflexivity. } { - simpl. rewrite app_nil_r in IHl1.
  destruct IHl1. + apply disjoint_empty. + simpl in H0. apply H0.
  + unfold not in H. apply H. inversion H0. * inversion n.
  { - apply In_empty. rewrite H1 in H0. rewrite app_plus in H0. Search In.
  apply In_app_iff in H0. destruct H0. { - apply H. rewrite H1. Search In.
  inversion H0. apply H. rewrite <- H1 in H0. inversion H0.
  { - rewrite <- H1 in *. inversion H0. rewrite H2. apply In_empty.
  destruct H. simpl in H0. destruct H0. * rewrite H. apply intros.
  induction l1. - simpl in *. induction l2. + apply NoDup_nil.
  + apply NoDup_element. * apply IHl2. unfold not in *. intros. destruct H0.
  destruct H0. apply H0. * destruct H. exists x. split.
  { - unfold not in *. intros. apply H. exists x. split.
  { - unfold not in H. destruct H. exists Qed. *)
Abort.
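(* An addendum (not part of the original file): a hedged sketch of one lemma
   that the aborted attempt above appears to need, turning [disjoint] into a
   directly usable fact about membership.  Note that [disjoint] only reduces
   once its first argument starts with a constructor, hence the [destruct]. *)
Lemma disjoint_In {X} : forall (x : X) (l1 l2 : list X),
  disjoint l1 l2 -> In x l1 -> ~ In x l2.
Proof.
  intros x l1 l2 Hd Hin1 Hin2.
  destruct l1 as [| y l1'].
  - inversion Hin1.
  - simpl in Hd. apply Hd. exists x. split.
    + apply Hin1.
    + apply Hin2.
Qed.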
(* * Copyright 2014, NICTA * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(NICTA_BSD) *) theory jiraver110 imports "../CTranslation" begin install_C_file "jiraver110.c" context jiraver110 begin thm f_body_def (* this should be provable *) lemma shouldbetrue: "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL f(0) \<lbrace> \<acute>ret__int = 1 \<rbrace>" apply vcg apply simp (* when this is provable, more will be required here *) done end end